// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <a.ryabinin@samsung.com>
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;
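
/*
 * Illustrative example: assigning the result of an access to one of these
 * globals keeps an otherwise-dead load alive, e.g.:
 *
 *	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = ptr[size]);
 */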

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN, also allow tag checking to be reenabled for each
 * test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	kasan_set_tagging_report_once(false);

	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_set_tagging_report_once(true);
	kasan_restore_multi_shot(multishot);
}
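
/*
 * These hooks are typically wired into the suite definition at the bottom
 * of this file; a rough sketch (names illustrative):
 *
 *	static struct kunit_suite kasan_kunit_test_suite = {
 *		.name = "kasan",
 *		.init = kasan_test_init,
 *		.test_cases = kasan_kunit_test_cases,
 *		.exit = kasan_test_exit,
 *	};
 */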

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN, when a tag fault happens, tag checking is
 * normally auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can only be disabled or enabled per CPU,
 * this handler disables migration so that the test stays on one CPU.
 *
 * Since the compiler doesn't see that the expression can change the fail_data
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))			\
		migrate_disable();				\
	WRITE_ONCE(fail_data.report_expected, true);		\
	WRITE_ONCE(fail_data.report_found, false);		\
	kunit_add_named_resource(test,				\
				 NULL,				\
				 NULL,				\
				 &resource,			\
				 "kasan_data", &fail_data);	\
	barrier();						\
	expression;						\
	barrier();						\
	KUNIT_EXPECT_EQ(test,					\
			READ_ONCE(fail_data.report_expected),	\
			READ_ONCE(fail_data.report_found));	\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) {			\
		if (READ_ONCE(fail_data.report_found))		\
			kasan_enable_tagging();			\
		migrate_enable();				\
	}							\
} while (0)
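
/*
 * Illustrative usage: wrap exactly the access that should trigger a report,
 * for example an out-of-bounds write one byte past an allocation:
 *
 *	ptr = kmalloc(64, GFP_KERNEL);
 *	KUNIT_EXPECT_KASAN_FAIL(test, ptr[64] = 'x');
 *	kfree(ptr);
 */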

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " required");	\
		return;							\
	}								\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " enabled");	\
		return;							\
	}								\
} while (0)
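
/*
 * Because these macros return from the calling test function, they must run
 * before the test takes any resources it would otherwise need to release;
 * e.g., at the top of a test body:
 *
 *	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);
 */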

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

/*
* These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
* fit into a slab cache and therefore is allocated via the page allocator
* fallback. Since this kind of fallback is only implemented for SLUB, these
* tests are limited to that allocator.
*/
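/*
 * Note: KMALLOC_MAX_CACHE_SIZE is the largest allocation size still served
 * from a slab cache, so the "+ 10" in the sizes below guarantees the page
 * allocator fallback path is taken.
 */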

static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right