author | Liam R. Howlett <Liam.Howlett@Oracle.com> | 2022-09-06 19:48:45 +0000
committer | Andrew Morton <akpm@linux-foundation.org> | 2022-09-26 19:46:14 -0700
commit | e15e06a8392321a19d8ebdbdd7643b7fa8874c17 (patch)
tree | db9c8971405837eb23f7a370ecfa27975eac2305
parent | c349fa1818d405c8d9ab77ddcaff919189a0b346 (diff)
download | linux-e15e06a8392321a19d8ebdbdd7643b7fa8874c17.tar.gz linux-e15e06a8392321a19d8ebdbdd7643b7fa8874c17.tar.bz2 linux-e15e06a8392321a19d8ebdbdd7643b7fa8874c17.zip
lib/test_maple_tree: add testing for maple tree
This is a test suite that uses the radix test infrastructure. It has been
split into its own commit to allow for easier review of the maple tree
code.
The testing includes (a minimal usage sketch follows the list):
- Allocation of nodes
- gfp flag allocation checks
- Expansion & contraction of tree
- preallocation checks
- tree navigation by next/prev
- tree navigation by iterators (mas_for_each, etc)
- Number of nodes for a given number of entries
- Generic tree construction tests
- Addition and removal of entries in forward and reverse numerical indexes
- gap searching both forward and reverse
- Combining gaps by overwriting entries in different ways
- splitting right-most node
- splitting left-most node
- overwriting multiple slots
- overwriting across different levels of the tree
- overwriting the middle of a tree
- causing a 3-way split up to the root by overwriting the last slot and
first slot of different nodes and spanning different levels
- RCU stress testing of the tree with threads
- Duplication of the tree by entry count
- Tests generated by fuzzers
- A large number of tests which come from recording crashes in a VM and
reconstructing the tree (see check_erase2_set())
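
For orientation, the operations these tests exercise are the public mtree_* calls wrapped at the top of the new file. The following is a minimal sketch, not part of this patch; the tree name and helper function are hypothetical, and it assumes DEFINE_MTREE() and xa_mk_value() as provided by this series:

#include <linux/maple_tree.h>
#include <linux/xarray.h>	/* xa_mk_value() */

/* Hypothetical tree and helper, for illustration only. */
static DEFINE_MTREE(sketch_tree);

static int maple_sketch(void)
{
	/* Store xa_mk_value(42) over the index range [10, 20]. */
	if (mtree_store_range(&sketch_tree, 10, 20, xa_mk_value(42), GFP_KERNEL))
		return -ENOMEM;

	/* Any index inside the range loads the same entry. */
	if (mtree_load(&sketch_tree, 15) != xa_mk_value(42))
		return -EINVAL;

	/* Erase removes the whole range and returns the old entry. */
	if (mtree_erase(&sketch_tree, 15) != xa_mk_value(42))
		return -EINVAL;

	mtree_destroy(&sketch_tree);
	return 0;
}

That erase behaviour (returning the removed entry) is what check_erase() and mtree_erase_index() in the diff below assert.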
Link: https://lkml.kernel.org/r/20220906194824.2110408-8-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r-- | lib/test_maple_tree.c | 38307
-rw-r--r-- | tools/testing/radix-tree/Makefile | 9
2 files changed, 38314 insertions, 2 deletions
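
Several of the navigation tests below (check_find(), check_find_2()) rely on the mas_for_each() iterator over an MA_STATE. As a hedged sketch of that pattern, with a hypothetical helper name, a read-side walk under RCU looks like:

#include <linux/maple_tree.h>
#include <linux/rcupdate.h>

/* Hypothetical helper: count entries with the mas_for_each() pattern. */
static unsigned long count_entries(struct maple_tree *mt)
{
	unsigned long n = 0;
	void *entry;
	MA_STATE(mas, mt, 0, 0);	/* iteration state starting at index 0 */

	rcu_read_lock();		/* the read side is RCU-protected */
	mas_for_each(&mas, entry, ULONG_MAX)
		n++;			/* visits each stored entry in ascending order */
	rcu_read_unlock();

	return n;
}

The same pattern appears in check_find_2() in the diff, with mas_set() used to restart the state between passes.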
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
new file mode 100644
index 000000000000..4f69e009a015
--- /dev/null
+++ b/lib/test_maple_tree.c
@@ -0,0 +1,38307 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_maple_tree.c: Test the maple tree API
+ * Copyright (c) 2018 Liam R. Howlett
+ * Author: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ */
+
+#include <linux/maple_tree.h>
+#include <linux/module.h>
+#include <stdlib.h>
+#include <time.h>
+
+#define MTREE_ALLOC_MAX 0x2000000000000Ul
+#define CONFIG_DEBUG_MAPLE_TREE
+#define CONFIG_MAPLE_SEARCH
+/* #define BENCH_SLOT_STORE */
+/* #define BENCH_NODE_STORE */
+/* #define BENCH_AWALK */
+/* #define BENCH_WALK */
+/* #define BENCH_MT_FOR_EACH */
+/* #define BENCH_FORK */
+static
+int mtree_insert_index(struct maple_tree *mt, unsigned long index, gfp_t gfp)
+{
+	return mtree_insert(mt, index, xa_mk_value(index & LONG_MAX), gfp);
+}
+
+static void mtree_erase_index(struct maple_tree *mt, unsigned long index)
+{
+	MT_BUG_ON(mt, mtree_erase(mt, index) != xa_mk_value(index & LONG_MAX));
+	MT_BUG_ON(mt, mtree_load(mt, index) != NULL);
+}
+
+static int mtree_test_insert(struct maple_tree *mt, unsigned long index,
+		void *ptr)
+{
+	return mtree_insert(mt, index, ptr, GFP_KERNEL);
+}
+
+static int mtree_test_store_range(struct maple_tree *mt, unsigned long start,
+		unsigned long end, void *ptr)
+{
+	return mtree_store_range(mt, start, end, ptr, GFP_KERNEL);
+}
+
+static int mtree_test_store(struct maple_tree *mt, unsigned long start,
+		void *ptr)
+{
+	return mtree_test_store_range(mt, start, start, ptr);
+}
+
+static int mtree_test_insert_range(struct maple_tree *mt, unsigned long start,
+		unsigned long end, void *ptr)
+{
+	return mtree_insert_range(mt, start, end, ptr, GFP_KERNEL);
+}
+
+static void *mtree_test_load(struct maple_tree *mt, unsigned long index)
+{
+	return mtree_load(mt, index);
+}
+
+static void *mtree_test_erase(struct maple_tree *mt, unsigned long index)
+{
+	return mtree_erase(mt, index);
+}
+
+static noinline void check_mtree_alloc_range(struct maple_tree *mt,
+		unsigned long start, unsigned long end, unsigned long size,
+		unsigned long expected, int eret, void *ptr)
+{
+
+	unsigned long result = expected + 1;
+	int ret;
+
+	ret = mtree_alloc_range(mt, &result, ptr, size, start, end,
+			GFP_KERNEL);
+	MT_BUG_ON(mt, ret != eret);
+	if (ret)
+		return;
+
+	MT_BUG_ON(mt, result != expected);
+}
+
+static noinline void check_mtree_alloc_rrange(struct maple_tree *mt,
+		unsigned long start, unsigned long end, unsigned long size,
+		unsigned long expected, int eret, void *ptr)
+{
+
+	unsigned long result = expected + 1;
+	int ret;
+
+	ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end - 1,
+			GFP_KERNEL);
+	MT_BUG_ON(mt, ret != eret);
+	if (ret)
+		return;
+
+	MT_BUG_ON(mt, result != expected);
+}
+
+static noinline void check_load(struct maple_tree *mt, unsigned long index,
+		void *ptr)
+{
+	void *ret = mtree_test_load(mt, index);
+
+	if (ret != ptr)
+		pr_err("Load %lu returned %p expect %p\n", index, ret, ptr);
+	MT_BUG_ON(mt, ret != ptr);
+}
+
+static noinline void check_store_range(struct maple_tree *mt,
+		unsigned long start, unsigned long end, void *ptr, int expected)
+{
+	int ret = -EINVAL;
+	unsigned long i;
+
+	ret = mtree_test_store_range(mt, start, end, ptr);
+	MT_BUG_ON(mt, ret != expected);
+
+	if (ret)
+		return;
+
+	for (i = start; i <= end; i++)
+		check_load(mt, i, ptr);
+}
+
+static noinline void check_insert_range(struct maple_tree *mt,
+		unsigned long start, unsigned long end, void *ptr, int expected)
+{
+	int ret = -EINVAL;
+	unsigned long i;
+
+	ret = mtree_test_insert_range(mt, start, end, ptr);
+	MT_BUG_ON(mt, ret != expected);
+
+	if (ret)
+		return;
+
+	for (i = start; i <= end; i++)
+		check_load(mt, i, ptr);
+}
+
+static noinline void check_insert(struct maple_tree *mt, unsigned long index,
+		void *ptr)
+{
+	int ret = -EINVAL;
+
+	ret = mtree_test_insert(mt, index, ptr);
+	MT_BUG_ON(mt, ret != 0);
+}
+
+static noinline void check_erase(struct maple_tree *mt, unsigned long index,
+		void *ptr)
+{
+	MT_BUG_ON(mt, mtree_test_erase(mt, index) != ptr);
+}
+
+static noinline void check_dup_insert(struct maple_tree *mt,
+		unsigned long index, void *ptr)
+{
+	int ret = -EINVAL;
+
+	ret = mtree_test_insert(mt, index, ptr);
+	MT_BUG_ON(mt, ret != -EEXIST);
+}
+
+
+static noinline
+void check_index_load(struct maple_tree *mt, unsigned long index)
+{
+	return check_load(mt, index, xa_mk_value(index & LONG_MAX));
+}
+
+static noinline void check_nomem(struct maple_tree *mt)
+{
+	MA_STATE(ms, mt, 1, 1);
+
+	MT_BUG_ON(mt, !mtree_empty(mt));
+	/* Ensure no bypassing of allocation failures */
+	mt_set_non_kernel(0);
+
+	/* Storing something at 1 requires memory allocation */
+	MT_BUG_ON(mt, mtree_insert(mt, 1, &ms, GFP_ATOMIC) != -ENOMEM);
+	/* Storing something at 0 does not */
+	MT_BUG_ON(mt, mtree_insert(mt, 0, &ms, GFP_ATOMIC) != 0);
+
+	/*
+	 * Simulate two threads racing; the first one fails to allocate
+	 * memory to insert an entry at 1, then the second one succeeds
+	 * in allocating memory to insert an entry at 2. The first one
+	 * then needs to free the node it allocated. LeakSanitizer will
+	 * notice this, as will the 'nr_allocated' debugging aid in the
+	 * userspace test suite.
+	 */
+	mtree_lock(mt);
+	mas_store(&ms, &ms); /* insert 1 -> &ms, fails. */
+	MT_BUG_ON(mt, ms.node != MA_ERROR(-ENOMEM));
+	mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */
+	MT_BUG_ON(mt, ms.node != MAS_START);
+	mtree_unlock(mt);
+	MT_BUG_ON(mt, mtree_insert(mt, 2, mt, GFP_KERNEL) != 0);
+	mtree_lock(mt);
+	mas_store(&ms, &ms); /* insert 1 -> &ms */
+	mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */
+	mtree_unlock(mt);
+	mtree_destroy(mt);
+}
+
+static inline int not_empty(struct maple_node *node)
+{
+	int i;
+
+	if (node->parent)
+		return 1;
+
+	for (i = 0; i < ARRAY_SIZE(node->slot); i++)
+		if (node->slot[i])
+			return 1;
+
+	return 0;
+}
+
+static noinline void check_new_node(struct maple_tree *mt)
+{
+
+	struct maple_node *mn, *mn2, *mn3;
+	struct maple_alloc *smn;
+	struct maple_node *nodes[100];
+	int i, j, total;
+
+	MA_STATE(mas, mt, 0, 0);
+
+	/* Try allocating 3 nodes */
+	mtree_lock(mt);
+	/* request 3 nodes to be allocated. */
+	mas_node_count(&mas, 3);
+	/* Allocation request of 3. */
+	MT_BUG_ON(mt, mas_alloc_req(&mas) != 3);
+	/* Allocate failed. */
+	MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
+	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+
+	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+	mn = mas_pop_node(&mas);
+	MT_BUG_ON(mt, not_empty(mn));
+	MT_BUG_ON(mt, mn == NULL);
+	MT_BUG_ON(mt, mas.alloc == NULL);
+	MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+	mas_push_node(&mas, mn);
+	mas_nomem(&mas, GFP_KERNEL); /* free */
+	mtree_unlock(mt);
+
+
+	/* Try allocating 1 node, then 2 more */
+	mtree_lock(mt);
+	/* Set allocation request to 1. */
+	mas_set_alloc_req(&mas, 1);
+	/* Check Allocation request of 1. */
+	MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
+	mas_set_err(&mas, -ENOMEM);
+	/* Validate allocation request. */
+	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+	/* Eat the requested node. */
+	mn = mas_pop_node(&mas);
+	MT_BUG_ON(mt, not_empty(mn));
+	MT_BUG_ON(mt, mn == NULL);
+	MT_BUG_ON(mt, mn->slot[0] != NULL);
+	MT_BUG_ON(mt, mn->slot[1] != NULL);
+	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+	ma_free_rcu(mn);
+	mas.node = MAS_START;
+	mas_nomem(&mas, GFP_KERNEL);
+	/* Allocate 3 nodes, will fail. */
+	mas_node_count(&mas, 3);
+	/* Drop the lock and allocate 3 nodes. */
+	mas_nomem(&mas, GFP_KERNEL);
+	/* Ensure 3 are allocated. */
+	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+	/* Allocation request of 0. */
+	MT_BUG_ON(mt, mas_alloc_req(&mas) != 0);
+
+	MT_BUG_ON(mt, mas.alloc == NULL);
+	MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+	MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
+	/* Ensure we counted 3. */
+	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+	/* Free. */
+	mas_nomem(&mas, GFP_KERNEL);
+
+	/* Set allocation request to 1. */
+	mas_set_alloc_req(&mas, 1);
+	MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
+	mas_set_err(&mas, -ENOMEM);
+	/* Validate allocation request. */
+	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+	MT_BUG_ON(mt, mas_allocated(&mas) != 1);
+	/* Check the node is only one node. */
+	mn = mas_pop_node(&mas);
+	MT_BUG_ON(mt, not_empty(mn));
+	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+	MT_BUG_ON(mt, mn == NULL);
+	MT_BUG_ON(mt, mn->slot[0] != NULL);
+	MT_BUG_ON(mt, mn->slot[1] != NULL);
+	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+	mas_push_node(&mas, mn);
+	MT_BUG_ON(mt, mas_allocated(&mas) != 1);
+	MT_BUG_ON(mt, mas.alloc->node_count);
+
+	mas_set_alloc_req(&mas, 2); /* request 2 more. */
+	MT_BUG_ON(mt, mas_alloc_req(&mas) != 2);
+	mas_set_err(&mas, -ENOMEM);
+	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+	MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+	MT_BUG_ON(mt, mas.alloc == NULL);
+	MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+	MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
+	for (i = 2; i >= 0; i--) {
+		mn = mas_pop_node(&mas);
+		MT_BUG_ON(mt, mas_allocated(&mas) != i);
+		MT_BUG_ON(mt, !mn);
+		MT_BUG_ON(mt, not_empty(mn));
+		ma_free_rcu(mn);
+	}
+
+	total = 64;
+	mas_set_alloc_req(&mas, total); /* request 2 more. */
+	MT_BUG_ON(mt, mas_alloc_req(&mas) != total);
+	mas_set_err(&mas, -ENOMEM);
+	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+	for (i = total; i > 0; i--) {
+		unsigned int e = 0; /* expected node_count */
+
+		if (i >= 35)
+			e = i - 35;
+		else if (i >= 5)
+			e = i - 5;
+		else if (i >= 2)
+			e = i - 2;
+		MT_BUG_ON(mt, mas.alloc->node_count != e);
+		mn = mas_pop_node(&mas);
+		MT_BUG_ON(mt, not_empty(mn));
+		MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
+		MT_BUG_ON(mt, !mn);
+		ma_free_rcu(mn);
+	}
+
+	total = 100;
+	for (i = 1; i < total; i++) {
+		mas_set_alloc_req(&mas, i);
+		mas_set_err(&mas, -ENOMEM);
+		MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+		for (j = i; j > 0; j--) {
+			mn = mas_pop_node(&mas);
+			MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
+			MT_BUG_ON(mt, !mn);
+			MT_BUG_ON(mt, not_empty(mn));
+			mas_push_node(&mas, mn);
+			MT_BUG_ON(mt, mas_allocated(&mas) != j);
+			mn = mas_pop_node(&mas);
+			MT_BUG_ON(mt, not_empty(mn));
+			MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
+			ma_free_rcu(mn);
+		}
+		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+		mas_set_alloc_req(&mas, i);
+		mas_set_err(&mas, -ENOMEM);
+		MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+		for (j = 0; j <= i/2; j++) {
+			MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+			nodes[j] = mas_pop_node(&mas);
+			MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
+		}
+
+		while (j) {
+			j--;
+			mas_push_node(&mas, nodes[j]);
+			MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+		}
+		MT_BUG_ON(mt, mas_allocated(&mas) != i);
+		for (j = 0; j <= i/2; j++) {
+			MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+			mn = mas_pop_node(&mas);
+			MT_BUG_ON(mt, not_empty(mn));
+			ma_free_rcu(mn);
+			MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
+		}
+		MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
+
+	}
+
+	/* Set allocation request. */
+	total = 500;
+	mas_node_count(&mas, total);
+	/* Drop the lock and allocate the nodes. */
+	mas_nomem(&mas, GFP_KERNEL);
+	MT_BUG_ON(mt, !mas.alloc);
+	i = 1;
+	smn = mas.alloc;
+	while (i < total) {
+		for (j = 0; j < MAPLE_ALLOC_SLOTS; j++) {
+			i++;
+			MT_BUG_ON(mt, !smn->slot[j]);
+			if (i == total)
+				break;
+		}
+		smn = smn->slot[0]; /* next. */
+	}
+	MT_BUG_ON(mt, mas_allocated(&mas) != total);
+	mas_nomem(&mas, GFP_KERNEL); /* Free. */
+
+	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+	for (i = 1; i < 128; i++) {
+		mas_node_count(&mas, i); /* Request */
+		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+		MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
+		for (j = i; j > 0; j--) { /*Free the requests */
+			mn = mas_pop_node(&mas); /* get the next node. */
+			MT_BUG_ON(mt, mn == NULL);
+			MT_BUG_ON(mt, not_empty(mn));
+			ma_free_rcu(mn);
+		}
+		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+	}
+
+	for (i = 1; i < MAPLE_NODE_MASK + 1; i++) {
+		MA_STATE(mas2, mt, 0, 0);
+		mas_node_count(&mas, i); /* Request */
+		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+		MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
+		for (j = 1; j <= i; j++) { /* Move the allocations to mas2 */
+			mn = mas_pop_node(&mas); /* get the next node. */
+			MT_BUG_ON(mt, mn == NULL);
+			MT_BUG_ON(mt, not_empty(mn));
+			mas_push_node(&mas2, mn);
+			MT_BUG_ON(mt, mas_allocated(&mas2) != j);
+		}
+		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+		MT_BUG_ON(mt, mas_allocated(&mas2) != i);
+
+		for (j = i; j > 0; j--) { /*Free the requests */
+			MT_BUG_ON(mt, mas_allocated(&mas2) != j);
+			mn = mas_pop_node(&mas2); /* get the next node. */
+			MT_BUG_ON(mt, mn == NULL);
+			MT_BUG_ON(mt, not_empty(mn));
+			ma_free_rcu(mn);
+		}
+		MT_BUG_ON(mt, mas_allocated(&mas2) != 0);
+	}
+
+
+	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+	mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */
+	MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
+	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
+
+	mn = mas_pop_node(&mas); /* get the next node. */
+	MT_BUG_ON(mt, mn == NULL);
+	MT_BUG_ON(mt, not_empty(mn));
+	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS);
+	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 2);
+
+	mas_push_node(&mas, mn);
+	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
+
+	/* Check the limit of pop/push/pop */
+	mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */
+	MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
+	MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
+	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+	MT_BUG_ON(mt, mas_alloc_req(&mas));
+	MT_BUG_ON(mt, mas.alloc->node_count);
+	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
+	mn = mas_pop_node(&mas);
+	MT_BUG_ON(mt, not_empty(mn));
+	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
+	mas_push_node(&mas, mn);
+	MT_BUG_ON(mt, mas.alloc->node_count);
+	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
+	mn = mas_pop_node(&mas);
+	MT_BUG_ON(mt, not_empty(mn));
+	ma_free_rcu(mn);
+	for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) {
+		mn = mas_pop_node(&mas);
+		MT_BUG_ON(mt, not_empty(mn));
+		ma_free_rcu(mn);
+	}
+	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+
+	for (i = 3; i < MAPLE_NODE_MASK * 3; i++) {
+		mas.node = MA_ERROR(-ENOMEM);
+		mas_node_count(&mas, i); /* Request */
+		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+		mn = mas_pop_node(&mas); /* get the next node. */
+		mas_push_node(&mas, mn); /* put it back */
+		mas_destroy(&mas);
+
+		mas.node = MA_ERROR(-ENOMEM);
+		mas_node_count(&mas, i); /* Request */
+		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+		mn = mas_pop_node(&mas); /* get the next node. */
+		mn2 = mas_pop_node(&mas); /* get the next node. */
+		mas_push_node(&mas, mn); /* put them back */
+		mas_push_node(&mas, mn2);
+		mas_destroy(&mas);
+
+		mas.node = MA_ERROR(-ENOMEM);
+		mas_node_count(&mas, i); /* Request */
+		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+		mn = mas_pop_node(&mas); /* get the next node. */
+		mn2 = mas_pop_node(&mas); /* get the next node. */
+		mn3 = mas_pop_node(&mas); /* get the next node. */
+		mas_push_node(&mas, mn); /* put them back */
+		mas_push_node(&mas, mn2);
+		mas_push_node(&mas, mn3);
+		mas_destroy(&mas);
+
+		mas.node = MA_ERROR(-ENOMEM);
+		mas_node_count(&mas, i); /* Request */
+		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+		mn = mas_pop_node(&mas); /* get the next node. */
+		ma_free_rcu(mn);
+		mas_destroy(&mas);
+
+		mas.node = MA_ERROR(-ENOMEM);
+		mas_node_count(&mas, i); /* Request */
+		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+		mn = mas_pop_node(&mas); /* get the next node. */
+		ma_free_rcu(mn);
+		mn = mas_pop_node(&mas); /* get the next node. */
+		ma_free_rcu(mn);
+		mn = mas_pop_node(&mas); /* get the next node. */
+		ma_free_rcu(mn);
+		mas_destroy(&mas);
+	}
+
+	mas.node = MA_ERROR(-ENOMEM);
+	mas_node_count(&mas, 5); /* Request */
+	mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+	MT_BUG_ON(mt, mas_allocated(&mas) != 5);
+	mas.node = MA_ERROR(-ENOMEM);
+	mas_node_count(&mas, 10); /* Request */
+	mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+	mas.node = MAS_START;
+	MT_BUG_ON(mt, mas_allocated(&mas) != 10);
+	mas_destroy(&mas);
+
+	mas.node = MA_ERROR(-ENOMEM);
+	mas_node_count(&mas, MAPLE_ALLOC_SLOTS - 1); /* Request */
+	mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS - 1);
+	mas.node = MA_ERROR(-ENOMEM);
+	mas_node_count(&mas, 10 + MAPLE_ALLOC_SLOTS - 1); /* Request */
+	mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+	mas.node = MAS_START;
+	MT_BUG_ON(mt, mas_allocated(&mas) != 10 + MAPLE_ALLOC_SLOTS - 1);
+	mas_destroy(&mas);
+
+	mtree_unlock(mt);
+}
+
+static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max,
+		bool verbose)
+{
+	unsigned long i = max, j;
+
+	MT_BUG_ON(mt, !mtree_empty(mt));
+
+	mt_zero_nr_tallocated();
+	while (i) {
+		MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
+		for (j = i; j <= max; j++)
+			check_index_load(mt, j);
+
+		check_load(mt, i - 1, NULL);
+		mt_set_in_rcu(mt);
+		MT_BUG_ON(mt, !mt_height(mt));
+		mt_clear_in_rcu(mt);
+		MT_BUG_ON(mt, !mt_height(mt));
+		i--;
+	}
+	check_load(mt, max + 1, NULL);
+
+	if (verbose) {
+		rcu_barrier();
+		mt_dump(mt);
+		pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n",
+			__func__, max, mt_get_alloc_size()/1024, mt_nr_allocated(),
+			mt_nr_tallocated());
+	}
+}
+
+static noinline void check_seq(struct maple_tree *mt, unsigned long max,
+		bool verbose)
+{
+	unsigned long i, j;
+
+	MT_BUG_ON(mt, !mtree_empty(mt));
+
+	mt_zero_nr_tallocated();
+	for (i = 0; i <= max; i++) {
+		MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
+		for (j = 0; j <= i; j++)
+			check_index_load(mt, j);
+
+		if (i)
+			MT_BUG_ON(mt, !mt_height(mt));
+		check_load(mt, i + 1, NULL);
+	}
+	if (verbose) {
+		rcu_barrier();
+		mt_dump(mt);
+		pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n",
+			max, mt_get_alloc_size()/1024, mt_nr_allocated(),
+			mt_nr_tallocated());
+	}
+}
+
+static noinline void check_lb_not_empty(struct maple_tree *mt)
+{
+	unsigned long i, j;
+	unsigned long huge = 4000UL * 1000 * 1000;
+
+
+	i = huge;
+	while (i > 4096) {
+		check_insert(mt, i, (void *) i);
+		for (j = huge; j >= i; j /= 2) {
+			check_load(mt, j-1, NULL);
+			check_load(mt, j, (void *) j);
+			check_load(mt, j+1, NULL);
+		}
+		i /= 2;
+	}
+	mtree_destroy(mt);
+}
+
+static noinline void check_lower_bound_split(struct maple_tree *mt)
+{
+	MT_BUG_ON(mt, !mtree_empty(mt));
+	check_lb_not_empty(mt);
+}
+
+static noinline void check_upper_bound_split(struct maple_tree *mt)
+{
+	unsigned long i, j;
+	unsigned long huge = 4000UL * 1000 * 1000;
+
+	MT_BUG_ON(mt, !mtree_empty(mt));
+
+	i = 4096;
+	while (i < huge) {
+		check_insert(mt, i, (void *) i);
+		for (j = i; j >= huge; j *= 2) {
+			check_load(mt, j-1, NULL);
+			check_load(mt, j, (void *) j);
+			check_load(mt, j+1, NULL);
+		}
+		i *= 2;
+	}
+	mtree_destroy(mt);
+}
+
+static noinline void check_mid_split(struct maple_tree *mt)
+{
+	unsigned long huge = 8000UL * 1000 * 1000;
+
+	check_insert(mt, huge, (void *) huge);
+	check_insert(mt, 0, xa_mk_value(0));
+	check_lb_not_empty(mt);
+}
+
+static noinline void check_rev_find(struct maple_tree *mt)
+{
+	int i, nr_entries = 200;
+	void *val;
+	MA_STATE(mas, mt, 0, 0);
+
+	for (i = 0; i <= nr_entries; i++)
+		mtree_store_range(mt, i*10, i*10 + 5,
+				xa_mk_value(i), GFP_KERNEL);
+
+	mas_set(&mas, 1000);
+	val = mas_find_rev(&mas, 1000);
+	MT_BUG_ON(mt, val != xa_mk_value(100));
+	val = mas_find_rev(&mas, 1000);
+	MT_BUG_ON(mt, val != NULL);
+
+	mas_set(&mas, 999);
+	val = mas_find_rev(&mas, 997);
+	MT_BUG_ON(mt, val != NULL);
+
+	mas_set(&mas, 1000);
+	val = mas_find_rev(&mas, 900);
+	MT_BUG_ON(mt, val != xa_mk_value(100));
+	val = mas_find_rev(&mas, 900);
+	MT_BUG_ON(mt, val != xa_mk_value(99));
+
+	mas_set(&mas, 20);
+	val = mas_find_rev(&mas, 0);
+	MT_BUG_ON(mt, val != xa_mk_value(2));
+	val = mas_find_rev(&mas, 0);
+	MT_BUG_ON(mt, val != xa_mk_value(1));
+	val = mas_find_rev(&mas, 0);
+	MT_BUG_ON(mt, val != xa_mk_value(0));
+	val = mas_find_rev(&mas, 0);
+	MT_BUG_ON(mt, val != NULL);
+}
+
+static noinline void check_find(struct maple_tree *mt)
+{
+	unsigned long val = 0;
+	unsigned long count = 20;
+	unsigned long max;
+	unsigned long last = 0, index = 0;
+	void *entry, *entry2;
+
+	MA_STATE(mas, mt, 0, 0);
+
+	/* Insert 0. */
+	MT_BUG_ON(mt, mtree_insert_index(mt, val++, GFP_KERNEL));
+
+	for (int i = 0; i <= count; i++) {
+		if (val != 64)
+			MT_BUG_ON(mt, mtree_insert_index(mt, val, GFP_KERNEL));
+		else
+			MT_BUG_ON(mt, mtree_insert(mt, val,
+				XA_ZERO_ENTRY, GFP_KERNEL));
+
+		val <<= 2;
+	}
+
+	val = 0;
+	mas_set(&mas, val);
+	mas_lock(&mas);
+	while ((entry = mas_find(&mas, 268435456)) != NULL) {
+		if (val != 64)
+			MT_BUG_ON(mt, xa_mk_value(val) != entry);
+		else
+			MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+
+		val <<= 2;
+		/* For zero check. */
+		if (!val)
+			val = 1;
+	}
+	mas_unlock(&mas);
+
+	val = 0;
+	mas_set(&mas, val);
+	mas_lock(&mas);
+	mas_for_each(&mas, entry, ULONG_MAX) {
+		if (val != 64)
+			MT_BUG_ON(mt, xa_mk_value(val) != entry);
+		else
+			MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+		val <<= 2;
+		/* For zero check. */
+		if (!val)
+			val = 1;
+	}
+	mas_unlock(&mas);
+
+	/* Test mas_pause */
+	val = 0;
+	mas_set(&mas, val);
+	mas_lock(&mas);
+	mas_for_each(&mas, entry, ULONG_MAX) {
+		if (val != 64)
+			MT_BUG_ON(mt, xa_mk_value(val) != entry);
+		else
+			MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+		val <<= 2;
+		/* For zero check. */
+		if (!val)
+			val = 1;
+
+		mas_pause(&mas);
+		mas_unlock(&mas);
+		mas_lock(&mas);
+	}
+	mas_unlock(&mas);
+
+	val = 0;
+	max = 300; /* A value big enough to include XA_ZERO_ENTRY at 64. */
+	mt_for_each(mt, entry, index, max) {
+		MT_BUG_ON(mt, xa_mk_value(val) != entry);
+		val <<= 2;
+		if (val == 64) /* Skip zero entry. */
+			val <<= 2;
+		/* For zero check. */
+		if (!val)
+			val = 1;
+	}
+
+	val = 0;
+	max = 0;
+	index = 0;
+	MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL));
+	mt_for_each(mt, entry, index, ULONG_MAX) {
+		if (val == 4398046511104)
+			MT_BUG_ON(mt, entry !=
+					xa_mk_value(ULONG_MAX & LONG_MAX));
+		else
+			MT_BUG_ON(mt, xa_mk_value(val) != entry);
+		val <<= 2;
+		if (val == 64) /* Skip zero entry. */
+			val <<= 2;
+		/* For zero check. */
+		if (!val)
+			val = 1;
+		max++;
+		MT_BUG_ON(mt, max > 25);
+	}
+	mtree_erase_index(mt, ULONG_MAX);
+
+	mas_reset(&mas);
+	index = 17;
+	entry = mt_find(mt, &index, 512);
+	MT_BUG_ON(mt, xa_mk_value(256) != entry);
+
+	mas_reset(&mas);
+	index = 17;
+	entry = mt_find(mt, &index, 20);
+	MT_BUG_ON(mt, entry != NULL);
+
+
+	/* Range check.. */
+	/* Insert ULONG_MAX */
+	MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL));
+
+	val = 0;
+	mas_set(&mas, 0);
+	mas_lock(&mas);
+	mas_for_each(&mas, entry, ULONG_MAX) {
+		if (val == 64)
+			MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+		else if (val == 4398046511104)
+			MT_BUG_ON(mt, entry != xa_mk_value(ULONG_MAX & LONG_MAX));
+		else
+			MT_BUG_ON(mt, xa_mk_value(val) != entry);
+		val <<= 2;
+
+		/* For zero check. */
+		if (!val)
+			val = 1;
+		mas_pause(&mas);
+		mas_unlock(&mas);
+		mas_lock(&mas);
+	}
+	mas_unlock(&mas);
+
+	mas_set(&mas, 1048576);
+	mas_lock(&mas);
+	entry = mas_find(&mas, 1048576);
+	mas_unlock(&mas);
+	MT_BUG_ON(mas.tree, entry == NULL);
+
+	/*
+	 * Find last value.
+	 * 1. get the expected value, leveraging the existence of an end entry
+	 * 2. delete end entry
+	 * 3. find the last value but searching for ULONG_MAX and then using
+	 * prev
+	 */
+	/* First, get the expected result. */
+	mas_lock(&mas);
+	mas_reset(&mas);
+	mas.index = ULONG_MAX; /* start at max.. */
+	entry = mas_find(&mas, ULONG_MAX);
+	entry = mas_prev(&mas, 0);
+	index = mas.index;
+	last = mas.last;
+
+	/* Erase the last entry. */
+	mas_reset(&mas);
+	mas.index = ULONG_MAX;
+	mas.last = ULONG_MAX;
+	mas_erase(&mas);
+
+	/* Get the previous value from MAS_START */
+	mas_reset(&mas);
+	entry2 = mas_prev(&mas, 0);
+
+	/* Check results. */
+	MT_BUG_ON(mt, entry != entry2);
+	MT_BUG_ON(mt, index != mas.index);
+	MT_BUG_ON(mt, last != mas.last);
+
+
+	mas.node = MAS_NONE;
+	mas.index = ULONG_MAX;
+	mas.last = ULONG_MAX;
+	entry2 = mas_prev(&mas, 0);
+	MT_BUG_ON(mt, entry != entry2);
+
+	mas_set(&mas, 0);
+	MT_BUG_ON(mt, mas_prev(&mas, 0) != NULL);
+
+	mas_unlock(&mas);
+	mtree_destroy(mt);
+}
+
+static noinline void check_find_2(struct maple_tree *mt)
+{
+	unsigned long i, j;
+	void *entry;
+
+	MA_STATE(mas, mt, 0, 0);
+	rcu_read_lock();
+	mas_for_each(&mas, entry, ULONG_MAX)
+		MT_BUG_ON(mt, true);
+	rcu_read_unlock();
+
+	for (i = 0; i < 256; i++) {
+		mtree_insert_index(mt, i, GFP_KERNEL);
+		j = 0;
+		mas_set(&mas, 0);
+		rcu_read_lock();
+		mas_for_each(&mas, entry, ULONG_MAX) {
+			MT_BUG_ON(mt, entry != xa_mk_value(j));
+			j++;
+		}
+		rcu_read_unlock();
+		MT_BUG_ON(mt, j != i + 1);
+	}
+
+	for (i = 0; i < 256; i++) {
+		mtree_erase_index(mt, i);
+		j = i + 1;
+		mas_set(&mas, 0);
+		rcu_read_lock();
+		mas_for_each(&mas, entry, ULONG_MAX) {
+			if (xa_is_zero(entry))
+				continue;
+
+			MT_BUG_ON(mt, entry != xa_mk_value(j));
+			j++;
+		}
+		rcu_read_unlock();
+		MT_BUG_ON(mt, j != 256);
+	}
+
+	/*MT_BUG_ON(mt, !mtree_empty(mt)); */
+}
+
+#define erase_ptr(i) entry[i%2]
+#define erase_check_load(mt, i) check_load(mt, set[i], entry[i%2])
+#define erase_check_insert(mt, i) check_insert(mt, set[i], entry[i%2])
+#define erase_check_erase(mt, i) check_erase(mt, set[i], entry[i%2])
+
+static noinline void check_erase_testset(struct maple_tree *mt)
+{
+	unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
+				1001, 1002, 1003, 1005, 0,
+				6003, 6002, 6008, 6012, 6015,
+				7003, 7002, 7008, 7012, 7015,
+				8003, 8002, 8008, 8012, 8015,
+				9003, 9002, 9008, 9012, 9015,
+				10003, 10002, 10008, 10012, 10015,
+				11003, 11002, 11008, 11012, 11015,
+				12003, 12002, 12008, 12012, 12015,
+				13003, 13002, 13008, 13012, 13015,
+				14003, 14002, 14008, 14012, 14015,
+				15003, 15002, 15008, 15012, 15015,
+	};
+
+
+	void *ptr = &set;
+	void *entry[2] = { ptr, mt };
+	void *root_node;
+
+
+	rcu_register_thread();
+	mt_set_in_rcu(mt);
+	for (int i = 0; i < 4; i++)
+		erase_check_insert(mt, i);
+	for (int i = 0; i < 4; i++)
+		erase_check_load(mt, i);
+
+	mt_set_non_kernel(2);
+	erase_check_erase(mt, 1);
+	erase_check_load(mt, 0);
+	check_load(mt, set[1], NULL);
+	for (int i = 2; i < 4; i++)
+		erase_check_load(mt, i);
+
+
+	erase_check_erase(mt, 2);
+	erase_check_load(mt, 0);
+	check_load(mt, set[1], NULL);
+	check_load(mt, set[2], NULL);
+
+	erase_check_insert(mt, 1);
+	erase_check_insert(mt, 2);
+
+	for (int i = 0; i < 4; i++)
+		erase_check_load(mt, i);
+
+	/* Check erase and load without an allocation. */
+	erase_check_load(mt, 3);
+	erase_check_erase(mt, 1);
+	erase_check_load(mt, 0);
+	check_load(mt, set[1], NULL);
+	for (int i = 2; i < 4; i++)
+		erase_check_load(mt, i);
+
+	/*
+	 * Set the newly erased node. This will produce a different allocated
+	 * node to avoid busy slots.
+	 */
+	root_node = mt->ma_root;
+	erase_check_insert(mt, 1);
+
+	erase_check_load(mt, 0);
+	check_load(mt, 5016, NULL);
+	erase_check_load(mt, 1);
+	check_load(mt, 5013, NULL);
+	erase_check_load(mt, 2);
+	check_load(mt, 5018, NULL);
+	erase_check_load(mt, 3);
+
+	erase_check_erase(mt, 2); /* erase 5017 to check append */
+	erase_check_load(mt, 0);
+	check_load(mt, 5016, NULL);
+	erase_check_load(mt, 1);
+	check_load(mt, 5013, NULL);
+	check_load(mt, set[2], NULL);
+	check_load(mt, 5018, NULL);
+
+	erase_check_load(mt, 3);
+
+	root_node = mt->ma_root;
+	erase_check_insert(mt, 2);
+
+	erase_check_load(mt, 0);
+	check_load(mt, 5016, NULL);
+	erase_check_load(mt, 1);
+	check_load(mt, 5013, NULL);
+	erase_check_load(mt, 2);
+	check_load(mt, 5018, NULL);
+	erase_check_load(mt, 3);
+
+	mt_set_non_kernel(1);
+	erase_check_erase(mt, 2); /* erase 5017 to check append */
+	erase_check_load(mt, 0);
+	check_load(mt, 5016, NULL);
+	check_load(mt, set[2], NULL);
+	erase_check_erase(mt, 0); /* erase 5015 to check append */
+	check_load(mt, set[0], NULL);
+	check_load(mt, 5016, NULL);
+	erase_check_insert(mt, 4); /* 1000 < Should not split. */
+	check_load(mt, set[0], NULL);
+	check_load(mt, 5016, NULL);
+	erase_check_load(mt, 1);
+	check_load(mt, 5013, NULL);
+	check_load(mt, set[2], NULL);
+	check_load(mt, 5018, NULL);
+	erase_check_load(mt, 4);
+	check_load(mt, 999, NULL);
+	check_load(mt, 1001, NULL);
+	erase_check_load(mt, 4);
+	if (mt_in_rcu(mt))
+		MT_BUG_ON(mt, root_node == mt->ma_root);
+	else
+		MT_BUG_ON(mt, root_node != mt->ma_root);
+
+	/* Should not have split. */
+	MT_BUG_ON(mt, !mte_is_leaf(mt->ma_root));
+
+
+	/* Coalesce testing */
+	erase_check_insert(mt, 0);
+	erase_check_insert(mt, 2);
+
+	for (int i = 5; i < 25; i++) {
+		erase_check_insert(mt, i);
+		for (int j = i; j >= 0; j--)
+			erase_check_load(mt, j);
+	}
+
+	erase_check_erase(mt, 14); /*6015 */
+	for (int i = 0; i < 25; i++) {
+		if (i == 14)
+			check_load(mt, set[i], NULL);
+		else
+			erase_check_load(mt, i);
+	}
+	erase_check_erase(mt, 16); /*7002 */
+	for (int i = 0; i < 25; i++) {
+		if (i == 16 || i == 14)
+			check_load(mt, set[i], NULL);
+		else
+			erase_check_load(mt, i);
+	}