diff options
| author | Liam Howlett <liam.howlett@oracle.com> | 2022-10-28 18:04:30 +0000 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2022-11-08 15:57:22 -0800 |
| commit | 120b116208a0877227fc82e3f0df81e7a3ed4ab1 (patch) | |
| tree | 5a10a0b015449fbb0771716c642f8299bb11baec /lib/test_maple_tree.c | |
| parent | 9a887877ef981e5a185a84339603300cf2eb1900 (diff) | |
| download | linux-120b116208a0877227fc82e3f0df81e7a3ed4ab1.tar.gz linux-120b116208a0877227fc82e3f0df81e7a3ed4ab1.tar.bz2 linux-120b116208a0877227fc82e3f0df81e7a3ed4ab1.zip | |
maple_tree: reorganize testing to restore module testing
Along the development cycle, the testing code support for module/in-kernel
compiles was removed. Restore this functionality by moving any internal
API tests to the userspace side, as well as threading tests. Fix the
lockdep issues and add a way to reduce memory usage so the tests can
complete with KASAN + memleak detection. Make the tests work on 32 bit
hosts where possible and detect 32 bit hosts in the radix test suite.
[akpm@linux-foundation.org: fix module export]
[akpm@linux-foundation.org: fix it some more]
[liam.howlett@oracle.com: fix compile warnings on 32bit build in check_find()]
Link: https://lkml.kernel.org/r/20221107203816.1260327-1-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20221028180415.3074673-1-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'lib/test_maple_tree.c')
| -rw-r--r-- | lib/test_maple_tree.c | 35930 |
1 file changed, 195 insertions(+), 35735 deletions(-)
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 4f69e009a015..f425f169ef08 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -1,24 +1,35 @@ // SPDX-License-Identifier: GPL-2.0+ /* * test_maple_tree.c: Test the maple tree API - * Copyright (c) 2018 Liam R. Howlett + * Copyright (c) 2018-2022 Oracle Corporation * Author: Liam R. Howlett <Liam.Howlett@Oracle.com> + * + * Any tests that only require the interface of the tree. */ #include <linux/maple_tree.h> #include <linux/module.h> -#include <stdlib.h> -#include <time.h> #define MTREE_ALLOC_MAX 0x2000000000000Ul +#ifndef CONFIG_DEBUG_MAPLE_TREE #define CONFIG_DEBUG_MAPLE_TREE +#endif #define CONFIG_MAPLE_SEARCH +#define MAPLE_32BIT (MAPLE_NODE_SLOTS > 31) + /* #define BENCH_SLOT_STORE */ /* #define BENCH_NODE_STORE */ /* #define BENCH_AWALK */ /* #define BENCH_WALK */ /* #define BENCH_MT_FOR_EACH */ /* #define BENCH_FORK */ + +#ifdef __KERNEL__ +#define mt_set_non_kernel(x) do {} while (0) +#define mt_zero_nr_tallocated(x) do {} while (0) +#else +#define cond_resched() do {} while (0) +#endif static int mtree_insert_index(struct maple_tree *mt, unsigned long index, gfp_t gfp) { @@ -65,6 +76,7 @@ static void *mtree_test_erase(struct maple_tree *mt, unsigned long index) return mtree_erase(mt, index); } +#if defined(CONFIG_64BIT) static noinline void check_mtree_alloc_range(struct maple_tree *mt, unsigned long start, unsigned long end, unsigned long size, unsigned long expected, int eret, void *ptr) @@ -98,6 +110,7 @@ static noinline void check_mtree_alloc_rrange(struct maple_tree *mt, MT_BUG_ON(mt, result != expected); } +#endif static noinline void check_load(struct maple_tree *mt, unsigned long index, void *ptr) @@ -150,12 +163,6 @@ static noinline void check_insert(struct maple_tree *mt, unsigned long index, MT_BUG_ON(mt, ret != 0); } -static noinline void check_erase(struct maple_tree *mt, unsigned long index, - void *ptr) -{ - MT_BUG_ON(mt, mtree_test_erase(mt, index) != ptr); -} - 
static noinline void check_dup_insert(struct maple_tree *mt, unsigned long index, void *ptr) { @@ -172,41 +179,6 @@ void check_index_load(struct maple_tree *mt, unsigned long index) return check_load(mt, index, xa_mk_value(index & LONG_MAX)); } -static noinline void check_nomem(struct maple_tree *mt) -{ - MA_STATE(ms, mt, 1, 1); - - MT_BUG_ON(mt, !mtree_empty(mt)); - /* Ensure no bypassing of allocation failures */ - mt_set_non_kernel(0); - - /* Storing something at 1 requires memory allocation */ - MT_BUG_ON(mt, mtree_insert(mt, 1, &ms, GFP_ATOMIC) != -ENOMEM); - /* Storing something at 0 does not */ - MT_BUG_ON(mt, mtree_insert(mt, 0, &ms, GFP_ATOMIC) != 0); - - /* - * Simulate two threads racing; the first one fails to allocate - * memory to insert an entry at 1, then the second one succeeds - * in allocating memory to insert an entry at 2. The first one - * then needs to free the node it allocated. LeakSanitizer will - * notice this, as will the 'nr_allocated' debugging aid in the - * userspace test suite. - */ - mtree_lock(mt); - mas_store(&ms, &ms); /* insert 1 -> &ms, fails. */ - MT_BUG_ON(mt, ms.node != MA_ERROR(-ENOMEM)); - mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */ - MT_BUG_ON(mt, ms.node != MAS_START); - mtree_unlock(mt); - MT_BUG_ON(mt, mtree_insert(mt, 2, mt, GFP_KERNEL) != 0); - mtree_lock(mt); - mas_store(&ms, &ms); /* insert 1 -> &ms */ - mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */ - mtree_unlock(mt); - mtree_destroy(mt); -} - static inline int not_empty(struct maple_node *node) { int i; @@ -221,350 +193,6 @@ static inline int not_empty(struct maple_node *node) return 0; } -static noinline void check_new_node(struct maple_tree *mt) -{ - - struct maple_node *mn, *mn2, *mn3; - struct maple_alloc *smn; - struct maple_node *nodes[100]; - int i, j, total; - - MA_STATE(mas, mt, 0, 0); - - /* Try allocating 3 nodes */ - mtree_lock(mt); - /* request 3 nodes to be allocated. 
*/ - mas_node_count(&mas, 3); - /* Allocation request of 3. */ - MT_BUG_ON(mt, mas_alloc_req(&mas) != 3); - /* Allocate failed. */ - MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - - MT_BUG_ON(mt, mas_allocated(&mas) != 3); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, mas.alloc == NULL); - MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); - mas_push_node(&mas, mn); - mas_nomem(&mas, GFP_KERNEL); /* free */ - mtree_unlock(mt); - - - /* Try allocating 1 node, then 2 more */ - mtree_lock(mt); - /* Set allocation request to 1. */ - mas_set_alloc_req(&mas, 1); - /* Check Allocation request of 1. */ - MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); - mas_set_err(&mas, -ENOMEM); - /* Validate allocation request. */ - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - /* Eat the requested node. */ - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, mn->slot[0] != NULL); - MT_BUG_ON(mt, mn->slot[1] != NULL); - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - - ma_free_rcu(mn); - mas.node = MAS_START; - mas_nomem(&mas, GFP_KERNEL); - /* Allocate 3 nodes, will fail. */ - mas_node_count(&mas, 3); - /* Drop the lock and allocate 3 nodes. */ - mas_nomem(&mas, GFP_KERNEL); - /* Ensure 3 are allocated. */ - MT_BUG_ON(mt, mas_allocated(&mas) != 3); - /* Allocation request of 0. */ - MT_BUG_ON(mt, mas_alloc_req(&mas) != 0); - - MT_BUG_ON(mt, mas.alloc == NULL); - MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); - MT_BUG_ON(mt, mas.alloc->slot[1] == NULL); - /* Ensure we counted 3. */ - MT_BUG_ON(mt, mas_allocated(&mas) != 3); - /* Free. */ - mas_nomem(&mas, GFP_KERNEL); - - /* Set allocation request to 1. */ - mas_set_alloc_req(&mas, 1); - MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); - mas_set_err(&mas, -ENOMEM); - /* Validate allocation request. 
*/ - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - MT_BUG_ON(mt, mas_allocated(&mas) != 1); - /* Check the node is only one node. */ - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, mn->slot[0] != NULL); - MT_BUG_ON(mt, mn->slot[1] != NULL); - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - mas_push_node(&mas, mn); - MT_BUG_ON(mt, mas_allocated(&mas) != 1); - MT_BUG_ON(mt, mas.alloc->node_count); - - mas_set_alloc_req(&mas, 2); /* request 2 more. */ - MT_BUG_ON(mt, mas_alloc_req(&mas) != 2); - mas_set_err(&mas, -ENOMEM); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - MT_BUG_ON(mt, mas_allocated(&mas) != 3); - MT_BUG_ON(mt, mas.alloc == NULL); - MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); - MT_BUG_ON(mt, mas.alloc->slot[1] == NULL); - for (i = 2; i >= 0; i--) { - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, mas_allocated(&mas) != i); - MT_BUG_ON(mt, !mn); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - } - - total = 64; - mas_set_alloc_req(&mas, total); /* request 2 more. 
*/ - MT_BUG_ON(mt, mas_alloc_req(&mas) != total); - mas_set_err(&mas, -ENOMEM); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - for (i = total; i > 0; i--) { - unsigned int e = 0; /* expected node_count */ - - if (i >= 35) - e = i - 35; - else if (i >= 5) - e = i - 5; - else if (i >= 2) - e = i - 2; - MT_BUG_ON(mt, mas.alloc->node_count != e); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mas_allocated(&mas) != i - 1); - MT_BUG_ON(mt, !mn); - ma_free_rcu(mn); - } - - total = 100; - for (i = 1; i < total; i++) { - mas_set_alloc_req(&mas, i); - mas_set_err(&mas, -ENOMEM); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - for (j = i; j > 0; j--) { - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, mas_allocated(&mas) != j - 1); - MT_BUG_ON(mt, !mn); - MT_BUG_ON(mt, not_empty(mn)); - mas_push_node(&mas, mn); - MT_BUG_ON(mt, mas_allocated(&mas) != j); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mas_allocated(&mas) != j - 1); - ma_free_rcu(mn); - } - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - - mas_set_alloc_req(&mas, i); - mas_set_err(&mas, -ENOMEM); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - for (j = 0; j <= i/2; j++) { - MT_BUG_ON(mt, mas_allocated(&mas) != i - j); - nodes[j] = mas_pop_node(&mas); - MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1); - } - - while (j) { - j--; - mas_push_node(&mas, nodes[j]); - MT_BUG_ON(mt, mas_allocated(&mas) != i - j); - } - MT_BUG_ON(mt, mas_allocated(&mas) != i); - for (j = 0; j <= i/2; j++) { - MT_BUG_ON(mt, mas_allocated(&mas) != i - j); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1); - } - MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL)); - - } - - /* Set allocation request. */ - total = 500; - mas_node_count(&mas, total); - /* Drop the lock and allocate the nodes. 
*/ - mas_nomem(&mas, GFP_KERNEL); - MT_BUG_ON(mt, !mas.alloc); - i = 1; - smn = mas.alloc; - while (i < total) { - for (j = 0; j < MAPLE_ALLOC_SLOTS; j++) { - i++; - MT_BUG_ON(mt, !smn->slot[j]); - if (i == total) - break; - } - smn = smn->slot[0]; /* next. */ - } - MT_BUG_ON(mt, mas_allocated(&mas) != total); - mas_nomem(&mas, GFP_KERNEL); /* Free. */ - - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - for (i = 1; i < 128; i++) { - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */ - for (j = i; j > 0; j--) { /*Free the requests */ - mn = mas_pop_node(&mas); /* get the next node. */ - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - } - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - } - - for (i = 1; i < MAPLE_NODE_MASK + 1; i++) { - MA_STATE(mas2, mt, 0, 0); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */ - for (j = 1; j <= i; j++) { /* Move the allocations to mas2 */ - mn = mas_pop_node(&mas); /* get the next node. */ - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, not_empty(mn)); - mas_push_node(&mas2, mn); - MT_BUG_ON(mt, mas_allocated(&mas2) != j); - } - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - MT_BUG_ON(mt, mas_allocated(&mas2) != i); - - for (j = i; j > 0; j--) { /*Free the requests */ - MT_BUG_ON(mt, mas_allocated(&mas2) != j); - mn = mas_pop_node(&mas2); /* get the next node. 
*/ - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - } - MT_BUG_ON(mt, mas_allocated(&mas2) != 0); - } - - - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */ - MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1); - - mn = mas_pop_node(&mas); /* get the next node. */ - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS); - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 2); - - mas_push_node(&mas, mn); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1); - - /* Check the limit of pop/push/pop */ - mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */ - MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); - MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - MT_BUG_ON(mt, mas_alloc_req(&mas)); - MT_BUG_ON(mt, mas.alloc->node_count); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1); - mas_push_node(&mas, mn); - MT_BUG_ON(mt, mas.alloc->node_count); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) { - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - } - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - - - for (i = 3; i < MAPLE_NODE_MASK * 3; i++) { - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mn = 
mas_pop_node(&mas); /* get the next node. */ - mas_push_node(&mas, mn); /* put it back */ - mas_destroy(&mas); - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mn = mas_pop_node(&mas); /* get the next node. */ - mn2 = mas_pop_node(&mas); /* get the next node. */ - mas_push_node(&mas, mn); /* put them back */ - mas_push_node(&mas, mn2); - mas_destroy(&mas); - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mn = mas_pop_node(&mas); /* get the next node. */ - mn2 = mas_pop_node(&mas); /* get the next node. */ - mn3 = mas_pop_node(&mas); /* get the next node. */ - mas_push_node(&mas, mn); /* put them back */ - mas_push_node(&mas, mn2); - mas_push_node(&mas, mn3); - mas_destroy(&mas); - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mn = mas_pop_node(&mas); /* get the next node. */ - ma_free_rcu(mn); - mas_destroy(&mas); - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mn = mas_pop_node(&mas); /* get the next node. */ - ma_free_rcu(mn); - mn = mas_pop_node(&mas); /* get the next node. */ - ma_free_rcu(mn); - mn = mas_pop_node(&mas); /* get the next node. 
*/ - ma_free_rcu(mn); - mas_destroy(&mas); - } - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, 5); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - MT_BUG_ON(mt, mas_allocated(&mas) != 5); - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, 10); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mas.node = MAS_START; - MT_BUG_ON(mt, mas_allocated(&mas) != 10); - mas_destroy(&mas); - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, MAPLE_ALLOC_SLOTS - 1); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS - 1); - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, 10 + MAPLE_ALLOC_SLOTS - 1); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mas.node = MAS_START; - MT_BUG_ON(mt, mas_allocated(&mas) != 10 + MAPLE_ALLOC_SLOTS - 1); - mas_destroy(&mas); - - mtree_unlock(mt); -} static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max, bool verbose) @@ -588,6 +216,7 @@ static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max, } check_load(mt, max + 1, NULL); +#ifndef __KERNEL__ if (verbose) { rcu_barrier(); mt_dump(mt); @@ -595,6 +224,7 @@ static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max, __func__, max, mt_get_alloc_size()/1024, mt_nr_allocated(), mt_nr_tallocated()); } +#endif } static noinline void check_seq(struct maple_tree *mt, unsigned long max, @@ -614,6 +244,8 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max, MT_BUG_ON(mt, !mt_height(mt)); check_load(mt, i + 1, NULL); } + +#ifndef __KERNEL__ if (verbose) { rcu_barrier(); mt_dump(mt); @@ -621,6 +253,7 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max, max, mt_get_alloc_size()/1024, mt_nr_allocated(), mt_nr_tallocated()); } +#endif } static noinline void check_lb_not_empty(struct maple_tree *mt) @@ -651,10 +284,15 @@ static noinline void 
check_lower_bound_split(struct maple_tree *mt) static noinline void check_upper_bound_split(struct maple_tree *mt) { unsigned long i, j; - unsigned long huge = 4000UL * 1000 * 1000; + unsigned long huge; MT_BUG_ON(mt, !mtree_empty(mt)); + if (MAPLE_32BIT) + huge = 2147483647UL; + else + huge = 4000UL * 1000 * 1000; + i = 4096; while (i < huge) { check_insert(mt, i, (void *) i); @@ -687,6 +325,7 @@ static noinline void check_rev_find(struct maple_tree *mt) mtree_store_range(mt, i*10, i*10 + 5, xa_mk_value(i), GFP_KERNEL); + rcu_read_lock(); mas_set(&mas, 1000); val = mas_find_rev(&mas, 1000); MT_BUG_ON(mt, val != xa_mk_value(100)); @@ -712,13 +351,15 @@ static noinline void check_rev_find(struct maple_tree *mt) MT_BUG_ON(mt, val != xa_mk_value(0)); val = mas_find_rev(&mas, 0); MT_BUG_ON(mt, val != NULL); + rcu_read_unlock(); } static noinline void check_find(struct maple_tree *mt) { unsigned long val = 0; - unsigned long count = 20; + unsigned long count; unsigned long max; + unsigned long top; unsigned long last = 0, index = 0; void *entry, *entry2; @@ -727,6 +368,18 @@ static noinline void check_find(struct maple_tree *mt) /* Insert 0. 
*/ MT_BUG_ON(mt, mtree_insert_index(mt, val++, GFP_KERNEL)); +#if defined(CONFIG_64BIT) + top = 4398046511104UL; +#else + top = ULONG_MAX; +#endif + + if (MAPLE_32BIT) { + count = 15; + } else { + count = 20; + } + for (int i = 0; i <= count; i++) { if (val != 64) MT_BUG_ON(mt, mtree_insert_index(mt, val, GFP_KERNEL)); @@ -805,12 +458,17 @@ static noinline void check_find(struct maple_tree *mt) index = 0; MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL)); mt_for_each(mt, entry, index, ULONG_MAX) { - if (val == 4398046511104) - MT_BUG_ON(mt, entry != - xa_mk_value(ULONG_MAX & LONG_MAX)); + if (val == top) + MT_BUG_ON(mt, entry != xa_mk_value(LONG_MAX)); else MT_BUG_ON(mt, xa_mk_value(val) != entry); - val <<= 2; + + /* Workaround for 32bit */ + if ((val << 2) < val) + val = ULONG_MAX; + else + val <<= 2; + if (val == 64) /* Skip zero entry. */ val <<= 2; /* For zero check. */ @@ -842,11 +500,16 @@ static noinline void check_find(struct maple_tree *mt) mas_for_each(&mas, entry, ULONG_MAX) { if (val == 64) MT_BUG_ON(mt, entry != XA_ZERO_ENTRY); - else if (val == 4398046511104) - MT_BUG_ON(mt, entry != xa_mk_value(ULONG_MAX & LONG_MAX)); + else if (val == top) + MT_BUG_ON(mt, entry != xa_mk_value(LONG_MAX)); else MT_BUG_ON(mt, xa_mk_value(val) != entry); - val <<= 2; + + /* Workaround for 32bit */ + if ((val << 2) < val) + val = ULONG_MAX; + else + val <<= 2; /* For zero check. 
*/ if (!val) @@ -951,33847 +614,8 @@ static noinline void check_find_2(struct maple_tree *mt) /*MT_BUG_ON(mt, !mtree_empty(mt)); */ } -#define erase_ptr(i) entry[i%2] -#define erase_check_load(mt, i) check_load(mt, set[i], entry[i%2]) -#define erase_check_insert(mt, i) check_insert(mt, set[i], entry[i%2]) -#define erase_check_erase(mt, i) check_erase(mt, set[i], entry[i%2]) - -static noinline void check_erase_testset(struct maple_tree *mt) -{ - unsigned long set[] = { 5015, 5014, 5017, 25, 1000, - 1001, 1002, 1003, 1005, 0, - 6003, 6002, 6008, 6012, 6015, - 7003, 7002, 7008, 7012, 7015, - 8003, 8002, 8008, 8012, 8015, - 9003, 9002, 9008, 9012, 9015, - 10003, 10002, 10008, 10012, 10015, - 11003, 11002, 11008, 11012, 11015, - 12003, 12002, 12008, 12012, 12015, - 13003, 13002, 13008, 13012, 13015, - 14003, 14002, 14008, 14012, 14015, - 15003, 15002, 15008, 15012, 15015, - }; - - - void *ptr = &set; - void *entry[2] = { ptr, mt }; - void *root_node; - - - rcu_register_thread(); - mt_set_in_rcu(mt); - for (int i = 0; i < 4; i++) - erase_check_insert(mt, i); - for (int i = 0; i < 4; i++) - erase_check_load(mt, i); - - mt_set_non_kernel(2); - erase_check_erase(mt, 1); - erase_check_load(mt, 0); - check_load(mt, set[1], NULL); - for (int i = 2; i < 4; i++) - erase_check_load(mt, i); - - - erase_check_erase(mt, 2); - erase_check_load(mt, 0); - check_load(mt, set[1], NULL); - check_load(mt, set[2], NULL); - - erase_check_insert(mt, 1); - erase_check_insert(mt, 2); - - for (int i = 0; i < 4; i++) - erase_check_load(mt, i); - - /* Check erase and load without an allocation. */ - erase_check_load(mt, 3); - erase_check_erase(mt, 1); - erase_check_load(mt, 0); - check_load(mt, set[1], NULL); - for (int i = 2; i < 4; i++) - erase_check_load(mt, i); - - /* - * Set the newly erased node. This will produce a different allocated - * node to avoid busy slots. 
- */ - root_node = mt->ma_root; - erase_check_insert(mt, 1); - - erase_check_load(mt, 0); - check_load(mt, 5016, NULL); - erase_check_load(mt, 1); - check_load(mt, 5013, NULL); - erase_check_load(mt, 2); - check_load(mt, 5018, NULL); - erase_check_load(mt, 3); - - erase_check_erase(mt, 2); /* erase 5017 to check append */ - erase_check_load(mt, 0); - check_load(mt, 5016, NULL); - erase_check_load(mt, 1); - check_load(mt, 5013, NULL); - check_load(mt, set[2], NULL); - check_load(mt, 5018, NULL); - - erase_check_load(mt, 3); - - root_node = mt->ma_root; - erase_check_insert(mt, 2); - - erase_check_load(mt, 0); - check_load(mt, 5016, NULL); - erase_check_load(mt, 1); - check_load(mt, 5013, NULL); - erase_check_load(mt, 2); - check_load(mt, 5018, NULL); - erase_check_load(mt, 3); - - mt_set_non_kernel(1); - erase_check_erase(mt, 2); /* erase 5017 to check append */ - erase_check_load(mt, 0); - check_load(mt, 5016, NULL); - check_load(mt, set[2], NULL); - erase_check_erase(mt, 0); /* erase 5015 to check append */ - check_load(mt, set[0], NULL); - check_load(mt, 5016, NULL); - erase_check_insert(mt, 4); /* 1000 < Should not split. */ - check_load(mt, set[0], NULL); - check_load(mt, 5016, NULL); - erase_check_load(mt, 1); - check_load(mt, 5013, NULL); - check_load(mt, set[2], NULL); - check_load(mt, 5018, NULL); - erase_check_load(mt, 4); - check_load(mt, 999, NULL); - check_load(mt, 1001, NULL); - erase_check_load(mt, 4); - if (mt_in_rcu(mt)) - MT_BUG_ON(mt, root_node == mt->ma_root); - else - MT_BUG_ON(mt, root_node != mt->ma_root); - - /* Should not have split. 
*/ - MT_BUG_ON(mt, !mte_is_leaf(mt->ma_root)); - - - /* Coalesce testing */ - erase_check_insert(mt, 0); - erase_check_insert(mt, 2); - - for (int i = 5; i < 25; i++) { - erase_check_insert(mt, i); - for (int j = i; j >= 0; j--) - erase_check_load(mt, j); - } - - erase_check_erase(mt, 14); /*6015 */ - for (int i = 0; i < 25; i++) { - if (i == 14) - check_load(mt, set[i], NULL); - else - erase_check_load(mt, i); - } - erase_check_erase(mt, 16); /*7002 */ - for (int i = 0; i < 25; i++) { - if (i == 16 || i == 14) - check_load(mt, set[i], NULL); - else - erase_check_load(mt, i); - } - - - mt_set_non_kernel(1); - erase_check_erase(mt, 13); /*6012 */ - for (int i = 0; i < 25; i++) { - if (i == 16 || i == 14 || i == 13) - check_load(mt, set[i], NULL); - else - erase_check_load(mt, i); - } - - erase_check_erase(mt, 15); /*7003 */ - for (int i = 0; i < 25; i++) { - if (i <= 16 && i >= 13) - check_load(mt, set[i], NULL); - else - erase_check_load(mt, i); - } - - mt_set_non_kernel(2); - erase_check_erase(mt, 17); /*7008 *should* cause coalesce. 
*/ - for (int i = 0; i < 25; i++) { - if (i <= 17 && i >= 13) - check_load(mt, set[i], NULL); - else - erase_check_load(mt, i); - } - - erase_check_erase(mt, 18); /*7012 */ - for (int i = 0; i < 25; i++) { - if (i <= 18 && i >= 13) - check_load(mt, set[i], NULL); - else - erase_check_load(mt, i); - } - - mt_set_non_kernel(2); - erase_check_erase(mt, 19); /*7015 */ - for (int i = 0; i < 25; i++) { - if (i <= 19 && i >= 13) - check_load(mt, set[i], NULL); - else - erase_check_load(mt, i); - } - - erase_check_erase(mt, 20); /*8003 */ - for (int i = 0; i < 25; i++) { - if (i <= 20 && i >= 13) - check_load(mt, set[i], NULL); - else - erase_check_load(mt, i); - } - - erase_check_erase(mt, 21); /*8002 */ - for (int i = 0; i < 25; i++) { - if (i <= 21 && i >= 13) - check_load(mt, set[i], NULL); - else - erase_check_load(mt, i); - } - - mt_set_non_kernel(2); - erase_check_erase(mt, 22); /*8008 */ - for (int i = 0; i < 25; i++) { - if (i <= 22 && i >= 13) - check_load(mt, set[i], NULL); - else - erase_check_load(mt, i); - } - for (int i = 23; i < 25; i++) - erase_check_erase(mt, i); - - for (int i = 0; i < 25; i++) { - if (i <= 25 && i >= 13) - check_load(mt, set[i], NULL); - else - erase_check_load(mt, i); - } - - /* Shrinking tree test. 
*/ - - for (int i = 13; i < ARRAY_SIZE(set); i++) - erase_check_insert(mt, i); - - mt_set_non_kernel(99); - for (int i = 18; i < ARRAY_SIZE(set); i++) { - erase_check_erase(mt, i); - for (int j = 0; j < ARRAY_SIZE(set); j++) { - if (j < 18 || j > i) - erase_check_load(mt, j); - else - check_load(mt, set[j], NULL); - } - } - mt_set_non_kernel(35); - for (int i = 0; i < 18; i++) { - erase_check_erase(mt, i); - for (int j = 0; j < ARRAY_SIZE(set); j++) { - if (j < 18 && j > i) - erase_check_load(mt, j); - else - check_load(mt, set[j], NULL); - } - } - erase_check_insert(mt, 8); - erase_check_insert(mt, 9); - erase_check_erase(mt, 8); - rcu_unregister_thread(); -} - -#define erase_check_store_range(mt, a, i, ptr) mtree_test_store_range(mt, \ - a[(i)], a[(i + 1)], ptr) -#define STORE 1 -#define SNULL 2 -#define ERASE 3 -#define ec_type_str(x) \ - (((x) == STORE) ? \ - "STORE" : \ - (((x) == SNULL) ? \ - "SNULL" : "ERASE") \ - ) -#define check_erase2_debug 0 -void *mas_next(struct ma_state *mas, unsigned long max); - -/* Calculate the overwritten entries. */ -int mas_ce2_over_count(struct ma_state *mas_start, struct ma_state *mas_end, - void *s_entry, unsigned long s_min, - void *e_entry, unsigned long e_max, - unsigned long *set, int i, bool null_entry) -{ - int count = 0, span = 0; - unsigned long retry = 0; - void *entry; - struct ma_state tmp; - - - /* count slots */ - memcpy(&tmp, mas_start, sizeof(tmp)); - entry = mas_next(&tmp, mas_end->last); - while (entry) { - BUG_ON(retry > 50); /* stop infinite retry on testing. */ - if (xa_is_zero(s_entry)) { - retry++; - continue; - } - count++; - span++; - entry = mas_next(&tmp, mas_end->last); - } - - if (null_entry) { - /* Check splitting end. 
*/ - if (e_entry && (e_max > mas_end->last)) - count--; - - /* check overwrite of entire start */ - if (s_entry && (s_min == mas_start->index)) - count++; - } else { /* !null_entry (store) */ - bool esplit = e_max > mas_end->last; - bool ssplit = s_min != mas_start->index; - - if (s_entry && e_entry) { - if (esplit && ssplit) - count--; - else if (ssplit) - count--; - else if (esplit) { - if (span) - count--; - } - } else if (s_entry && !e_entry) { - if (ssplit) - count--; - } else if (!s_entry && e_entry) { - if (esplit) - count--; - count--; - } else { - count--; - } - } - return count; -} - -/* - * mas_node_walk() - Walk a maple node to offset of the index. - * @mas: The maple state - * @type: The maple node type - * @*range_min: Pointer to store the minimum range of the offset - * @*range_max: Pointer to store the maximum range of the offset - * - * The offset will be stored in the maple state. - * - */ -static inline void mas_node_walk(struct ma_state *mas, struct maple_node *node, - enum maple_type type, unsigned long *range_min, - unsigned long *range_max) - -{ - unsigned long *pivots; - unsigned char count; - unsigned long prev, max; - unsigned char offset; - unsigned long index; - - if (unlikely(ma_is_dense(type))) { - (*range_max) = (*range_min) = mas->index; - if (unlikely(ma_dead_node(node))) - return; - - mas->offset = mas->index = mas->min; - return; - } - - pivots = ma_pivots(node, type); - max = pivots[0]; - if (unlikely(ma_dead_node(node))) - return; - - offset = 0; - prev = mas->min; - index = mas->index; - if (unlikely(index <= max)) - goto offset_zero; - - count = mt_pivots[type]; - while (++offset < count) { - prev = max; - max = pivots[offset]; - if (unlikely(ma_dead_node(node))) - return; - - if (index <= max) - goto offset_found; - else if (unlikely(!max)) - goto mas_max; - } - - prev = max; -mas_max: - max = mas->max; -offset_found: - prev++; -offset_zero: - mas->offset = offset; - if (ma_is_leaf(type)) { - *range_max = max; - *range_min = 
prev; - } else { - mas->max = max; - mas->min = prev; - } -} - -/* - * mas_descend_walk(): Locates a value and sets the mas->node and slot - * accordingly. range_min and range_max are set to the range which the entry is - * valid. - * @mas: The maple state - * @*range_min: A pointer to store the minimum of the range - * @*range_max: A pointer to store the maximum of the range - * - * Check mas->node is still valid on return of any value. - * - * Return: true if pointing to a valid node and offset. False otherwise. - */ -static inline bool mas_descend_walk(struct ma_state *mas, - unsigned long *range_min, unsigned long *range_max) -{ - struct maple_enode *next; - struct maple_node *node; - enum maple_type type; - - next = mas->node; - while (true) { - node = mte_to_node(next); - type = mte_node_type(next); - mas_node_walk(mas, node, type, range_min, range_max); - next = mas_slot(mas, ma_slots(node, type), mas->offset); - if (unlikely(ma_dead_node(node))) - return false; - - if (unlikely(ma_is_leaf(type))) - return true; - - /* Descend. */ - mas->node = next; - } - return false; -} - -/* - * mas_tree_walk() - Walk to @mas->index and set the range values. - * @mas: The maple state. - * @*range_min: The minimum range to be set. - * @*range_max: The maximum range to be set. - * - * Ranges are only valid if there is a valid entry at @mas->index. - * - * Return: True if a value exists, false otherwise. 
- */ -static inline bool mas_tree_walk(struct ma_state *mas, unsigned long *range_min, - unsigned long *range_max) -{ - bool ret; - -retry: - ret = false; - mas_start(mas); - if (mas_is_none(mas)) - goto not_found; - - if (mas_is_ptr(mas)) { - *range_min = *range_max = 0; - if (!mas->index) - return true; - - goto not_found; - } - - ret = mas_descend_walk(mas, range_min, range_max); - if (unlikely(mte_dead_node(mas->node))) { - mas->node = MAS_START; - goto retry; - } - - return ret; - -not_found: - mas->offset = MAPLE_NODE_SLOTS; - return false; -} - -static inline void *mas_range_load(struct ma_state *mas, - unsigned long *range_min, unsi |
