-rw-r--r-- | tools/testing/selftests/bpf/prog_tests/map_btf.c   | 98
-rw-r--r-- | tools/testing/selftests/bpf/progs/map_in_map_btf.c | 73
-rw-r--r-- | tools/testing/selftests/bpf/progs/normal_map_btf.c | 56
3 files changed, 227 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/map_btf.c b/tools/testing/selftests/bpf/prog_tests/map_btf.c
new file mode 100644
index 000000000000..2c4ef6037573
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_btf.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <test_progs.h>
+
+#include "normal_map_btf.skel.h"
+#include "map_in_map_btf.skel.h"
+
+static void do_test_normal_map_btf(void)
+{
+	struct normal_map_btf *skel;
+	int i, err, new_fd = -1;
+	int map_fd_arr[64];
+
+	skel = normal_map_btf__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_load"))
+		return;
+
+	err = normal_map_btf__attach(skel);
+	if (!ASSERT_OK(err, "attach"))
+		goto out;
+
+	skel->bss->pid = getpid();
+	usleep(1);
+	ASSERT_TRUE(skel->bss->done, "done");
+
+	/* Use percpu_array to slow bpf_map_free_deferred() down.
+	 * The memory allocation may fail, so doesn't check the returned fd.
+	 */
+	for (i = 0; i < ARRAY_SIZE(map_fd_arr); i++)
+		map_fd_arr[i] = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, 4, 4, 256, NULL);
+
+	/* Close array fd later */
+	new_fd = dup(bpf_map__fd(skel->maps.array));
+out:
+	normal_map_btf__destroy(skel);
+	if (new_fd < 0)
+		return;
+	/* Use kern_sync_rcu() to wait for the start of the free of the bpf
+	 * program and use an assumed delay to wait for the release of the map
+	 * btf which is held by other maps (e.g, bss). After that, array map
+	 * holds the last reference of map btf.
+	 */
+	kern_sync_rcu();
+	usleep(4000);
+	/* Spawn multiple kworkers to delay the invocation of
+	 * bpf_map_free_deferred() for array map.
+	 */
+	for (i = 0; i < ARRAY_SIZE(map_fd_arr); i++) {
+		if (map_fd_arr[i] < 0)
+			continue;
+		close(map_fd_arr[i]);
+	}
+	close(new_fd);
+}
+
+static void do_test_map_in_map_btf(void)
+{
+	int err, zero = 0, new_fd = -1;
+	struct map_in_map_btf *skel;
+
+	skel = map_in_map_btf__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_load"))
+		return;
+
+	err = map_in_map_btf__attach(skel);
+	if (!ASSERT_OK(err, "attach"))
+		goto out;
+
+	skel->bss->pid = getpid();
+	usleep(1);
+	ASSERT_TRUE(skel->bss->done, "done");
+
+	/* Close inner_array fd later */
+	new_fd = dup(bpf_map__fd(skel->maps.inner_array));
+	/* Defer the free of inner_array */
+	err = bpf_map__delete_elem(skel->maps.outer_array, &zero, sizeof(zero), 0);
+	ASSERT_OK(err, "delete inner map");
+out:
+	map_in_map_btf__destroy(skel);
+	if (new_fd < 0)
+		return;
+	/* Use kern_sync_rcu() to wait for the start of the free of the bpf
+	 * program and use an assumed delay to wait for the free of the outer
+	 * map and the release of map btf. After that, inner map holds the last
+	 * reference of map btf.
+	 */
+	kern_sync_rcu();
+	usleep(10000);
+	close(new_fd);
+}
+
+void test_map_btf(void)
+{
+	if (test__start_subtest("array_btf"))
+		do_test_normal_map_btf();
+	if (test__start_subtest("inner_array_btf"))
+		do_test_map_in_map_btf();
+}
diff --git a/tools/testing/selftests/bpf/progs/map_in_map_btf.c b/tools/testing/selftests/bpf/progs/map_in_map_btf.c
new file mode 100644
index 000000000000..7a1336d7b16a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_in_map_btf.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+	__u64 data;
+	struct bpf_list_node node;
+};
+
+struct map_value {
+	struct bpf_list_head head __contains(node_data, node);
+	struct bpf_spin_lock lock;
+};
+
+struct inner_array_type {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, int);
+	__type(value, struct map_value);
+	__uint(max_entries, 1);
+} inner_array SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+	__uint(key_size, 4);
+	__uint(value_size, 4);
+	__uint(max_entries, 1);
+	__array(values, struct inner_array_type);
+} outer_array SEC(".maps") = {
+	.values = {
+		[0] = &inner_array,
+	},
+};
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+bool done = false;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int add_to_list_in_inner_array(void *ctx)
+{
+	struct map_value *value;
+	struct node_data *new;
+	struct bpf_map *map;
+	int zero = 0;
+
+	if (done || (u32)bpf_get_current_pid_tgid() != pid)
+		return 0;
+
+	map = bpf_map_lookup_elem(&outer_array, &zero);
+	if (!map)
+		return 0;
+
+	value = bpf_map_lookup_elem(map, &zero);
+	if (!value)
+		return 0;
+
+	new = bpf_obj_new(typeof(*new));
+	if (!new)
+		return 0;
+
+	bpf_spin_lock(&value->lock);
+	bpf_list_push_back(&value->head, &new->node);
+	bpf_spin_unlock(&value->lock);
+	done = true;
+
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/normal_map_btf.c b/tools/testing/selftests/bpf/progs/normal_map_btf.c
new file mode 100644
index 000000000000..66cde82aa86d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/normal_map_btf.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+	__u64 data;
+	struct bpf_list_node node;
+};
+
+struct map_value {
+	struct bpf_list_head head __contains(node_data, node);
+	struct bpf_spin_lock lock;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, int);
+	__type(value, struct map_value);
+	__uint(max_entries, 1);
+} array SEC(".maps");
+
+char _license[] SEC("license") = "GPL";
+
+int pid = 0;
+bool done = false;
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int add_to_list_in_array(void *ctx)
+{
+	struct map_value *value;
+	struct node_data *new;
+	int zero = 0;
+
+	if (done || (u32)bpf_get_current_pid_tgid() != pid)
+		return 0;
+
+	value = bpf_map_lookup_elem(&array, &zero);
+	if (!value)
+		return 0;
+
+	new = bpf_obj_new(typeof(*new));
+	if (!new)
+		return 0;
+
+	bpf_spin_lock(&value->lock);
+	bpf_list_push_back(&value->head, &new->node);
+	bpf_spin_unlock(&value->lock);
+	done = true;
+
+	return 0;
+}
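Not part of the patch: the tests above rely on dup()'ing a map fd so that the map (and, through it, the map's BTF) stays referenced after the skeleton is destroyed, and only the final close() kicks off the deferred free. Below is a minimal hypothetical user-space sketch of that mechanism using plain libbpf calls; the map name "demo_map" and the program itself are illustrative assumptions, and a bare array map is used here instead of the selftest's BTF-backed maps.

// Hypothetical sketch: a dup()'ed fd keeps a BPF map alive after the
// original fd is closed; only dropping the last fd frees the map.
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	struct bpf_map_info info = {};
	__u32 info_len = sizeof(info);
	int fd, copy;

	/* Plain array map; the selftest's maps additionally carry BTF. */
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_map", 4, 4, 1, NULL);
	if (fd < 0)
		return 1;

	copy = dup(fd);	/* second file reference to the same map */
	close(fd);	/* the map is not freed yet ... */

	/* ... the duplicated fd still resolves to a live map */
	if (!bpf_obj_get_info_by_fd(copy, &info, &info_len))
		printf("map id %u is still alive\n", info.id);

	close(copy);	/* last reference dropped, deferred free runs */
	return 0;
}

Built against libbpf, this prints the map id after the original fd is closed; in the selftest the same trick is what lets the array (or inner) map hold the last reference to the shared map BTF while the percpu_array maps or the outer map are still being freed.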