| author | Namhyung Kim <namhyung@kernel.org> | 2023-03-14 16:42:29 -0700 |
|---|---|---|
| committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2023-03-15 10:34:33 -0300 |
| commit | 56ec9457a4a20c5e07ad94bfb6e23077d54cb28e | |
| tree | cafe1144949eb53e95bfbbfc4e2ce9fb87e04c1f /tools/perf/util/bpf-filter.c | |
| parent | 990a71e904f6ec2d7d84eecb37e5127b75721985 | |
perf bpf filter: Implement event sample filtering
The BPF program is attached to a perf_event and triggered when the event
overflows. It iterates the filters map and compares the sample value
according to each expression; if any comparison fails, the sample is
dropped.
The expression also needs the corresponding sample data to be present,
so the program compares data->sample_flags with the given value. To
access the sample data, it uses the bpf_cast_to_kern_ctx() kfunc, which
was added in the v6.2 kernel.
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Hao Luo <haoluo@google.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: James Clark <james.clark@arm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Song Liu <song@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20230314234237.3008956-2-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
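The BPF program itself is built into the generated skeleton (bpf_skel/sample_filter.skel.h) and does not appear in this path-limited diff, which only covers tools/perf/util/bpf-filter.c. As a rough illustration of the mechanism described in the commit message, here is a minimal sketch of a perf_event-attached filter that uses bpf_cast_to_kern_ctx(). The entry layout, the MAX_FILTERS bound, the FILTER_OP_GT encoding and the "flags == 0 means unused" convention are assumptions for illustration only, not the actual perf sources.

```c
// SPDX-License-Identifier: GPL-2.0
/*
 * Minimal sketch of a perf_event-attached sample filter; NOT the actual
 * tools/perf/util/bpf_skel/sample_filter.bpf.c.  Entry layout, MAX_FILTERS,
 * FILTER_OP_GT and the end-of-filters convention are assumptions.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define MAX_FILTERS	32		/* hypothetical upper bound */

/* assumed mirror of the entries written by perf_bpf_filter__prepare() */
struct filter_entry {
	__u32 op;			/* comparison operator */
	__u64 flags;			/* required PERF_SAMPLE_* bits */
	__u64 value;			/* user-supplied constant */
};

enum { FILTER_OP_GT = 1 };		/* assumed operator encoding */

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_FILTERS);
	__type(key, int);
	__type(value, struct filter_entry);
} filters SEC(".maps");

/* kfunc added in v6.2: converts the UAPI context to the in-kernel view */
extern void *bpf_cast_to_kern_ctx(void *ctx) __ksym;

SEC("perf_event")
int perf_sample_filter(struct bpf_perf_event_data *ctx)
{
	struct bpf_perf_event_data_kern *kctx;
	struct perf_sample_data *data;
	int i;

	kctx = bpf_cast_to_kern_ctx(ctx);
	data = kctx->data;

	for (i = 0; i < MAX_FILTERS; i++) {
		struct filter_entry *e = bpf_map_lookup_elem(&filters, &i);

		if (!e || !e->flags)	/* no more configured filters */
			break;

		/* the sample must carry the data the expression refers to */
		if ((data->sample_flags & e->flags) == 0)
			return 0;	/* drop the sample */

		/* illustrative comparison: only GT on the sample period */
		if (e->op == FILTER_OP_GT && !(data->period > e->value))
			return 0;	/* drop the sample */
	}

	return 1;			/* keep the sample */
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";
```

Returning 0 from a perf_event BPF program suppresses the overflow output, which is how a failing comparison drops the sample.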
Diffstat (limited to 'tools/perf/util/bpf-filter.c')
| -rw-r--r-- | tools/perf/util/bpf-filter.c | 64 |
1 file changed, 64 insertions, 0 deletions
diff --git a/tools/perf/util/bpf-filter.c b/tools/perf/util/bpf-filter.c
index c72e35d51240..f20e1bc03778 100644
--- a/tools/perf/util/bpf-filter.c
+++ b/tools/perf/util/bpf-filter.c
@@ -1,10 +1,74 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <stdlib.h>
+#include <bpf/bpf.h>
+#include <linux/err.h>
+#include <internal/xyarray.h>
+
+#include "util/debug.h"
+#include "util/evsel.h"
+
 #include "util/bpf-filter.h"
 #include "util/bpf-filter-flex.h"
 #include "util/bpf-filter-bison.h"
+#include "bpf_skel/sample-filter.h"
+#include "bpf_skel/sample_filter.skel.h"
+
+#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
+
+int perf_bpf_filter__prepare(struct evsel *evsel)
+{
+	int i, x, y, fd;
+	struct sample_filter_bpf *skel;
+	struct bpf_program *prog;
+	struct bpf_link *link;
+	struct perf_bpf_filter_expr *expr;
+
+	skel = sample_filter_bpf__open_and_load();
+	if (!skel) {
+		pr_err("Failed to load perf sample-filter BPF skeleton\n");
+		return -1;
+	}
+
+	i = 0;
+	fd = bpf_map__fd(skel->maps.filters);
+	list_for_each_entry(expr, &evsel->bpf_filters, list) {
+		struct perf_bpf_filter_entry entry = {
+			.op = expr->op,
+			.flags = expr->sample_flags,
+			.value = expr->val,
+		};
+		bpf_map_update_elem(fd, &i, &entry, BPF_ANY);
+		i++;
+	}
+
+	prog = skel->progs.perf_sample_filter;
+	for (x = 0; x < xyarray__max_x(evsel->core.fd); x++) {
+		for (y = 0; y < xyarray__max_y(evsel->core.fd); y++) {
+			link = bpf_program__attach_perf_event(prog, FD(evsel, x, y));
+			if (IS_ERR(link)) {
+				pr_err("Failed to attach perf sample-filter program\n");
+				return PTR_ERR(link);
+			}
+		}
+	}
+	evsel->bpf_skel = skel;
+	return 0;
+}
+
+int perf_bpf_filter__destroy(struct evsel *evsel)
+{
+	struct perf_bpf_filter_expr *expr, *tmp;
+
+	list_for_each_entry_safe(expr, tmp, &evsel->bpf_filters, list) {
+		list_del(&expr->list);
+		free(expr);
+	}
+	sample_filter_bpf__destroy(evsel->bpf_skel);
+	return 0;
+}
+
 struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(unsigned long sample_flags,
						       enum perf_bpf_filter_op op,
						       unsigned long val)
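For context on how the two new functions are meant to be used, here is a hypothetical call-site sketch. The wiring into the evsel open/close paths is done by other patches in this series; the helper names apply_sample_filter() and release_sample_filter() below are invented for illustration.

```c
/* Hypothetical call-site sketch; helper names are invented for illustration. */
#include <linux/list.h>

#include "util/evsel.h"
#include "util/bpf-filter.h"

/*
 * After the evsel's perf_event fds are opened: populate the filters map from
 * the parsed expressions on evsel->bpf_filters and attach the BPF program to
 * every cpu/thread fd of the event.
 */
static int apply_sample_filter(struct evsel *evsel)
{
	if (list_empty(&evsel->bpf_filters))
		return 0;
	return perf_bpf_filter__prepare(evsel);
}

/* On teardown: free the parsed expressions and destroy the BPF skeleton. */
static void release_sample_filter(struct evsel *evsel)
{
	perf_bpf_filter__destroy(evsel);
}
```

The attach loop in perf_bpf_filter__prepare() uses the FD() macro to walk the xyarray of per-cpu/per-thread file descriptors, so a single loaded program instance filters every mmap'd instance of the event.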
