// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <net/if.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include "json_writer.h"
#include "main.h"
/* NOTE(review): file-scope map-id lookup table; populated and consumed
 * elsewhere in this file (not visible in this chunk) — presumably maps
 * ids to pinned paths, confirm against the table-building code.
 */
static struct hashmap *map_table;
/* True for map types that keep one value instance per possible CPU. */
static bool map_is_per_cpu(__u32 type)
{
	switch (type) {
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		return true;
	default:
		return false;
	}
}
/* True for map types whose values are other maps (inner map ids). */
static bool map_is_map_of_maps(__u32 type)
{
	switch (type) {
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		return true;
	default:
		return false;
	}
}
/* True for map types whose values are BPF programs. */
static bool map_is_map_of_progs(__u32 type)
{
	switch (type) {
	case BPF_MAP_TYPE_PROG_ARRAY:
		return true;
	default:
		return false;
	}
}
/* Translate a map type name into its numeric BPF map type.
 * Returns the type number, or -1 if the name is unknown.
 */
static int map_type_from_str(const char *type)
{
	const char *name;
	unsigned int idx = 0;

	/* Don't allow prefixing in case of possible future shadowing */
	while ((name = libbpf_bpf_map_type_str(idx)) != NULL) {
		if (strcmp(name, type) == 0)
			return idx;
		idx++;
	}
	return -1;
}
/* Allocate a buffer big enough for one value of @info's map.
 * Per-CPU maps need one 8-byte-aligned slot per possible CPU.
 * Caller owns (and must free) the returned buffer; may be NULL.
 */
static void *alloc_value(struct bpf_map_info *info)
{
	size_t size;

	if (map_is_per_cpu(info->type))
		size = round_up(info->value_size, 8) * get_possible_cpus();
	else
		size = info->value_size;

	return malloc(size);
}
/* Emit one map entry (key + value) as BTF-annotated JSON via dumper @d.
 *
 * Writes a single JSON object to d->jw. For per-CPU maps the value is
 * emitted as a "values" array holding one {cpu, value} object per
 * possible CPU. Returns 0 on success or the first btf_dumper_type()
 * error; the enclosing JSON object is closed on all paths.
 */
static int do_dump_btf(const struct btf_dumper *d,
		       struct bpf_map_info *map_info, void *key,
		       void *value)
{
	__u32 value_id;
	int ret = 0;

	/* start of key-value pair */
	jsonw_start_object(d->jw);

	/* Key is only dumped when the map's BTF describes it. */
	if (map_info->btf_key_type_id) {
		jsonw_name(d->jw, "key");
		ret = btf_dumper_type(d, map_info->btf_key_type_id, key);
		if (ret)
			goto err_end_obj;
	}

	/* Prefer the vmlinux BTF value type id when set (nonzero), else
	 * fall back to the map's own value type id.
	 */
	value_id = map_info->btf_vmlinux_value_type_id ?
		: map_info->btf_value_type_id;

	if (!map_is_per_cpu(map_info->type)) {
		jsonw_name(d->jw, "value");
		ret = btf_dumper_type(d, value_id, value);
	} else {
		unsigned int i, n, step;

		jsonw_name(d->jw, "values");
		jsonw_start_array(d->jw);
		n = get_possible_cpus();
		/* per-CPU slots are rounded up to 8-byte alignment */
		step = round_up(map_info->value_size, 8);
		for (i = 0; i < n; i++) {
			jsonw_start_object(d->jw);
			jsonw_int_field(d->jw, "cpu", i);
			jsonw_name(d->jw, "value");
			ret = btf_dumper_type(d, value_id, value + i * step);
			/* close the per-CPU object even on dump failure */
			jsonw_end_object(d->jw);
			if (ret)
				break;
		}
		jsonw_end_array(d->jw);
	}
err_end_obj:
	/* end of key-value pair */
	jsonw_end_object(d->jw);
	return ret;
}
/* Create a pretty-printing JSON writer on stdout; NULL on failure. */
static json_writer_t *get_btf_writer(void)
{
	json_writer_t *writer;

	writer = jsonw_new(stdout);
	if (writer)
		jsonw_pretty(writer, true);

	return writer;
}
/* Print one map entry as a JSON object: hex-dumped key and value, an
 * "inner_map_id" field for map-of-maps values, one value object per
 * possible CPU for per-CPU maps, and — when @btf is available — a
 * BTF-formatted "formatted" field.
 */
static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
			     unsigned char *value, struct btf *btf)
{
	jsonw_start_object(json_wtr);

	if (!map_is_per_cpu(info->type)) {
		jsonw_name(json_wtr, "key");
		print_hex_data_json(key, info->key_size);
		jsonw_name(json_wtr, "value");
		print_hex_data_json(value, info->value_size);
		if (map_is_map_of_maps(info->type))
			jsonw_uint_field(json_wtr, "inner_map_id",
					 *(unsigned int *)value);
	} else {
		unsigned int i, n, step;

		n = get_possible_cpus();
		/* per-CPU value slots are 8-byte aligned */
		step = round_up(info->value_size, 8);
		jsonw_name(json_wtr, "key");
		print_hex_data_json(key, info->key_size);
		jsonw_name(json_wtr, "values");
		jsonw_start_array(json_wtr);
		for (i = 0; i < n; i++) {
			jsonw_start_object(json_wtr);
			jsonw_int_field(json_wtr, "cpu", i);
			jsonw_name(json_wtr, "value");
			print_hex_data_json(value + i * step,
					    info->value_size);
			jsonw_end_object(json_wtr);
		}
		jsonw_end_array(json_wtr);
	}

	/* Both branches ended with this identical BTF dump; do it once. */
	if (btf) {
		struct btf_dumper d = {
			.btf = btf,
			.jw = json_wtr,
			.is_plain_text = false,
		};

		jsonw_name(json_wtr, "formatted");
		do_dump_btf(&d, info, key, value);
	}

	jsonw_end_object(json_wtr);
}
/* Plain-text entry printer for lookup failures: hex key followed by an
 * error message in place of the value.
 */
static void
print_entry_error_msg(struct bpf_map_info *info, unsigned char *key,
		      const char *error_msg)
{
	int msg_len = strlen(error_msg);
	bool wrap_names, one_line;

	/* Wrap after the labels when either field is wide; keep short
	 * pairs on a single line.
	 */
	wrap_names = info->key_size > 16 || msg_len > 16;
	one_line = !wrap_names && info->key_size + msg_len <= 24;

	printf("key:%c", wrap_names ? '\n' : ' ');
	fprint_hex(stdout, key, info->key_size, " ");

	printf(one_line ? " " : "\n");

	printf("value:%c%s", wrap_names ? '\n' : ' ', error_msg);
	printf("\n");
}
/* Report a failed map lookup for @key, in JSON or plain text depending
 * on the global output mode.
 */
static void
print_entry_error(struct bpf_map_info *map_info, void *key, int lookup_errno)
{
	bool lookup_means_no_entry;

	/* For prog_array maps or arrays of maps, failure to lookup the value
	 * means there is no entry for that key. Do not print an error message
	 * in that case.
	 */
	lookup_means_no_entry = map_is_map_of_maps(map_info->type) ||
				map_is_map_of_progs(map_info->type);
	if (lookup_means_no_entry && lookup_errno == ENOENT)
		return;

	if (json_output) {
		jsonw_start_object(json_wtr);	/* entry */
		jsonw_name(json_wtr, "key");
		print_hex_data_json(key, map_info->key_size);
		jsonw_name(json_wtr, "value");
		jsonw_start_object(json_wtr);	/* error */
		jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
		jsonw_end_object(json_wtr);	/* error */
		jsonw_end_object(json_wtr);	/* entry */
		return;
	}

	if (lookup_errno == ENOENT)
		print_entry_error_msg(map_info, key, "<no entry>");
	else if (lookup_errno == ENOSPC &&
		 map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
		print_entry_error_msg(map_info, key, "<cannot read>");
	else
		print_entry_error_msg(map_info, key, strerror(lookup_errno));
}
/* Print one map entry as plain text: hex-dumped key then value.
 * Map-of-maps values are printed as the inner map id instead of raw
 * hex; per-CPU maps print one value line per possible CPU.
 */
static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
			      unsigned char *value)
{
	if (!map_is_per_cpu(info->type)) {
		bool single_line, break_names;

		/* Wrap after the "key:"/"value:" labels when either field
		 * is wide; short pairs stay on a single line.
		 */
		break_names = info->key_size > 16 || info->value_size > 16;
		single_line = info->key_size + info->value_size <= 24 &&
			      !break_names;

		if (info->key_size) {
			printf("key:%c", break_names ? '\n' : ' ');
			fprint_hex(stdout, key, info->key_size, " ");

			printf(single_line ? " " : "\n");
		}
		if (info->value_size) {
			if (map_is_map_of_maps(info->type)) {
				/* Value bytes hold the inner map's id. */
				printf("inner_map_id:%c", break_names ? '\n' : ' ');
				printf("%u ", *(unsigned int *)value);
			} else {
				printf("value:%c", break_names ? '\n' : ' ');
				fprint_hex(stdout, value, info->value_size, " ");
			}
		}

		printf("\n");
	} else {
		unsigned int i, n, step;

		n = get_possible_cpus();
		/* per-CPU value slots are 8-byte aligned */
		step = round_up(info->value_size, 8);

		if (info->key_size) {
			printf("key:\n");
			fprint_hex(stdout, key, info->key_size, " ");
			printf("\n");
		}
		if (info->value_size) {
			for (i = 0; i < n; i++) {
				printf("value (CPU %02u):%c",
				       i, info->value_size > 16 ? '\n' : ' ');
				fprint_hex(stdout, value + i * step,
					   info->value_size, " ");
				printf("\n");
			}
		}
	}
}
/* Parse exactly @n bytes from argv strings into @val. An optional
 * leading "hex" token forces base 16; otherwise the base is
 * auto-detected per strtoul(..., 0) rules ("0x" / leading "0" / decimal).
 * @name is only used in error messages.
 *
 * Returns the argv position just past the consumed tokens, or NULL on
 * error (reported via p_err()).
 */
static char **parse_bytes(char **argv, const char *name, unsigned char *val,
			  unsigned int n)
{
	unsigned int i = 0, base = 0;
	unsigned long byte;
	char *endptr;

	if (is_prefix(*argv, "hex")) {
		base = 16;
		argv++;
	}

	while (i < n && argv[i]) {
		byte = strtoul(argv[i], &endptr, base);
		/* Reject trailing junk and values that do not fit in a
		 * byte: the previous code silently truncated out-of-range
		 * input such as "256" or "-1".
		 */
		if (*endptr || byte > 0xff) {
			p_err("error parsing byte: %s", argv[i]);
			return NULL;
		}
		val[i] = byte;
		i++;
	}

	if (i != n) {
		p_err("%s expected %u bytes got %u", name, n, i);
		return NULL;
	}

	return argv + i;
}
/* on per cpu maps we must copy the provided value on all value instances */
static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
{
	unsigned int cpu, nr_cpus, slot_size;

	if (!map_is_per_cpu(info->type))
		return;

	nr_cpus = get_possible_cpus();
	/* per-CPU value slots are 8-byte aligned */
	slot_size = round_up(info->value_size, 8);

	/* Slot 0 already holds the value; replicate it to the rest. */
	for (cpu = 1; cpu < nr_cpus; cpu++)
		memcpy(value + cpu * slot_size, value, info->value_size);
}
static int parse_elem(char **argv, struct bpf_map_info *info, void *key,
void *value, __u32 key_size, __u32 value_size,
__u32 *flags, __u32 **value_fd, __u32 open_flags)
{
if (!*argv) {
if (!key && !value)
return 0;
p_err("did not find %s", key ? "key" : "value");
return -1;
}
if (is_prefix(*argv, "key")) {
if (!key) {
if (key_size)
p_err("duplicate key");
else
p_err("unnecessary key");
return
|