author    Alexei Starovoitov <ast@kernel.org>  2022-08-10 10:12:48 -0700
committer Alexei Starovoitov <ast@kernel.org>  2022-08-10 10:12:49 -0700
commit    e7c677bdd03d54e9a1bafcaf1faf5c573a506bba
tree      c52167a536ba9d17dff5c4762b881bfaafd49899
parent    86f44fcec22ce2979507742bc53db8400e454f46
parent    c5c0981fd81d35233d625631f13000544c108c53
Merge branch 'fixes for bpf map iterator'
Hou Tao says:

====================
From: Hou Tao <houtao1@huawei.com>

Hi,

The patchset constitutes three fixes for the bpf map iterator:

(1) patch 1~4: fix a use-after-free when reading the map iterator fd
    It is possible when both the corresponding link fd and map fd are closed
    before reading the iterator fd. I had squashed these four patches into
    one, but that was not friendly for stable backport, so in the end I broke
    the fixes out into four separate patches. Patch 7 is its testing patch.
(2) patch 5: fix the validity check for value access in the sk local storage map
    Patch 8 adds two tests for it.
(3) patch 6: reject sleepable programs for non-resched map iterators
    Patch 9 adds a test for it.

Please check the individual patches for more details, and comments are always
welcome.

Regards,
Tao

Changes since v2:
* patch 1~6: update commit messages (from Yonghong & Martin)
* patch 7: add more detailed comments (from Yonghong)
* patch 8: use NULL directly instead of (void *)0

v1: https://lore.kernel.org/bpf/20220806074019.2756957-1-houtao@huaweicloud.com
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
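The use-after-free in (1) is reachable entirely from userspace by juggling the
three fds involved. Below is a minimal libbpf sketch of that ordering; it is an
illustration only, not the selftest added in patch 7, and it assumes a
bpf_iter/bpf_sk_storage_map program has already been loaded (the `prog` handle
is a placeholder). Error handling is omitted.

/*
 * Sketch (assumption, not part of this series): create an iterator fd for an
 * sk storage map, drop the map fd and the link, then read the iterator fd.
 */
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static void read_iter_after_close(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC);
	LIBBPF_OPTS(bpf_iter_attach_opts, iter_opts);
	union bpf_iter_link_info linfo = {};
	struct bpf_link *link;
	int map_fd, iter_fd;
	char buf[64];

	/* sk storage maps take a 4-byte key (a socket fd) and no preallocation */
	map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, NULL, 4, 8, 0, &map_opts);

	/* pin the iterator program to this map and create the iterator fd */
	linfo.map.map_fd = map_fd;
	iter_opts.link_info = &linfo;
	iter_opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &iter_opts);
	iter_fd = bpf_iter_create(bpf_link__fd(link));

	/* close both the map fd and the link before touching the iterator */
	close(map_fd);
	bpf_link__destroy(link);

	/* without the uref taken in .init_seq_private, this read may walk an
	 * already-freed map on kernels lacking these fixes
	 */
	read(iter_fd, buf, sizeof(buf));
	close(iter_fd);
}

With the fixes applied, the uref acquired in .init_seq_private keeps the map
alive until the iterator fd is released and .fini_seq_private drops it, so the
final read() above is safe.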
Diffstat (limited to 'net')
-rw-r--r--  net/core/bpf_sk_storage.c | 12
-rw-r--r--  net/core/sock_map.c       | 20
2 files changed, 29 insertions, 3 deletions
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index a25ec93729b9..1b7f385643b4 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -875,10 +875,18 @@ static int bpf_iter_init_sk_storage_map(void *priv_data,
 {
 	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;
 
+	bpf_map_inc_with_uref(aux->map);
 	seq_info->map = aux->map;
 	return 0;
 }
 
+static void bpf_iter_fini_sk_storage_map(void *priv_data)
+{
+	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;
+
+	bpf_map_put_with_uref(seq_info->map);
+}
+
 static int bpf_iter_attach_map(struct bpf_prog *prog,
 			       union bpf_iter_link_info *linfo,
 			       struct bpf_iter_aux_info *aux)
@@ -896,7 +904,7 @@ static int bpf_iter_attach_map(struct bpf_prog *prog,
 	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
 		goto put_map;
 
-	if (prog->aux->max_rdonly_access > map->value_size) {
+	if (prog->aux->max_rdwr_access > map->value_size) {
 		err = -EACCES;
 		goto put_map;
 	}
@@ -924,7 +932,7 @@ static const struct seq_operations bpf_sk_storage_map_seq_ops = {
 static const struct bpf_iter_seq_info iter_seq_info = {
 	.seq_ops = &bpf_sk_storage_map_seq_ops,
 	.init_seq_private = bpf_iter_init_sk_storage_map,
-	.fini_seq_private = NULL,
+	.fini_seq_private = bpf_iter_fini_sk_storage_map,
 	.seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info),
 };
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 028813dfecb0..9a9fb9487d63 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -783,13 +783,22 @@ static int sock_map_init_seq_private(void *priv_data,
 {
 	struct sock_map_seq_info *info = priv_data;
 
+	bpf_map_inc_with_uref(aux->map);
 	info->map = aux->map;
 	return 0;
 }
 
+static void sock_map_fini_seq_private(void *priv_data)
+{
+	struct sock_map_seq_info *info = priv_data;
+
+	bpf_map_put_with_uref(info->map);
+}
+
 static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
 	.seq_ops = &sock_map_seq_ops,
 	.init_seq_private = sock_map_init_seq_private,
+	.fini_seq_private = sock_map_fini_seq_private,
 	.seq_priv_size = sizeof(struct sock_map_seq_info),
 };
@@ -1369,18 +1378,27 @@ static const struct seq_operations sock_hash_seq_ops = {
 };
 
 static int sock_hash_init_seq_private(void *priv_data,
-				     struct bpf_iter_aux_info *aux)
+				      struct bpf_iter_aux_info *aux)
 {
 	struct sock_hash_seq_info *info = priv_data;
 
+	bpf_map_inc_with_uref(aux->map);
 	info->map = aux->map;
 	info->htab = container_of(aux->map, struct bpf_shtab, map);
 	return 0;
 }
 
+static void sock_hash_fini_seq_private(void *priv_data)
+{
+	struct sock_hash_seq_info *info = priv_data;
+
+	bpf_map_put_with_uref(info->map);
+}
+
 static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
 	.seq_ops = &sock_hash_seq_ops,
 	.init_seq_private = sock_hash_init_seq_private,
+	.fini_seq_private = sock_hash_fini_seq_private,
 	.seq_priv_size = sizeof(struct sock_hash_seq_info),
 };