author      Andrii Nakryiko <andrii@kernel.org>              2025-01-28 17:22:46 -0800
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2025-02-27 04:30:18 -0800
commit      d95607a5f2f9bb08194c9deaf4a5f3e8ba59a9d4 (patch)
tree        cdd9092f6b869f88c7faac4eefe7ac2770279e18 /kernel
parent      1c81ba1913fba71b413123e7364c654247f09eca (diff)
bpf: avoid holding freeze_mutex during mmap operation
[ Upstream commit bc27c52eea189e8f7492d40739b7746d67b65beb ]

We use map->freeze_mutex to prevent races between map_freeze() and memory-mapping BPF map contents with writable permissions. The way we naively do this means we hold freeze_mutex for the entire duration of all the mm and VMA manipulations, which is completely unnecessary. It can potentially also lead to deadlocks, as reported by syzbot in [0].

So, instead, hold freeze_mutex only during the writeability checks, proactively bump the "write active" count for the map, unlock the mutex, and proceed with the mmap logic. Only if something goes wrong during the mmap logic is that "write active" counter increment undone.

[0] https://lore.kernel.org/bpf/678dcbc9.050a0220.303755.0066.GAE@google.com/

Fixes: fc9702273e2e ("bpf: Add mmap() support for BPF_MAP_TYPE_ARRAY")
Reported-by: syzbot+4dc041c686b7c816a71e@syzkaller.appspotmail.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20250129012246.1515826-2-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
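Pieced together from the hunks below (a sketch only; the unchanged validity checks and the vm_ops/VM-flag setup are elided into comments), the resulting bpf_map_mmap() flow is roughly:

static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct bpf_map *map = filp->private_data;
        int err = 0;

        /* ... earlier validity checks elided ... */

        mutex_lock(&map->freeze_mutex);
        if (vma->vm_flags & VM_WRITE) {
                if (map->frozen) {
                        err = -EPERM;
                        goto out;
                }
                /* ... BPF_F_RDONLY_PROG check sets err = -EACCES ... */
                /* proactively account the writable mapping while still locked */
                bpf_map_write_active_inc(map);
        }
out:
        mutex_unlock(&map->freeze_mutex);   /* drop the lock before any mm/VMA work */
        if (err)
                return err;

        /* ... set vma->vm_ops, vm_private_data, adjust VM_MAYEXEC/VM_MAYWRITE ... */

        err = map->ops->map_mmap(map, vma);
        if (err && (vma->vm_flags & VM_WRITE))
                bpf_map_write_active_dec(map);   /* undo the proactive bump on failure */

        return err;
}

The key point is that the proactive bpf_map_write_active_inc() happens inside the (now short) critical section, so map_freeze() cannot slip in between the frozen check and the bump, while all of the mm and VMA work runs with freeze_mutex already dropped.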
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/syscall.c | 17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index fa43f26ce0da..3200372ea28c 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -936,7 +936,7 @@ static const struct vm_operations_struct bpf_map_default_vmops = {
 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct bpf_map *map = filp->private_data;
-	int err;
+	int err = 0;
 
 	if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record))
 		return -ENOTSUPP;
@@ -960,7 +960,12 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 			err = -EACCES;
 			goto out;
 		}
+		bpf_map_write_active_inc(map);
 	}
+out:
+	mutex_unlock(&map->freeze_mutex);
+	if (err)
+		return err;
 
 	/* set default open/close callbacks */
 	vma->vm_ops = &bpf_map_default_vmops;
@@ -977,13 +982,11 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 		vm_flags_clear(vma, VM_MAYWRITE);
 
 	err = map->ops->map_mmap(map, vma);
-	if (err)
-		goto out;
+	if (err) {
+		if (vma->vm_flags & VM_WRITE)
+			bpf_map_write_active_dec(map);
+	}
 
-	if (vma->vm_flags & VM_WRITE)
-		bpf_map_write_active_inc(map);
-out:
-	mutex_unlock(&map->freeze_mutex);
 	return err;
 }
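For context, here is a userspace sketch of the interplay this locking arbitrates. It assumes libbpf (the v0.7+ APIs bpf_map_create() and bpf_map_freeze()), CAP_BPF or equivalent privileges, and an arbitrary map name; error handling is minimal:

/* Sketch: create an mmap-able array map, then exercise writable mmap()
 * vs. BPF_MAP_FREEZE, the two paths serialized by map->freeze_mutex and
 * the write-active count.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int main(void)
{
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
        long psz = sysconf(_SC_PAGESIZE);
        int map_fd;
        void *mem;

        map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmap_demo",
                                sizeof(__u32), sizeof(__u64), 1, &opts);
        if (map_fd < 0)
                return 1;

        /* Writable mapping: bpf_map_mmap() bumps the map's write-active count. */
        mem = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        if (mem == MAP_FAILED)
                return 1;

        /* While a writable mapping exists, freezing should be refused,
         * because map_freeze() sees the non-zero write-active count. */
        if (!bpf_map_freeze(map_fd))
                fprintf(stderr, "unexpected: freeze succeeded\n");

        munmap(mem, psz);               /* drops the write-active count */
        bpf_map_freeze(map_fd);         /* now expected to succeed */

        /* A frozen map refuses new writable mappings with -EPERM. */
        mem = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        printf("writable mmap after freeze: %s\n",
               mem == MAP_FAILED ? strerror(errno) : "unexpectedly succeeded");
        return 0;
}

While the writable mapping is alive, map_freeze() fails (with -EBUSY in current kernels) because of the counter bumped in bpf_map_mmap(); after freezing, a new PROT_WRITE mapping hits the -EPERM path shown in the second hunk. That is exactly the state the now much shorter freeze_mutex critical section has to arbitrate.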