author     Johannes Berg <johannes.berg@intel.com>      2021-01-13 22:09:44 +0100
committer  Richard Weinberger <richard@nod.at>          2021-02-12 21:37:38 +0100
commit     bfc58e2b98e99737409cd9f4d86a79677c5b887c
tree       e73fa8fa0c10f6129ed4b80028cba003dc5d69c5  /arch/um/kernel/tlb.c
parent     9f0b4807a44ff81cf59421c8a86641efec586610
um: remove process stub VMA
This mostly reverts the old commit 3963333fe676 ("uml: cover stubs
with a VMA"), which added a VMA on top of the existing stub PTEs.
However, there's no real reason to have those PTEs in the first
place, and the VMA cannot be 'fixed' in place, which leads to bugs:
userspace could try to unmap it and then be forcefully killed, or
similar. It also leaves an ugly hole in userspace's address space.
Simplify all this: just install the stub code/page at the top of
the (inner) address space, i.e. put it just above TASK_SIZE. The
pages are simply hard-coded to be mapped in the userspace process
we use to implement an mm context, and they're out of reach of the
inner mmap/munmap/mprotect etc. since they're above TASK_SIZE.
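For illustration, a minimal header-style sketch of the resulting
layout (the placement and page split here are assumptions for the
sketch, not the patch's literal definitions):

	/* Sketch (assumed layout): the stub pages live immediately above
	 * TASK_SIZE, so any address the inner kernel validates against
	 * TASK_SIZE can never reach them, and no VMA or PTE bookkeeping
	 * is needed for them. */
	#define STUB_START	TASK_SIZE		/* assumed: directly above */
	#define STUB_CODE	STUB_START		/* one page of stub code */
	#define STUB_DATA	(STUB_CODE + PAGE_SIZE)	/* one page of stub data */
	#define STUB_END	(STUB_DATA + PAGE_SIZE)

Since the inner kernel rejects any mmap/munmap/mprotect request that
reaches to or past TASK_SIZE, userspace can never name these
addresses at all.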
Getting rid of the VMA also means vma_merge() no longer hits one of
the VM_WARN_ON()s there, which fired because we installed a VMA even
though that code assumes the stack VMA is the first one.
It also removes a lockdep warning about mmap_sem usage since we no
longer have uml_setup_stubs() and thus no longer need to do any
manipulation that would require mmap_sem in activate_mm().
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Diffstat (limited to 'arch/um/kernel/tlb.c')
-rw-r--r--  arch/um/kernel/tlb.c | 15 ---------------
1 file changed, 0 insertions(+), 15 deletions(-)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 5be1b0da9f3b..bc38f79ca3a3 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -125,9 +125,6 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
 	struct host_vm_op *last;
 	int fd = -1, ret = 0;
 
-	if (virt + len > STUB_START && virt < STUB_END)
-		return -EINVAL;
-
 	if (hvc->userspace)
 		fd = phys_mapping(phys, &offset);
 	else
@@ -165,9 +162,6 @@ static int add_munmap(unsigned long addr, unsigned long len,
 	struct host_vm_op *last;
 	int ret = 0;
 
-	if (addr + len > STUB_START && addr < STUB_END)
-		return -EINVAL;
-
 	if (hvc->index != 0) {
 		last = &hvc->ops[hvc->index - 1];
 		if ((last->type == MUNMAP) &&
@@ -195,9 +189,6 @@ static int add_mprotect(unsigned long addr, unsigned long len,
 	struct host_vm_op *last;
 	int ret = 0;
 
-	if (addr + len > STUB_START && addr < STUB_END)
-		return -EINVAL;
-
 	if (hvc->index != 0) {
 		last = &hvc->ops[hvc->index - 1];
 		if ((last->type == MPROTECT) &&
@@ -232,9 +223,6 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if ((addr >= STUB_START) && (addr < STUB_END))
-			continue;
-
 		r = pte_read(*pte);
 		w = pte_write(*pte);
 		x = pte_exec(*pte);
@@ -478,9 +466,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
 
 	address &= PAGE_MASK;
 
-	if (address >= STUB_START && address < STUB_END)
-		goto kill;
-
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
 		goto kill;
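All five removed guards are instances of the same half-open
interval-overlap test; a small self-contained sketch (the helper name
is mine, for illustration):

	/* [addr, addr + len) overlaps [stub_start, stub_end) exactly when
	 * the request ends after the stub begins and begins before the
	 * stub ends. */
	static inline int overlaps_stub(unsigned long addr, unsigned long len,
					unsigned long stub_start,
					unsigned long stub_end)
	{
		return addr + len > stub_start && addr < stub_end;
	}

With the stub above TASK_SIZE, no address that can reach these
functions satisfies that condition, so the checks, along with the
stub special cases in update_pte_range() and flush_tlb_page(), can
simply be deleted.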