path: root/arch/riscv/mm/init.c
author	Mauro Carvalho Chehab <mchehab+huawei@kernel.org>	2024-08-05 08:25:09 +0200
committer	Mauro Carvalho Chehab <mchehab+huawei@kernel.org>	2024-08-05 08:25:09 +0200
commit	2c25dcc2361949bc7da730d22de36c019c6bf1e3 (patch)
tree	3422f89adb17748bde8419918cb7959d79c773f3 /arch/riscv/mm/init.c
parent	ba5c778cab1dd3e4918f940989e771e2818afee8 (diff)
parent	de9c2c66ad8e787abec7c9d7eff4f8c3cdd28aed (diff)
Merge tag 'v6.11-rc2' into media_stage
Linux 6.11-rc2

* tag 'v6.11-rc2': (283 commits)
  Linux 6.11-rc2
  profiling: remove profile=sleep support
  arm: dts: arm: versatile-ab: Fix duplicate clock node name
  runtime constants: deal with old decrepit linkers
  clocksource: Fix brown-bag boolean thinko in cs_watchdog_read()
  cifs: update internal version number
  smb: client: fix FSCTL_GET_REPARSE_POINT against NetApp
  smb3: add dynamic tracepoints for shutdown ioctl
  cifs: Remove cifs_aio_ctx
  smb: client: handle lack of FSCTL_GET_REPARSE_POINT support
  arm64: jump_label: Ensure patched jump_labels are visible to all CPUs
  syscalls: fix syscall macros for newfstat/newfstatat
  uretprobe: change syscall number, again
  thermal: core: Update thermal zone registration documentation
  Revert "nouveau: rip out busy fence waits"
  protect the fetch of ->fd[fd] in do_dup2() from mispredictions
  x86/uaccess: Zero the 8-byte get_range case on failure on 32-bit
  riscv: Fix linear mapping checks for non-contiguous memory regions
  KVM: x86/mmu: fix determination of max NPT mapping level for private pages
  PCI: pciehp: Retain Power Indicator bits for userspace indicators
  ...
Diffstat (limited to 'arch/riscv/mm/init.c')
-rw-r--r--	arch/riscv/mm/init.c	| 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index bfa2dea95354..8b698d9609e7 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -234,8 +234,6 @@ static void __init setup_bootmem(void)
 	 */
 	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
-	phys_ram_end = memblock_end_of_DRAM();
-
 	/*
 	 * Make sure we align the start of the memory on a PMD boundary so that
 	 * at worst, we map the linear mapping with PMD mappings.
@@ -251,6 +249,16 @@ static void __init setup_bootmem(void)
 	kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
 
 	/*
+	 * The size of the linear page mapping may restrict the amount of
+	 * usable RAM.
+	 */
+	if (IS_ENABLED(CONFIG_64BIT)) {
+		max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
+		memblock_cap_memory_range(phys_ram_base,
+					  max_mapped_addr - phys_ram_base);
+	}
+
+	/*
 	 * Reserve physical address space that would be mapped to virtual
 	 * addresses greater than (void *)(-PAGE_SIZE) because:
 	 *  - This memory would overlap with ERR_PTR
@@ -266,6 +274,7 @@ static void __init setup_bootmem(void)
 		memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
 	}
 
+	phys_ram_end = memblock_end_of_DRAM();
 	min_low_pfn = PFN_UP(phys_ram_base);
 	max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
 	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
@@ -1284,8 +1293,6 @@ static void __init create_linear_mapping_page_table(void)
 		if (start <= __pa(PAGE_OFFSET) &&
 		    __pa(PAGE_OFFSET) < end)
 			start = __pa(PAGE_OFFSET);
-		if (end >= __pa(PAGE_OFFSET) + memory_limit)
-			end = __pa(PAGE_OFFSET) + memory_limit;
 
 		create_linear_mapping_range(start, end, 0, NULL);
 	}
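
The riscv change merged above reorders setup_bootmem() so that memblock_cap_memory_range() clips any RAM the linear map cannot cover before phys_ram_end is read from memblock_end_of_DRAM(). What follows is a minimal, stand-alone sketch of that ordering, not kernel code: the DRAM layout, the 64 GiB KERN_VIRT_SIZE value, and the simplified cap_memory_range()/end_of_dram() helpers are all illustrative assumptions.

/*
 * Stand-alone model of the ordering fixed above (hypothetical values,
 * not kernel code): the end of usable RAM must be sampled only after
 * the linear-mapping cap has been applied, otherwise a memory region
 * lying beyond __pa(PAGE_OFFSET) + KERN_VIRT_SIZE is still counted.
 */
#include <stdio.h>
#include <stdint.h>

#define PHYS_RAM_BASE   0x80000000ULL           /* assumed DRAM start          */
#define KERN_VIRT_SIZE  (64ULL << 30)           /* assumed 64 GiB linear map   */

/* Two discontiguous DRAM regions reported by firmware (made-up layout). */
struct region { uint64_t base, size; } dram[] = {
	{ 0x80000000ULL,   4ULL << 30 },        /* 4 GiB near the kernel       */
	{ 0x2000000000ULL, 8ULL << 30 },        /* 8 GiB high, not mappable    */
};

/*
 * Simplified model of memblock_cap_memory_range(): drop or clip regions
 * above base + size (the real helper also trims memory below base).
 */
static void cap_memory_range(uint64_t base, uint64_t size)
{
	uint64_t limit = base + size;

	for (unsigned i = 0; i < sizeof(dram) / sizeof(dram[0]); i++) {
		uint64_t end = dram[i].base + dram[i].size;

		if (dram[i].base >= limit)
			dram[i].size = 0;               /* entirely above the cap */
		else if (end > limit)
			dram[i].size = limit - dram[i].base;
	}
}

/* Model of memblock_end_of_DRAM(): highest end address of a non-empty region. */
static uint64_t end_of_dram(void)
{
	uint64_t end = 0;

	for (unsigned i = 0; i < sizeof(dram) / sizeof(dram[0]); i++)
		if (dram[i].size && dram[i].base + dram[i].size > end)
			end = dram[i].base + dram[i].size;
	return end;
}

int main(void)
{
	uint64_t early_end = end_of_dram();     /* old placement: before the cap */

	uint64_t max_mapped_addr = PHYS_RAM_BASE + KERN_VIRT_SIZE;
	cap_memory_range(PHYS_RAM_BASE, max_mapped_addr - PHYS_RAM_BASE);

	uint64_t phys_ram_end = end_of_dram();  /* new placement: after the cap  */

	printf("end before cap: %#llx\n", (unsigned long long)early_end);
	printf("end after cap:  %#llx\n", (unsigned long long)phys_ram_end);
	return 0;
}

With this assumed layout, sampling the end of DRAM before the cap reports 0x2200000000, while sampling it after the cap reports 0x180000000, which mirrors why the patched setup_bootmem() only computes phys_ram_end once the unmappable regions have been removed.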