From 0936243cabf0caf46f1a42606325ab93cfa05a6a Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 18 Apr 2023 16:36:03 +0200 Subject: arm64: entry: Preserve/restore X29 even for compat tasks Currently, the KPTI trampoline code for returning to user space takes care to only preserve X29 into FAR_EL1 for native tasks, as compat tasks don't have access to this register anyway, and so preserving it is not necessary. It also means it does not need to be restored, and so we have two code paths for returning back to user space: the native one that restores X29 from FAR_EL1, and the compat one that leaves X29 clobbered, containing the value of TTBR1_EL1, which carries a physical address pointing somewhere into the kernel image. This is needlessly complex, and given that FAR_EL1 becomes UNKNOWN after an exception return anyway, the only benefit of avoiding the preserve and restore is that we can skip the system register write and read. So let's simplify this, and collapse the two code paths into one that always preserves X29 into FAR_EL1, and always restores it again after the TTBR switch. Signed-off-by: Ard Biesheuvel Reviewed-by: Will Deacon Link: https://lore.kernel.org/r/20230418143604.1176437-2-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index ab2a6e33c052..16fbd0d9790d 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -435,13 +435,9 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 eret alternative_else_nop_endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 - bne 4f msr far_el1, x29 tramp_alias x30, tramp_exit_native, x29 br x30 -4: - tramp_alias x30, tramp_exit_compat, x29 - br x30 #endif .else ldr lr, [sp, #S_LR] @@ -740,9 +736,7 @@ alternative_else_nop_endif msr vbar_el1, x30 ldr lr, [sp, #S_LR] tramp_unmap_kernel x29 - .if \regsize == 64 mrs x29, far_el1 - .endif add sp, sp, #PT_REGS_SIZE // restore sp eret sb @@ -780,10 +774,6 @@ SYM_CODE_END(tramp_vectors) SYM_CODE_START(tramp_exit_native) tramp_exit SYM_CODE_END(tramp_exit_native) - -SYM_CODE_START(tramp_exit_compat) - tramp_exit 32 -SYM_CODE_END(tramp_exit_compat) .popsection // .entry.tramp.text #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ -- cgit v1.2.3 From 211ceca377f40ff46aef8ceb6e26cf4e7efecaf1 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 18 Apr 2023 16:36:04 +0200 Subject: arm64: entry: Simplify tramp_alias macro and tramp_exit routine The tramp_alias macro constructs the virtual alias of a symbol in the trampoline text mapping, based on its kernel text address, and does so in a way that is more convoluted than necessary. So let's simplify that. Also, now that the address of the vector table is kept in a per-CPU variable, there is no need to defer the load and the assignment of VBAR_EL1 to tramp_exit(). This means we can use a PC-relative reference to the per-CPU variable instead of storing its absolute address in a global variable in the trampoline rodata. And given that tramp_alias no longer needs a temp register, this means we can restore X30 earlier as well, and only leave X29 for tramp_exit() to restore. While at it, give some related symbols static linkage, considering that they are only referenced from the object file that defines them.
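As an aside, the address math behind the simplified macro is just a rebase of the symbol's offset within .entry.tramp.text onto the TRAMP_VALIAS fixmap alias. A minimal C sketch of that computation, using made-up placeholder addresses rather than real kernel values:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder values for illustration, not real kernel addresses. */
    #define TRAMP_VALIAS       0xfffffbfffe000000ULL /* fixmap alias of trampoline text */
    #define ENTRY_TRAMP_TEXT   0xffff800008010000ULL /* start of .entry.tramp.text     */

    /* tramp_alias reduces to TRAMP_VALIAS + (sym - .entry.tramp.text), a
     * link-time constant that the three movz/movk instructions materialize. */
    static uint64_t tramp_alias(uint64_t sym)
    {
            return TRAMP_VALIAS + (sym - ENTRY_TRAMP_TEXT);
    }

    int main(void)
    {
            uint64_t tramp_exit_sym = ENTRY_TRAMP_TEXT + 0x400; /* made up */
            printf("alias of tramp_exit: %#llx\n",
                   (unsigned long long)tramp_alias(tramp_exit_sym));
            return 0;
    }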
Signed-off-by: Ard Biesheuvel Reviewed-by: Will Deacon Link: https://lore.kernel.org/r/20230418143604.1176437-3-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 47 ++++++++++++++++++++--------------------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 16fbd0d9790d..a40e5e50fa55 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -101,12 +101,11 @@ .org .Lventry_start\@ + 128 // Did we overflow the ventry slot? .endm - .macro tramp_alias, dst, sym, tmp - mov_q \dst, TRAMP_VALIAS - adr_l \tmp, \sym - add \dst, \dst, \tmp - adr_l \tmp, .entry.tramp.text - sub \dst, \dst, \tmp + .macro tramp_alias, dst, sym + .set .Lalias\@, TRAMP_VALIAS + \sym - .entry.tramp.text + movz \dst, :abs_g2_s:.Lalias\@ + movk \dst, :abs_g1_nc:.Lalias\@ + movk \dst, :abs_g0_nc:.Lalias\@ .endm /* @@ -436,8 +435,13 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 alternative_else_nop_endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 msr far_el1, x29 - tramp_alias x30, tramp_exit_native, x29 - br x30 + + ldr_this_cpu x30, this_cpu_vector, x29 + tramp_alias x29, tramp_exit + msr vbar_el1, x30 // install vector table + ldr lr, [sp, #S_LR] // restore x30 + add sp, sp, #PT_REGS_SIZE // restore sp + br x29 #endif .else ldr lr, [sp, #S_LR] @@ -728,20 +732,6 @@ alternative_else_nop_endif .org 1b + 128 // Did we overflow the ventry slot? .endm - .macro tramp_exit, regsize = 64 - tramp_data_read_var x30, this_cpu_vector - get_this_cpu_offset x29 - ldr x30, [x30, x29] - - msr vbar_el1, x30 - ldr lr, [sp, #S_LR] - tramp_unmap_kernel x29 - mrs x29, far_el1 - add sp, sp, #PT_REGS_SIZE // restore sp - eret - sb - .endm - .macro generate_tramp_vector, kpti, bhb .Lvector_start\@: .space 0x400 @@ -762,7 +752,7 @@ alternative_else_nop_endif */ .pushsection ".entry.tramp.text", "ax" .align 11 -SYM_CODE_START_NOALIGN(tramp_vectors) +SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors) #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW @@ -771,9 +761,12 @@ SYM_CODE_START_NOALIGN(tramp_vectors) generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE SYM_CODE_END(tramp_vectors) -SYM_CODE_START(tramp_exit_native) - tramp_exit -SYM_CODE_END(tramp_exit_native) +SYM_CODE_START_LOCAL(tramp_exit) + tramp_unmap_kernel x29 + mrs x29, far_el1 // restore x29 + eret + sb +SYM_CODE_END(tramp_exit) .popsection // .entry.tramp.text #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ @@ -1067,7 +1060,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 alternative_else_nop_endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 - tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3 + tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline br x5 #endif SYM_CODE_END(__sdei_asm_handler) -- cgit v1.2.3 From 320a93d4df48a378ebf923639fa62770676b80db Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:28 +0200 Subject: arm64: xor-neon: mark xor_arm64_neon_*() static The only references to these functions are in the same file, and there is no prototype, which causes a harmless warning: arch/arm64/lib/xor-neon.c:13:6: error: no previous prototype for 'xor_arm64_neon_2' [-Werror=missing-prototypes] arch/arm64/lib/xor-neon.c:40:6: error: no previous prototype for 'xor_arm64_neon_3' [-Werror=missing-prototypes] arch/arm64/lib/xor-neon.c:76:6: error: no previous prototype for 'xor_arm64_neon_4' [-Werror=missing-prototypes] arch/arm64/lib/xor-neon.c:121:6: error: no 
previous prototype for 'xor_arm64_neon_5' [-Werror=missing-prototypes] Fixes: cc9f8349cb33 ("arm64: crypto: add NEON accelerated XOR implementation") Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-2-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/lib/xor-neon.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/lib/xor-neon.c b/arch/arm64/lib/xor-neon.c index 96b171995d19..f9a53b7f9842 100644 --- a/arch/arm64/lib/xor-neon.c +++ b/arch/arm64/lib/xor-neon.c @@ -10,7 +10,7 @@ #include #include -void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1, +static void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2) { uint64_t *dp1 = (uint64_t *)p1; @@ -37,7 +37,7 @@ void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1, } while (--lines > 0); } -void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1, +static void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3) { @@ -73,7 +73,7 @@ void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1, } while (--lines > 0); } -void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1, +static void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3, const unsigned long * __restrict p4) @@ -118,7 +118,7 @@ void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1, } while (--lines > 0); } -void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1, +static void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1, const unsigned long * __restrict p2, const unsigned long * __restrict p3, const unsigned long * __restrict p4, -- cgit v1.2.3 From aea197160d7426d876a8040f370373cf412e3ad6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:29 +0200 Subject: arm64: add scs_patch_vmlinux prototype scs_patch_vmlinux() is only called from assembler code, so there is no prototype, but adding one avoids this warning: arch/arm64/kernel/patch-scs.c:254:24: error: no previous prototype for function 'scs_patch_vmlinux' [-Werror,-Wmissing-prototypes] Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-3-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/scs.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h index 13df982a0808..3fdae5fe3142 100644 --- a/arch/arm64/include/asm/scs.h +++ b/arch/arm64/include/asm/scs.h @@ -73,6 +73,7 @@ static inline void dynamic_scs_init(void) {} #endif int scs_patch(const u8 eh_frame[], int size); +asmlinkage void scs_patch_vmlinux(void); #endif /* __ASSEMBLY __ */ -- cgit v1.2.3 From 6ac19f96515e1f5198503701d3aecf60d5bc83e5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:30 +0200 Subject: arm64: avoid prototype warnings for syscalls With W=1 warnings enabled, there are lots of complaints about missing prototypes for system calls, plus a few other related ones: arch/arm64/kernel/sys_compat.c:68:6: error: no previous prototype for 'compat_arm_syscall' [-Werror=missing-prototypes] arch/arm64/include/asm/syscall_wrapper.h:76:32: error: no previous 
prototype for '__arm64_sys_io_setup' [-Werror=missing-prototypes] arch/arm64/include/asm/syscall_wrapper.h:41:32: error: no previous prototype for '__arm64_compat_sys_io_setup' [-Werror=missing-prototypes] arch/arm64/include/asm/syscall_wrapper.h:76:32: error: no previous prototype for '__arm64_sys_io_destroy' [-Werror=missing-prototypes] arch/arm64/include/asm/syscall_wrapper.h:76:32: error: no previous prototype for '__arm64_sys_io_submit' [-Werror=missing-prototypes] Add declarations to the syscall macros to avoid all of these, plus one for the compat syscall entry. Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-4-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/compat.h | 2 ++ arch/arm64/include/asm/syscall_wrapper.h | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 74575c3d6987..ae904a1ad529 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -96,6 +96,8 @@ static inline int is_compat_thread(struct thread_info *thread) return test_ti_thread_flag(thread, TIF_32BIT); } +long compat_arm_syscall(struct pt_regs *regs, int scno); + #else /* !CONFIG_COMPAT */ static inline int is_compat_thread(struct thread_info *thread) diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h index d30217c21eff..17f687510c48 100644 --- a/arch/arm64/include/asm/syscall_wrapper.h +++ b/arch/arm64/include/asm/syscall_wrapper.h @@ -38,6 +38,7 @@ asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused) #define COND_SYSCALL_COMPAT(name) \ + asmlinkage long __arm64_compat_sys_##name(const struct pt_regs *regs); \ asmlinkage long __weak __arm64_compat_sys_##name(const struct pt_regs *regs) \ { \ return sys_ni_syscall(); \ @@ -53,6 +54,7 @@ ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \ asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \ { \ return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \ @@ -73,11 +75,13 @@ asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused) #define COND_SYSCALL(name) \ + asmlinkage long __arm64_sys_##name(const struct pt_regs *regs); \ asmlinkage long __weak __arm64_sys_##name(const struct pt_regs *regs) \ { \ return sys_ni_syscall(); \ } +asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused); #define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers); #endif /* __ASM_SYSCALL_WRAPPER_H */ -- cgit v1.2.3 From ec3a3db7100ddebb32ed5b1052c1cd1136057230 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:31 +0200 Subject: arm64: move cpu_suspend_set_dbg_restorer() prototype to header The cpu_suspend_set_dbg_restorer() function is called by the hw_breakpoint code but defined in another file. Since the declaration is in the same file as the caller, the compiler warns about the definition without a prior prototype: arch/arm64/kernel/suspend.c:35:13: error: no previous prototype for 'cpu_suspend_set_dbg_restorer' [-Werror=missing-prototypes] Move it into the corresponding header instead to avoid the warning. 
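The header move relies on the common kernel idiom of pairing the real declaration with an empty static inline stub, so callers never need #ifdefs of their own. A minimal sketch of the pattern, with hypothetical names (CONFIG_EXAMPLE_PM and example_set_restorer() are made up):

    /* example.h */
    #ifdef CONFIG_EXAMPLE_PM
    extern void example_set_restorer(int (*restore)(unsigned int));
    #else
    static inline void example_set_restorer(int (*restore)(unsigned int))
    {
    }
    #endif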
Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-5-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/hw_breakpoint.h | 8 ++++++++ arch/arm64/kernel/hw_breakpoint.c | 8 -------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index fa4c6ff3aa9b..84055329cd8b 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -154,4 +154,12 @@ static inline int get_num_wrps(void) ID_AA64DFR0_EL1_WRPs_SHIFT); } +#ifdef CONFIG_CPU_PM +extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)); +#else +static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)) +{ +} +#endif + #endif /* __ASM_BREAKPOINT_H */ diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index b29a311bb055..db2a1861bb97 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -973,14 +973,6 @@ static int hw_breakpoint_reset(unsigned int cpu) return 0; } -#ifdef CONFIG_CPU_PM -extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)); -#else -static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int)) -{ -} -#endif - /* * One-time initialisation. */ -- cgit v1.2.3 From 010089e9d3fe689e439fe3b78ed859a0782fbad0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:32 +0200 Subject: arm64: spectre: provide prototypes for internal functions The helpers in proton-pack.c are called from assembler and have no prototype in a header, which causes a W=1 warning: arch/arm64/kernel/proton-pack.c:568:13: error: no previous prototype for 'spectre_v4_patch_fw_mitigation_enable' [-Werror=missing-prototypes] arch/arm64/kernel/proton-pack.c:588:13: error: no previous prototype for 'smccc_patch_fw_mitigation_conduit' [-Werror=missing-prototypes] arch/arm64/kernel/proton-pack.c:1064:14: error: no previous prototype for 'spectre_bhb_patch_loop_mitigation_enable' [-Werror=missing-prototypes] arch/arm64/kernel/proton-pack.c:1075:14: error: no previous prototype for 'spectre_bhb_patch_fw_mitigation_enabled' [-Werror=missing-prototypes] Add these to asm/spectre.h, which contains related declarations already. 
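For reference, the class of warning silenced throughout this series is easy to reproduce outside the kernel; the file and function names below are hypothetical:

    /* helper.h -- the shared prototype both sides should see */
    void helper(void);

    /* helper.c -- compiled with gcc -Wmissing-prototypes; without the
     * #include below, gcc emits:
     *   warning: no previous prototype for 'helper'
     */
    #include "helper.h"

    void helper(void)
    {
    }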
Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-6-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/spectre.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h index db7b371b367c..9cc501450486 100644 --- a/arch/arm64/include/asm/spectre.h +++ b/arch/arm64/include/asm/spectre.h @@ -100,5 +100,21 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int sco u8 spectre_bhb_loop_affected(int scope); void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr); + +void spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt, __le32 *origptr, + __le32 *updptr, int nr_inst); +void smccc_patch_fw_mitigation_conduit(struct alt_instr *alt, __le32 *origptr, + __le32 *updptr, int nr_inst); +void spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, __le32 *origptr, + __le32 *updptr, int nr_inst); +void spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt, __le32 *origptr, + __le32 *updptr, int nr_inst); +void spectre_bhb_patch_loop_iter(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst); +void spectre_bhb_patch_wa3(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst); +void spectre_bhb_patch_clearbhb(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst); + #endif /* __ASSEMBLY__ */ #endif /* __ASM_SPECTRE_H */ -- cgit v1.2.3 From 05d557a5cf59b08590cfc1bfc44bfdc0f9ac9681 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:33 +0200 Subject: arm64: kvm: add prototypes for functions called in asm A lot of kvm specific functions are called only from assembler and have no extern prototype, but that causes W=1 warnings: arch/arm64/kvm/handle_exit.c:365:24: error: no previous prototype for 'nvhe_hyp_panic_handler' [-Werror=missing-prototypes] arch/arm64/kvm/va_layout.c:188:6: error: no previous prototype for 'kvm_patch_vector_branch' [-Werror=missing-prototypes] arch/arm64/kvm/va_layout.c:287:6: error: no previous prototype for 'kvm_get_kimage_voffset' [-Werror=missing-prototypes] arch/arm64/kvm/va_layout.c:293:6: error: no previous prototype for 'kvm_compute_final_ctr_el0' [-Werror=missing-prototypes] arch/arm64/kvm/hyp/vhe/switch.c:259:17: error: no previous prototype for 'hyp_panic' [-Werror=missing-prototypes] arch/arm64/kvm/hyp/nvhe/switch.c:389:17: error: no previous prototype for 'kvm_unexpected_el2_exception' [-Werror=missing-prototypes] arch/arm64/kvm/hyp/nvhe/switch.c:384:28: error: no previous prototype for 'hyp_panic_bad_stack' [-Werror=missing-prototypes] arch/arm64/kvm/hyp/nvhe/hyp-main.c:383:6: error: no previous prototype for 'handle_trap' [-Werror=missing-prototypes] arch/arm64/kvm/hyp/nvhe/psci-relay.c:203:28: error: no previous prototype for 'kvm_host_psci_cpu_entry' [-Werror=missing-prototypes] Declare those in asm/kvm_asm.h, which already has related declarations.
Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-7-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/kvm_asm.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 43c3bc0f9544..86042afa86c3 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -267,6 +267,24 @@ extern u64 __kvm_get_mdcr_el2(void); __kvm_at_err; \ } ) +void __noreturn hyp_panic(void); +asmlinkage void kvm_unexpected_el2_exception(void); +asmlinkage void __noreturn hyp_panic(void); +asmlinkage void __noreturn hyp_panic_bad_stack(void); +asmlinkage void kvm_unexpected_el2_exception(void); +struct kvm_cpu_context; +void handle_trap(struct kvm_cpu_context *host_ctxt); +asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on); +void __noreturn __pkvm_init_finalise(void); +void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc); +void kvm_patch_vector_branch(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst); +void kvm_get_kimage_voffset(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst); +void kvm_compute_final_ctr_el0(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst); +void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt, + u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar); #else /* __ASSEMBLY__ */ -- cgit v1.2.3 From 68a879b55346588de936c99a04e665d9cdc326e6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:34 +0200 Subject: arm64: cpuidle: fix #ifdef for acpi functions The acpi_processor_ffh_lpi_* functions are defined whenever CONFIG_ACPI is enabled, but only called and declared when CONFIG_ACPI_PROCESSOR_IDLE is also enabled. 
Without that, a W=1 build triggers missing-prototype warnings, so the #ifdef needs to be adapted: arch/arm64/kernel/cpuidle.c:60:5: error: no previous prototype for 'acpi_processor_ffh_lpi_probe' [-Werror=missing-prototypes] arch/arm64/kernel/cpuidle.c:65:15: error: no previous prototype for 'acpi_processor_ffh_lpi_enter' [-Werror=missing-prototypes] Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-8-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpuidle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index 42e19fff40ee..d1f68599c29f 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -13,7 +13,7 @@ #include #include -#ifdef CONFIG_ACPI +#ifdef CONFIG_ACPI_PROCESSOR_IDLE #include -- cgit v1.2.3 From fbc0cd6f60443264af0b7aed050cae2ef38036f9 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:35 +0200 Subject: arm64: efi: add efi_handle_corrupted_x18 prototype This function is only called from assembler and lacks a prototype, which is seen from this W=1 warning: arch/arm64/kernel/efi.c:155:25: error: no previous prototype for 'efi_handle_corrupted_x18' [-Werror=missing-prototypes] Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-9-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/efi.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index f86b157a5da3..ef46f2daca62 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -166,4 +166,6 @@ static inline void efi_capsule_flush_cache_range(void *addr, int size) dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size); } +efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f); + #endif /* _ASM_EFI_H */ -- cgit v1.2.3 From b925b4314c9155b32d17e9dfda37d64c229063b7 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:36 +0200 Subject: arm64: hide unused is_valid_bugaddr() When generic BUG() support is disabled, this function has no declaration and no callers but causes a W=1 warning: arch/arm64/kernel/traps.c:950:5: error: no previous prototype for 'is_valid_bugaddr' [-Werror=missing-prototypes] Add an #ifdef that matches the one around the declaration.
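A compressed sketch of the same shape, with hypothetical names (CONFIG_EXAMPLE_BUG and report_bug_addr() are made up): guarding the definition with the option that already guards its declaration means the function simply does not exist when the option is off, rather than existing without a prototype:

    /* example.h */
    #ifdef CONFIG_EXAMPLE_BUG
    int report_bug_addr(unsigned long addr);
    #endif

    /* example.c */
    #ifdef CONFIG_EXAMPLE_BUG
    int report_bug_addr(unsigned long addr)
    {
            return 1;   /* every address accepted in this toy version */
    }
    #endif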
Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-10-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/traps.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 4bb1b8f47298..720d3780d8fd 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -947,7 +947,7 @@ void do_serror(struct pt_regs *regs, unsigned long esr) } /* GENERIC_BUG traps */ - +#ifdef CONFIG_GENERIC_BUG int is_valid_bugaddr(unsigned long addr) { /* @@ -959,6 +959,7 @@ int is_valid_bugaddr(unsigned long addr) */ return 1; } +#endif static int bug_handler(struct pt_regs *regs, unsigned long esr) { -- cgit v1.2.3 From 60a0aab7463ee69296692d980b96510ccce3934e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:37 +0200 Subject: arm64: module-plts: include linux/moduleloader.h module_frob_arch_sections() is declared in moduleloader.h, but that is not included before the definition: arch/arm64/kernel/module-plts.c:286:5: error: no previous prototype for 'module_frob_arch_sections' [-Werror=missing-prototypes] Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-11-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/module-plts.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index 543493bf924d..ad02058756b5 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -7,6 +7,7 @@ #include #include #include +#include #include static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc, -- cgit v1.2.3 From 1a1183938946fc1e06425d830a85e5aad63c049d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:38 +0200 Subject: arm64: flush: include linux/libnvdimm.h The two cache management functions are declared in libnvdimm.h but provided by architecture specific code.
Without including the header, this causes a W=1 warning: arch/arm64/mm/flush.c:96:6: error: no previous prototype for 'arch_wb_cache_pmem' [-Werror=missing-prototypes] arch/arm64/mm/flush.c:104:6: error: no previous prototype for 'arch_invalidate_pmem' [-Werror=missing-prototypes] Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-12-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/mm/flush.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 5f9379b3c8c8..4e6476094952 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -8,6 +8,7 @@ #include #include +#include #include #include -- cgit v1.2.3 From a7f5cb606e9993daf2d129efc5e3b6ca46ad9227 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:39 +0200 Subject: arm64: kaslr: add kaslr_early_init() declaration kaslr_early_init() is called from assembler code and does not need a declaration to work, but adding one anyway shuts up this W=1 warning: arch/arm64/kernel/pi/kaslr_early.c:88:16: error: no previous prototype for 'kaslr_early_init' [-Werror=missing-prototypes] Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-13-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/archrandom.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h index 2f5f3da34782..b0abc64f86b0 100644 --- a/arch/arm64/include/asm/archrandom.h +++ b/arch/arm64/include/asm/archrandom.h @@ -129,4 +129,6 @@ static inline bool __init __early_cpu_has_rndr(void) return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf; } +u64 kaslr_early_init(void *fdt); + #endif /* _ASM_ARCHRANDOM_H */ -- cgit v1.2.3 From 8ada7aab02ee9b07c8539a5c9cc452520b183a72 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:40 +0200 Subject: arm64: signal: include asm/exception.h The do_notify_resume() declaration is in a header that is not included where the function is defined, which causes a W=1 warning: arch/arm64/kernel/signal.c:1280:6: error: no previous prototype for 'do_notify_resume' [-Werror=missing-prototypes] Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-14-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/signal.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 2cfc810d0a5b..3457906ba117 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3 From e13d32e99264e0b63b01417e2f2db627f4507b97 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:41 +0200 Subject: arm64: move early_brk64 prototype to header The prototype used for calling early_brk64() is in the file that calls it, which is the wrong place, as it is not included for the definition: arch/arm64/kernel/traps.c:1100:12: error: no previous prototype for 'early_brk64' [-Werror=missing-prototypes] Move it to an appropriate header instead.
Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-15-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/traps.h | 2 ++ arch/arm64/mm/fault.c | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index 1f361e2da516..d66dfb3a72dd 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -29,6 +29,8 @@ void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *s void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str); void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str); +int early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs); + /* * Move regs->pc to next instruction and do necessary setup before it * is executed. diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index cb21ccd7940d..e99eacebb6c1 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -886,9 +886,6 @@ void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs) } NOKPROBE_SYMBOL(do_sp_pc_abort); -int __init early_brk64(unsigned long addr, unsigned long esr, - struct pt_regs *regs); - /* * __refdata because early_brk64 is __init, but the reference to it is * clobbered at arch_initcall time. -- cgit v1.2.3 From c152aed4dcc21e6d496e700148fdd85c2b0ff09c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 18:06:42 +0200 Subject: arm64: add alt_cb_patch_nops prototype alt_cb_patch_nops() is called through an inline asm macro, so it does not need a prototype for the caller, but adding it avoids this W=1 build warning: arch/arm64/kernel/alternative.c:295:14: error: no previous prototype for 'alt_cb_patch_nops' [-Werror=missing-prototypes] Signed-off-by: Arnd Bergmann Reviewed-by: Kees Cook Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20230516160642.523862-16-arnd@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/alternative.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index a38b92e11811..55da3fedcfe2 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -31,5 +31,8 @@ void apply_alternatives_module(void *start, size_t length); static inline void apply_alternatives_module(void *start, size_t length) { } #endif +void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr, + __le32 *updptr, int nr_inst); + #endif /* __ASSEMBLY__ */ #endif /* __ASM_ALTERNATIVE_H */ -- cgit v1.2.3 From de847275449a99343393a5f2a4179cf7f4d12372 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 11 May 2023 15:05:14 +0900 Subject: arm64/esr: Use GENMASK() for the ISS mask We express the mask for ESR_ELx.ISS in a non-standard manner, not using the standard helpers. In preparation for adding decode for ISS2 convert to use GENMASK(). No functional change. 
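For readers unfamiliar with the helper, a self-contained sketch of what GENMASK(h, l) computes; this mirrors the kernel definition under the assumption of a 64-bit unsigned long:

    #include <stdio.h>

    /* Same shape as the kernel's GENMASK(), assuming 64-bit unsigned long. */
    #define EXAMPLE_GENMASK(h, l) \
            ((~0UL - (1UL << (l)) + 1) & (~0UL >> (63 - (h))))

    int main(void)
    {
            /* Bits 24:0 -- prints 0x1ffffff, the same value as ESR_ELx_IL - 1 */
            printf("%#lx\n", EXAMPLE_GENMASK(24, 0));
            return 0;
    }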
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230417-arm64-iss2-dabt-decode-v3-1-c1fa503e503a@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/esr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 8487aec9b658..0bd879007168 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -75,7 +75,7 @@ #define ESR_ELx_IL_SHIFT (25) #define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT) -#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1) +#define ESR_ELx_ISS_MASK (GENMASK(24, 0)) #define ESR_ELx_ISS(esr) ((esr) & ESR_ELx_ISS_MASK) /* ISS field definitions shared by different classes */ -- cgit v1.2.3 From 1f9d4ba6839cc77717ed603fc6df1f36995da76d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 11 May 2023 15:05:15 +0900 Subject: arm64/esr: Add decode of ISS2 to data abort reporting The architecture has added more information about faults to ISS2 within ESR. Add decode of this to our data abort fault decode to aid diagnostics. Features that are not currently enabled are included here for completeness. Since the architecture specifies the values of bits within ISS2 in terms of ISS2 rather than in terms of the register as a whole, we do so for our definitions as well; this makes it easier to review bitfield definitions. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230417-arm64-iss2-dabt-decode-v3-2-c1fa503e503a@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/esr.h | 17 +++++++++++++++++ arch/arm64/mm/fault.c | 17 ++++++++++++++--- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 0bd879007168..0552a29f026b 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -77,6 +77,9 @@ #define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT) #define ESR_ELx_ISS_MASK (GENMASK(24, 0)) #define ESR_ELx_ISS(esr) ((esr) & ESR_ELx_ISS_MASK) +#define ESR_ELx_ISS2_SHIFT (32) +#define ESR_ELx_ISS2_MASK (GENMASK_ULL(55, 32)) +#define ESR_ELx_ISS2(esr) (((esr) & ESR_ELx_ISS2_MASK) >> ESR_ELx_ISS2_SHIFT) /* ISS field definitions shared by different classes */ #define ESR_ELx_WNR_SHIFT (6) @@ -140,6 +143,20 @@ #define ESR_ELx_CM_SHIFT (8) #define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT) +/* ISS2 field definitions for Data Aborts */ +#define ESR_ELx_TnD_SHIFT (10) +#define ESR_ELx_TnD (UL(1) << ESR_ELx_TnD_SHIFT) +#define ESR_ELx_TagAccess_SHIFT (9) +#define ESR_ELx_TagAccess (UL(1) << ESR_ELx_TagAccess_SHIFT) +#define ESR_ELx_GCS_SHIFT (8) +#define ESR_ELx_GCS (UL(1) << ESR_ELx_GCS_SHIFT) +#define ESR_ELx_Overlay_SHIFT (6) +#define ESR_ELx_Overlay (UL(1) << ESR_ELx_Overlay_SHIFT) +#define ESR_ELx_DirtyBit_SHIFT (5) +#define ESR_ELx_DirtyBit (UL(1) << ESR_ELx_DirtyBit_SHIFT) +#define ESR_ELx_Xs_SHIFT (0) +#define ESR_ELx_Xs_MASK (GENMASK_ULL(4, 0)) + /* ISS field definitions for exceptions taken in to Hyp */ #define ESR_ELx_CV (UL(1) << 24) #define ESR_ELx_COND_SHIFT (20) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index cb21ccd7940d..fb5e0fb8b2a7 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -66,6 +66,8 @@ static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr static void data_abort_decode(unsigned long esr) { + unsigned long iss2 = ESR_ELx_ISS2(esr); + pr_alert("Data abort info:\n"); if (esr & ESR_ELx_ISV) { @@ -78,12 +80,21 @@ static void data_abort_decode(unsigned long esr) (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
(esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT); } else { - pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK); + pr_alert(" ISV = 0, ISS = 0x%08lx, ISS2 = 0x%08lx\n", + esr & ESR_ELx_ISS_MASK, iss2); } - pr_alert(" CM = %lu, WnR = %lu\n", + pr_alert(" CM = %lu, WnR = %lu, TnD = %lu, TagAccess = %lu\n", (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT, - (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT); + (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT, + (iss2 & ESR_ELx_TnD) >> ESR_ELx_TnD_SHIFT, + (iss2 & ESR_ELx_TagAccess) >> ESR_ELx_TagAccess_SHIFT); + + pr_alert(" GCS = %ld, Overlay = %lu, DirtyBit = %lu, Xs = %llu\n", + (iss2 & ESR_ELx_GCS) >> ESR_ELx_GCS_SHIFT, + (iss2 & ESR_ELx_Overlay) >> ESR_ELx_Overlay_SHIFT, + (iss2 & ESR_ELx_DirtyBit) >> ESR_ELx_DirtyBit_SHIFT, + (iss2 & ESR_ELx_Xs_MASK) >> ESR_ELx_Xs_SHIFT); } static void mem_abort_decode(unsigned long esr) -- cgit v1.2.3 From cb5aa637943857f7f937a51d1e621dbe925f9f67 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 22 May 2023 15:28:00 +0100 Subject: kselftest/arm64: Add a smoke test for ptracing hardware break/watch points There was a report that the hardware breakpoints and watch points weren't reporting the debug architecture version as expected; they were reporting a version of 0, which is not defined in the architecture. This happens when running in a KVM guest if the host has a debug architecture version not supported by KVM; this in turn confuses GDB, which rejects any debug architecture version it does not know about. Add a test that covers that situation and while we're at it reports the debug architecture version and number of slots available to aid with figuring out problems that may arise. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230414-arm64-test-hw-breakpoint-v2-1-90a19e3b1059@kernel.org Signed-off-by: Catalin Marinas --- tools/testing/selftests/arm64/abi/ptrace.c | 32 +++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/arm64/abi/ptrace.c b/tools/testing/selftests/arm64/abi/ptrace.c index be952511af22..abe4d58d731d 100644 --- a/tools/testing/selftests/arm64/abi/ptrace.c +++ b/tools/testing/selftests/arm64/abi/ptrace.c @@ -20,7 +20,7 @@ #include "../../kselftest.h" -#define EXPECTED_TESTS 7 +#define EXPECTED_TESTS 11 #define MAX_TPIDRS 2 @@ -132,6 +132,34 @@ static void test_tpidr(pid_t child) } } +static void test_hw_debug(pid_t child, int type, const char *type_name) +{ + struct user_hwdebug_state state; + struct iovec iov; + int slots, arch, ret; + + iov.iov_len = sizeof(state); + iov.iov_base = &state; + + /* Should be able to read the values */ + ret = ptrace(PTRACE_GETREGSET, child, type, &iov); + ksft_test_result(ret == 0, "read_%s\n", type_name); + + if (ret == 0) { + /* Low 8 bits is the number of slots, next 4 bits the arch */ + slots = state.dbg_info & 0xff; + arch = (state.dbg_info >> 8) & 0xf; + + ksft_print_msg("%s version %d with %d slots\n", type_name, + arch, slots); + + /* Zero is not currently architecturally valid */ + ksft_test_result(arch, "%s_arch_set\n", type_name); + } else { + ksft_test_result_skip("%s_arch_set\n", type_name); + } +} + static int do_child(void) { if (ptrace(PTRACE_TRACEME, -1, NULL, NULL)) @@ -207,6 +235,8 @@ static int do_parent(pid_t child) ksft_print_msg("Parent is %d, child is %d\n", getpid(), child); test_tpidr(child); + test_hw_debug(child, NT_ARM_HW_WATCH, "NT_ARM_HW_WATCH"); + test_hw_debug(child, NT_ARM_HW_BREAK, "NT_ARM_HW_BREAK"); ret = EXIT_SUCCESS; -- cgit v1.2.3 From e34f78b970ea51d17841f6168e50223efc690c76
Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 23 May 2023 22:49:00 +0100 Subject: arm64/cpufeature: Use helper for ECV CNTPOFF cpufeature The newly added support for ECV CNTPOFF open codes the recently added helper ARM64_CPUID_FIELDS(); make use of the helper. No functional change. Signed-off-by: Mark Brown Reviewed-by: Anshuman Khandual Link: https://lore.kernel.org/r/20230523-arm64-ecv-helper-v1-1-506dfb5fb199@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 7d7128c65161..27326f35b646 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2235,11 +2235,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .capability = ARM64_HAS_ECV_CNTPOFF, .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_cpuid_feature, - .sys_reg = SYS_ID_AA64MMFR0_EL1, - .field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT, - .field_width = 4, - .sign = FTR_UNSIGNED, - .min_field_value = ID_AA64MMFR0_EL1_ECV_CNTPOFF, + ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, CNTPOFF) }, #ifdef CONFIG_ARM64_PAN { -- cgit v1.2.3 From f818947a06183dee7c2afc6648c75149586bf288 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 20 Apr 2023 22:27:24 +0200 Subject: perf/arm-cci: Slightly optimize cci_pmu_sync_counters() When the 'mask' bitmap is cleared, it is better to use its full maximum size instead of only the needed size. This lets the compiler optimize it because the size is now known at compile time. HW_CNTRS_MAX is small (i.e. currently 9), so a call to memset() is saved. Also, as 'mask' is local to the function, the non-atomic __set_bit() can also safely be used here. Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/88d4e20d595f771396e9d558c1587eb4494057db.1682022422.git.christophe.jaillet@wanadoo.fr Signed-off-by: Will Deacon --- drivers/perf/arm-cci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 03b1309875ae..998259f1d973 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -645,7 +645,7 @@ static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu) struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; DECLARE_BITMAP(mask, HW_CNTRS_MAX); - bitmap_zero(mask, cci_pmu->num_cntrs); + bitmap_zero(mask, HW_CNTRS_MAX); for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) { struct perf_event *event = cci_hw->events[i]; @@ -656,7 +656,7 @@ static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu) if (event->hw.state & PERF_HES_STOPPED) continue; if (event->hw.state & PERF_HES_ARCH) { - set_bit(i, mask); + __set_bit(i, mask); event->hw.state &= ~PERF_HES_ARCH; } } -- cgit v1.2.3 From 7bd42f122c7cf1e8101519dced3e07866b81e0d2 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 18 May 2023 10:18:08 +0200 Subject: perf: qcom_l2_pmu: Make l2_cache_pmu_probe_cluster() more robust If an error occurs after calling list_add(), the &l2cache_pmu->clusters list will reference some memory that will be freed when the managed resources are released. Move the list_add() to the end of the function, once everything is fine. This is harmless because if l2_cache_pmu_probe_cluster() fails, then l2_cache_pmu_probe() will fail as well and 'l2cache_pmu' will be released. But it looks cleaner and could silence a static checker warning.
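The rule being enforced here is general: never publish an object on a shared list until every step that can fail has succeeded, or an error return leaves the list pointing at freed memory. A compressed sketch (struct node and probe_one() are illustrative, not the driver's real API):

    #include <stdlib.h>

    struct node {
            struct node *next;
            int irq;
    };

    static struct node *head;

    static int probe_one(int irq)
    {
            struct node *n = calloc(1, sizeof(*n));

            if (!n)
                    return -1;
            if (irq < 0) {          /* stands in for any fallible setup step */
                    free(n);        /* the list never saw 'n': no dangling pointer */
                    return -1;
            }
            n->irq = irq;
            n->next = head;         /* publish only once fully initialised */
            head = n;
            return 0;
    }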
Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/6a0f5bdb6b7b2ed4ef194fc49693e902ad5b95ea.1684397879.git.christophe.jaillet@wanadoo.fr Signed-off-by: Will Deacon --- drivers/perf/qcom_l2_pmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index aaca6db7d8f6..3f9a98c17a89 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -857,7 +857,6 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data) return -ENOMEM; INIT_LIST_HEAD(&cluster->next); - list_add(&cluster->next, &l2cache_pmu->clusters); cluster->cluster_id = fw_cluster_id; irq = platform_get_irq(sdev, 0); @@ -883,6 +882,7 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data) spin_lock_init(&cluster->pmu_lock); + list_add(&cluster->next, &l2cache_pmu->clusters); l2cache_pmu->num_pmus++; return 0; -- cgit v1.2.3 From 71746c995cac92fcf6a65661b51211cf2009d7f0 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 24 May 2023 17:44:32 +0100 Subject: perf/arm-cmn: Fix DTC reset It turns out that my naive DTC reset logic fails to work as intended, since, after checking with the hardware designers, the PMU actually needs to be fully enabled in order to correctly clear any pending overflows. Therefore, invert the sequence to start with turning on both enables so that we can reliably get the DTCs into a known state, then move to our normal counters-stopped state from there. Since all the DTM counters have already been unpaired during the initial discovery pass, we just need to additionally reset the cycle counters to ensure that no other unexpected overflows occur during this period. Fixes: 0ba64770a2f2 ("perf: Add Arm CMN-600 PMU driver") Reported-by: Geoff Blake Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/0ea4559261ea394f827c9aee5168c77a60aaee03.1684946389.git.robin.murphy@arm.com Signed-off-by: Will Deacon --- drivers/perf/arm-cmn.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 47d359f72957..89a685a09d84 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -1899,9 +1899,10 @@ static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int id if (dtc->irq < 0) return dtc->irq; - writel_relaxed(0, dtc->base + CMN_DT_PMCR); + writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL); + writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR); + writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR); writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR); - writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR); return 0; } @@ -1961,7 +1962,7 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) dn->type = CMN_TYPE_CCLA; } - writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL); + arm_cmn_set_state(cmn, CMN_STATE_DISABLED); return 0; } -- cgit v1.2.3 From 8be3593b9efa8903d2ee7bb9cdf57a8e56c66f36 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 28 May 2023 09:02:05 +0100 Subject: drivers/perf: apple_m1: Force 63bit counters for M2 CPUs Sidharth reports that on M2, the PMU never generates any interrupt when using 'perf record', which is annoying, as you get no samples. I'm tempted to say "no sample, no problem", but others may have a different opinion. Upon investigation, it appears that the counters on M2 are significantly different from the ones on M1, as they count on 64 bits instead of 48.
Which of course, in the fine M1 tradition, means that we can only use 63 bits, as the top bit is used to signal the interrupt... This results in having to introduce yet another flag to indicate yet another odd counter width. Who knows what the next crazy implementation will do... With this, perf can work out the correct offset, and 'perf record' works as intended. Tested on M2 and M2-Pro CPUs. Cc: Janne Grunau Cc: Hector Martin Cc: Mark Rutland Cc: Will Deacon Fixes: 7d0bfb7c9977 ("drivers/perf: apple_m1: Add Apple M2 support") Reported-by: Sidharth Kshatriya Tested-by: Sidharth Kshatriya Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230528080205.288446-1-maz@kernel.org Signed-off-by: Will Deacon --- drivers/perf/apple_m1_cpu_pmu.c | 30 ++++++++++++++++++++++++------ drivers/perf/arm_pmu.c | 2 ++ include/linux/perf/arm_pmu.h | 2 ++ 3 files changed, 28 insertions(+), 6 deletions(-) diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c index 8574c6e58c83..cd2de44b61b9 100644 --- a/drivers/perf/apple_m1_cpu_pmu.c +++ b/drivers/perf/apple_m1_cpu_pmu.c @@ -493,6 +493,17 @@ static int m1_pmu_map_event(struct perf_event *event) return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT); } +static int m2_pmu_map_event(struct perf_event *event) +{ + /* + * Same deal as the above, except that M2 has 64bit counters. + * Which, as far as we're concerned, actually means 63 bits. + * Yes, this is getting awkward. + */ + event->hw.flags |= ARMPMU_EVT_63BIT; + return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT); +} + static void m1_pmu_reset(void *info) { int i; @@ -525,7 +536,7 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event, return 0; } -static int m1_pmu_init(struct arm_pmu *cpu_pmu) +static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags) { cpu_pmu->handle_irq = m1_pmu_handle_irq; cpu_pmu->enable = m1_pmu_enable_event; @@ -536,7 +547,14 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->clear_event_idx = m1_pmu_clear_event_idx; cpu_pmu->start = m1_pmu_start; cpu_pmu->stop = m1_pmu_stop; - cpu_pmu->map_event = m1_pmu_map_event; + + if (flags & ARMPMU_EVT_47BIT) + cpu_pmu->map_event = m1_pmu_map_event; + else if (flags & ARMPMU_EVT_63BIT) + cpu_pmu->map_event = m2_pmu_map_event; + else + return WARN_ON(-EINVAL); + cpu_pmu->reset = m1_pmu_reset; cpu_pmu->set_event_filter = m1_pmu_set_event_filter; @@ -550,25 +568,25 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu) static int m1_pmu_ice_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "apple_icestorm_pmu"; - return m1_pmu_init(cpu_pmu); + return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT); } static int m1_pmu_fire_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "apple_firestorm_pmu"; - return m1_pmu_init(cpu_pmu); + return m1_pmu_init(cpu_pmu, ARMPMU_EVT_47BIT); } static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "apple_avalanche_pmu"; - return m1_pmu_init(cpu_pmu); + return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT); } static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "apple_blizzard_pmu"; - return m1_pmu_init(cpu_pmu); + return m1_pmu_init(cpu_pmu, ARMPMU_EVT_63BIT); } static const struct of_device_id m1_pmu_of_device_ids[] = { diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 15bd1e34a88e..277e29fbd504 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -109,6 +109,8 @@ static inline u64 arm_pmu_event_max_period(struct perf_event *event) { if (event->hw.flags & 
ARMPMU_EVT_64BIT) return GENMASK_ULL(63, 0); + else if (event->hw.flags & ARMPMU_EVT_63BIT) + return GENMASK_ULL(62, 0); else if (event->hw.flags & ARMPMU_EVT_47BIT) return GENMASK_ULL(46, 0); else diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 525b5d64e394..c0e4baf940dc 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -26,9 +26,11 @@ */ #define ARMPMU_EVT_64BIT 0x00001 /* Event uses a 64bit counter */ #define ARMPMU_EVT_47BIT 0x00002 /* Event uses a 47bit counter */ +#define ARMPMU_EVT_63BIT 0x00004 /* Event uses a 63bit counter */ static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_64BIT) == ARMPMU_EVT_64BIT); static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_47BIT) == ARMPMU_EVT_47BIT); +static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_63BIT) == ARMPMU_EVT_63BIT); #define HW_OP_UNSUPPORTED 0xFFFF #define C(_x) PERF_COUNT_HW_CACHE_##_x -- cgit v1.2.3 From af94aad4c9150cca6781ad134c950fb05dff43f9 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Tue, 9 May 2023 15:22:25 +0100 Subject: KVM: arm64: initialize HCRX_EL2 ARMv8.7/9.2 adds a new hypervisor configuration register HCRX_EL2. Initialize the register to a safe value (all fields 0), to be robust against firmware that has not initialized it. This is also needed to ensure that the register is reinitialized after a kexec by a future kernel. In addition, move SMPME setup over to the new flags, as it would otherwise get overridden. It is safe to set the bit even if SME is not (uniformly) supported, as it will write to a RES0 bit (having no effect), and SME will be disabled by the cpufeature framework. (Similar to how e.g. the API bit is handled in HCR_HOST_NVHE_FLAGS.) Signed-off-by: Kristina Martsenko Acked-by: Marc Zyngier Acked-by: Oliver Upton Link: https://lore.kernel.org/r/20230509142235.3284028-2-kristina.martsenko@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/el2_setup.h | 18 ++++++++++-------- arch/arm64/include/asm/kvm_arm.h | 3 +++ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 037724b19c5c..0201577863ca 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -22,6 +22,15 @@ isb .endm +.macro __init_el2_hcrx + mrs x0, id_aa64mmfr1_el1 + ubfx x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4 + cbz x0, .Lskip_hcrx_\@ + mov_q x0, HCRX_HOST_FLAGS + msr_s SYS_HCRX_EL2, x0 +.Lskip_hcrx_\@: +.endm + /* * Allow Non-secure EL1 and EL0 to access physical timer and counter. * This is not necessary for VHE, since the host kernel runs in EL2, @@ -184,6 +193,7 @@ */ .macro init_el2_state __init_el2_sctlr + __init_el2_hcrx __init_el2_timers __init_el2_debug __init_el2_lor @@ -284,14 +294,6 @@ cbz x1, .Lskip_sme_\@ msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal - - mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present? 
- ubfx x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4 - cbz x1, .Lskip_sme_\@ - - mrs_s x1, SYS_HCRX_EL2 - orr x1, x1, #HCRX_EL2_SMPME_MASK // Enable priority mapping - msr_s SYS_HCRX_EL2, x1 .Lskip_sme_\@: .endm diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index baef29fcbeee..fb7fe28b8eb8 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -9,6 +9,7 @@ #include #include +#include #include /* Hyp Configuration Register (HCR) bits */ @@ -92,6 +93,8 @@ #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) +#define HCRX_HOST_FLAGS (HCRX_EL2_SMPME) + /* TCR_EL2 Registers bits */ #define TCR_EL2_RES1 ((1U << 31) | (1 << 23)) #define TCR_EL2_TBI (1 << 20) -- cgit v1.2.3 From b0c756fe996ac930033882ca56410639e5cad1ec Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Tue, 9 May 2023 15:22:26 +0100 Subject: arm64: cpufeature: detect FEAT_HCX Detect if the system has the new HCRX_EL2 register added in ARMv8.7/9.2, so that subsequent patches can check for its presence. KVM currently relies on the register being present on all CPUs (or none), so the kernel will panic if that is not the case. Fortunately no such systems currently exist, but this can be revisited if they appear. Note that the kernel will not panic if CONFIG_KVM is disabled. Reviewed-by: Catalin Marinas Signed-off-by: Kristina Martsenko Link: https://lore.kernel.org/r/20230509142235.3284028-3-kristina.martsenko@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 8 ++++++++ arch/arm64/tools/cpucaps | 1 + 2 files changed, 9 insertions(+) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 7d7128c65161..9898ad77b1db 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -364,6 +364,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HCX_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0), @@ -2309,6 +2310,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = is_kvm_protected_mode, }, + { + .desc = "HCRX_EL2 register", + .capability = ARM64_HAS_HCX, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HCX, IMP) + }, #endif { .desc = "Kernel page table isolation (KPTI)", diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 40ba95472594..e1de10fa080e 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -32,6 +32,7 @@ HAS_GENERIC_AUTH_IMP_DEF HAS_GIC_CPUIF_SYSREGS HAS_GIC_PRIO_MASKING HAS_GIC_PRIO_RELAXED_SYNC +HAS_HCX HAS_LDAPR HAS_LSE_ATOMICS HAS_NESTED_VIRT -- cgit v1.2.3 From 306b4c9f7120c485607cbbfa1ac3ecec005d8231 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Tue, 9 May 2023 15:22:27 +0100 Subject: KVM: arm64: switch HCRX_EL2 between host and guest Switch the HCRX_EL2 register between host 
and guest configurations, in order to enable different features in the host and guest. Now that there are separate guest flags, we can also remove SMPME from the host flags, as SMPME is used for virtualizing SME priorities and has no use in the host. Signed-off-by: Kristina Martsenko Acked-by: Marc Zyngier Acked-by: Oliver Upton Link: https://lore.kernel.org/r/20230509142235.3284028-4-kristina.martsenko@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/kvm_arm.h | 3 ++- arch/arm64/kvm/hyp/include/hyp/switch.h | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index fb7fe28b8eb8..7bb2fbddda54 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -93,7 +93,8 @@ #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) -#define HCRX_HOST_FLAGS (HCRX_EL2_SMPME) +#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME) +#define HCRX_HOST_FLAGS 0 /* TCR_EL2 Registers bits */ #define TCR_EL2_RES1 ((1U << 31) | (1 << 23)) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index e78a08a72a3c..eb123aa7479d 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -130,6 +130,9 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu) if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); + + if (cpus_have_final_cap(ARM64_HAS_HCX)) + write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2); } static inline void ___deactivate_traps(struct kvm_vcpu *vcpu) @@ -144,6 +147,9 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu) vcpu->arch.hcr_el2 &= ~HCR_VSE; vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE; } + + if (cpus_have_final_cap(ARM64_HAS_HCX)) + write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2); } static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From f32c053b9806e42209d40e9ce7ed6f7f8be3be3b Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Tue, 9 May 2023 15:22:28 +0100 Subject: arm64: mops: document boot requirements for MOPS FEAT_MOPS introduces new instructions; we require that these instructions not execute as UNDEFINED when we identify that the feature is supported. Signed-off-by: Kristina Martsenko Link: https://lore.kernel.org/r/20230509142235.3284028-5-kristina.martsenko@arm.com Signed-off-by: Catalin Marinas --- Documentation/arm64/booting.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/arm64/booting.rst b/Documentation/arm64/booting.rst index ffeccdd6bdac..b3bbf330ed0a 100644 --- a/Documentation/arm64/booting.rst +++ b/Documentation/arm64/booting.rst @@ -379,6 +379,12 @@ Before jumping into the kernel, the following conditions must be met: - SMCR_EL2.EZT0 (bit 30) must be initialised to 0b1. + For CPUs with Memory Copy and Memory Set instructions (FEAT_MOPS): + + - If the kernel is entered at EL1 and EL2 is present: + + - HCRX_EL2.MSCEn (bit 11) must be initialised to 0b1. + The requirements described above for CPU mode, caches, MMUs, architected timers, coherency and system registers apply to all CPUs. All CPUs must enter the kernel in the same exception level.
Where the values documented -- cgit v1.2.3 From b1319c0e955933eae918a8a95c5361378ca86968 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Tue, 9 May 2023 15:22:29 +0100 Subject: arm64: mops: don't disable host MOPS instructions from EL2 To allow nVHE host EL0 and EL1 to use FEAT_MOPS instructions, configure EL2 to not cause these instructions to be treated as UNDEFINED. A VHE host is unaffected by this control. Reviewed-by: Catalin Marinas Signed-off-by: Kristina Martsenko Link: https://lore.kernel.org/r/20230509142235.3284028-6-kristina.martsenko@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/kvm_arm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 7bb2fbddda54..d2d4f4cd12b8 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -94,7 +94,7 @@ #define HCR_HOST_VHE_FLAGS (HCR