-rw-r--r--  arch/arm/boot/compressed/Makefile        8
-rw-r--r--  arch/arm/boot/compressed/head.S         20
-rw-r--r--  arch/arm/include/asm/assembler.h        18
-rw-r--r--  arch/arm/include/asm/barrier.h          32
-rw-r--r--  arch/arm/include/asm/bugs.h              6
-rw-r--r--  arch/arm/include/asm/cp15.h              3
-rw-r--r--  arch/arm/include/asm/cputype.h           8
-rw-r--r--  arch/arm/include/asm/kvm_asm.h           2
-rw-r--r--  arch/arm/include/asm/kvm_host.h         14
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h          23
-rw-r--r--  arch/arm/include/asm/proc-fns.h          4
-rw-r--r--  arch/arm/include/asm/system_misc.h      15
-rw-r--r--  arch/arm/include/uapi/asm/siginfo.h     13
-rw-r--r--  arch/arm/kernel/Makefile                 1
-rw-r--r--  arch/arm/kernel/bugs.c                  18
-rw-r--r--  arch/arm/kernel/entry-common.S          18
-rw-r--r--  arch/arm/kernel/entry-header.S          25
-rw-r--r--  arch/arm/kernel/machine_kexec.c         36
-rw-r--r--  arch/arm/kernel/smp.c                    4
-rw-r--r--  arch/arm/kernel/suspend.c                2
-rw-r--r--  arch/arm/kernel/traps.c                  5
-rw-r--r--  arch/arm/kvm/hyp/hyp-entry.S           112
-rw-r--r--  arch/arm/lib/getuser.S                  10
-rw-r--r--  arch/arm/mm/Kconfig                     23
-rw-r--r--  arch/arm/mm/Makefile                     2
-rw-r--r--  arch/arm/mm/fault.c                      3
-rw-r--r--  arch/arm/mm/proc-macros.S                3
-rw-r--r--  arch/arm/mm/proc-v7-2level.S             6
-rw-r--r--  arch/arm/mm/proc-v7-bugs.c             174
-rw-r--r--  arch/arm/mm/proc-v7.S                  154
-rw-r--r--  arch/arm/probes/kprobes/opt-arm.c        4
-rw-r--r--  arch/arm/vfp/vfpmodule.c                 2
32 files changed, 663 insertions(+), 105 deletions(-)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 6a5c4ac97703..a3c5fbcad4ab 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -117,11 +117,9 @@ ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
asflags-y := -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol.
-KBSS_SZ = $(shell $(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
- perl -e 'while (<>) { \
- $$bss_start=hex($$1) if /^([[:xdigit:]]+) B __bss_start$$/; \
- $$bss_end=hex($$1) if /^([[:xdigit:]]+) B __bss_stop$$/; \
- }; printf "%d\n", $$bss_end - $$bss_start;')
+KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
+ sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
+ -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
# Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y)
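
The KBSS_SZ rule above replaces a Perl script with an nm-plus-sed pipeline
that the shell's arithmetic expansion evaluates. A worked example, with
made-up addresses: if nm prints

        c1a00000 B __bss_start
        c1a52e34 B __bss_stop

the two sed expressions rewrite those lines to "-0xc1a00000" and
"+0xc1a52e34", and the surrounding $(( ... )) sums them to 339508, the BSS
size in bytes. Note the new pattern also accepts 'A' (absolute) symbols,
which the Perl version did not.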
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 45c8823c3750..517e0e18f0b8 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -29,19 +29,19 @@
#if defined(CONFIG_DEBUG_ICEDCC)
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
- .macro loadsp, rb, tmp
+ .macro loadsp, rb, tmp1, tmp2
.endm
.macro writeb, ch, rb
mcr p14, 0, \ch, c0, c5, 0
.endm
#elif defined(CONFIG_CPU_XSCALE)
- .macro loadsp, rb, tmp
+ .macro loadsp, rb, tmp1, tmp2
.endm
.macro writeb, ch, rb
mcr p14, 0, \ch, c8, c0, 0
.endm
#else
- .macro loadsp, rb, tmp
+ .macro loadsp, rb, tmp1, tmp2
.endm
.macro writeb, ch, rb
mcr p14, 0, \ch, c1, c0, 0
@@ -57,7 +57,7 @@
.endm
#if defined(CONFIG_ARCH_SA1100)
- .macro loadsp, rb, tmp
+ .macro loadsp, rb, tmp1, tmp2
mov \rb, #0x80000000 @ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
add \rb, \rb, #0x00050000 @ Ser3
@@ -66,8 +66,8 @@
#endif
.endm
#else
- .macro loadsp, rb, tmp
- addruart \rb, \tmp
+ .macro loadsp, rb, tmp1, tmp2
+ addruart \rb, \tmp1, \tmp2
.endm
#endif
#endif
@@ -561,8 +561,6 @@ not_relocated: mov r0, #0
bl decompress_kernel
bl cache_clean_flush
bl cache_off
- mov r1, r7 @ restore architecture number
- mov r2, r8 @ restore atags pointer
#ifdef CONFIG_ARM_VIRT_EXT
mrs r0, spsr @ Get saved CPU boot mode
@@ -1297,7 +1295,7 @@ phex: adr r3, phexbuf
b 1b
@ puts corrupts {r0, r1, r2, r3}
-puts: loadsp r3, r1
+puts: loadsp r3, r2, r1
1: ldrb r2, [r0], #1
teq r2, #0
moveq pc, lr
@@ -1314,8 +1312,8 @@ puts: loadsp r3, r1
@ putc corrupts {r0, r1, r2, r3}
putc:
mov r2, r0
+ loadsp r3, r1, r0
mov r0, #0
- loadsp r3, r1
b 2b
@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
@@ -1365,6 +1363,8 @@ __hyp_reentry_vectors:
__enter_kernel:
mov r0, #0 @ must be 0
+ mov r1, r7 @ restore architecture number
+ mov r2, r8 @ restore atags pointer
ARM( mov pc, r4 ) @ call kernel
M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class
THUMB( bx r4 ) @ entry point is always ARM for A/R classes
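
Two related decompressor changes above: loadsp gains a second temporary
because addruart needs two scratch registers on some platforms, and the
restore of the machine number (r7 -> r1) and atags pointer (r8 -> r2) moves
from just after cache_off into __enter_kernel. The reason for the move is
visible in the "puts corrupts {r0, r1, r2, r3}" comment: any debug print
issued after the early restore would trash r1/r2 before the kernel was
entered, roughly (illustrative sequence, not code from the patch):

        bl      cache_off
        mov     r1, r7          @ old location of the restore
        mov     r2, r8
        bl      puts            @ corrupts r0-r3, including r1/r2
        @ ... kernel would be entered with bogus machine number/atags

Restoring r1/r2 as the very last step in __enter_kernel closes that window.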
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index bc8d4bbd82e2..0cd4dccbae78 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -447,6 +447,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.size \name , . - \name
.endm
+ .macro csdb
+#ifdef CONFIG_THUMB2_KERNEL
+ .inst.w 0xf3af8014
+#else
+ .inst 0xe320f014
+#endif
+ .endm
+
.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
adds \tmp, \addr, #\size - 1
@@ -536,4 +544,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry) \
+ .pushsection "_kprobe_blacklist", "aw" ; \
+ .balign 4 ; \
+ .long entry; \
+ .popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 40f5c410fd8c..69772e742a0a 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -17,6 +17,12 @@
#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
+#ifdef CONFIG_THUMB2_KERNEL
+#define CSDB ".inst.w 0xf3af8014"
+#else
+#define CSDB ".inst 0xe320f014"
+#endif
+#define csdb() __asm__ __volatile__(CSDB : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
@@ -37,6 +43,13 @@
#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif
+#ifndef CSDB
+#define CSDB
+#endif
+#ifndef csdb
+#define csdb()
+#endif
+
#ifdef CONFIG_ARM_HEAVY_MB
extern void (*soc_mb)(void);
extern void arm_heavy_mb(void);
@@ -63,6 +76,25 @@ extern void arm_heavy_mb(void);
#define __smp_rmb() __smp_mb()
#define __smp_wmb() dmb(ishst)
+#ifdef CONFIG_CPU_SPECTRE
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+ unsigned long sz)
+{
+ unsigned long mask;
+
+ asm volatile(
+ "cmp %1, %2\n"
+ " sbc %0, %1, %1\n"
+ CSDB
+ : "=r" (mask)
+ : "r" (idx), "Ir" (sz)
+ : "cc");
+
+ return mask;
+}
+#define array_index_mask_nospec array_index_mask_nospec
+#endif
+
#include <asm-generic/barrier.h>
#endif /* !__ASSEMBLY__ */
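
A note on the asm in array_index_mask_nospec(): cmp sets the carry flag when
idx >= sz (unsigned compare, no borrow), and "sbc mask, idx, idx" computes
idx - idx - !C, which is all-ones when idx < sz and zero otherwise; the CSDB
then keeps speculative execution from running ahead with an unclamped index.
A minimal sketch of a caller, mirroring the generic array_index_nospec()
pattern from <linux/nospec.h> (the function name here is hypothetical):

        /* Clamp a possibly attacker-controlled index before the load. */
        static inline unsigned long load_nospec(const unsigned long *array,
                                                unsigned long idx,
                                                unsigned long size)
        {
                unsigned long mask = array_index_mask_nospec(idx, size);

                /*
                 * mask is all-ones iff idx < size, even under speculation,
                 * so array[idx & mask] can only read array[0..size-1].
                 */
                return array[idx & mask];
        }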
diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
index a97f1ea708d1..73a99c72a930 100644
--- a/arch/arm/include/asm/bugs.h
+++ b/arch/arm/include/asm/bugs.h
@@ -10,12 +10,14 @@
#ifndef __ASM_BUGS_H
#define __ASM_BUGS_H
-#ifdef CONFIG_MMU
extern void check_writebuffer_bugs(void);
-#define check_bugs() check_writebuffer_bugs()
+#ifdef CONFIG_MMU
+extern void check_bugs(void);
+extern void check_other_bugs(void);
#else
#define check_bugs() do { } while (0)
+#define check_other_bugs() do { } while (0)
#endif
#endif
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 4c9fa72b59f5..07e27f212dc7 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -65,6 +65,9 @@
#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
+#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+
extern unsigned long cr_alignment; /* defined in entry-armv.S */
static inline unsigned long get_cr(void)
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cb546425da8a..26021980504d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -77,8 +77,16 @@
#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
+#define ARM_CPU_PART_CORTEX_A53 0x4100d030
+#define ARM_CPU_PART_CORTEX_A57 0x4100d070
+#define ARM_CPU_PART_CORTEX_A72 0x4100d080
+#define ARM_CPU_PART_CORTEX_A73 0x4100d090
+#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0
#define ARM_CPU_PART_MASK 0xff00fff0
+/* Broadcom cores */
+#define ARM_CPU_PART_BRAHMA_B15 0x420000f0
+
/* DEC implemented cores */
#define ARM_CPU_PART_SA1100 0x4400a110
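
ARM_CPU_PART_MASK (0xff00fff0) keeps the MIDR implementer byte and part
number while masking off the variant and revision fields, so one constant
per core covers every stepping. As a worked example (the MIDR value is
illustrative of a Cortex-A15 r2p2):

        0x412fc0f2 & 0xff00fff0 == 0x4100c0f0 == ARM_CPU_PART_CORTEX_A15

which is why read_cpuid_part() matches the new constants regardless of
silicon revision.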
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 5a953ecb0d78..231e87ad45d5 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -61,8 +61,6 @@ struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
-extern char __kvm_hyp_vector[];
-
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c6a749568dd6..8467e05360d7 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
+#include <asm/cputype.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@@ -308,8 +309,17 @@ static inline void kvm_arm_vhe_guest_exit(void) {}
static inline bool kvm_arm_harden_branch_predictor(void)
{
- /* No way to detect it yet, pretend it is not there. */
- return false;
+ switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ case ARM_CPU_PART_BRAHMA_B15:
+ case ARM_CPU_PART_CORTEX_A12:
+ case ARM_CPU_PART_CORTEX_A15:
+ case ARM_CPU_PART_CORTEX_A17:
+ return true;
+#endif
+ default:
+ return false;
+ }
}
static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 707a1f06dc5d..cf2eae51f9a0 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -311,7 +311,28 @@ static inline unsigned int kvm_get_vmid_bits(void)
static inline void *kvm_get_hyp_vector(void)
{
- return kvm_ksym_ref(__kvm_hyp_vector);
+ switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ case ARM_CPU_PART_CORTEX_A12:
+ case ARM_CPU_PART_CORTEX_A17:
+ {
+ extern char __kvm_hyp_vector_bp_inv[];
+ return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+ }
+
+ case ARM_CPU_PART_BRAHMA_B15:
+ case ARM_CPU_PART_CORTEX_A15:
+ {
+ extern char __kvm_hyp_vector_ic_inv[];
+ return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
+ }
+#endif
+ default:
+ {
+ extern char __kvm_hyp_vector[];
+ return kvm_ksym_ref(__kvm_hyp_vector);
+ }
+ }
}
static inline int kvm_map_vectors(void)
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index f2e1af45bd6f..e25f4392e1b2 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -37,6 +37,10 @@ extern struct processor {
*/
void (*_proc_init)(void);
/*
+ * Check for processor bugs
+ */
+ void (*check_bugs)(void);
+ /*
* Disable any processor specifics
*/
void (*_proc_fin)(void);
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 78f6db114faf..8e76db83c498 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/reboot.h>
+#include <linux/percpu.h>
extern void cpu_init(void);
@@ -15,6 +16,20 @@ void soft_restart(unsigned long);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
extern void (*arm_pm_idle)(void);
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+typedef void (*harden_branch_predictor_fn_t)(void);
+DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+static inline void harden_branch_predictor(void)
+{
+ harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
+ smp_processor_id());
+ if (fn)
+ fn();
+}
+#else
+#define harden_branch_predictor() do { } while (0)
+#endif
+
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
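
The hardening hook is a per-CPU function pointer rather than a single global
so that, on big.LITTLE systems, each core can install the invalidation
method its own microarchitecture needs (BPIALL for Cortex-A12/A17, ICIALLU
for A15 and Brahma-B15, matching the KVM tables elsewhere in this patch).
A hedged sketch of the consuming side, assuming a caller on the user-abort
path (the surrounding function is illustrative, not the patch's code):

        /* Hypothetical abort-path call site. */
        static void example_user_fault(void)
        {
                harden_branch_predictor();  /* no-op until a fn is installed */
                /* ... deliver the signal to the faulting task ... */
        }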
diff --git a/arch/arm/include/uapi/asm/siginfo.h b/arch/arm/include/uapi/asm/siginfo.h
deleted file mode 100644
index d0513880be21..000000000000
--- a/arch/arm/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_SIGINFO_H
-#define __ASM_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-/*
- * SIGFPE si_codes
- */
-#ifdef __KERNEL__
-#define FPE_FIXME 0 /* Broken dup of SI_USER */
-#endif /* __KERNEL__ */
-
-#endif
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index b59ac4bf82b8..8cad59465af3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -31,6 +31,7 @@ else
obj-y += entry-armv.o
endif
+obj-$(CONFIG_MMU) += bugs.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
new file mode 100644
index 000000000000..7be511310191
--- /dev/null
+++ b/arch/arm/kernel/bugs.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <asm/bugs.h>
+#include <asm/proc-fns.h>
+
+void check_other_bugs(void)
+{
+#ifdef MULTI_CPU
+ if (processor.check_bugs)
+ processor.check_bugs();
+#endif
+}
+
+void __init check_bugs(void)
+{
+ check_writebuffer_bugs();
+ check_other_bugs();
+}
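
check_other_bugs() is split out from check_bugs() because it must run more
than once: the smp.c and suspend.c hunks below call it from
secondary_start_kernel() and cpu_suspend(), so each CPU re-applies its
workarounds on hotplug and resume. A sketch of the shape of a
processor.check_bugs implementation (the names below are hypothetical; the
real v7 version lands in arch/arm/mm/proc-v7-bugs.c):

        /* Hypothetical ICIALLU-based hardening callback. */
        static void example_bp_hardening_iciallu(void)
        {
                write_sysreg(0, ICIALLU);   /* via the new cp15.h accessor */
        }

        /* Hypothetical per-processor check_bugs hook. */
        static void example_v7_check_bugs(void)
        {
                if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15)
                        per_cpu(harden_branch_predictor_fn,
                                smp_processor_id()) = example_bp_hardening_iciallu;
        }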
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3c4f88701f22..20df608bf343 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -242,9 +242,7 @@ local_restart:
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
- cmp scno, #NR_syscalls @ check upper syscall limit
- badr lr, ret_fast_syscall @ return address
- ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
+ invoke_syscall tbl, scno, r10, ret_fast_syscall
add r1, sp, #S_OFF
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
@@ -278,14 +276,8 @@ __sys_trace:
mov r1, scno
add r0, sp, #S_OFF
bl syscall_trace_enter
-
- badr lr, __sys_trace_return @ return address
- mov scno, r0 @ syscall number (possibly new)
- add r1, sp, #S_R0 + S_OFF @ pointer to regs
- cmp scno, #NR_syscalls @ check upper syscall limit
- ldmccia r1, {r0 - r6} @ have to reload r0 - r6
- stmccia sp, {r4, r5} @ and update the stack args
- ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
+ mov scno, r0
+ invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
cmp scno, #-1 @ skip the syscall?
bne 2b
add sp, sp, #S_OFF @ restore stack
@@ -363,6 +355,10 @@ sys_syscall:
bic scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
cmpne scno, #NR_syscalls @ check range
+#ifdef CONFIG_CPU_SPECTRE
+ movhs scno, #0
+ csdb
+#endif
stmloia sp, {r5, r6} @ shuffle args
movlo r0, r1
movlo r1, r2
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 0f07579af472..773424843d6e 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -378,6 +378,31 @@
#endif
.endm
+ .macro invoke_syscall, table, nr, tmp, ret, reload=0
+#ifdef CONFIG_CPU_SPECTRE
+ mov \tmp, \nr
+ cmp \tmp, #NR_syscalls @ check upper syscall limit
+ movcs \tmp, #0
+ csdb
+ badr lr, \ret @ return address
+ .if \reload
+ add r1, sp, #S_R0 + S_OFF @ pointer to regs
+ ldmccia r1, {r0 - r6} @ reload r0-r6
+ stmccia sp, {r4, r5} @ update stack arguments
+ .endif
+ ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine
+#else
+ cmp \nr, #NR_syscalls @ check upper syscall limit
+ badr lr, \ret @ return address
+ .if \reload
+ add r1, sp, #S_R0 + S_OFF @ pointer to regs
+ ldmccia r1, {r0 - r6} @ reload r0-r6
+ stmccia sp, {r4, r5} @ update stack arguments
+ .endif
+ ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine
+#endif
+ .endm
+
/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.
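
The CONFIG_CPU_SPECTRE variant of invoke_syscall clamps the table index
instead of relying on the conditional load alone: architecturally "ldrcc pc"
is never taken for an out-of-range scno, but a mispredicted branch could
still issue the table load speculatively, so the index such a load would use
is forced to 0 and CSDB fences the speculation (the flags tested by ldrcc
come from the cmp, which movcs does not alter). The open-coded copy in
sys_syscall in entry-common.S above does the same. In C terms the
transformation is roughly this (a toy model, not kernel code):

        #define NR_CALLS 4
        extern long (*call_table[NR_CALLS])(void);

        long dispatch(unsigned long nr)
        {
                unsigned long tmp = nr;

                if (tmp >= NR_CALLS)
                        tmp = 0;            /* movcs \tmp, #0 */
                csdb();                     /* speculation barrier */
                if (nr < NR_CALLS)          /* the ldrcc condition */
                        return call_table[tmp]();
                return -1;                  /* fall through to ENOSYS path */
        }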
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 6b38d7a634c1..dd2eb5f76b9f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -83,7 +83,7 @@ void machine_crash_nonpanic_core(void *unused)
{
struct pt_regs regs;
- crash_setup_regs(&regs, NULL);
+ crash_setup_regs(&regs, get_irq_regs());
printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
smp_processor_id());
crash_save_cpu(&regs, smp_processor_id());
@@ -95,6 +95,27 @@ void machine_crash_nonpanic_core(void *unused)
cpu_relax();
}
+void crash_smp_send_stop(void)
+{
+ static int cpus_stopped;
+ unsigned long msecs;
+
+ if (cpus_stopped)
+ return;
+
+ atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+ smp_call_function(machine_crash_nonpanic_core, NULL, false);
+ msecs = 1000; /* Wait at most a second for the other cpus to stop */
+ while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+ mdelay(1);
+ msecs--;
+ }
+ if (atomic_read(&waiting_for_crash_ipi) > 0)
+ pr_warn("Non-crashing CPUs did not react to IPI\n");
+
+ cpus_stopped = 1;
+}
+
static void machine_kexec_mask_interrupts(void)
{
unsigned int i;
@@ -120,19 +141,8 @@ static void machine_kexec_mask_interrupts(void)
void machine_crash_shutdown(struct pt_regs *regs)
{
- unsigned long msecs;
-
local_irq_disable();
-
- atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
- smp_call_function(machine_crash_nonpanic_core, NULL, false);
- msecs = 1000; /* Wait at most a second for the other cpus to stop */
- while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
- mdelay(1);
- msecs--;
- }
- if (atomic_read(&waiting_for_crash_ipi) > 0)
- pr_warn("Non-crashing CPUs did not react to IPI\n");
+ crash_smp_send_stop();
crash_save_cpu(regs, smp_processor_id());
machine_kexec_mask_interrupts();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b9e08f50df41..0978282d5fc2 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -31,6 +31,7 @@
#include <linux/irq_work.h>
#include <linux/atomic.h>
+#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
@@ -404,6 +405,9 @@ asmlinkage void secondary_start_kernel(void)
* before we continue - which happens after __cpu_up returns.
*/
set_cpu_online(cpu, true);
+
+ check_other_bugs();
+
complete(&cpu_running);
local_irq_enable();
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index a40ebb7c0896..d08099269e35 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <linux/mm_types.h>
+#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
@@ -36,6 +37,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();
local_flush_tlb_all();
+ check_other_bugs();
}
return ret;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 5e3633c24e63..2fe87109ae46 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -19,6 +19,7 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
+#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
@@ -417,7 +418,8 @@ void unregister_undef_hook(struct undef_hook *hook)
raw_spin_unlock_irqrestore(&undef_lock, flags);
}
-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+static nokprobe_inline
+int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
struct undef_hook *hook;
unsigned long flags;
@@ -490,6 +492,7 @@ die_sig:
arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}
+NOKPROBE_SYMBOL(do_undefinstr)
/*
* Handle FIQ similarly to NMI on x86 systems.
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 95a2faefc070..aa3f9a9837ac 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -16,6 +16,7 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#include <linux/arm-smccc.h>
#include <linux/linkage.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
@@ -71,6 +72,90 @@ __kvm_hyp_vector:
W(b) hyp_irq
W(b) hyp_fiq
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ .align 5
+__kvm_hyp_vector_ic_inv:
+ .global __kvm_hyp_vector_ic_inv
+
+ /*
+ * We encode the exception entry in the bottom 3 bits of
+ * SP, and we have to guarantee to be 8 bytes aligned.
+ */
+ W(add) sp, sp, #1 /* Reset 7 */
+ W(add) sp, sp, #1 /* Undef 6 */
+ W(add) sp, sp, #1 /* Syscall 5 */
+ W(add) sp, sp, #1 /* Prefetch abort 4 */
+ W(add) sp, sp, #1 /* Data abort 3 */
+ W(add) sp, sp, #1 /* HVC 2 */
+ W(add) sp, sp, #1 /* IRQ 1 */
+ W(nop) /* FIQ 0 */
+
+ mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
+ isb
+
+ b decode_vectors
+
+ .align 5
+__kvm_hyp_vector_bp_inv:
+ .global __kvm_hyp_vector_bp_inv
+
+ /*
+ * We encode the exception entry in the bottom 3 bits of
+ * SP, and we have to guarantee to be 8 bytes aligned.
+ */
+ W(add) sp, sp, #1 /* Reset 7 */
+ W(add) sp, sp, #1 /* Undef 6 */
+ W(add) sp, sp, #1 /* Syscall 5 */
+ W(add) sp, sp, #1 /* Prefetch abort 4 */
+ W(add) sp, sp, #1 /* Data abort 3 */
+ W(add) sp, sp, #1 /* HVC 2 */
+ W(add) sp, sp, #1 /* IRQ 1 */
+ W(nop) /* FIQ 0 */
+
+ mcr p15, 0, r0, c7, c5, 6 /* BPIALL */
+ isb
+
+decode_vectors:
+
+#ifdef CONFIG_THUMB2_KERNEL
+ /*
+ * Yet another silly hack: Use VPIDR as a temp register.
+ * Thumb2 is really a pain, as SP cannot be used with most
+ * of the bitwise instructions. The vect_br macro ensures
+ * things gets cleaned-up.
+ */
+ mcr p15, 4, r0, c0, c0, 0 /* VPIDR */
+ mov r0, sp
+ and r0, r0, #7
+ sub sp, sp, r0
+ push {r1, r2}
+ mov r1, r0
+ mrc p15, 4, r0, c0, c0, 0 /* VPIDR */
+ mrc p15, 0, r2, c0, c0, 0 /* MIDR */
+ mcr p15, 4, r2, c0, c0, 0 /* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM( eor sp, sp, #\val )
+ARM( tst sp, #7 )
+ARM( eorne sp, sp, #\val )
+
+THUMB( cmp r1, #\val )
+THUMB( popeq {r1, r2} )
+
+ beq \targ
+.endm
+
+ vect_br 0, hyp_fiq
+ vect_br 1, hyp_irq
+ vect_br 2, hyp_hvc
+ vect_br 3, hyp_dabt
+ vect_br 4, hyp_pabt
+ vect_br 5, hyp_svc
+ vect_br 6, hyp_undef
+ vect_br 7, hyp_reset
+#endif
+
.macro invalid_vector label, cause
.align
\label: mov r0, #\cause
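
The vector trick above deserves a gloss: HYP entry leaves SP 8-byte aligned,
each vector slot is a single instruction, and execution falls through from
the entered slot to the bottom, so the number of "add sp, sp, #1"
instructions executed (7 for Reset down to 0 for FIQ) ends up encoded in
SP's low three bits without consuming a general-purpose register. vect_br
then tests and restores SP in one operation. A plain-C model of the ARM path
of that macro (illustration only):

        /* Model of "eor sp, #val; tst sp, #7; eorne sp, #val; beq targ". */
        static unsigned long sp_decode(unsigned long sp, unsigned long val,
                                       int *matched)
        {
                sp ^= val;          /* clears the low bits iff they equal val */
                if (sp & 7) {
                        sp ^= val;  /* not this vector: undo, fall through */
                        *matched = 0;
                } else {
                        *matched = 1;   /* branch taken; SP already restored */
                }
                return sp;
        }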
@@ -118,7 +203,7 @@ hyp_hvc:
lsr r2, r2, #16
and r2, r2, #0xff
cmp r2, #0
- bne guest_trap @ Guest called HVC
+ bne guest_hvc_trap @ Guest called HVC
/*
* Getting here means host called HVC, we shift parameters and branch
@@ -149,7 +234,14 @@ hyp_hvc:
bx ip
1:
- push {lr}
+ /*
+ * Pushing r2 here is just a way of keeping the stack aligned to
+ * 8 bytes on any path that can trigger a HYP exception. Here,
+ * we may well be about to jump into the guest, and the guest
+ * exit would otherwise be badly decoded by our fancy
+ * "decode-exception-without-a-branch" code...
+ */
+ push {r2, lr}
mov lr, r0
mov r0, r1
@@ -159,7 +251,21 @@ hyp_hvc:
THUMB( orr lr, #1)
blx lr @ Call the HYP function
- pop {lr}
+ pop {r2, lr}
+ eret
+
+guest_hvc_trap:
+ movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+ movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+ ldr r0, [sp] @ Guest's r0
+ teq r0, r2
+ bne guest_trap
+ add sp, sp, #12
+ @ Returns:
+ @ r0 = 0
+ @ r1 = HSR value (perfectly predictable)
+ @ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
+ mov r0, #0
eret
guest_trap:
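
guest_hvc_trap gives guests a fast path for the SMCCC 1.1 Spectre-v2
interface: a guest asks for branch-predictor invalidation by issuing HVC #0
with r0 = ARM_SMCCC_ARCH_WORKAROUND_1, and since the invalidation already
happened on the hardened vector entry, the hypervisor only has to return 0.
A hedged sketch of the guest side using the standard <linux/arm-smccc.h>
helper (the call site is illustrative):

        /* Hypothetical guest-side invocation of the workaround. */
        static void guest_bp_hardening(void)
        {
                struct arm_smccc_res res;

                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                /* res.a0 == 0 indicates the workaround was applied. */
        }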
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index df73914e81c8..746e7801dcdf 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -38,6 +38,7 @@ ENTRY(__get_user_1)
mov r0, #0
ret lr
ENDPROC(__get_user_1)
+_ASM_NOKPROBE(__get_user_1)
ENTRY(__get_user_2)
check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +59,7 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_2)
+_ASM_NOKPROBE(__get_user_2)
ENTRY(__get_user_4)
check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +67,7 @@ ENTRY(__get_user_4)
mov r0, #0
ret lr
ENDPROC(__get_user_4)
+_ASM_NOKPROBE(__get_user_4)