Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/arch_gicv3.h      | 114
-rw-r--r--  arch/arm/include/asm/kvm_arm.h         | 239
-rw-r--r--  arch/arm/include/asm/kvm_asm.h         |  77
-rw-r--r--  arch/arm/include/asm/kvm_coproc.h      |  36
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h     | 372
-rw-r--r--  arch/arm/include/asm/kvm_host.h        | 456
-rw-r--r--  arch/arm/include/asm/kvm_hyp.h         | 127
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h         | 435
-rw-r--r--  arch/arm/include/asm/kvm_ras.h         |  14
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h  |  20
-rw-r--r--  arch/arm/include/asm/pgtable.h         |   9
-rw-r--r--  arch/arm/include/asm/sections.h        |   6
-rw-r--r--  arch/arm/include/asm/stage2_pgtable.h  |  75
-rw-r--r--  arch/arm/include/asm/virt.h            |  17
14 files changed, 1 insertion(+), 1996 deletions(-)
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index c815477b4303..413abfb42989 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -38,71 +38,6 @@
#define ICC_AP1R2 __ICC_AP1Rx(2)
#define ICC_AP1R3 __ICC_AP1Rx(3)
-#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5)
-
-#define ICH_VSEIR __ACCESS_CP15(c12, 4, c9, 4)
-#define ICH_HCR __ACCESS_CP15(c12, 4, c11, 0)
-#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1)
-#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2)
-#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3)
-#define ICH_ELRSR __ACCESS_CP15(c12, 4, c11, 5)
-#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7)
-
-#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x)
-#define __LR8(x) __ACCESS_CP15(c12, 4, c13, x)
-
-#define ICH_LR0 __LR0(0)
-#define ICH_LR1 __LR0(1)
-#define ICH_LR2 __LR0(2)
-#define ICH_LR3 __LR0(3)
-#define ICH_LR4 __LR0(4)
-#define ICH_LR5 __LR0(5)
-#define ICH_LR6 __LR0(6)
-#define ICH_LR7 __LR0(7)
-#define ICH_LR8 __LR8(0)
-#define ICH_LR9 __LR8(1)
-#define ICH_LR10 __LR8(2)
-#define ICH_LR11 __LR8(3)
-#define ICH_LR12 __LR8(4)
-#define ICH_LR13 __LR8(5)
-#define ICH_LR14 __LR8(6)
-#define ICH_LR15 __LR8(7)
-
-/* LR top half */
-#define __LRC0(x) __ACCESS_CP15(c12, 4, c14, x)
-#define __LRC8(x) __ACCESS_CP15(c12, 4, c15, x)
-
-#define ICH_LRC0 __LRC0(0)
-#define ICH_LRC1 __LRC0(1)
-#define ICH_LRC2 __LRC0(2)
-#define ICH_LRC3 __LRC0(3)
-#define ICH_LRC4 __LRC0(4)
-#define ICH_LRC5 __LRC0(5)
-#define ICH_LRC6 __LRC0(6)
-#define ICH_LRC7 __LRC0(7)
-#define ICH_LRC8 __LRC8(0)
-#define ICH_LRC9 __LRC8(1)
-#define ICH_LRC10 __LRC8(2)
-#define ICH_LRC11 __LRC8(3)
-#define ICH_LRC12 __LRC8(4)
-#define ICH_LRC13 __LRC8(5)
-#define ICH_LRC14 __LRC8(6)
-#define ICH_LRC15 __LRC8(7)
-
-#define __ICH_AP0Rx(x) __ACCESS_CP15(c12, 4, c8, x)
-#define ICH_AP0R0 __ICH_AP0Rx(0)
-#define ICH_AP0R1 __ICH_AP0Rx(1)
-#define ICH_AP0R2 __ICH_AP0Rx(2)
-#define ICH_AP0R3 __ICH_AP0Rx(3)
-
-#define __ICH_AP1Rx(x) __ACCESS_CP15(c12, 4, c9, x)
-#define ICH_AP1R0 __ICH_AP1Rx(0)
-#define ICH_AP1R1 __ICH_AP1Rx(1)
-#define ICH_AP1R2 __ICH_AP1Rx(2)
-#define ICH_AP1R3 __ICH_AP1Rx(3)
-
-/* A32-to-A64 mappings used by VGIC save/restore */
-
#define CPUIF_MAP(a32, a64) \
static inline void write_ ## a64(u32 val) \
{ \
@@ -113,21 +48,6 @@ static inline u32 read_ ## a64(void) \
return read_sysreg(a32); \
} \
-#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64) \
-static inline void write_ ## a64(u64 val) \
-{ \
- write_sysreg(lower_32_bits(val), a32lo);\
- write_sysreg(upper_32_bits(val), a32hi);\
-} \
-static inline u64 read_ ## a64(void) \
-{ \
- u64 val = read_sysreg(a32lo); \
- \
- val |= (u64)read_sysreg(a32hi) << 32; \
- \
- return val; \
-}
-
CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)
@@ -138,40 +58,6 @@ CPUIF_MAP(ICC_AP1R1, ICC_AP1R1_EL1)
CPUIF_MAP(ICC_AP1R2, ICC_AP1R2_EL1)
CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1)
-CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
-CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
-CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
-CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
-CPUIF_MAP(ICH_ELRSR, ICH_ELRSR_EL2)
-CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
-CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
-CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
-CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
-CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
-CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
-CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
-CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
-CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
-CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
-CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
-
-CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
-CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
-CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
-CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
-CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
-CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
-CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
-CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
-CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
-CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
-CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
-CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
-CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
-CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
-CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
-CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
-
#define read_gicreg(r) read_##r()
#define write_gicreg(v, r) write_##r(v)
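
The hunks above drop the 64-bit ICH_* accessors that KVM used from 32-bit code. As a reference for what the deleted CPUIF_MAP_LO_HI macro expanded to, here is a minimal standalone sketch (not kernel code; read_sysreg32()/write_sysreg32() and fake_regs are hypothetical stand-ins for the CP15 accessors): a 64-bit EL2 register is emulated by pairing two 32-bit AArch32 registers that hold the low and high halves.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's CP15 register accessors. */
static uint32_t fake_regs[2];
#define read_sysreg32(idx)        (fake_regs[(idx)])
#define write_sysreg32(val, idx)  (fake_regs[(idx)] = (val))

/* Shape of the accessors CPUIF_MAP_LO_HI(a32lo, a32hi, a64) generated. */
static inline void write_ich_lr0_el2(uint64_t val)
{
	write_sysreg32((uint32_t)val, 0);          /* a32lo: ICH_LR0  */
	write_sysreg32((uint32_t)(val >> 32), 1);  /* a32hi: ICH_LRC0 */
}

static inline uint64_t read_ich_lr0_el2(void)
{
	uint64_t val = read_sysreg32(0);

	val |= (uint64_t)read_sysreg32(1) << 32;
	return val;
}

int main(void)
{
	write_ich_lr0_el2(0x1122334455667788ULL);
	printf("%llx\n", (unsigned long long)read_ich_lr0_el2());
	return 0;
}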
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
deleted file mode 100644
index 9c04bd810d07..000000000000
--- a/arch/arm/include/asm/kvm_arm.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_ARM_H__
-#define __ARM_KVM_ARM_H__
-
-#include <linux/const.h>
-#include <linux/types.h>
-
-/* Hyp Configuration Register (HCR) bits */
-#define HCR_TGE (1 << 27)
-#define HCR_TVM (1 << 26)
-#define HCR_TTLB (1 << 25)
-#define HCR_TPU (1 << 24)
-#define HCR_TPC (1 << 23)
-#define HCR_TSW (1 << 22)
-#define HCR_TAC (1 << 21)
-#define HCR_TIDCP (1 << 20)
-#define HCR_TSC (1 << 19)
-#define HCR_TID3 (1 << 18)
-#define HCR_TID2 (1 << 17)
-#define HCR_TID1 (1 << 16)
-#define HCR_TID0 (1 << 15)
-#define HCR_TWE (1 << 14)
-#define HCR_TWI (1 << 13)
-#define HCR_DC (1 << 12)
-#define HCR_BSU (3 << 10)
-#define HCR_BSU_IS (1 << 10)
-#define HCR_FB (1 << 9)
-#define HCR_VA (1 << 8)
-#define HCR_VI (1 << 7)
-#define HCR_VF (1 << 6)
-#define HCR_AMO (1 << 5)
-#define HCR_IMO (1 << 4)
-#define HCR_FMO (1 << 3)
-#define HCR_PTW (1 << 2)
-#define HCR_SWIO (1 << 1)
-#define HCR_VM 1
-
-/*
- * The bits we set in HCR:
- * TAC: Trap ACTLR
- * TSC: Trap SMC
- * TVM: Trap VM ops (until MMU and caches are on)
- * TSW: Trap cache operations by set/way
- * TWI: Trap WFI
- * TWE: Trap WFE
- * TIDCP: Trap L2CTLR/L2ECTLR
- * BSU_IS: Upgrade barriers to the inner shareable domain
- * FB: Force broadcast of all maintenance operations
- * AMO: Override CPSR.A and enable signaling with VA
- * IMO: Override CPSR.I and enable signaling with VI
- * FMO: Override CPSR.F and enable signaling with VF
- * SWIO: Turn set/way invalidates into set/way clean+invalidate
- */
-#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
- HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
- HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
-
-/* System Control Register (SCTLR) bits */
-#define SCTLR_TE (1 << 30)
-#define SCTLR_EE (1 << 25)
-#define SCTLR_V (1 << 13)
-
-/* Hyp System Control Register (HSCTLR) bits */
-#define HSCTLR_TE (1 << 30)
-#define HSCTLR_EE (1 << 25)
-#define HSCTLR_FI (1 << 21)
-#define HSCTLR_WXN (1 << 19)
-#define HSCTLR_I (1 << 12)
-#define HSCTLR_C (1 << 2)
-#define HSCTLR_A (1 << 1)
-#define HSCTLR_M 1
-#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
- HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
-
-/* TTBCR and HTCR Registers bits */
-#define TTBCR_EAE (1 << 31)
-#define TTBCR_IMP (1 << 30)
-#define TTBCR_SH1 (3 << 28)
-#define TTBCR_ORGN1 (3 << 26)
-#define TTBCR_IRGN1 (3 << 24)
-#define TTBCR_EPD1 (1 << 23)
-#define TTBCR_A1 (1 << 22)
-#define TTBCR_T1SZ (7 << 16)
-#define TTBCR_SH0 (3 << 12)
-#define TTBCR_ORGN0 (3 << 10)
-#define TTBCR_IRGN0 (3 << 8)
-#define TTBCR_EPD0 (1 << 7)
-#define TTBCR_T0SZ (7 << 0)
-#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
-
-/* Hyp System Trap Register */
-#define HSTR_T(x) (1 << x)
-#define HSTR_TTEE (1 << 16)
-#define HSTR_TJDBX (1 << 17)
-
-/* Hyp Coprocessor Trap Register */
-#define HCPTR_TCP(x) (1 << x)
-#define HCPTR_TCP_MASK (0x3fff)
-#define HCPTR_TASE (1 << 15)
-#define HCPTR_TTA (1 << 20)
-#define HCPTR_TCPAC (1 << 31)
-
-/* Hyp Debug Configuration Register bits */
-#define HDCR_TDRA (1 << 11)
-#define HDCR_TDOSA (1 << 10)
-#define HDCR_TDA (1 << 9)
-#define HDCR_TDE (1 << 8)
-#define HDCR_HPME (1 << 7)
-#define HDCR_TPM (1 << 6)
-#define HDCR_TPMCR (1 << 5)
-#define HDCR_HPMN_MASK (0x1F)
-
-/*
- * The architecture supports 40-bit IPA as input to the 2nd stage translations
- * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
- * space.
- */
-#define KVM_PHYS_SHIFT (40)
-
-#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
-
-/* Virtualization Translation Control Register (VTCR) bits */
-#define VTCR_SH0 (3 << 12)
-#define VTCR_ORGN0 (3 << 10)
-#define VTCR_IRGN0 (3 << 8)
-#define VTCR_SL0 (3 << 6)
-#define VTCR_S (1 << 4)
-#define VTCR_T0SZ (0xf)
-#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
- VTCR_S | VTCR_T0SZ)
-#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
-#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
-#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
-#define KVM_VTCR_SL0 VTCR_SL_L1
-/* stage-2 input address range defined as 2^(32-T0SZ) */
-#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
-#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
-#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
-
-/* Virtualization Translation Table Base Register (VTTBR) bits */
-#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
-#define VTTBR_X (14 - KVM_T0SZ)
-#else
-#define VTTBR_X (5 - KVM_T0SZ)
-#endif
-#define VTTBR_CNP_BIT _AC(1, UL)
-#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
-#define VTTBR_VMID_SHIFT _AC(48, ULL)
-#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
-
-/* Hyp Syndrome Register (HSR) bits */
-#define HSR_EC_SHIFT (26)
-#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
-#define HSR_IL (_AC(1, UL) << 25)
-#define HSR_ISS (HSR_IL - 1)
-#define HSR_ISV_SHIFT (24)
-#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
-#define HSR_SRT_SHIFT (16)
-#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
-#define HSR_CM (1 << 8)
-#define HSR_FSC (0x3f)
-#define HSR_FSC_TYPE (0x3c)
-#define HSR_SSE (1 << 21)
-#define HSR_WNR (1 << 6)
-#define HSR_CV_SHIFT (24)
-#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
-#define HSR_COND_SHIFT (20)
-#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT)
-
-#define FSC_FAULT (0x04)
-#define FSC_ACCESS (0x08)
-#define FSC_PERM (0x0c)
-#define FSC_SEA (0x10)
-#define FSC_SEA_TTW0 (0x14)
-#define FSC_SEA_TTW1 (0x15)
-#define FSC_SEA_TTW2 (0x16)
-#define FSC_SEA_TTW3 (0x17)
-#define FSC_SECC (0x18)
-#define FSC_SECC_TTW0 (0x1c)
-#define FSC_SECC_TTW1 (0x1d)
-#define FSC_SECC_TTW2 (0x1e)
-#define FSC_SECC_TTW3 (0x1f)
-
-/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
-#define HPFAR_MASK (~0xf)
-
-#define HSR_EC_UNKNOWN (0x00)
-#define HSR_EC_WFI (0x01)
-#define HSR_EC_CP15_32 (0x03)
-#define HSR_EC_CP15_64 (0x04)
-#define HSR_EC_CP14_MR (0x05)
-#define HSR_EC_CP14_LS (0x06)
-#define HSR_EC_CP_0_13 (0x07)
-#define HSR_EC_CP10_ID (0x08)
-#define HSR_EC_JAZELLE (0x09)
-#define HSR_EC_BXJ (0x0A)
-#define HSR_EC_CP14_64 (0x0C)
-#define HSR_EC_SVC_HYP (0x11)
-#define HSR_EC_HVC (0x12)
-#define HSR_EC_SMC (0x13)
-#define HSR_EC_IABT (0x20)
-#define HSR_EC_IABT_HYP (0x21)
-#define HSR_EC_DABT (0x24)
-#define HSR_EC_DABT_HYP (0x25)
-#define HSR_EC_MAX (0x3f)
-
-#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
-
-#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1)
-
-#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
-#define HSR_DABT_CM (_AC(1, UL) << 8)
-
-#define kvm_arm_exception_type \
- {0, "RESET" }, \
- {1, "UNDEFINED" }, \
- {2, "SOFTWARE" }, \
- {3, "PREF_ABORT" }, \
- {4, "DATA_ABORT" }, \
- {5, "IRQ" }, \
- {6, "FIQ" }, \
- {7, "HVC" }
-
-#define HSRECN(x) { HSR_EC_##x, #x }
-
-#define kvm_arm_exception_class \
- HSRECN(UNKNOWN), HSRECN(WFI), HSRECN(CP15_32), HSRECN(CP15_64), \
- HSRECN(CP14_MR), HSRECN(CP14_LS), HSRECN(CP_0_13), HSRECN(CP10_ID), \
- HSRECN(JAZELLE), HSRECN(BXJ), HSRECN(CP14_64), HSRECN(SVC_HYP), \
- HSRECN(HVC), HSRECN(SMC), HSRECN(IABT), HSRECN(IABT_HYP), \
- HSRECN(DABT), HSRECN(DABT_HYP)
-
-
-#endif /* __ARM_KVM_ARM_H__ */
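
The kvm_arm.h removal above takes out the HSR field definitions that the exit handlers decoded. As an illustration of how those masks were laid out, here is a hedged userspace sketch (local copies of the removed constants; decode_hsr() is illustrative, not a kernel function) that splits a syndrome value into exception class, instruction length and ISS, and pulls out the HVC immediate for that class.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the field definitions removed above. */
#define HSR_EC_SHIFT	 26
#define HSR_EC		 (0x3fUL << HSR_EC_SHIFT)
#define HSR_IL		 (1UL << 25)
#define HSR_ISS		 (HSR_IL - 1)
#define HSR_EC_HVC	 0x12
#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)

/* Decode the exception class and, for HVC exits, the immediate. */
static void decode_hsr(uint32_t hsr)
{
	uint32_t ec  = (hsr & HSR_EC) >> HSR_EC_SHIFT;
	uint32_t iss = hsr & HSR_ISS;

	printf("EC=0x%02x IL=%u ISS=0x%06x\n",
	       ec, (unsigned)!!(hsr & HSR_IL), iss);
	if (ec == HSR_EC_HVC)
		printf("HVC immediate: %u\n",
		       (unsigned)(iss & HSR_HVC_IMM_MASK));
}

int main(void)
{
	decode_hsr((HSR_EC_HVC << HSR_EC_SHIFT) | HSR_IL | 0x10);
	return 0;
}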
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
deleted file mode 100644
index f615830f9f57..000000000000
--- a/arch/arm/include/asm/kvm_asm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_ASM_H__
-#define __ARM_KVM_ASM_H__
-
-#include <asm/virt.h>
-
-#define ARM_EXIT_WITH_ABORT_BIT 31
-#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
-#define ARM_EXCEPTION_IS_TRAP(x) \
- (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_PREF_ABORT || \
- ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_DATA_ABORT || \
- ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_HVC)
-#define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
-
-#define ARM_EXCEPTION_RESET 0
-#define ARM_EXCEPTION_UNDEFINED 1
-#define ARM_EXCEPTION_SOFTWARE 2
-#define ARM_EXCEPTION_PREF_ABORT 3
-#define ARM_EXCEPTION_DATA_ABORT 4
-#define ARM_EXCEPTION_IRQ 5
-#define ARM_EXCEPTION_FIQ 6
-#define ARM_EXCEPTION_HVC 7
-#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
-/*
- * The rr_lo_hi macro swaps a pair of registers depending on
- * current endianness. It is used in conjunction with ldrd and strd
- * instructions that load/store a 64-bit value from/to memory to/from
- * a pair of registers which are used with the mrrc and mcrr instructions.
- * If used with the ldrd/strd instructions, the a1 parameter is the first
- * source/destination register and the a2 parameter is the second
- * source/destination register. Note that the ldrd/strd instructions
- * already swap the bytes within the words correctly according to the
- * endianness setting, but the order of the registers needs to be effectively
- * swapped when used with the mrrc/mcrr instructions.
- */
-#ifdef CONFIG_CPU_ENDIAN_BE8
-#define rr_lo_hi(a1, a2) a2, a1
-#else
-#define rr_lo_hi(a1, a2) a1, a2
-#endif
-
-#define kvm_ksym_ref(kva) (kva)
-
-#ifndef __ASSEMBLY__
-struct kvm;
-struct kvm_vcpu;
-
-extern char __kvm_hyp_init[];
-extern char __kvm_hyp_init_end[];
-
-extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
-
-extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
-
-/* no VHE on 32-bit :( */
-static inline int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { BUG(); return 0; }
-
-extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
-
-extern void __init_stage2_translation(void);
-
-extern u64 __vgic_v3_get_ich_vtr_el2(void);
-extern u64 __vgic_v3_read_vmcr(void);
-extern void __vgic_v3_write_vmcr(u32 vmcr);
-extern void __vgic_v3_init_lrs(void);
-
-#endif
-
-#endif /* __ARM_KVM_ASM_H__ */
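
kvm_asm.h, deleted above, packed an "abort pending" flag into the top bit of the exit code returned by the world-switch code. The following small sketch (local copies of the removed macros, nothing kernel-specific) shows how that packing was meant to be consumed on the host side.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the exit-code packing removed above. */
#define ARM_EXIT_WITH_ABORT_BIT  31
#define ARM_EXCEPTION_CODE(x)    ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
#define ARM_ABORT_PENDING(x)     (!!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT)))
#define ARM_EXCEPTION_IRQ        5

int main(void)
{
	/* An IRQ exit with the "abort pending" flag set in the top bit. */
	uint32_t exit_code = ARM_EXCEPTION_IRQ |
			     (1U << ARM_EXIT_WITH_ABORT_BIT);

	printf("code=%u abort_pending=%d\n",
	       ARM_EXCEPTION_CODE(exit_code), ARM_ABORT_PENDING(exit_code));
	return 0;
}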
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
deleted file mode 100644
index a23826117dd6..000000000000
--- a/arch/arm/include/asm/kvm_coproc.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 Rusty Russell IBM Corporation
- */
-
-#ifndef __ARM_KVM_COPROC_H__
-#define __ARM_KVM_COPROC_H__
-#include <linux/kvm_host.h>
-
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_coproc_target_table {
- unsigned target;
- const struct coproc_reg *table;
- size_t num;
-};
-void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
-
-int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-
-unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-void kvm_coproc_table_init(void);
-
-struct kvm_one_reg;
-int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
-unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
-#endif /* __ARM_KVM_COPROC_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
deleted file mode 100644
index 3944305e81df..000000000000
--- a/arch/arm/include/asm/kvm_emulate.h
+++ /dev/null
@@ -1,372 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_EMULATE_H__
-#define __ARM_KVM_EMULATE_H__
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-#include <asm/cputype.h>
-
-/* arm64 compatibility macros */
-#define PSR_AA32_MODE_FIQ FIQ_MODE
-#define PSR_AA32_MODE_SVC SVC_MODE
-#define PSR_AA32_MODE_ABT ABT_MODE
-#define PSR_AA32_MODE_UND UND_MODE
-#define PSR_AA32_T_BIT PSR_T_BIT
-#define PSR_AA32_F_BIT PSR_F_BIT
-#define PSR_AA32_I_BIT PSR_I_BIT
-#define PSR_AA32_A_BIT PSR_A_BIT
-#define PSR_AA32_E_BIT PSR_E_BIT
-#define PSR_AA32_IT_MASK PSR_IT_MASK
-#define PSR_AA32_GE_MASK 0x000f0000
-#define PSR_AA32_DIT_BIT 0x00200000
-#define PSR_AA32_PAN_BIT 0x00400000
-#define PSR_AA32_SSBS_BIT 0x00800000
-#define PSR_AA32_Q_BIT PSR_Q_BIT
-#define PSR_AA32_V_BIT PSR_V_BIT
-#define PSR_AA32_C_BIT PSR_C_BIT
-#define PSR_AA32_Z_BIT PSR_Z_BIT
-#define PSR_AA32_N_BIT PSR_N_BIT
-
-unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-
-static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
-{
- return vcpu_reg(vcpu, reg_num);
-}
-
-unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
-
-static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
-{
- return *__vcpu_spsr(vcpu);
-}
-
-static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
-{
- *__vcpu_spsr(vcpu) = v;
-}
-
-static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
-{
- return spsr;
-}
-
-static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
- u8 reg_num)
-{
- return *vcpu_reg(vcpu, reg_num);
-}
-
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
- unsigned long val)
-{
- *vcpu_reg(vcpu, reg_num) = val;
-}
-
-bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
-void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
-void kvm_inject_undef32(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-
-static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
-{
- kvm_inject_undef32(vcpu);
-}
-
-static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
- kvm_inject_dabt32(vcpu, addr);
-}
-
-static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
- kvm_inject_pabt32(vcpu, addr);
-}
-
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
-{
- return kvm_condition_valid32(vcpu);
-}
-
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
- kvm_skip_instr32(vcpu, is_wide_instr);
-}
-
-static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr = HCR_GUEST_MASK;
-}
-
-static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
-{
- return (unsigned long *)&vcpu->arch.hcr;
-}
-
-static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr &= ~HCR_TWE;
-}
-
-static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr |= HCR_TWE;
-}
-
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
-{
- return true;
-}
-
-static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
-{
- return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
-}
-
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
-{
- return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
-}
-
-static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
-{
- *vcpu_cpsr(vcpu) |= PSR_T_BIT;
-}
-
-static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
- return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
-}
-
-static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
-{
- unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
- return cpsr_mode > USR_MODE;
-}
-
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hsr;
-}
-
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
-{
- u32 hsr = kvm_vcpu_get_hsr(vcpu);
-
- if (hsr & HSR_CV)
- return (hsr & HSR_COND) >> HSR_COND_SHIFT;
-
- return -1;
-}
-
-static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hxfar;
-}
-
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
-{
- return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
-}
-
-static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
-}
-
-static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC);
-}
-
-static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
-}
-
-static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
-}
-
-static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
-{
- return false;
-}
-
-static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
-{
- return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
-}
-
-static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
-}
-
-static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
-}
-
-/* Get Access Size from a data abort */
-static inline unsigned int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
-{
- switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
- case 0:
- return 1;
- case 1:
- return 2;
- case 2:
- return 4;
- default:
- kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
- return 4;
- }
-}
-
-/* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
-}
-
-static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
-}
-
-static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
-}
-
-static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
-}
-
-static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
-}
-
-static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
-{
- switch (kvm_vcpu_trap_get_fault(vcpu)) {
- case FSC_SEA:
- case FSC_SEA_TTW0:
- case FSC_SEA_TTW1:
- case FSC_SEA_TTW2:
- case FSC_SEA_TTW3:
- case FSC_SECC:
- case FSC_SECC_TTW0:
- case FSC_SECC_TTW1:
- case FSC_SECC_TTW2:
- case FSC_SECC_TTW3:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-{
- if (kvm_vcpu_trap_is_iabt(vcpu))
- return false;
-
- return kvm_vcpu_dabt_iswrite(vcpu);
-}
-
-static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
-}
-
-static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
-{
- return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
-}
-
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
- return false;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
- bool flag)
-{
-}
-
-static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
-{
- *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-}
-
-static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
-{
- return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
-}
-
-static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return be16_to_cpu(data & 0xffff);
- default:
- return be32_to_cpu(data);
- }
- } else {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return le16_to_cpu(data & 0xffff);
- default:
- return le32_to_cpu(data);
- }
- }
-}
-
-static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
- unsigned long data,
- unsigned int len)
-{
- if (kvm_vcpu_is_be(vcpu)) {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_be16(data & 0xffff);
- default:
- return cpu_to_be32(data);
- }
- } else {
- switch (len) {
- case 1:
- return data & 0xff;
- case 2:
- return cpu_to_le16(data & 0xffff);
- default:
- return cpu_to_le32(data);
- }
- }
-}
-
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
-
-#endif /* __ARM_KVM_EMULATE_H__ */
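
The MMIO helpers deleted at the end of kvm_emulate.h converted guest data according to the vCPU's endianness. Below is a hedged userspace approximation of vcpu_data_guest_to_host() using the <endian.h> conversion macros; the guest endianness is passed explicitly rather than read from the vCPU's CPSR, and data_guest_to_host() is an illustrative name, not the kernel's.

#define _DEFAULT_SOURCE		/* for be16toh()/le16toh() on glibc */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Interpret an MMIO payload of 'len' bytes according to the guest's
 * endianness, mirroring the switch in vcpu_data_guest_to_host().
 */
static unsigned long data_guest_to_host(int guest_is_be,
					unsigned long data, unsigned int len)
{
	switch (len) {
	case 1:
		return data & 0xff;
	case 2:
		return guest_is_be ? be16toh(data & 0xffff)
				   : le16toh(data & 0xffff);
	default:
		return guest_is_be ? be32toh(data) : le32toh(data);
	}
}

int main(void)
{
	/* Prints 3412 on a little-endian host. */
	printf("%lx\n", data_guest_to_host(1, 0x1234, 2));
	return 0;
}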
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
deleted file mode 100644
index a827b4d60d38..000000000000
--- a/arch/arm/include/asm/kvm_host.h
+++ /dev/null
@@ -1,456 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#ifndef __ARM_KVM_HOST_H__
-#define __ARM_KVM_HOST_H__
-
-#include <linux/arm-smccc.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/kvm_types.h>
-#include <asm/cputype.h>
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/fpstate.h>
-#include <kvm/arm_arch_timer.h>
-
-#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_HAVE_ONE_REG
-#define KVM_HALT_POLL_NS_DEFAULT 500000
-
-#define KVM_VCPU_MAX_FEATURES 2
-
-#include <kvm/arm_vgic.h>
-
-
-#ifdef CONFIG_ARM_GIC_V3
-#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#else
-#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
-#endif
-
-#define KVM_REQ_SLEEP \
- KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
-#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
-#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
-
-DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
-
-static inline int kvm_arm_init_sve(void) { return 0; }
-
-u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
-int __attribute_const__ kvm_target_cpu(void);
-int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
-void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
-
-struct kvm_vmid {
- /* The VMID generation used for the virt. memory system */
- u64 vmid_gen;
- u32 vmid;
-};
-
-struct kvm_arch {
- /* The last vcpu id that ran on each physical CPU */
- int __percpu *last_vcpu_ran;
-
- /*
- * Anything that is not used directly from assembly code goes
- * here.
- */
-
- /* The VMID generation used for the virt. memory system */
- struct kvm_vmid vmid;
-
- /* Stage-2 page table */
- pgd_t *pgd;
- phys_addr_t pgd_phys;
-
- /* Interrupt controller */
- struct vgic_dist vgic;
- int max_vcpus;
-
- /* Mandated version of PSCI */
- u32 psci_version;
-
- /*
- * If we encounter a data abort without valid instruction syndrome
- * information, report this to user space. User space can (and
- * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
- * supported.
- */
- bool return_nisv_io_abort_to_user;
-};
-
-#define KVM_NR_MEM_OBJS 40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
- int nobjs;
- void *objects[KVM_NR_MEM_OBJS];
-};
-
-struct kvm_vcpu_fault_info {
- u32 hsr; /* Hyp Syndrome Register */
- u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
- u32 hpfar; /* Hyp IPA Fault Address Register */
-};
-
-/*
- * 0 is reserved as an invalid value.
- * Order should be kept in sync with the save/restore code.
- */
-enum vcpu_sysreg {
- __INVALID_SYSREG__,
- c0_MPIDR, /* MultiProcessor ID Register */
- c0_CSSELR, /* Cache Size Selection Register */
- c1_SCTLR, /* System Control Register */
- c1_ACTLR, /* Auxiliary Control Register */
- c1_CPACR, /* Coprocessor Access Control */
- c2_TTBR0, /* Translation Table Base Register 0 */
- c2_TTBR0_high, /* TTBR0 top 32 bits */
- c2_TTBR1, /* Translation Table Base Register 1 */
- c2_TTBR1_high, /* TTBR1 top 32 bits */
- c2_TTBCR, /* Translation Table Base Control R. */
- c3_DACR, /* Domain Access Control Register */
- c5_DFSR, /* Data Fault Status Register */
- c5_IFSR, /* Instruction Fault Status Register */
- c5_ADFSR, /* Auxiliary Data Fault Status R */
- c5_AIFSR, /* Auxiliary Instruction Fault Status R */
- c6_DFAR, /* Data Fault Address Register */
- c6_IFAR, /* Instruction Fault Address Register */
- c7_PAR, /* Physical Address Register */
- c7_PAR_high, /* PAR top 32 bits */
- c9_L2CTLR, /* Cortex A15/A7 L2 Control Register */
- c10_PRRR, /* Primary Region Remap Register */
- c10_NMRR, /* Normal Memory Remap Register */
- c12_