Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                                 |  12
-rw-r--r--  arch/arm64/Makefile                                |   2
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi  |  14
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi       |   4
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-glue.c                |   2
-rw-r--r--  arch/arm64/crypto/aes-glue.c                       |   2
-rw-r--r--  arch/arm64/kvm/nested.c                            |   2
-rw-r--r--  arch/arm64/kvm/pmu-emul.c                          |  62
-rw-r--r--  arch/arm64/kvm/vgic/vgic-debug.c                   |   5
-rw-r--r--  arch/arm64/kvm/vgic/vgic-init.c                    |   2
-rw-r--r--  arch/arm64/kvm/vgic/vgic-its.c                     |  77
-rw-r--r--  arch/arm64/kvm/vgic/vgic-mmio-v2.c                 |  12
-rw-r--r--  arch/arm64/kvm/vgic/vgic-mmio-v3.c                 |  13
-rw-r--r--  arch/arm64/kvm/vgic/vgic-mmio.c                    |  38
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v2.c                      |   2
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v3.c                      |   2
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v4.c                      |   4
-rw-r--r--  arch/arm64/kvm/vgic/vgic.c                         |  43
-rw-r--r--  arch/arm64/kvm/vgic/vgic.h                         |  27
19 files changed, 202 insertions(+), 123 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d743737bf9ce..100570a048c5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -14,7 +14,6 @@ config ARM64
select ARCH_HAS_DEBUG_WX
select ARCH_BINFMT_ELF_EXTRA_PHDRS
select ARCH_BINFMT_ELF_STATE
- select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
@@ -1236,6 +1235,17 @@ config HISILICON_ERRATUM_161600802
If unsure, say Y.
+config HISILICON_ERRATUM_162100801
+ bool "Hip09 162100801 erratum support"
+ default y
+ help
+ When enabling GICv4.1 on Hip09, VMAPP will fail to clear some caches
+ during the unmapping operation, which will cause some vSGIs to be lost.
+ To fix the issue, invalidate the related vPE cache through GICR_INVALLR
+ after VMOVP.
+
+ If unsure, say Y.
+
config QCOM_FALKOR_ERRATUM_1003
bool "Falkor E1003: Incorrect translation due to ASID change"
default y
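The Kconfig entry above only gates the workaround; the actual fix lives in the GICv3 ITS driver. A heavily hedged sketch of the invalidation it describes, using the kernel's real GICR_INVALLR field definitions but an illustrative helper name (the upstream quirk plumbing is not part of this diff):

    /* Illustrative sketch only -- not the literal upstream workaround. */
    static void hip09_162100801_invall_vpe(struct its_vpe *vpe,
					   void __iomem *rdbase)
    {
	u64 val;

	/* Invalidate all cached vLPI/vSGI state for this vPE... */
	val  = GICR_INVALLR_V;
	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
	gic_write_lpir(val, rdbase + GICR_INVALLR);

	/* ...and wait for the redistributor to complete it. */
	wait_for_syncr(rdbase);
    }

Issued after VMOVP, an invalidation of this shape flushes the stale vPE cache state the erratum leaves behind.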
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 9efd3f37c2fd..358c68565bfd 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -71,7 +71,7 @@ stack_protector_prepare: prepare0
-mstack-protector-guard-reg=sp_el0 \
-mstack-protector-guard-offset=$(shell \
awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
- include/generated/asm-offsets.h))
+ $(objtree)/include/generated/asm-offsets.h))
endif
ifeq ($(CONFIG_ARM64_BTI_KERNEL),y)
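The path fix matters for out-of-tree (O=) builds, where generated headers land under $(objtree) rather than the source tree. For context, the TSK_STACK_CANARY constant that the awk invocation extracts is emitted by the asm-offsets mechanism; a simplified sketch, assuming the usual kbuild pattern (the real arch/arm64/kernel/asm-offsets.c defines many more constants):

    #include <linux/kbuild.h>
    #include <linux/sched.h>

    int main(void)
    {
	/*
	 * Emits "#define TSK_STACK_CANARY <offset>" into
	 * $(objtree)/include/generated/asm-offsets.h.
	 */
	DEFINE(TSK_STACK_CANARY,
	       offsetof(struct task_struct, stack_canary));
	return 0;
    }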
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi
index ae0379fd42a9..dfc5c2f0ddef 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi
@@ -14,6 +14,7 @@
compatible = "melfas,mip4_ts";
reg = <0x34>;
interrupts-extended = <&pio 88 IRQ_TYPE_LEVEL_LOW>;
+ status = "fail-needs-probe";
};
/*
@@ -26,6 +27,7 @@
reg = <0x20>;
hid-descr-addr = <0x0020>;
interrupts-extended = <&pio 88 IRQ_TYPE_LEVEL_LOW>;
+ status = "fail-needs-probe";
};
/* Lenovo Ideapad C330 uses G2Touch touchscreen as a 2nd source touchscreen */
@@ -35,6 +37,7 @@
hid-descr-addr = <0x0001>;
interrupt-parent = <&pio>;
interrupts = <88 IRQ_TYPE_LEVEL_LOW>;
+ status = "fail-needs-probe";
};
};
@@ -47,6 +50,8 @@
trackpad2: trackpad@2c {
compatible = "hid-over-i2c";
interrupts-extended = <&pio 117 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&trackpad_irq>;
reg = <0x2c>;
hid-descr-addr = <0x0020>;
/*
@@ -58,6 +63,7 @@
*/
vdd-supply = <&mt6397_vgp6_reg>;
wakeup-source;
+ status = "fail-needs-probe";
};
};
@@ -82,3 +88,11 @@
};
};
};
+
+&touchscreen {
+ status = "fail-needs-probe";
+};
+
+&trackpad {
+ status = "fail-needs-probe";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
index b4d85147b77b..eee64461421f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
@@ -358,12 +358,12 @@
&i2c4 {
clock-frequency = <400000>;
status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&trackpad_irq>;
trackpad: trackpad@15 {
compatible = "elan,ekth3000";
interrupts-extended = <&pio 117 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&trackpad_irq>;
reg = <0x15>;
vcc-supply = <&mt6397_vgp6_reg>;
wakeup-source;
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index a523b519700f..a2b5d6f20f4d 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -18,7 +18,7 @@
#include "aes-ce-setkey.h"
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
+MODULE_IMPORT_NS("CRYPTO_INTERNAL");
static int num_rounds(struct crypto_aes_ctx *ctx)
{
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index a147e847a5a1..b0150999743f 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -1048,7 +1048,7 @@ unregister_ciphers:
#ifdef USE_V8_CRYPTO_EXTENSIONS
module_cpu_feature_match(AES, aes_init);
-EXPORT_SYMBOL_NS(ce_aes_mac_update, CRYPTO_INTERNAL);
+EXPORT_SYMBOL_NS(ce_aes_mac_update, "CRYPTO_INTERNAL");
#else
module_init(aes_init);
EXPORT_SYMBOL(neon_aes_ecb_encrypt);
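Both crypto hunks track the tree-wide conversion of symbol namespaces from bare preprocessor tokens to string literals. Consumers import the namespace in the same quoted form; a minimal sketch of a hypothetical importer module:

    #include <linux/module.h>

    /* Quoted form: after the conversion the macro expects a string literal. */
    MODULE_IMPORT_NS("CRYPTO_INTERNAL");

    MODULE_DESCRIPTION("hypothetical consumer of ce_aes_mac_update()");
    MODULE_LICENSE("GPL");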
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index aeaa6017ffd8..9b36218b48de 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -951,7 +951,7 @@ u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
return v;
}
-static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
+static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
{
int i = sr - __SANITISED_REG_START__;
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 8ad62284fa23..456102bc0b55 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -274,12 +274,23 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
irq_work_sync(&vcpu->arch.pmu.overflow_work);
}
-bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu)
{
- unsigned int hpmn;
+ unsigned int hpmn, n;
- if (!vcpu_has_nv(vcpu) || idx == ARMV8_PMU_CYCLE_IDX)
- return false;
+ if (!vcpu_has_nv(vcpu))
+ return 0;
+
+ hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
+ n = vcpu->kvm->arch.pmcr_n;
+
+ /*
+ * Programming HPMN to a value greater than PMCR_EL0.N is
+ * CONSTRAINED UNPREDICTABLE. Make the implementation choice that an
+ * UNKNOWN number of counters (in our case, zero) are reserved for EL2.
+ */
+ if (hpmn >= n)
+ return 0;
/*
* Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
@@ -288,20 +299,22 @@ bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
* implementation choice that all counters are included in the second
* range reserved for EL2/EL3.
*/
- hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
- return idx >= hpmn;
+ return GENMASK(n - 1, hpmn);
+}
+
+bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+{
+ return kvm_pmu_hyp_counter_mask(vcpu) & BIT(idx);
}
u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
- u64 hpmn;
if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
return mask;
- hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
- return mask & ~GENMASK(vcpu->kvm->arch.pmcr_n - 1, hpmn);
+ return mask & ~kvm_pmu_hyp_counter_mask(vcpu);
}
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
@@ -375,15 +388,30 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
}
}
-static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
+/*
+ * Returns the PMU overflow state, which is true if there exists an event
+ * counter where the values of the global enable control, PMOVSSET_EL0[n], and
+ * PMINTENSET_EL1[n] are all 1.
+ */
+static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
- u64 reg = 0;
+ u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
- if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
- reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
- reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
- reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
- }
+ reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+
+ /*
+ * PMCR_EL0.E is the global enable control for event counters available
+ * to EL0 and EL1.
+ */
+ if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
+ reg &= kvm_pmu_hyp_counter_mask(vcpu);
+
+ /*
+ * Otherwise, MDCR_EL2.HPME is the global enable control for event
+ * counters reserved for EL2.
+ */
+ if (!(vcpu_read_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME))
+ reg &= ~kvm_pmu_hyp_counter_mask(vcpu);
return reg;
}
@@ -396,7 +424,7 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
if (!kvm_vcpu_has_pmu(vcpu))
return;
- overflow = !!kvm_pmu_overflow_status(vcpu);
+ overflow = kvm_pmu_overflow_status(vcpu);
if (pmu->irq_level == overflow)
return;
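A worked example of the new helper's arithmetic, with hypothetical values (PMCR_EL0.N == 8, MDCR_EL2.HPMN == 6) not taken from the patch:

    /* Hypothetical: n = PMCR_EL0.N = 8, hpmn = MDCR_EL2.HPMN = 6. */
    static inline u64 example_hyp_counter_mask(void)
    {
	unsigned int hpmn = 6, n = 8;

	if (hpmn >= n)			/* CONSTRAINED UNPREDICTABLE case */
		return 0;
	return GENMASK(n - 1, hpmn);	/* 0xc0: counters 6 and 7 */
    }

With these values, kvm_pmu_counter_is_hyp() reports true only for counters 6 and 7, and kvm_pmu_accessible_counter_mask() strips exactly those two bits for a non-EL2 guest context.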
diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
index e1397ab2072a..afb018528bc3 100644
--- a/arch/arm64/kvm/vgic/vgic-debug.c
+++ b/arch/arm64/kvm/vgic/vgic-debug.c
@@ -287,7 +287,10 @@ static int vgic_debug_show(struct seq_file *s, void *v)
* Expect this to succeed, as iter_mark_lpis() takes a reference on
* every LPI to be visited.
*/
- irq = vgic_get_irq(kvm, vcpu, iter->intid);
+ if (iter->intid < VGIC_NR_PRIVATE_IRQS)
+ irq = vgic_get_vcpu_irq(vcpu, iter->intid);
+ else
+ irq = vgic_get_irq(kvm, iter->intid);
if (WARN_ON_ONCE(!irq))
return -EINVAL;
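vgic_get_vcpu_irq() is introduced elsewhere in this series (see vgic.c/vgic.h in the diffstat) and is not shown here. A plausible sketch of its shape, assuming it resolves private (SGI/PPI) INTIDs against per-vCPU storage and defers everything else to vgic_get_irq():

    /* Hedged sketch -- see vgic.c in this series for the real helper. */
    struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid)
    {
	if (WARN_ON(!vcpu))
		return NULL;

	/* SGIs and PPIs live in per-vCPU storage... */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return &vcpu->arch.vgic_cpu.private_irqs[intid];

	/* ...everything else is per-VM. */
	return vgic_get_irq(vcpu->kvm, intid);
    }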
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 48c952563e85..bc7e22ab5d81 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -322,7 +322,7 @@ int vgic_init(struct kvm *kvm)
goto out;
for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
- struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, i);
switch (dist->vgic_model) {
case KVM_DEV_TYPE_ARM_VGIC_V3:
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 198296933e7e..f4c4494645c3 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -31,6 +31,41 @@ static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
struct kvm_vcpu *filter_vcpu, bool needs_inv);
+#define vgic_its_read_entry_lock(i, g, valp, t) \
+ ({ \
+ int __sz = vgic_its_get_abi(i)->t##_esz; \
+ struct kvm *__k = (i)->dev->kvm; \
+ int __ret; \
+ \
+ BUILD_BUG_ON(NR_ITS_ABIS == 1 && \
+ sizeof(*(valp)) != ABI_0_ESZ); \
+ if (NR_ITS_ABIS > 1 && \
+ KVM_BUG_ON(__sz != sizeof(*(valp)), __k)) \
+ __ret = -EINVAL; \
+ else \
+ __ret = kvm_read_guest_lock(__k, (g), \
+ valp, __sz); \
+ __ret; \
+ })
+
+#define vgic_its_write_entry_lock(i, g, val, t) \
+ ({ \
+ int __sz = vgic_its_get_abi(i)->t##_esz; \
+ struct kvm *__k = (i)->dev->kvm; \
+ typeof(val) __v = (val); \
+ int __ret; \
+ \
+ BUILD_BUG_ON(NR_ITS_ABIS == 1 && \
+ sizeof(__v) != ABI_0_ESZ); \
+ if (NR_ITS_ABIS > 1 && \
+ KVM_BUG_ON(__sz != sizeof(__v), __k)) \
+ __ret = -EINVAL; \
+ else \
+ __ret = vgic_write_guest_lock(__k, (g), \
+ &__v, __sz); \
+ __ret; \
+ })
+
/*
* Creates a new (reference to a) struct vgic_irq for a given LPI.
* If this LPI is already mapped on another ITS, we increase its refcount
@@ -42,7 +77,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
struct kvm_vcpu *vcpu)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
+ struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
unsigned long flags;
int ret;
@@ -419,7 +454,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
last_byte_offset = byte_offset;
}
- irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+ irq = vgic_get_irq(vcpu->kvm, intid);
if (!irq)
continue;
@@ -794,7 +829,7 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
its_free_ite(kvm, ite);
- return vgic_its_write_entry_lock(its, gpa, 0, ite_esz);
+ return vgic_its_write_entry_lock(its, gpa, 0ULL, ite);
}
return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
@@ -1143,7 +1178,6 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
bool valid = its_cmd_get_validbit(its_cmd);
u8 num_eventid_bits = its_cmd_get_size(its_cmd);
gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
- int dte_esz = vgic_its_get_abi(its)->dte_esz;
struct its_device *device;
gpa_t gpa;
@@ -1168,7 +1202,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
* is an error, so we are done in any case.
*/
if (!valid)
- return vgic_its_write_entry_lock(its, gpa, 0, dte_esz);
+ return vgic_its_write_entry_lock(its, gpa, 0ULL, dte);
device = vgic_its_alloc_device(its, device_id, itt_addr,
num_eventid_bits);
@@ -1288,7 +1322,7 @@ int vgic_its_invall(struct kvm_vcpu *vcpu)
unsigned long intid;
xa_for_each(&dist->lpi_xa, intid, irq) {
- irq = vgic_get_irq(kvm, NULL, intid);
+ irq = vgic_get_irq(kvm, intid);
if (!irq)
continue;
@@ -1354,7 +1388,7 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
return 0;
xa_for_each(&dist->lpi_xa, intid, irq) {
- irq = vgic_get_irq(kvm, NULL, intid);
+ irq = vgic_get_irq(kvm, intid);
if (!irq)
continue;
@@ -2090,7 +2124,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
* vgic_its_save_ite - Save an interrupt translation entry at @gpa
*/
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
- struct its_ite *ite, gpa_t gpa, int ite_esz)
+ struct its_ite *ite, gpa_t gpa)
{
u32 next_offset;
u64 val;
@@ -2101,7 +2135,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
ite->collection->collection_id;
val = cpu_to_le64(val);
- return vgic_its_write_entry_lock(its, gpa, val, ite_esz);
+ return vgic_its_write_entry_lock(its, gpa, val, ite);
}
/**
@@ -2201,7 +2235,7 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
return -EACCES;
- ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
+ ret = vgic_its_save_ite(its, device, ite, gpa);
if (ret)
return ret;
}
@@ -2240,10 +2274,9 @@ static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
* @its: ITS handle
* @dev: ITS device
* @ptr: GPA
- * @dte_esz: device table entry size
*/
static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
- gpa_t ptr, int dte_esz)
+ gpa_t ptr)
{
u64 val, itt_addr_field;
u32 next_offset;
@@ -2256,7 +2289,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
(dev->num_eventid_bits - 1));
val = cpu_to_le64(val);
- return vgic_its_write_entry_lock(its, ptr, val, dte_esz);
+ return vgic_its_write_entry_lock(its, ptr, val, dte);
}
/**
@@ -2332,10 +2365,8 @@ static int vgic_its_device_cmp(void *priv, const struct list_head *a,
*/
static int vgic_its_save_device_tables(struct vgic_its *its)
{
- const struct vgic_its_abi *abi = vgic_its_get_abi(its);
u64 baser = its->baser_device_table;
struct its_device *dev;
- int dte_esz = abi->dte_esz;
if (!(baser & GITS_BASER_VALID))
return 0;
@@ -2354,7 +2385,7 @@ static int vgic_its_save_device_tables(struct vgic_its *its)
if (ret)
return ret;
- ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
+ ret = vgic_its_save_dte(its, dev, eaddr);
if (ret)
return ret;
}
@@ -2435,7 +2466,7 @@ static int vgic_its_restore_device_tables(struct vgic_its *its)
static int vgic_its_save_cte(struct vgic_its *its,
struct its_collection *collection,
- gpa_t gpa, int esz)
+ gpa_t gpa)
{
u64 val;
@@ -2444,7 +2475,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
collection->collection_id);
val = cpu_to_le64(val);
- return vgic_its_write_entry_lock(its, gpa, val, esz);
+ return vgic_its_write_entry_lock(its, gpa, val, cte);
}
/*
@@ -2452,7 +2483,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
* Return +1 on success, 0 if the entry was invalid (which should be
* interpreted as end-of-table), and a negative error value for generic errors.
*/
-static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
+static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa)
{
struct its_collection *collection;
struct kvm *kvm = its->dev->kvm;
@@ -2460,7 +2491,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
u64 val;
int ret;
- ret = vgic_its_read_entry_lock(its, gpa, &val, esz);
+ ret = vgic_its_read_entry_lock(its, gpa, &val, cte);
if (ret)
return ret;
val = le64_to_cpu(val);
@@ -2507,7 +2538,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
list_for_each_entry(collection, &its->collection_list, coll_list) {
- ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
+ ret = vgic_its_save_cte(its, collection, gpa);
if (ret)
return ret;
gpa += cte_esz;
@@ -2521,7 +2552,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
* table is not fully filled, add a last dummy element
* with valid bit unset
*/
- return vgic_its_write_entry_lock(its, gpa, 0, cte_esz);
+ return vgic_its_write_entry_lock(its, gpa, 0ULL, cte);
}
/*
@@ -2546,7 +2577,7 @@ static int vgic_its_restore_collection_table(struct vgic_its *its)
max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
while (read < max_size) {
- ret = vgic_its_restore_cte(its, gpa, cte_esz);
+ ret = vgic_its_restore_cte(its, gpa);
if (ret <= 0)
break;
gpa += cte_esz;
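On the macro pair added at the top of this file: the trailing type token (ite, dte or cte) is pasted into the ABI's per-entry size member via t##_esz, which is what lets the call sites above drop their *_esz parameters. Roughly, for one of the diff's own calls:

    /*
     * vgic_its_write_entry_lock(its, gpa, 0ULL, dte) resolves
     * __sz = vgic_its_get_abi(its)->dte_esz, and checks
     * sizeof(0ULL) against ABI_0_ESZ at build time while only one
     * ABI exists (NR_ITS_ABIS == 1); with more than one ABI the
     * size mismatch is caught at runtime by KVM_BUG_ON() instead.
     */

This also explains the 0 -> 0ULL changes at the call sites: the value's own size now has to match the entry size being written.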
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c
index e070cda86e12..f25fccb1f8e6 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c
@@ -148,7 +148,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
if (!(targets & (1U << c)))
continue;
- irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
+ irq = vgic_get_vcpu_irq(vcpu, intid);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true;
@@ -167,7 +167,7 @@ static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
u64 val = 0;
for (i = 0; i < len; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
val |= (u64)irq->targets << (i * 8);
@@ -191,7 +191,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
return;
for (i = 0; i < len; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, intid + i);
int target;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
@@ -213,7 +213,7 @@ static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
u64 val = 0;
for (i = 0; i < len; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
val |= (u64)irq->source << (i * 8);
@@ -231,7 +231,7 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
unsigned long flags;
for (i = 0; i < len; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
@@ -253,7 +253,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
unsigned long flags;
for (i = 0; i < len; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 9e50928f5d7d..ae4c0593d114 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -194,7 +194,7 @@ static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
int intid = VGIC_ADDR_TO_INTID(addr, 64);
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, intid);
unsigned long ret = 0;
if (!irq)
@@ -220,7 +220,7 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
if (addr & 4)
return;
- irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+ irq = vgic_get_irq(vcpu->kvm, intid);
if (!irq)
return;
@@ -530,6 +530,7 @@ static void vgic_mmio_write_invlpi(struct kvm_vcpu *vcpu,
unsigned long val)
{
struct vgic_irq *irq;
+ u32 intid;
/*
* If the guest wrote only to the upper 32bit part of the
@@ -541,9 +542,13 @@ static void vgic_mmio_write_invlpi(struct kvm_vcpu *vcpu,
if ((addr & 4) || !vgic_lpis_enabled(vcpu))
return;
+ intid = lower_32_bits(val);
+ if (intid < VGIC_MIN_LPI)
+ return;
+
vgic_set_rdist_busy(vcpu, true);
- irq = vgic_get_irq(vcpu->kvm, NULL, lower_32_bits(val));
+ irq = vgic_get_irq(vcpu->kvm, intid);
if (irq) {
vgic_its_inv_lpi(vcpu->kvm, irq);
vgic_put_irq(vcpu->kvm, irq);
@@ -1020,7 +1025,7 @@ int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
static void vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, u32 sgi, bool allow_group1)
{
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, sgi);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, sgi);
unsigned long flags;
raw_spin_lock_irqsave(&irq->irq_lock, flags);
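Context for the new guard in vgic_mmio_write_invlpi(): LPI INTIDs start at VGIC_MIN_LPI (8192), so a smaller value written to GICR_INVLPIR cannot name an LPI and is now filtered out before the redistributor is marked busy. A small illustration with a hypothetical write value:

    /* Hypothetical guest write: INTID 42 is in the SGI/PPI/SPI range. */
    static bool example_invlpi_is_ignored(u64 val)
    {
	u32 intid = lower_32_bits(val);

	return intid < VGIC_MIN_LPI;	/* 8192: below this, not an LPI */
    }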
diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c
index cf76523a2194..e416e433baff 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio.c
@@ -50,7 +50,7 @@ unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
/* Loop over all IRQs affected by this read */
for (i = 0; i < len * 8; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
if (irq->group)
value |= BIT(i);
@@ -74,7 +74,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
unsigned long flags;
for (i = 0; i < len * 8; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->group = !!(val & BIT(i));
@@ -102,7 +102,7 @@ unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
/* Loop over all IRQs affected by this read */
for (i = 0; i < len * 8; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
if (irq->enabled)
value |= (1U << i);
@@ -122,7 +122,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
unsigned long flags;
for_each_set_bit(i, &val, len * 8) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
@@ -171,7 +171,7 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
unsigned long flags;
for_each_set_bit(i, &val, len * 8) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
@@ -193,7 +193,7 @@ int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
unsigned long flags;
for_each_set_bit(i, &val, len * 8) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->enabled = true;
@@ -214,7 +214,7 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
unsigned long flags;
for_each_set_bit(i, &val, len * 8) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
irq->enabled = false;
@@ -236,7 +236,7 @@ static unsigned long __read_pending(struct kvm_vcpu *vcpu,
/* Loop over all IRQs affected by this read */
for (i = 0; i < len * 8; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
unsigned long flags;
bool val;
@@ -309,7 +309,7 @@ static void __set_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len,
unsigned long flags;
for_each_set_bit(i, &val, len * 8) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
/* GICD_ISPENDR0 SGI bits are WI when written from the guest. */
if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
@@ -395,7 +395,7 @@ static void __clear_pending(struct kvm_vcpu *vcpu,
unsigned long flags;
for_each_set_bit(i, &val, len * 8) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
/* GICD_ICPENDR0 SGI bits are WI when written from the guest. */
if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
@@ -494,7 +494,7 @@ static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
/* Loop over all IRQs affected by this read */
for (i = 0; i < len * 8; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
/*
* Even for HW interrupts, don't evaluate the HW state as
@@ -598,7 +598,7 @@ static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
int i;
for_each_set_bit(i, &val, len * 8) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
vgic_mmio_change_active(vcpu, irq, false);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -635,7 +635,7 @@ static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
int i;
for_each_set_bit(i, &val, len * 8) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
vgic_mmio_change_active(vcpu, irq, true);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -672,7 +672,7 @@ unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
u64 val = 0;
for (i = 0; i < len; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
val |= (u64)irq->priority << (i * 8);
@@ -698,7 +698,7 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
unsigned long flags;
for (i = 0; i < len; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
/* Narrow the priority range to what we actually support */
@@ -719,7 +719,7 @@ unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
int i;
for (i = 0; i < len * 4; i++) {
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
if (irq->config == VGIC_CONFIG_EDGE)
value |= (2U << (i * 2));
@@ -750,7 +750,7 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
if (intid + i < VGIC_NR_PRIVATE_IRQS)
continue;
- irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ irq = vgic_get_irq(vcpu->kvm, intid + i);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
if (test_bit(i * 2 + 1, &val))
@@ -775,7 +775,7 @@ u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
continue;
- irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ irq = vgic_get_vcpu_irq(vcpu, intid + i);
if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
val |= (1U << i);
@@ -799,7 +799,7 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
continue;
- irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ irq = vgic_get_vcpu_irq(vcpu, intid + i);
/*
* Line level is set irrespective of irq type
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index ae5a44d5702d..381673f03c39 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -72,7 +72,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
kvm_notify_acked_irq(vcpu->kvm, 0,
intid - VGIC_NR_PRIVATE_IRQS);
- irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
+ irq = vgic_get_vcpu_irq(vcpu, intid);
raw_spin_lock(&irq->irq_lock);
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c