author    Tom Lendacky <thomas.lendacky@amd.com>    2025-07-30 09:57:45 -0500
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2025-08-15 12:05:12 +0200
commit    1fec416c03d0a64cc21aa04ce4aa14254b017e6a (patch)
tree      e0d0ebf5a5c840a53f557a8d87b106c4ccf39cb5
parent    a47974c06912f524764594003dd70d0d6adbef75 (diff)
x86/sev: Evict cache lines during SNP memory validation
Commit 7b306dfa326f70114312b320d083b21fa9481e1e upstream.

An SNP cache coherency vulnerability requires a cache line eviction
mitigation when validating memory after a page state change to private.
The specific mitigation is to touch the first and last byte of each 4K
page that is being validated. There is no need to perform the mitigation
when performing a page state change to shared and rescinding validation.

CPUID bit Fn8000001F_EBX[31] defines the COHERENCY_SFW_NO CPUID bit that,
when set, indicates that the software mitigation for this vulnerability
is not needed.

Implement the mitigation and invoke it when validating memory (making it
private) and the COHERENCY_SFW_NO bit is not set, indicating the SNP
guest is vulnerable.

Co-developed-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
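For illustration only, a minimal self-contained C sketch of the pattern the commit message describes: probe CPUID Fn8000001F_EBX[31] (COHERENCY_SFW_NO) and, if the bit is clear, touch the first and last byte of each 4K page. The helper names coherency_sfw_no() and evict_pages(), the PAGE_SIZE_4K constant, and the use of <cpuid.h> are assumptions made for this sketch; the kernel's actual implementation is in the hunks below.

#include <cpuid.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE_4K 4096u

/* CPUID Fn8000001F_EBX[31] is COHERENCY_SFW_NO: mitigation not needed when set. */
static bool coherency_sfw_no(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the leaf is unavailable; treat that as "vulnerable". */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
		return false;

	return ebx & (1u << 31);
}

/* Touch the first and last byte of every 4K page in the range to evict its cache lines. */
static void evict_pages(void *va, size_t npages)
{
	volatile uint8_t sink;
	uint8_t *bytes = va;
	size_t i;

	for (i = 0; i < npages; i++) {
		sink = bytes[i * PAGE_SIZE_4K];
		sink = bytes[i * PAGE_SIZE_4K + PAGE_SIZE_4K - 1];
	}
	(void)sink;
}

A caller that has just validated a contiguous range would then do something like "if (!coherency_sfw_no()) evict_pages(buf, npages);", which is the same shape as the checks added to __page_state_change() and pvalidate_pages() in the diff below.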
-rw-r--r--  arch/x86/boot/compressed/sev.c     |  7
-rw-r--r--  arch/x86/boot/cpuflags.c           | 13
-rw-r--r--  arch/x86/include/asm/cpufeatures.h |  1
-rw-r--r--  arch/x86/kernel/cpu/scattered.c    |  1
-rw-r--r--  arch/x86/kernel/sev-shared.c       | 18
-rw-r--r--  arch/x86/kernel/sev.c              | 11
6 files changed, 50 insertions(+), 1 deletion(-)
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 3c5d5c97f8f7..4f61d48f2575 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -164,6 +164,13 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
*/
if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+
+ /*
+ * If validating memory (making it private) and affected by the
+ * cache-coherency vulnerability, perform the cache eviction mitigation.
+ */
+ if (op == SNP_PAGE_STATE_PRIVATE && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
+ sev_evict_cache((void *)paddr, 1);
}
void snp_set_page_private(unsigned long paddr)
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c
index a83d67ec627d..aa4943432dcf 100644
--- a/arch/x86/boot/cpuflags.c
+++ b/arch/x86/boot/cpuflags.c
@@ -124,5 +124,18 @@ void get_cpuflags(void)
cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
&cpu.flags[1]);
}
+
+ if (max_amd_level >= 0x8000001f) {
+ u32 ebx;
+
+ /*
+ * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
+ * the virtualization flags entry (word 8) and set by
+ * scattered.c, so the bit needs to be explicitly set.
+ */
+ cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
+ if (ebx & BIT(31))
+ set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
+ }
}
}
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 6f6ea3b9a95e..c48a9733e906 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -230,6 +230,7 @@
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
+#define X86_FEATURE_COHERENCY_SFW_NO ( 8*32+ 5) /* "" SNP cache coherency software work around not needed */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index b9e39c9eb274..0d019e6972be 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -45,6 +45,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
+ { X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
{ X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },
{ X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 },
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 3fe76bf17d95..e658e83c62ae 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -1064,3 +1064,21 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
}
}
+
+static inline void sev_evict_cache(void *va, int npages)
+{
+ volatile u8 val __always_unused;
+ u8 *bytes = va;
+ int page_idx;
+
+ /*
+ * For SEV guests, a read from the first/last cache-lines of a 4K page
+ * using the guest key is sufficient to cause a flush of all cache-lines
+ * associated with that 4K page without incurring all the overhead of a
+ * full CLFLUSH sequence.
+ */
+ for (page_idx = 0; page_idx < npages; page_idx++) {
+ val = bytes[page_idx * PAGE_SIZE];
+ val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+ }
+}
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index f8a8249ae117..7b7fa85d1547 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -676,10 +676,12 @@ static u64 __init get_jump_table_addr(void)
static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool validate)
{
- unsigned long vaddr_end;
+ unsigned long vaddr_begin, vaddr_end;
int rc;
vaddr = vaddr & PAGE_MASK;
+
+ vaddr_begin = vaddr;
vaddr_end = vaddr + (npages << PAGE_SHIFT);
while (vaddr < vaddr_end) {
@@ -689,6 +691,13 @@ static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool vali
vaddr = vaddr + PAGE_SIZE;
}
+
+ /*
+ * If validating memory (making it private) and affected by the
+ * cache-coherency vulnerability, perform the cache eviction mitigation.
+ */
+ if (validate && !cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+ sev_evict_cache((void *)vaddr_begin, npages);
}
static void __head early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)