From 216d106c7ff7b3dcabacf2b5dc6c9c40eba7495c Mon Sep 17 00:00:00 2001
From: Brijesh Singh
Date: Thu, 25 Jan 2024 22:11:04 -0600
Subject: x86/sev: Add SEV-SNP host initialization support

The memory integrity guarantees of SEV-SNP are enforced through a new
structure called the Reverse Map Table (RMP). The RMP is a single data
structure shared across the system that contains one entry for every 4K
page of DRAM that may be used by SEV-SNP VMs.

The APM Volume 2 section on Secure Nested Paging (SEV-SNP) details a
number of steps needed to detect/enable SEV-SNP and RMP table support
on the host:

 - Detect SEV-SNP support based on CPUID bit
 - Initialize the RMP table memory reported by the RMP base/end MSR
   registers and configure IOMMU to be compatible with RMP access
   restrictions
 - Set the MtrrFixDramModEn bit in SYSCFG MSR
 - Set the SecureNestedPagingEn and VMPLEn bits in the SYSCFG MSR
 - Configure IOMMU

The RMP table entry format is non-architectural and can vary by
processor. It is defined by the PPR document for each respective CPU
family. Restrict SNP support to CPU models/families which are
compatible with the current RMP table entry format to guard against
any undefined behavior when running on other system types. Future
models/support will handle this through an architectural mechanism to
allow for broader compatibility.

The SNP host code depends on the CONFIG_KVM_AMD_SEV config flag, which
may be enabled even when CONFIG_AMD_MEM_ENCRYPT isn't set, so update
the SNP-specific IOMMU helpers used here to rely on CONFIG_KVM_AMD_SEV
instead of CONFIG_AMD_MEM_ENCRYPT.

Signed-off-by: Brijesh Singh
Co-developed-by: Ashish Kalra
Signed-off-by: Ashish Kalra
Co-developed-by: Tom Lendacky
Signed-off-by: Tom Lendacky
Co-developed-by: Borislav Petkov (AMD)
Signed-off-by: Borislav Petkov (AMD)
Co-developed-by: Michael Roth
Signed-off-by: Michael Roth
Link: https://lore.kernel.org/r/20240126041126.1927228-5-michael.roth@amd.com
---
 arch/x86/include/asm/sev.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/x86/include/asm/sev.h')

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 5b4a1ce3d368..1f59d8ba9776 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -243,4 +243,10 @@ static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
 static inline u64 sev_get_status(void) { return 0; }
 #endif
 
+#ifdef CONFIG_KVM_AMD_SEV
+bool snp_probe_rmptable_info(void);
+#else
+static inline bool snp_probe_rmptable_info(void) { return false; }
+#endif
+
 #endif
--
cgit v1.2.3
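For illustration, here is a minimal sketch of how a host initialization
path might consume snp_probe_rmptable_info(). The caller name and the
surrounding flow are assumptions for this example, not the kernel's
actual init code; it presumes normal kernel context with <asm/sev.h>
and the cpufeature helpers available:

	/*
	 * Hypothetical caller (example only): check for SNP support and
	 * a usable RMP table before trying to enable SNP on the host.
	 */
	static bool example_snp_host_usable(void)
	{
		/* CPUID-based detection, per the enablement steps above. */
		if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
			return false;

		/* Validates the range reported by the RMP base/end MSRs. */
		return snp_probe_rmptable_info();
	}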
From 94b36bc244bb134ec616dd3f2d37343cd8c1be54 Mon Sep 17 00:00:00 2001
From: Brijesh Singh
Date: Thu, 25 Jan 2024 22:11:06 -0600
Subject: x86/sev: Add RMP entry lookup helpers

Add a helper that can be used to access information contained in the
RMP entry corresponding to a particular PFN. This will be needed to
decide how to set up mappings in the NPT in response to guest page
faults, and to handle things like cleaning up pages and setting them
back to the default hypervisor-owned state when they are no longer
being used for private data.

[ mdr: separate 'assigned' indicator from return code, and simplify
  function signatures for various helpers. ]

Signed-off-by: Brijesh Singh
Co-developed-by: Ashish Kalra
Signed-off-by: Ashish Kalra
Signed-off-by: Michael Roth
Signed-off-by: Borislav Petkov (AMD)
Link: https://lore.kernel.org/r/20240126041126.1927228-7-michael.roth@amd.com
---
 arch/x86/include/asm/sev.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/x86/include/asm/sev.h')

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 1f59d8ba9776..01ce61b283a3 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -90,6 +90,7 @@ extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
 /* RMP page size */
 #define RMP_PG_SIZE_4K		0
 #define RMP_PG_SIZE_2M		1
+#define RMP_TO_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
 
 #define RMPADJUST_VMSA_PAGE_BIT	BIT(16)
 
@@ -245,8 +246,10 @@ static inline u64 sev_get_status(void) { return 0; }
 
 #ifdef CONFIG_KVM_AMD_SEV
 bool snp_probe_rmptable_info(void);
+int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
+static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
 #endif
 
 #endif
--
cgit v1.2.3

From 1f568d36361b4891696280b719ca4b142db872ba Mon Sep 17 00:00:00 2001
From: Brijesh Singh
Date: Thu, 25 Jan 2024 22:11:07 -0600
Subject: x86/fault: Add helper for dumping RMP entries

This information will be useful for debugging things like page faults
due to RMP access violations and RMPUPDATE failures.

[ mdr: move helper to standalone patch, rework dump logic as suggested
  by Boris. ]

Signed-off-by: Brijesh Singh
Signed-off-by: Ashish Kalra
Signed-off-by: Michael Roth
Signed-off-by: Borislav Petkov (AMD)
Link: https://lore.kernel.org/r/20240126041126.1927228-8-michael.roth@amd.com
---
 arch/x86/include/asm/sev.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/x86/include/asm/sev.h')

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 01ce61b283a3..2c53e3de0b71 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -247,9 +247,11 @@ static inline u64 sev_get_status(void) { return 0; }
 #ifdef CONFIG_KVM_AMD_SEV
 bool snp_probe_rmptable_info(void);
 int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
+void snp_dump_hva_rmpentry(unsigned long address);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
+static inline void snp_dump_hva_rmpentry(unsigned long address) {}
 #endif
 
 #endif
--
cgit v1.2.3
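To show how these signatures compose, here is a minimal, hypothetical
sketch of a caller deriving an NPT mapping level from an RMP entry. The
function name and the -EINVAL policy for unassigned pages are
assumptions for illustration, not kernel code; it assumes <asm/sev.h>
is included:

	/*
	 * Hypothetical helper (example only): derive the largest page
	 * level the NPT may use for a guest-owned PFN.
	 */
	static int example_rmp_mapping_level(u64 pfn)
	{
		bool assigned;
		int level, rc;

		rc = snp_lookup_rmpentry(pfn, &assigned, &level);
		if (rc)
			return rc;	/* e.g. -ENODEV without SNP support */

		/* Not assigned to a guest/firmware: no RMP restrictions. */
		if (!assigned)
			return -EINVAL;

		/* Convert the RMP page size to the kernel's PG_LEVEL_*. */
		return RMP_TO_PG_LEVEL(level);
	}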
From 2c35819ee00b8893626914b3384cdef2afea7dbd Mon Sep 17 00:00:00 2001
From: Brijesh Singh
Date: Thu, 25 Jan 2024 22:11:10 -0600
Subject: x86/sev: Add helper functions for RMPUPDATE and PSMASH instruction

The RMPUPDATE instruction updates the access restrictions for a page
via its corresponding entry in the RMP Table. The hypervisor will use
the instruction to enforce various access restrictions on pages used
for confidential guests and other specialized functionality. See APM3
for details on the instruction operations.

The PSMASH instruction expands a 2MB RMP entry in the RMP table into a
corresponding set of contiguous 4KB RMP entries while retaining the
state of the validated bit from the original 2MB RMP entry. The
hypervisor will use this instruction in cases where it needs to re-map
a page as 4K rather than 2MB in a guest's nested page table.

Add helpers to make use of these instructions.

[ mdr: add RMPUPDATE retry logic for transient FAIL_OVERLAP errors. ]

Signed-off-by: Brijesh Singh
Signed-off-by: Ashish Kalra
Signed-off-by: Michael Roth
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Liam Merwick
Link: https://lore.kernel.org/r/20240126041126.1927228-11-michael.roth@amd.com
---
 arch/x86/include/asm/sev.h | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

(limited to 'arch/x86/include/asm/sev.h')

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 2c53e3de0b71..57fd95a5e6b2 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -87,10 +87,23 @@ extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
 /* Software defined (when rFlags.CF = 1) */
 #define PVALIDATE_FAIL_NOUPDATE		255
 
+/* RMPUPDATE detected 4K page and 2MB page overlap. */
+#define RMPUPDATE_FAIL_OVERLAP		4
+
 /* RMP page size */
 #define RMP_PG_SIZE_4K		0
 #define RMP_PG_SIZE_2M		1
 #define RMP_TO_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
+#define PG_LEVEL_TO_RMP(level)	(((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
+
+struct rmp_state {
+	u64 gpa;
+	u8 assigned;
+	u8 pagesize;
+	u8 immutable;
+	u8 rsvd;
+	u32 asid;
+} __packed;
 
 #define RMPADJUST_VMSA_PAGE_BIT	BIT(16)
 
@@ -248,10 +261,20 @@ static inline u64 sev_get_status(void) { return 0; }
 bool snp_probe_rmptable_info(void);
 int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
 void snp_dump_hva_rmpentry(unsigned long address);
+int psmash(u64 pfn);
+int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
+int rmp_make_shared(u64 pfn, enum pg_level level);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
 static inline void snp_dump_hva_rmpentry(unsigned long address) {}
+static inline int psmash(u64 pfn) { return -ENODEV; }
+static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid,
+				   bool immutable)
+{
+	return -ENODEV;
+}
+static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
 #endif
 
 #endif
--
cgit v1.2.3
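A hedged sketch of how these helpers might be combined, for instance
when a page assigned at 2MB granularity later needs to be split to 4K.
The flow and function name are illustrative assumptions, not how KVM
actually drives the instructions:

	/*
	 * Hypothetical flow (example only): assign a 2MB-aligned page
	 * to a guest, then split its RMP entry into 4K entries.
	 */
	static int example_assign_then_smash(u64 pfn, u64 gpa, u32 asid)
	{
		int ret;

		/* RMPUPDATE: mark the page assigned/private to this ASID. */
		ret = rmp_make_private(pfn, gpa, PG_LEVEL_2M, asid, false);
		if (ret)
			return ret;

		/*
		 * PSMASH: expand the 2MB RMP entry into contiguous 4K
		 * entries, preserving the validated bit.
		 */
		return psmash(pfn);
	}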
From 8dac642999b1542e0f0abefba100d8bd11226c83 Mon Sep 17 00:00:00 2001
From: Ashish Kalra
Date: Thu, 25 Jan 2024 22:11:15 -0600
Subject: x86/sev: Introduce an SNP leaked pages list

Pages are unsafe to release back to the page allocator if they have
been transitioned to firmware/guest state and can't be reclaimed or
transitioned back to hypervisor/shared state. In this case, add them
to an internal leaked pages list to ensure that they are not freed or
touched/accessed, which would cause fatal page faults.

[ mdr: Relocate to arch/x86/virt/svm/sev.c ]

Suggested-by: Vlastimil Babka
Signed-off-by: Ashish Kalra
Signed-off-by: Michael Roth
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Vlastimil Babka
Link: https://lore.kernel.org/r/20240126041126.1927228-16-michael.roth@amd.com
---
 arch/x86/include/asm/sev.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/x86/include/asm/sev.h')

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 57fd95a5e6b2..60de1b43a729 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -264,6 +264,7 @@ void snp_dump_hva_rmpentry(unsigned long address);
 int psmash(u64 pfn);
 int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
 int rmp_make_shared(u64 pfn, enum pg_level level);
+void snp_leak_pages(u64 pfn, unsigned int npages);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
@@ -275,6 +276,7 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 as
 	return -ENODEV;
 }
 static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
+static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
 #endif
 
 #endif
--
cgit v1.2.3
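To show where snp_leak_pages() slots in, here is a hypothetical reclaim
error path. The helper name and the __free_page() success path are
assumptions for illustration, not the kernel's actual cleanup code:

	/*
	 * Hypothetical cleanup (example only): try to return a
	 * firmware/guest page to hypervisor/shared state; if that
	 * fails, the page must never be freed or touched again, so
	 * leak it instead.
	 */
	static void example_reclaim_or_leak(u64 pfn)
	{
		if (rmp_make_shared(pfn, PG_LEVEL_4K)) {
			snp_leak_pages(pfn, 1);
			return;
		}

		__free_page(pfn_to_page(pfn));
	}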
From 8ef979584ea86c247b768f4420148721a842835f Mon Sep 17 00:00:00 2001
From: Ashish Kalra
Date: Thu, 25 Jan 2024 22:11:20 -0600
Subject: crypto: ccp: Add panic notifier for SEV/SNP firmware shutdown on kdump

Add a kdump-safe version of sev_firmware_shutdown() and register it as
a crash_kexec_post_notifier so it will be invoked during panic/crash
to do SEV/SNP shutdown. This is required for transitioning all IOMMU
pages to reclaim/hypervisor state; otherwise, re-init of IOMMU pages
during crashdump kernel boot fails and panics the crashdump kernel.

This panic notifier runs in atomic context, so it avoids acquiring any
locks/mutexes and polls for PSP command completion instead of depending
on the PSP command completion interrupt.

[ mdr: Remove use of "we" in comments. ]

Signed-off-by: Ashish Kalra
Signed-off-by: Michael Roth
Signed-off-by: Borislav Petkov (AMD)
Link: https://lore.kernel.org/r/20240126041126.1927228-21-michael.roth@amd.com
---
 arch/x86/include/asm/sev.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/x86/include/asm/sev.h')

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 60de1b43a729..bed95e1f4d52 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -227,6 +227,7 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct sn
 void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 snp_get_unsupported_features(u64 status);
 u64 sev_get_status(void);
+void kdump_sev_callback(void);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
@@ -255,6 +256,7 @@ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *in
 static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
 static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
 static inline u64 sev_get_status(void) { return 0; }
+static inline void kdump_sev_callback(void) { }
 #endif
 
 #ifdef CONFIG_KVM_AMD_SEV
--
cgit v1.2.3

From d7b69b590bc9a5d299c82d3b27772cece0238d38 Mon Sep 17 00:00:00 2001
From: "Borislav Petkov (AMD)"
Date: Mon, 19 Feb 2024 10:42:16 +0100
Subject: x86/sev: Dump SEV_STATUS

It is useful now, and will be even more so in the future, to dump the
SEV features enabled according to SEV_STATUS. Do so:

  [    0.542753] Memory Encryption Features active: AMD SEV SEV-ES SEV-SNP
  [    0.544425] SEV: Status: SEV SEV-ES SEV-SNP DebugSwap

Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Nikunj A Dadhania
Link: https://lore.kernel.org/r/20240219094216.GAZdMieDHKiI8aaP3n@fat_crate.local
---
 arch/x86/include/asm/sev.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/x86/include/asm/sev.h')

diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index bed95e1f4d52..f000635d6061 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -228,6 +228,7 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 snp_get_unsupported_features(u64 status);
 u64 sev_get_status(void);
 void kdump_sev_callback(void);
+void sev_show_status(void);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
@@ -257,6 +258,7 @@ static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
 static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
 static inline u64 sev_get_status(void) { return 0; }
 static inline void kdump_sev_callback(void) { }
+static inline void sev_show_status(void) { }
 #endif
 
 #ifdef CONFIG_KVM_AMD_SEV
--
cgit v1.2.3
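For reference, a minimal sketch of the kind of decoding sev_show_status()
performs over the SEV_STATUS value. The bit-to-name table below covers
only the three feature bits whose positions I am confident of (SEV=0,
SEV-ES=1, SEV-SNP=2); it is an illustrative assumption, not the kernel's
actual table, which also knows about DebugSwap and other SNP features:

	/*
	 * Hypothetical decoder (example only): print the names of the
	 * SEV features reported enabled by SEV_STATUS.
	 */
	static void example_show_sev_status(void)
	{
		static const char * const names[] = { "SEV", "SEV-ES", "SEV-SNP" };
		u64 status = sev_get_status();
		unsigned int i;

		pr_info("SEV: Status:");
		for (i = 0; i < ARRAY_SIZE(names); i++)
			if (status & BIT_ULL(i))
				pr_cont(" %s", names[i]);
		pr_cont("\n");
	}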