66 files changed, 92 insertions, 96 deletions
diff --git a/Documentation/arch/x86/pti.rst b/Documentation/arch/x86/pti.rst
index 4b858a9bad8d..e08d35177bc0 100644
--- a/Documentation/arch/x86/pti.rst
+++ b/Documentation/arch/x86/pti.rst
@@ -81,11 +81,9 @@ this protection comes at a cost:
      and exit (it can be skipped when the kernel is interrupted,
      though.)  Moves to CR3 are on the order of a hundred cycles,
      and are required at every entry and exit.
-  b. A "trampoline" must be used for SYSCALL entry.  This
-     trampoline depends on a smaller set of resources than the
-     non-PTI SYSCALL entry code, so requires mapping fewer
-     things into the userspace page tables.  The downside is
-     that stacks must be switched at entry time.
+  b. Percpu TSS is mapped into the user page tables to allow SYSCALL64 path
+     to work under PTI. This doesn't have a direct runtime cost but it can
+     be argued it opens certain timing attack scenarios.
   c. Global pages are disabled for all kernel structures not
      mapped into both kernel and userspace page tables.  This
      feature of the MMU allows different processes to share TLB
@@ -167,7 +165,7 @@ that are worth noting here.
  * Failures of the selftests/x86 code.  Usually a bug in one of the
    more obscure corners of entry_64.S
  * Crashes in early boot, especially around CPU bringup.  Bugs
-   in the trampoline code or mappings cause these.
+   in the mappings cause these.
  * Crashes at the first interrupt.  Caused by bugs in entry_64.S,
    like screwing up a page table switch.  Also caused by
    incorrectly mapping the IRQ handler entry code.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d3a43efac2d5..69901c29bee7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -384,10 +384,6 @@ config HAVE_INTEL_TXT
 	def_bool y
 	depends on INTEL_IOMMU && ACPI
 
-config X86_32_SMP
-	def_bool y
-	depends on X86_32 && SMP
-
 config X86_64_SMP
 	def_bool y
 	depends on X86_64 && SMP
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 71fc531b95b4..f19c038409aa 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -53,7 +53,7 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
 KBUILD_CFLAGS += $(call cc-option,-Wa$(comma)-mrelax-relocations=no)
 KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
 
-# sev.c indirectly inludes inat-table.h which is generated during
+# sev.c indirectly includes inat-table.h which is generated during
 # compilation and stored in $(objtree). Add the directory to the includes so
 # that the compiler finds it even with out-of-tree builds (make O=/some/path).
 CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/
diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c
index b3c3a4be7471..dbba332e4a12 100644
--- a/arch/x86/boot/compressed/mem.c
+++ b/arch/x86/boot/compressed/mem.c
@@ -8,7 +8,7 @@
 
 /*
  * accept_memory() and process_unaccepted_memory() called from EFI stub which
- * runs before decompresser and its early_tdx_detect().
+ * runs before decompressor and its early_tdx_detect().
  *
  * Enumerate TDX directly from the early users.
  */
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index cf1f13c82175..c1cb90369915 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -887,7 +887,7 @@ void __init tdx_early_init(void)
 	 * there.
 	 *
 	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
-	 * implemented seperately in the low level startup ASM code.
+	 * implemented separately in the low level startup ASM code.
 	 * Until that is in place, disable parallel bringup for TDX.
 	 */
 	x86_cpuinit.parallel_bringup = false;
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 187f913cc239..411d8c83e88a 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -666,7 +666,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
 
 	.ifc \operation, dec
 	movdqa %xmm1, %xmm3
-	pxor %xmm1, %xmm9	# Cyphertext XOR E(K, Yn)
+	pxor %xmm1, %xmm9	# Ciphertext XOR E(K, Yn)
 
 	mov \PLAIN_CYPH_LEN, %r10
 	add %r13, %r10
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 74dd230973cf..8c9749ed0651 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -747,7 +747,7 @@ VARIABLE_OFFSET = 16*8
 
 	.if \ENC_DEC == DEC
 	vmovdqa %xmm1, %xmm3
-	pxor %xmm1, %xmm9	# Cyphertext XOR E(K, Yn)
+	pxor %xmm1, %xmm9	# Ciphertext XOR E(K, Yn)
 
 	mov \PLAIN_CYPH_LEN, %r10
 	add %r13, %r10
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 81ce0f4db555..bbcff1fb78cb 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -184,7 +184,7 @@ SYM_FUNC_START(crc_pcl)
 	xor	crc1,crc1
 	xor	crc2,crc2
 
-	# Fall thruogh into top of crc array (crc_128)
+	# Fall through into top of crc array (crc_128)
 
 	################################################################
 	## 3) CRC Array:
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index d902b8ea0721..5bfce4b045fd 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -84,7 +84,7 @@ frame_size = frame_WK + WK_SIZE
 
 # Useful QWORD "arrays" for simpler memory references
 # MSG, DIGEST, K_t, W_t are arrays
-# WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even
+# WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even
 
 # Input message (arg1)
 #define MSG(i) 8*i(msg)
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index 65be30156816..30a2c4777f9d 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -82,7 +82,7 @@ frame_size = frame_WK + WK_SIZE
 
 # Useful QWORD "arrays" for simpler memory references
 # MSG, DIGEST, K_t, W_t are arrays
-# WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even
+# WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even
 
 # Input message (arg1)
 #define MSG(i) 8*i(msg)
diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index ed308719236c..780acd3dff22 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -125,7 +125,7 @@ int amd_brs_hw_config(struct perf_event *event)
 	 * Where X is the number of taken branches due to interrupt
 	 * skid. Skid is large.
 	 *
-	 * Where Y is the occurences of the event while BRS is
+	 * Where Y is the occurrences of the event while BRS is
 	 * capturing the lbr_nr entries.
 	 *
 	 * By using retired taken branches, we limit the impact on the
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index e24976593a29..25ad6fd98166 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1184,7 +1184,7 @@ static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc,
  * period of each one and given that the BRS saturates, it would not be possible
  * to guarantee correlated content for all events. Therefore, in situations
  * where multiple events want to use BRS, the kernel enforces mutual exclusion.
- * Exclusion is enforced by chosing only one counter for events using BRS.
+ * Exclusion is enforced by choosing only one counter for events using BRS.
  * The event scheduling logic will then automatically multiplex the
  * events and ensure that at most one event is actively using BRS.
  *
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 0f2786d4e405..d50b3e066e89 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4027,7 +4027,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 
 /*
  * Currently, the only caller of this function is the atomic_switch_perf_msrs().
- * The host perf conext helps to prepare the values of the real hardware for
+ * The host perf context helps to prepare the values of the real hardware for
  * a set of msrs that need to be switched atomically in a vmx transaction.
  *
  * For example, the pseudocode needed to add a new msr should look like:
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 97bfe5f0531f..5fc45543e955 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -209,7 +209,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
 
 	/*
 	 * This particular version of the IPI hypercall can
-	 * only target upto 64 CPUs.
+	 * only target up to 64 CPUs.
 	 */
 	if (vcpu >= 64)
 		goto do_ex_hypercall;
diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c
index 42c70d28ef27..3215a4a07408 100644
--- a/arch/x86/hyperv/irqdomain.c
+++ b/arch/x86/hyperv/irqdomain.c
@@ -212,7 +212,7 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 		 * This interrupt is already mapped. Let's unmap first.
 		 *
 		 * We don't use retarget interrupt hypercalls here because
-		 * Microsoft Hypervisor doens't allow root to change the vector
+		 * Microsoft Hypervisor doesn't allow root to change the vector
 		 * or specify VPs outside of the set that is initially used
 		 * during mapping.
 		 */
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 02e55237d919..7dcbf153ad72 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -144,7 +144,7 @@ void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
 	/* Tell the hypervisor what went wrong. */
 	val |= GHCB_SEV_TERM_REASON(set, reason);
 
-	/* Request Guest Termination from Hypvervisor */
+	/* Request Guest Termination from Hypervisor */
 	wr_ghcb_msr(val);
 	VMGEXIT();
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index ed0eaf65c437..5c37944c8a5e 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -104,7 +104,7 @@ static inline bool amd_gart_present(void)
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		return false;
 
-	/* GART present only on Fam15h, upto model 0fh */
+	/* GART present only on Fam15h, up to model 0fh */
 	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
 	    (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
 		return true;
diff --git a/arch/x86/include/asm/extable_fixup_types.h b/arch/x86/include/asm/extable_fixup_types.h
index 991e31cfde94..fe6312045042 100644
--- a/arch/x86/include/asm/extable_fixup_types.h
+++ b/arch/x86/include/asm/extable_fixup_types.h
@@ -4,7 +4,7 @@
 
 /*
  * Our IMM is signed, as such it must live at the top end of the word. Also,
- * since C99 hex constants are of ambigious type, force cast the mask to 'int'
+ * since C99 hex constants are of ambiguous type, force cast the mask to 'int'
 * so that FIELD_GET() will DTRT and sign extend the value when it extracts it.
 */
 #define EX_DATA_TYPE_MASK		((int)0x000000FF)
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index eb810074f1e7..f1fadc318a88 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -415,7 +415,7 @@ struct fpu_state_perm {
 	 *
 	 * This master permission field is only to be used when
 	 * task.fpu.fpstate based checks fail to validate whether the task
-	 * is allowed to expand it's xfeatures set which requires to
+	 * is allowed to expand its xfeatures set which requires to
 	 * allocate a larger sized fpstate buffer.
 	 *
 	 * Do not access this field directly. Use the provided helper
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 76238842406a..3814a9263d64 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -242,7 +242,7 @@ static inline void slow_down_io(void)
 
 #endif
 
-#define BUILDIO(bwl, bw, type)						\
+#define BUILDIO(bwl, type)						\
 static inline void out##bwl##_p(type value, u16 port)			\
 {									\
 	out##bwl(value, port);						\
@@ -288,9 +288,9 @@ static inline void ins##bwl(u16 port, void *addr, unsigned long count)	\
 	}								\
 }
 
-BUILDIO(b, b, u8)
-BUILDIO(w, w, u16)
-BUILDIO(l, , u32)
+BUILDIO(b, u8)
+BUILDIO(w, u16)
+BUILDIO(l, u32)
 #undef BUILDIO
 
 #define inb_p inb_p
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
index a1911fea8739..af7541c11821 100644
--- a/arch/x86/include/asm/iosf_mbi.h
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -111,7 +111,7 @@ int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
 * This function will block all kernel access to the PMIC I2C bus, so that the
 * P-Unit can safely access the PMIC over the shared I2C bus.
 *
- * Note on these systems the i2c-bus driver will request a sempahore from the
+ * Note on these systems the i2c-bus driver will request a semaphore from the
 * P-Unit for exclusive access to the PMIC bus when i2c drivers are accessing
 * it, but this does not appear to be sufficient, we still need to avoid making
 * certain P-Unit requests during the access window to avoid problems.
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d7036982332e..6711da000bb7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1652,7 +1652,7 @@ struct kvm_x86_ops {
 	/* Whether or not a virtual NMI is pending in hardware. */
 	bool (*is_vnmi_pending)(struct kvm_vcpu *vcpu);
 	/*
-	 * Attempt to pend a virtual NMI in harware. Returns %true on success
+	 * Attempt to pend a virtual NMI in hardware. Returns %true on success
 	 * to allow using static_call_ret0 as the fallback.
 	 */
 	bool (*set_vnmi_pending)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f93e9b96927a..262e65539f83 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -49,7 +49,7 @@
 * but there is still a cushion vs. the RSB depth. The algorithm does not
 * claim to be perfect and it can be speculated around by the CPU, but it
 * is considered that it obfuscates the problem enough to make exploitation
- * extremly difficult.
+ * extremely difficult.
 */
 #define RET_DEPTH_SHIFT			5
 #define RSB_RET_STUFF_LOOPS		16
@@ -208,7 +208,7 @@
 
 /*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
- * eventually turn into it's own annotation.
+ * eventually turn into its own annotation.
 */
 .macro VALIDATE_UNRET_END
 #if defined(CONFIG_NOINSTR_VALIDATION) && \
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index a629b1b9f65a..24af25b1551a 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -203,7 +203,7 @@ static inline void native_pgd_clear(pgd_t *pgd)
 * F (2) in swp entry is used to record when a pagetable is
 * writeprotected by userfaultfd WP support.
 *
- * E (3) in swp entry is used to rememeber PG_anon_exclusive.
+ * E (3) in swp entry is used to remember PG_anon_exclusive.
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index bf483fcb4e57..5c83729c8e71 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -31,8 +31,6 @@
 #include <asm/bootparam.h>
 #include <asm/x86_init.h>
 
-extern u64 relocated_ramdisk;
-
 /* Interrupt control for vSMPowered x86_64 systems */
 #ifdef CONFIG_X86_64
 void vsmp_init(void);
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 5fa76c2ced51..ea877fd83114 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -653,7 +653,7 @@ static inline int uv_blade_to_node(int blade)
 	return uv_socket_to_node(blade);
 }
 
-/* Blade number of current cpu. Numnbered 0 .. <#blades -1> */
+/* Blade number of current cpu. Numbered 0 .. <#blades -1> */
 static inline int uv_numa_blade_id(void)
 {
 	return uv_hub_info->numa_blade_id;
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index c81858d903dc..923053f366d7 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -321,7 +321,7 @@ static __always_inline u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
 {
 	/*
-	 * Due to the MSB/Sign-bit being used as invald marker (see
+	 * Due to the MSB/Sign-bit being used as invalid marker (see
 	 * arch_vdso_cycles_valid() above), the effective mask is S64_MAX.
 	 */
 	u64 delta = (cycles - last) & S64_MAX;
diff --git a/arch/x86/include/asm/xen/interface_64.h b/arch/x86/include/asm/xen/interface_64.h
index c599ec269a25..c10f279aae93 100644
--- a/arch/x86/include/asm/xen/interface_64.h
+++ b/arch/x86/include/asm/xen/interface_64.h
@@ -61,7 +61,7 @@
 *   RING1 -> RING3 kernel mode.
 *   RING2 -> RING3 kernel mode.
 *   RING3 -> RING3 user mode.
- * However RING0 indicates that the guest kernel should return to iteself
+ * However RING0 indicates that the guest kernel should return to itself
 *   directly with
 *      orb   $3,1*8(%rsp)
 *      iretq
diff --git a/arch/x86/include/uapi/asm/amd_hsmp.h b/arch/x86/include/uapi/asm/amd_hsmp.h
index fce22686c834..e5d182c7373c 100644
--- a/arch/x86/include/uapi/asm/amd_hsmp.h
+++ b/arch/x86/include/uapi/asm/amd_hsmp.h
@@ -238,7 +238,7 @@ static const struct hsmp_msg_desc hsmp_msg_desc_table[] = {
 	/*
 	 * HSMP_GET_DIMM_THERMAL, num_args = 1, response_sz = 1
 	 * input: args[0] = DIMM address[7:0]
-	 * output: args[0] = temperature in degree celcius[31:21] + update rate in ms[16:8] +
+	 * output: args[0] = temperature in degree celsius[31:21] + update rate in ms[16:8] +
 	 *	   DIMM address[7:0]
 	 */
 	{1, 1, HSMP_GET},
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 95e21596e2f9..55e205b21271 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1906,7 +1906,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
- * trough a mutex.
+ * through a mutex.
 */
 void *text_poke(void *addr, const void *opcode, size_t len)
 {
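Note on the BUILDIO() hunk in arch/x86/include/asm/io.h: the dropped "bw" parameter was simply unused, so the macro needs only the name suffix ("bwl") and the operand type. Below is a minimal user-space sketch of the same token-pasting pattern, not the kernel's code: the stub outb() and the uint8_t/uint16_t types stand in for the kernel's port-output primitive and its u8/u16 types, and slow_down_io() is elided.

#include <stdint.h>

/* Stub standing in for the real port-output instruction. */
static inline void outb(uint8_t value, uint16_t port)
{
	(void)value;
	(void)port;
}

/*
 * Two-parameter BUILDIO as in the patched io.h: bwl is pasted into the
 * generated function name, type is the operand type.
 */
#define BUILDIO(bwl, type)					\
static inline void out##bwl##_p(type value, uint16_t port)	\
{								\
	out##bwl(value, port);					\
}

BUILDIO(b, uint8_t)	/* expands to outb_p(uint8_t value, uint16_t port) */
#undef BUILDIO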
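Note on the extable_fixup_types.h hunk: the comment being fixed describes a real subtlety. A C99 hex constant that does not fit in int (e.g. 0xFF000000) gets an unsigned type, and FIELD_GET() extracts fields using the mask's type, so an unsigned mask would zero-extend a negative immediate; casting the mask to 'int' keeps the extraction signed. A stand-alone sketch of the idea follows — IMM_MASK and extract_imm() are made up for illustration, and right-shifting a negative int is implementation-defined in ISO C but is an arithmetic shift on gcc and clang, which is what the kernel relies on.

#include <stdio.h>

/* Made-up mask over the top byte; the (int) cast mirrors the file's masks. */
#define IMM_MASK ((int)0xff000000)

/* FIELD_GET-style extraction of the top byte. */
static int extract_imm(int word)
{
	/*
	 * (word & IMM_MASK) has type int because the mask is int, so the
	 * right shift is arithmetic and the sign bit propagates.
	 */
	return (word & IMM_MASK) >> 24;
}

int main(void)
{
	int word = (int)0xfe000000;	/* immediate -2 stored in the top byte */

	printf("%d\n", extract_imm(word));	/* prints -2 */
	return 0;
}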
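Note on the vdso/gettimeofday.h hunk: the fixed comment documents why vdso_calc_delta() masks the cycle delta with S64_MAX. The MSB/sign bit of the counter value is reserved as the invalid marker (checked by arch_vdso_cycles_valid()), so it can never belong to a genuine delta and the mask simply strips it. A tiny stand-alone demonstration with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t s64_max = INT64_MAX;	/* 0x7fffffffffffffff */

	/* Made-up cycle counts whose difference has bit 63 set. */
	uint64_t last = 0x10;
	uint64_t cycles = 0x8000000000000020ULL;

	/*
	 * As in vdso_calc_delta(): bit 63 is the invalid marker, so the
	 * effective mask is S64_MAX and the top bit is dropped.
	 */
	uint64_t delta = (cycles - last) & s64_max;

	printf("0x%016llx\n", (unsigned long long)delta);	/* 0x0000000000000010 */
	return 0;
}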
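Note on the text_poke() kernel-doc fix in arch/x86/kernel/alternative.c: the comment prescribes pairing a module notifier with a mutex so a module cannot be unloaded while its code is being poked. A hedged sketch of that pattern — patch_mutex, patch_module_cb() and patch_nb are hypothetical names, and this is one plausible shape of such a caller, not code from the tree:

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

static DEFINE_MUTEX(patch_mutex);	/* orders patching vs. module removal */

static int patch_module_cb(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	if (action == MODULE_STATE_GOING) {
		mutex_lock(&patch_mutex);
		/* Drop any pending pokes targeting this module's text. */
		mutex_unlock(&patch_mutex);
	}
	return NOTIFY_OK;
}

static struct notifier_block patch_nb = {
	.notifier_call = patch_module_cb,
};

/*
 * Init code would call register_module_notifier(&patch_nb); patch sites
 * then hold patch_mutex around their text_poke() calls, so removal and
 * patching are ordered through the same lock.
 */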