/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 1991,1992 Linus Torvalds
*
* entry_32.S contains the system-call and low-level fault and trap handling routines.
*
* Stack layout while running C code:
* ptrace needs to have all registers on the stack.
* If the order here is changed, it needs to be
* updated in fork.c:copy_process(), signal.c:do_signal(),
* ptrace.c and ptrace.h
*
* 0(%esp) - %ebx
* 4(%esp) - %ecx
* 8(%esp) - %edx
* C(%esp) - %esi
* 10(%esp) - %edi
* 14(%esp) - %ebp
* 18(%esp) - %eax
* 1C(%esp) - %ds
* 20(%esp) - %es
* 24(%esp) - %fs
* 28(%esp) - unused -- was %gs on old stackprotector kernels
* 2C(%esp) - orig_eax
* 30(%esp) - %eip
* 34(%esp) - %cs
* 38(%esp) - %eflags
* 3C(%esp) - %oldesp
* 40(%esp) - %oldss
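*
* This matches the 32-bit struct pt_regs layout (arch/x86/include/asm/ptrace.h).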
*/
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include "calling.h"
.section .entry.text, "ax"
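/*
* Entry code sits in its own section so that, with page-table isolation
* enabled, just this text can be cloned into the user page tables
* (see pti_clone_entry_text()).
*/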
#define PTI_SWITCH_MASK (1 << PAGE_SHIFT)
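/*
* The kernel and user page directories share one order-1 allocation: the
* kernel copy in the lower page, the user copy one page above it. Flipping
* the bit at PAGE_SHIFT in CR3 therefore switches between the two.
*/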
/* Unconditionally switch to user cr3 */
.macro SWITCH_TO_USER_CR3 scratch_reg:req
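/*
* With the default (no-PTI) text, the jmp below skips the whole macro.
* When X86_FEATURE_PTI is set, alternatives patching replaces the jmp
* with NOPs, so the CR3 switch actually runs.
*/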
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
movl %cr3, \scratch_reg
orl $PTI_SWITCH_MASK, \scratch_reg
movl \scratch_reg, %cr3
.Lend_\@:
.endm
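/*
* Under CONFIG_DEBUG_ENTRY, BUG (via ud2) if the kernel CR3 is still loaded
* at a point where the user CR3 is expected. By default the check only fires
* for frames that came from usermode; pass no_user_check=1 to check
* unconditionally. May clobber %eax.
*/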
.macro BUG_IF_WRONG_CR3 no_user_check=0
#ifdef CONFIG_DEBUG_ENTRY
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
.if \no_user_check == 0
/* coming from usermode? */
testl $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
jz .Lend_\@
.endif
/* On user-cr3? */
movl %cr3, %eax
testl $PTI_SWITCH_MASK, %eax
jnz .Lend_\@
/* From userspace with kernel cr3 - BUG */
ud2
.Lend_\@:
#endif
.endm
/*
* Switch to kernel cr3 if not already loaded and return current cr3 in
* \scratch_reg
*/
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
movl %cr3, \scratch_reg
/* Test if we are already on kernel CR3 */
testl $PTI_SWITCH_MASK, \scratch_reg
jz .Lend_\@
andl $(~PTI_SWITCH_MASK), \scratch_reg
movl \scratch_reg, %cr3
/* Return original (user) CR3 in \scratch_reg: re-set the bit cleared above */
orl $PTI_SWITCH_MASK, \scratch_reg
.Lend_\@:
.endm
#define CS_FROM_ENTRY_STACK (1 << 31)
#define CS_FROM_USER_CR3 (1 << 30)
#define CS_FROM_KERNEL (1 << 29)
#define CS_FROM_ESPFIX (1 << 28)
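/*
* These flags live in the high bits of the saved CS slot on the stack;
* only the low 16 bits of CS are architectural, so the entry code is free
* to use the rest to record how the frame was built.
*/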
.macro FIXUP_FRAME
/*
* The high bits of the CS dword (__csh) are used for CS_FROM_*.
* Clear them in case hardware didn't do this for us.
*/
andl $0x0000ffff, 4*4(%esp)
#ifdef CONFIG_VM86
testl $X86_EFLAGS_VM, 5*4(%esp)
jnz .Lfrom_usermode_no_fixup_\@
#endif
testl $USER_SEGMENT_RPL_MASK, 4*4(%esp)
jnz .Lfrom_usermode_no_fixup_\@
orl $CS_FROM_KERNEL, 4*4(%esp)
/*
* When we're here from kernel mode, the (exception) stack looks like:
*
* 6*4(%esp) - <previous context>
* 5*4(%esp) - flags
* 4*4(%esp) - cs
* 3*4(%esp) - ip
* 2*4(%esp) - orig_eax
* 1*4(%esp) - gs / function
* 0*4(%esp) - fs
*
* Let's build a 5-entry IRET frame after that, such that struct pt_regs
* is complete and in particular regs->sp is correct. This gives us
* the original 6 entries as gap:
*
* 14*4(%esp) - <previous context>
* 13*4(%esp) - gap / flags
* 12*4(%esp) - gap / cs
* 11*4(%esp) - gap / ip
* 10*4(%esp) - gap / orig_eax
* 9*4(%esp) - gap / gs / function
* 8*4(%esp) - gap / fs
* 7*4(%esp) - ss
* 6*4(%esp) - sp
* 5*4(%esp) - flags
* 4*4(%esp) - cs
* 3*4(%esp) - ip
* 2*4(%esp) - orig_eax
* 1*4(%esp) - gs / function
* 0*4(%esp) - fs
*/
pushl %ss # ss
pushl %esp # sp (points at ss)
addl $7*4, (%esp) # point sp back at the previous context
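/*
* Each push below copies one entry of the original frame. The displacement
* stays 7*4 because every push also lowers %esp by 4, so the same offset
* naturally walks down from flags to fs. (The effective address of a push
* from memory is computed before %esp is decremented.)
*/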
pushl 7*4(%esp) # flags
pushl 7*4(%esp) # cs
pushl 7*4(%esp) # ip
pushl 7*4(%esp) # orig_eax
pushl 7*4(%esp) # gs / function
pushl 7*4(%esp) # fs
.Lfrom_usermode_no_fixup_\@:
.endm
.macro IRET_FRAME
/*
* We're called with %ds, %es, %fs, and %gs from the interrupted
* frame, so we shouldn't use them. Also, we may be in ESPFIX
* mode and therefore have a nonzero SS base and an offset ESP,
* so any attempt to access the stack needs to use SS (except for
* accesses through %esp, which automatically use SS).
*/
testl $CS_FROM_KERNEL, 1*4(%esp)
jz .Lfinished_frame_\@
/*
* Reconstruct the 3 entry IRET frame right after the (modified)
* regs->sp without lowering %esp in between, such that an NMI in the
* middle doesn't scribble our stack.
*/
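/*
* The frame is written with negative offsets from the new sp value held in
* %eax: flags at -1*4, cs (with the CS_FROM_* bits masked off) at -2*4, and
* ip at -3*4. %eax and %ecx are free to use as scratch once saved below.
*/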
pushl %eax
pushl %ecx
movl 5*4(%esp), %eax # (modified) regs->sp
movl 4*4(%esp), %ecx # flags
movl %ecx, %ss:-1*4(%eax)
movl 3*4(%esp), %ecx # cs
andl $0x0000ffff, %ecx # mask off the CS_FROM_* flag bits