| field | value | date |
|---|---|---|
| author | Juergen Gross <jgross@suse.com> | 2022-11-09 14:44:18 +0100 |
| committer | Peter Zijlstra <peterz@infradead.org> | 2022-11-24 13:56:44 +0100 |
| commit | f1a033cc6b9eb6d80322008422df3c87aa5d47a0 | |
| tree | 1aba4827c74f4032696e26aa5232761b76d0ae35 /arch/x86/kernel/kvm.c | |
| parent | 5736b1b70170e15d66ec02e500db917ef42ade83 | |
x86/paravirt: Use common macro for creating simple asm paravirt functions
Several paravirt assembler functions share a common pattern. Introduce a
macro, DEFINE_PARAVIRT_ASM(), for creating them.
Note that this macro includes explicit alignment of the generated
functions, so __raw_callee_save___kvm_vcpu_is_preempted(),
_paravirt_nop() and paravirt_ret0() are now aligned on 4-byte
boundaries.
The explicit _paravirt_nop() prototype in paravirt.c isn't needed, as
it is already provided by paravirt_types.h.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
Link: https://lkml.kernel.org/r/20221109134418.6516-1-jgross@suse.com
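For context, the DEFINE_PARAVIRT_ASM() definition itself is added to arch/x86/include/asm/paravirt_types.h and is not part of the diff shown below (the diffstat is limited to arch/x86/kernel/kvm.c). A minimal sketch of how such a macro can stamp out these functions, assuming the kernel's existing ASM_FUNC_ALIGN, ASM_ENDBR and ASM_RET helpers, could look like this:

```c
/*
 * Minimal sketch of the common macro; the authoritative definition added
 * by this patch lives in arch/x86/include/asm/paravirt_types.h.  It relies
 * on the kernel's existing ASM_FUNC_ALIGN, ASM_ENDBR and ASM_RET helpers.
 */
#define DEFINE_PARAVIRT_ASM(func, instr, sec)          \
	asm (".pushsection " #sec ", \"ax\"\n"         \
	     ".global " #func "\n\t"                   \
	     ".type " #func ", @function\n\t"          \
	     ASM_FUNC_ALIGN "\n"                       \
	     #func ":\n\t"                              \
	     ASM_ENDBR                                  \
	     instr                                      \
	     ASM_RET                                    \
	     ".size " #func ", . - " #func "\n\t"      \
	     ".popsection")
```

With something like this in place, each caller only supplies the function name, the instruction body and the target section, while the section, ENDBR, return, size and alignment bookkeeping is emitted consistently by the macro.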
Diffstat (limited to 'arch/x86/kernel/kvm.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/x86/kernel/kvm.c | 19 |

1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 95fb85bea111..4d053cb2c48a 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -798,20 +798,13 @@ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
  * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
  * restoring to/from the stack.
  */
-asm(
-".pushsection .text;"
-".global __raw_callee_save___kvm_vcpu_is_preempted;"
-".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
-ASM_FUNC_ALIGN
-"__raw_callee_save___kvm_vcpu_is_preempted:"
-ASM_ENDBR
-"movq __per_cpu_offset(,%rdi,8), %rax;"
-"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
-"setne %al;"
-ASM_RET
-".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
-".popsection");
+#define PV_VCPU_PREEMPTED_ASM \
+ "movq __per_cpu_offset(,%rdi,8), %rax\n\t" \
+ "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \
+ "setne %al\n\t"
+DEFINE_PARAVIRT_ASM(__raw_callee_save___kvm_vcpu_is_preempted,
+		    PV_VCPU_PREEMPTED_ASM, .text);
 
 #endif
 
 static void __init kvm_guest_init(void)
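Read against the removed open-coded block, the new invocation would, under the macro sketch given earlier, expand to roughly the following. This is a hypothetical expansion shown only to illustrate that the instruction sequence is unchanged while the function entry now gets the explicit alignment mentioned in the commit message:

```c
/* Hypothetical expansion of the DEFINE_PARAVIRT_ASM() call above,
 * assuming the macro sketch shown earlier on this page. */
asm (".pushsection .text, \"ax\"\n"
     ".global __raw_callee_save___kvm_vcpu_is_preempted\n\t"
     ".type __raw_callee_save___kvm_vcpu_is_preempted, @function\n\t"
     ASM_FUNC_ALIGN "\n"
     "__raw_callee_save___kvm_vcpu_is_preempted:\n\t"
     ASM_ENDBR
     "movq __per_cpu_offset(,%rdi,8), %rax\n\t"
     "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t"
     "setne %al\n\t"
     ASM_RET
     ".size __raw_callee_save___kvm_vcpu_is_preempted, . - __raw_callee_save___kvm_vcpu_is_preempted\n\t"
     ".popsection");
```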
