Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r-- | arch/x86/kvm/x86.c | 324
1 file changed, 165 insertions, 159 deletions
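Every hunk in this patch is the same mechanical substitution: static_call(kvm_x86_<op>)(args) becomes kvm_x86_call(<op>)(args). The wrapper itself is not defined in x86.c; assuming the definition introduced elsewhere in this series (arch/x86/include/asm/kvm_host.h), it is plain token pasting around the existing static call, so the generated code does not change. A minimal sketch of that assumption:

/*
 * Hypothetical reproduction of the kvm_x86_call() wrapper, shown only to
 * illustrate the intended expansion; the real definition is outside this diff.
 */
#define kvm_x86_call(func)	static_call(kvm_x86_##func)

/* A converted call site such as ... */
	cpl = kvm_x86_call(get_cpl)(vcpu);
/* ... therefore still expands to the original static call: */
	cpl = static_call(kvm_x86_get_cpl)(vcpu);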
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d2c36f05ee92..af6c8cf6a37a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -825,7 +825,7 @@ EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
  */
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
 {
-	if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl)
+	if (kvm_x86_call(get_cpl)(vcpu) <= required_cpl)
 		return true;
 	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
 	return false;
@@ -909,7 +909,7 @@ static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
 		return false;
 
-	return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0);
+	return kvm_x86_call(is_valid_cr0)(vcpu, cr0);
 }
 
 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
@@ -968,7 +968,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		if (!is_pae(vcpu))
 			return 1;
-		static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
+		kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
 		if (cs_l)
 			return 1;
 	}
@@ -982,7 +982,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	    (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
 		return 1;
 
-	static_call(kvm_x86_set_cr0)(vcpu, cr0);
+	kvm_x86_call(set_cr0)(vcpu, cr0);
 
 	kvm_post_set_cr0(vcpu, old_cr0, cr0);
@@ -1100,7 +1100,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
 {
 	/* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
-	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
+	if (kvm_x86_call(get_cpl)(vcpu) != 0 ||
 	    __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
 		kvm_inject_gp(vcpu, 0);
 		return 1;
@@ -1125,7 +1125,7 @@ EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);
 static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	return __kvm_is_valid_cr4(vcpu, cr4) &&
-	       static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
+	       kvm_x86_call(is_valid_cr4)(vcpu, cr4);
 }
 
 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
@@ -1193,7 +1193,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			return 1;
 	}
 
-	static_call(kvm_x86_set_cr4)(vcpu, cr4);
+	kvm_x86_call(set_cr4)(vcpu, cr4);
 
 	kvm_post_set_cr4(vcpu, old_cr4, cr4);
@@ -1332,7 +1332,7 @@ void kvm_update_dr7(struct kvm_vcpu *vcpu)
 		dr7 = vcpu->arch.guest_debug_dr7;
 	else
 		dr7 = vcpu->arch.dr7;
-	static_call(kvm_x86_set_dr7)(vcpu, dr7);
+	kvm_x86_call(set_dr7)(vcpu, dr7);
 	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
 	if (dr7 & DR7_BP_EN_MASK)
 		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
@@ -1675,7 +1675,7 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 		rdmsrl_safe(msr->index, &msr->data);
 		break;
 	default:
-		return static_call(kvm_x86_get_msr_feature)(msr);
+		return kvm_x86_call(get_msr_feature)(msr);
 	}
 	return 0;
 }
@@ -1749,7 +1749,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	efer &= ~EFER_LMA;
 	efer |= vcpu->arch.efer & EFER_LMA;
 
-	r = static_call(kvm_x86_set_efer)(vcpu, efer);
+	r = kvm_x86_call(set_efer)(vcpu, efer);
 	if (r) {
 		WARN_ON(r > 0);
 		return r;
@@ -1879,7 +1879,7 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
 	msr.index = index;
 	msr.host_initiated = host_initiated;
 
-	return static_call(kvm_x86_set_msr)(vcpu, &msr);
+	return kvm_x86_call(set_msr)(vcpu, &msr);
 }
 
 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
@@ -1921,7 +1921,7 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
 	msr.index = index;
 	msr.host_initiated = host_initiated;
 
-	ret = static_call(kvm_x86_get_msr)(vcpu, &msr);
+	ret = kvm_x86_call(get_msr)(vcpu, &msr);
 	if (!ret)
 		*data = msr.data;
 	return ret;
@@ -1989,7 +1989,7 @@ static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
 
 static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
 {
-	return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error);
+	return kvm_x86_call(complete_emulated_msr)(vcpu, vcpu->run->msr.error);
 }
 
 static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
@@ -2053,7 +2053,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
 		trace_kvm_msr_read_ex(ecx);
 	}
 
-	return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
+	return kvm_x86_call(complete_emulated_msr)(vcpu, r);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);
@@ -2078,7 +2078,7 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 		trace_kvm_msr_write_ex(ecx, data);
 	}
 
-	return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
+	return kvm_x86_call(complete_emulated_msr)(vcpu, r);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
@@ -2603,12 +2603,12 @@ static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
 	if (is_guest_mode(vcpu))
 		vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
 			l1_offset,
-			static_call(kvm_x86_get_l2_tsc_offset)(vcpu),
-			static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+			kvm_x86_call(get_l2_tsc_offset)(vcpu),
+			kvm_x86_call(get_l2_tsc_multiplier)(vcpu));
 	else
 		vcpu->arch.tsc_offset = l1_offset;
 
-	static_call(kvm_x86_write_tsc_offset)(vcpu);
+	kvm_x86_call(write_tsc_offset)(vcpu);
 }
 
 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
@@ -2619,12 +2619,12 @@ static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multipli
 	if (is_guest_mode(vcpu))
 		vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
 			l1_multiplier,
-			static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+			kvm_x86_call(get_l2_tsc_multiplier)(vcpu));
 	else
 		vcpu->arch.tsc_scaling_ratio = l1_multiplier;
 
 	if (kvm_caps.has_tsc_control)
-		static_call(kvm_x86_write_tsc_multiplier)(vcpu);
+		kvm_x86_call(write_tsc_multiplier)(vcpu);
 }
 
 static inline bool kvm_check_tsc_unstable(void)
@@ -3597,7 +3597,7 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.tlb_flush;
-	static_call(kvm_x86_flush_tlb_all)(vcpu);
+	kvm_x86_call(flush_tlb_all)(vcpu);
 
 	/* Flushing all ASIDs flushes the current ASID... */
 	kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
@@ -3618,7 +3618,7 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
 		kvm_mmu_sync_prev_roots(vcpu);
 	}
 
-	static_call(kvm_x86_flush_tlb_guest)(vcpu);
+	kvm_x86_call(flush_tlb_guest)(vcpu);
 
 	/*
 	 * Flushing all "guest" TLB is always a superset of Hyper-V's fine
@@ -3631,7 +3631,7 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.tlb_flush;
-	static_call(kvm_x86_flush_tlb_current)(vcpu);
+	kvm_x86_call(flush_tlb_current)(vcpu);
 }
 
 /*
@@ -4747,7 +4747,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		 * fringe case that is not enabled except via specific settings
 		 * of the module parameters.
 		 */
-		r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE);
+		r = kvm_x86_call(has_emulated_msr)(kvm, MSR_IA32_SMBASE);
 		break;
 	case KVM_CAP_NR_VCPUS:
 		r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
@@ -4827,7 +4827,7 @@ static int __kvm_x86_dev_get_attr(struct kvm_device_attr *attr, u64 *val)
 {
 	if (attr->group) {
 		if (kvm_x86_ops.dev_get_attr)
-			return static_call(kvm_x86_dev_get_attr)(attr->group, attr->attr, val);
+			return kvm_x86_call(dev_get_attr)(attr->group, attr->attr, val);
 		return -ENXIO;
 	}
@@ -5000,14 +5000,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	/* Address WBINVD may be executed by guest */
 	if (need_emulate_wbinvd(vcpu)) {
-		if (static_call(kvm_x86_has_wbinvd_exit)())
+		if (kvm_x86_call(has_wbinvd_exit)())
 			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
 		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
 			smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1);
 	}
 
-	static_call(kvm_x86_vcpu_load)(vcpu, cpu);
+	kvm_x86_call(vcpu_load)(vcpu, cpu);
 
 	/* Save host pkru register if supported */
 	vcpu->arch.host_pkru = read_pkru();
@@ -5115,14 +5115,14 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	}
 
-	static_call(kvm_x86_vcpu_put)(vcpu);
+	kvm_x86_call(vcpu_put)(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
 }
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 {
-	static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+	kvm_x86_call(sync_pir_to_irr)(vcpu);
 
 	return kvm_apic_get_state(vcpu, s);
 }
@@ -5239,7 +5239,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
 
 	kvm_apic_after_set_mcg_cap(vcpu);
 
-	static_call(kvm_x86_setup_mce)(vcpu);
+	kvm_x86_call(setup_mce)(vcpu);
 out:
 	return r;
 }
@@ -5399,11 +5399,11 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->interrupt.injected =
 		vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
 	events->interrupt.nr = vcpu->arch.interrupt.nr;
-	events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
+	events->interrupt.shadow = kvm_x86_call(get_interrupt_shadow)(vcpu);
 
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = kvm_get_nr_pending_nmis(vcpu);
-	events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu);
+	events->nmi.masked = kvm_x86_call(get_nmi_mask)(vcpu);
 
 	/* events->sipi_vector is never valid when reporting to user space */
@@ -5485,8 +5485,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	vcpu->arch.interrupt.nr = events->interrupt.nr;
 	vcpu->arch.interrupt.soft = events->interrupt.soft;
 	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
-		static_call(kvm_x86_set_interrupt_shadow)(vcpu,
-						  events->interrupt.shadow);
+		kvm_x86_call(set_interrupt_shadow)(vcpu,
+						   events->interrupt.shadow);
 
 	vcpu->arch.nmi_injected = events->nmi.injected;
 	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
@@ -5495,7 +5495,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		if (events->nmi.pending)
 			kvm_make_request(KVM_REQ_NMI, vcpu);
 	}
-	static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
+	kvm_x86_call(set_nmi_mask)(vcpu, events->nmi.masked);
 
 	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
 	    lapic_in_kernel(vcpu))
@@ -5843,7 +5843,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 		if (!kvm_x86_ops.enable_l2_tlb_flush)
 			return -ENOTTY;
 
-		return static_call(kvm_x86_enable_l2_tlb_flush)(vcpu);
+		return kvm_x86_call(enable_l2_tlb_flush)(vcpu);
 
 	case KVM_CAP_HYPERV_ENFORCE_CPUID:
 		return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
@@ -6332,14 +6332,14 @@ static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
 	if (addr > (unsigned int)(-3 * PAGE_SIZE))
 		return -EINVAL;
 
-	ret = static_call(kvm_x86_set_tss_addr)(kvm, addr);
+	ret = kvm_x86_call(set_tss_addr)(kvm, addr);
 	return ret;
 }
 
 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
 {
-	return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr);
+	return kvm_x86_call(set_identity_map_addr)(kvm, ident_addr);
 }
 
 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
@@ -6649,14 +6649,14 @@ split_irqchip_unlock:
 		if (!kvm_x86_ops.vm_copy_enc_context_from)
 			break;
 
-		r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]);
+		r = kvm_x86_call(vm_copy_enc_context_from)(kvm, cap->args[0]);
 		break;
 	case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
 		r = -EINVAL;
 		if (!kvm_x86_ops.vm_move_enc_context_from)
 			break;
 
-		r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]);
+		r = kvm_x86_call(vm_move_enc_context_from)(kvm, cap->args[0]);
 		break;
 	case KVM_CAP_EXIT_HYPERCALL:
 		if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
@@ -7317,7 +7317,7 @@ set_pit2_out:
 		if (!kvm_x86_ops.mem_enc_ioctl)
 			goto out;
 
-		r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp);
+		r = kvm_x86_call(mem_enc_ioctl)(kvm, argp);
 		break;
 	}
 	case KVM_MEMORY_ENCRYPT_REG_REGION: {
@@ -7331,7 +7331,7 @@ set_pit2_out:
 		if (!kvm_x86_ops.mem_enc_register_region)
 			goto out;
 
-		r = static_call(kvm_x86_mem_enc_register_region)(kvm, &region);
+		r = kvm_x86_call(mem_enc_register_region)(kvm, &region);
 		break;
 	}
 	case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
@@ -7345,7 +7345,7 @@ set_pit2_out:
 		if (!kvm_x86_ops.mem_enc_unregister_region)
 			goto out;
 
-		r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region);
+		r = kvm_x86_call(mem_enc_unregister_region)(kvm, &region);
 		break;
 	}
 #ifdef CONFIG_KVM_HYPERV
@@ -7499,7 +7499,8 @@ static void kvm_init_msr_lists(void)
 	}
 
 	for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
-		if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i]))
+		if (!kvm_x86_call(has_emulated_msr)(NULL,
+						    emulated_msrs_all[i]))
 			continue;
 
 		emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
@@ -7558,13 +7559,13 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
 void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
 {
-	static_call(kvm_x86_set_segment)(vcpu, var, seg);
+	kvm_x86_call(set_segment)(vcpu, var, seg);
 }
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
 {
-	static_call(kvm_x86_get_segment)(vcpu, var, seg);
+	kvm_x86_call(get_segment)(vcpu, var, seg);
 }
 
 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
@@ -7587,7 +7588,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 
-	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
 	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
@@ -7597,7 +7598,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 
-	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
 	access |= PFERR_WRITE_MASK;
 	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
@@ -7650,7 +7651,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	unsigned offset;
 	int ret;
@@ -7675,7 +7676,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val,
 			       unsigned int bytes,
 			       struct x86_exception *exception)
 {
-	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
 	/*
 	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
@@ -7698,7 +7699,7 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
 
 	if (system)
 		access |= PFERR_IMPLICIT_ACCESS;
-	else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
+	else if (kvm_x86_call(get_cpl)(vcpu) == 3)
 		access |= PFERR_USER_MASK;
 
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
@@ -7743,7 +7744,7 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
 
 	if (system)
 		access |= PFERR_IMPLICIT_ACCESS;
-	else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
+	else if (kvm_x86_call(get_cpl)(vcpu) == 3)
 		access |= PFERR_USER_MASK;
 
 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
@@ -7764,8 +7765,8 @@ EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
 static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
 				  void *insn, int insn_len)
 {
-	return static_call(kvm_x86_check_emulate_instruction)(vcpu, emul_type,
-							      insn, insn_len);
+	return kvm_x86_call(check_emulate_instruction)(vcpu, emul_type,
						       insn, insn_len);
 }
 
 int handle_ud(struct kvm_vcpu *vcpu)
@@ -7815,8 +7816,8 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 				bool write)
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-	u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
-		| (write ? PFERR_WRITE_MASK : 0);
+	u64 access = ((kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
+		     | (write ? PFERR_WRITE_MASK : 0);
 
 	/*
 	 * currently PKRU is only applied to ept enabled guest so
@@ -8242,7 +8243,7 @@ static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
 
 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
-	return static_call(kvm_x86_get_segment_base)(vcpu, seg);
+	return kvm_x86_call(get_segment_base)(vcpu, seg);
 }
 
 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
@@ -8255,7 +8256,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 	if (!need_emulate_wbinvd(vcpu))
 		return X86EMUL_CONTINUE;
 
-	if (static_call(kvm_x86_has_wbinvd_exit)()) {
+	if (kvm_x86_call(has_wbinvd_exit)()) {
 		int cpu = get_cpu();
 
 		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
@@ -8359,27 +8360,27 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
 
 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
 {
-	return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt));
+	return kvm_x86_call(get_cpl)(emul_to_vcpu(ctxt));
 }
 
 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
 {
-	static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt);
+	kvm_x86_call(get_gdt)(emul_to_vcpu(ctxt), dt);
 }
 
 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
 {
-	static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt);
+	kvm_x86_call(get_idt)(emul_to_vcpu(ctxt), dt);
 }
 
 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
 {
-	static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt);
+	kvm_x86_call(set_gdt)(emul_to_vcpu(ctxt), dt);
 }
 
 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
 {
-	static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt);
+	kvm_x86_call(set_idt)(emul_to_vcpu(ctxt), dt);
 }
 
 static unsigned long emulator_get_cached_segment_base(
@@ -8526,8 +8527,8 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
 			      struct x86_instruction_info *info,
 			      enum x86_intercept_stage stage)
 {
-	return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage,
-						    &ctxt->exception);
+	return kvm_x86_call(check_intercept)(emul_to_vcpu(ctxt), info, stage,
+					     &ctxt->exception);
 }
 
 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
@@ -8569,7 +8570,7 @@ static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulon
 
 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
 {
-	static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked);
+	kvm_x86_call(set_nmi_mask)(emul_to_vcpu(ctxt), masked);
 }
 
 static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt)
@@ -8614,7 +8615,8 @@ static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt,
 	if (!kvm_x86_ops.get_untagged_addr)
 		return addr;
 
-	return static_call(kvm_x86_get_untagged_addr)(emul_to_vcpu(ctxt), addr, flags);
+	return kvm_x86_call(get_untagged_addr)(emul_to_vcpu(ctxt),
+					       addr, flags);
 }
 
 static const struct x86_emulate_ops emulate_ops = {
@@ -8667,7 +8669,7 @@ static const struct x86_emulate_ops emulate_ops = {
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 {
-	u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
+	u32 int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu);
 	/*
 	 * an sti; sti; sequence only disable interrupts for the first
 	 * instruction. So, if the last instruction, be it emulated or
@@ -8678,7 +8680,7 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 	if (int_shadow & mask)
 		mask = 0;
 	if (unlikely(int_shadow || mask)) {
-		static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask);
+		kvm_x86_call(set_interrupt_shadow)(vcpu, mask);
 		if (!mask)
 			kvm_make_request(KVM_REQ_EVENT, vcpu);
 	}
@@ -8719,7 +8721,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 	struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
 	int cs_db, cs_l;
 
-	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
+	kvm_x86_call(get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
 
 	ctxt->gpa_available = false;
 	ctxt->eflags = kvm_get_rflags(vcpu);
@@ -8775,9 +8777,8 @@ static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
 	 */
 	memset(&info, 0, sizeof(info));
 
-	static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1],
-					   &info[2], (u32 *)&info[3],
-					   (u32 *)&info[4]);
+	kvm_x86_call(get_exit_info)(vcpu, (u32 *)&info[0], &info[1], &info[2],
+				    (u32 *)&info[3], (u32 *)&info[4]);
 
 	run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 	run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION;
@@ -8854,7 +8855,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
 
 	kvm_queue_exception(vcpu, UD_VECTOR);
 
-	if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) {
+	if (!is_guest_mode(vcpu) && kvm_x86_call(get_cpl)(vcpu) == 0) {
 		prepare_emulation_ctxt_failure_exit(vcpu);
 		return 0;
 	}
@@ -9012,10 +9013,10 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
 
 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
 {
-	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
+	unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);
 	int r;
 
-	r = static_call(kvm_x86_skip_emulated_instruction)(vcpu);
+	r = kvm_x86_call(skip_emulated_instruction)(vcpu);
 	if (unlikely(!r))
 		return 0;
@@ -9047,7 +9048,7 @@ static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
 	if (!guest_cpuid_is_intel_compatible(vcpu))
 		return false;
 
-	return static_call(kvm_x86_get_interrupt_shadow)(vcpu) & KVM_X86_SHADOW_INT_MOV_SS;
+	return kvm_x86_call(get_interrupt_shadow)(vcpu) & KVM_X86_SHADOW_INT_MOV_SS;
 }
 
 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu,
@@ -9319,7 +9320,7 @@ restart:
 
 writeback:
 	if (writeback) {
-		unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
+		unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
@@ -9336,7 +9337,7 @@ writeback:
 		kvm_rip_write(vcpu, ctxt->eip);
 		if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
 			r = kvm_vcpu_do_singlestep(vcpu);
-		static_call(kvm_x86_update_emulated_instruction)(vcpu);
+		kvm_x86_call(update_emulated_instruction)(vcpu);
 		__kvm_set_rflags(vcpu, ctxt->eflags);
 	}
@@ -9735,7 +9736,7 @@ static int kvm_x86_check_processor_compatibility(void)
 	    __cr4_reserved_bits(cpu_has, &boot_cpu_data))
 		return -EIO;
 
-	return static_call(kvm_x86_check_processor_compatibility)();
+	return kvm_x86_call(check_processor_compatibility)();
 }
 
 static void kvm_x86_check_cpu_compat(void *ret)
@@ -9878,7 +9879,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 
 out_unwind_ops:
 	kvm_x86_ops.hardware_enable = NULL;
-	static_call(kvm_x86_hardware_unsetup)();
+	kvm_x86_call(hardware_unsetup)();
 out_mmu_exit:
 	kvm_mmu_vendor_module_exit();
 out_free_percpu:
@@ -9909,7 +9910,7 @@ void kvm_x86_vendor_exit(void)
 	irq_work_sync(&pvclock_irq_work);
 	cancel_work_sync(&pvclock_gtod_work);
 #endif
-	static_call(kvm_x86_hardware_unsetup)();
+	kvm_x86_call(hardware_unsetup)();
 	kvm_mmu_vendor_module_exit();
 	free_percpu(user_return_msrs);
 	kmem_cache_destroy(x86_emulator_cache);
@@ -10035,7 +10036,8 @@ EXPORT_SYMBOL_GPL(kvm_apicv_activated);
 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
 {
 	ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons);
-	ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu);
+	ulong vcpu_reasons =
+			kvm_x86_call(vcpu_get_apicv_inhibit_reasons)(vcpu);
 
 	return (vm_reasons | vcpu_reasons) == 0;
 }
@@ -10221,7 +10223,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	a2 = kvm_rdx_read(vcpu);
 	a3 = kvm_rsi_read(vcpu);
 	op_64_bit = is_64_bit_hypercall(vcpu);
-	cpl = static_call(kvm_x86_get_cpl)(vcpu);
+	cpl = kvm_x86_call(get_cpl)(vcpu);
 
 	ret = __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl);
 	if (nr == KVM_HC_MAP_GPA_RANGE && !ret)
@@ -10253,7 +10255,7 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
-	static_call(kvm_x86_patch_hypercall)(vcpu, instruction);
+	kvm_x86_call(patch_hypercall)(vcpu, instruction);
 
 	return emulator_write_emulated(ctxt, rip, instruction, 3, &ctxt->exception);
@@ -10270,7 +10272,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *kvm_run = vcpu->run;
 
-	kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu);
+	kvm_run->if_flag = kvm_x86_call(get_if_flag)(vcpu);
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
@@ -10307,7 +10309,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 
 	tpr = kvm_lapic_get_cr8(vcpu);
 
-	static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr);
+	kvm_x86_call(update_cr8_intercept)(vcpu, tpr, max_irr);
 }
@@ -10337,7 +10339,7 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 				   vcpu->arch.exception.error_code,
 				   vcpu->arch.exception.injected);
 
-	static_call(kvm_x86_inject_exception)(vcpu);
+	kvm_x86_call(inject_exception)(vcpu);
 }
 
 /*
@@ -10423,9 +10425,9 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
 	else if (kvm_is_exception_pending(vcpu))
 		; /* see above */
 	else if (vcpu->arch.nmi_injected)
-		static_call(kvm_x86_inject_nmi)(vcpu);
+		kvm_x86_call(inject_nmi)(vcpu);
 	else if (vcpu->arch.interrupt.injected)
-		static_call(kvm_x86_inject_irq)(vcpu, true);
+		kvm_x86_call(inject_irq)(vcpu, true);
 
 	/*
 	 * Exceptions that morph to VM-Exits are handled above, and pending
@@ -10510,7 +10512,8 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
 	 */
 #ifdef CONFIG_KVM_SMM
 	if (vcpu->arch.smi_pending) {
-		r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
+		r = can_inject ? kvm_x86_call(smi_allowed)(vcpu, true) :
+				 -EBUSY;
 		if (r < 0)
 			goto out;
 		if (r) {
@@ -10519,27 +10522,29 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
 			enter_smm(vcpu);
 			can_inject = false;
 		} else
-			static_call(kvm_x86_enable_smi_window)(vcpu);
+			kvm_x86_call(enable_smi_window)(vcpu);
 	}
 #endif
 
 	if (vcpu->arch.nmi_pending) {
-		r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;
+		r = can_inject ? kvm_x86_call(nmi_allowed)(vcpu, true) :
+				 -EBUSY;
 		if (r < 0)
 			goto out;
 		if (r) {
 			--vcpu->arch.nmi_pending;
 			vcpu->arch.nmi_injected = true;
-			static_call(kvm_x86_inject_nmi)(vcpu);
+			kvm_x86_call(inject_nmi)(vcpu);
 			can_inject = false;
-			WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0);
+			WARN_ON(kvm_x86_call(nmi_allowed)(vcpu, true) < 0);
 		}
 		if (vcpu->arch.nmi_pending)
-			static_call(kvm_x86_enable_nmi_window)(vcpu);
+			kvm_x86_call(enable_nmi_window)(vcpu);
 	}
 
 	if (kvm_cpu_has_injectable_intr(vcpu)) {
-		r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY;
+		r = can_inject ? kvm_x86_call(interrupt_allowed)(vcpu, true) :
+				 -EBUSY;
 		if (r < 0)
 			goto out;
 		if (r) {
@@ -10547,12 +10552,12 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
 
 			if (!WARN_ON_ONCE(irq == -1)) {
 				kvm_queue_interrupt(vcpu, irq, false);
-				static_call(kvm_x86_inject_irq)(vcpu, false);
-				WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0);
+				kvm_x86_call(inject_irq)(vcpu, false);
+				WARN_ON(kvm_x86_call(interrupt_allowed)(vcpu, true) < 0);
			}
		}

		if (kvm_cpu_has_injectable_intr(vcpu))
-			static_call(kvm_x86_enable_irq_window)(vcpu);
+			kvm_x86_call(enable_irq_window)(vcpu);
 	}
 
 	if (is_guest_mode(vcpu) &&
@@ -10598,7 +10603,7 @@ static void process_nmi(struct kvm_vcpu *vcpu)
 	 * blocks NMIs). KVM will immediately inject one of the two NMIs, and
 	 * will request an NMI window to handle the second NMI.
 	 */
-	if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected)
+	if (kvm_x86_call(get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected)
 		limit = 1;
 	else
 		limit = 2;
@@ -10607,14 +10612,14 @@ static void process_nmi(struct kvm_vcpu *vcpu)
 	 * Adjust the limit to account for pending virtual NMIs, which aren't
 	 * tracked in vcpu->arch.nmi_pending.
 	 */
-	if (static_call(kvm_x86_is_vnmi_pending)(vcpu))
+	if (kvm_x86_call(is_vnmi_pending)(vcpu))
 		limit--;
 
 	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
 	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
 
 	if (vcpu->arch.nmi_pending &&
-	    (static_call(kvm_x86_set_vnmi_pending)(vcpu)))
+	    (kvm_x86_call(set_vnmi_pending)(vcpu)))
 		vcpu->arch.nmi_pending--;
 
 	if (vcpu->arch.nmi_pending)
@@ -10625,7 +10630,7 @@ static void process_nmi(struct kvm_vcpu *vcpu)
 int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.nmi_pending +
-	       static_call(kvm_x86_is_vnmi_pending)(vcpu);
+	       kvm_x86_call(is_vnmi_pending)(vcpu);
 }
 
 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
@@ -10659,7 +10664,7 @@ void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
 
 	apic->apicv_active = activate;
 	kvm_apic_update_apicv(vcpu);
-	static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
+	kvm_x86_call(refresh_apicv_exec_ctrl)(vcpu);
 
 	/*
 	 * When APICv gets disabled, we may still have injected interrupts
@@ -10759,7 +10764,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 
 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
 
-	static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+	kvm_x86_call(sync_pir_to_irr)(vcpu);
 
 	if (irqchip_split(vcpu->kvm))
 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
@@ -10784,17 +10789,17 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 		bitmap_or((ulong *)eoi_exit_bitmap,
 			  vcpu->arch.ioapic_handled_vectors,
 			  to_hv_synic(vcpu)->vec_bitmap, 256);
-		static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
+		kvm_x86_call(load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
 		return;
 	}
 #endif
-	static_call(kvm_x86_load_eoi_exitmap)(
+	kvm_x86_call(load_eoi_exitmap)(
 		vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
 }
 
 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
 {
-	static_call(kvm_x86_guest_memory_reclaimed)(kvm);
+	kvm_x86_call(guest_memory_reclaimed)(kvm);
 }
 
 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
@@ -10802,7 +10807,7 @@ static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 	if (!lapic_in_kernel(vcpu))
 		return;
 
-	static_call(kvm_x86_set_apic_access_page_addr)(vcpu);
+	kvm_x86_call(set_apic_access_page_addr)(vcpu);
 }
 
 /*
@@ -10966,10 +10971,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
 			kvm_check_async_pf_completion(vcpu);
 		if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
-			static_call(kvm_x86_msr_filter_changed)(vcpu);
+			kvm_x86_call(msr_filter_changed)(vcpu);
 
 		if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
-			static_call(kvm_x86_update_cpu_dirty_logging)(vcpu);
+			kvm_x86_call(update_cpu_dirty_logging)(vcpu);
 
 		if (kvm_check_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu)) {
 			kvm_vcpu_reset(vcpu, true);
@@ -10999,7 +11004,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			goto out;
 		}
 		if (req_int_win)
-			static_call(kvm_x86_enable_irq_window)(vcpu);
+			kvm_x86_call(enable_irq_window)(vcpu);
 
 		if (kvm_lapic_enabled(vcpu)) {
 			update_cr8_intercept(vcpu);
@@ -11014,7 +11019,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	preempt_disable();
 
-	static_call(kvm_x86_prepare_switch_to_guest)(vcpu);
+	kvm_x86_call(prepare_switch_to_guest)(vcpu);
 
 	/*
 	 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt
@@ -11050,7 +11055,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	 * i.e. they can post interrupts even if APICv is temporarily disabled.
 	 */
 	if (kvm_lapic_enabled(vcpu))
-		static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+		kvm_x86_call(sync_pir_to_irr)(vcpu);
 
 	if (kvm_vcpu_exit_request(vcpu)) {
 		vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -11094,12 +11099,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
 			     (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
 
-		exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu, req_immediate_exit);
+		exit_fastpath = kvm_x86_call(vcpu_run)(vcpu,
+						       req_immediate_exit);
 		if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
 			break;
 
 		if (kvm_lapic_enabled(vcpu))
-			static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+			kvm_x86_call(sync_pir_to_irr)(vcpu);
 
 		if (unlikely(kvm_vcpu_exit_request(vcpu))) {
 			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
@@ -11118,7 +11124,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	 */
 	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
 		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
-		static_call(kvm_x86_sync_dirty_debug_regs)(vcpu);
+		kvm_x86_call(sync_dirty_debug_regs)(vcpu);
 		kvm_update_dr0123(vcpu);
 		kvm_update_dr7(vcpu);
 	}
@@ -11147,7 +11153,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.xfd_no_write_intercept)
 		fpu_sync_guest_vmexit_xfd_state();
 
-	static_call(kvm_x86_handle_exit_irqoff)(vcpu);
+	kvm_x86_call(handle_exit_irqoff)(vcpu);
 
 	if (vcpu->arch.guest_fpu.xfd_err)
 		wrmsrl(MSR_IA32_XFD_ERR, 0);
@@ -11199,13 +11205,13 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.apic_attention)
 		kvm_lapic_sync_from_vapic(vcpu);
 
-	r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath);
+	r = kvm_x86_call(handle_exit)(vcpu, exit_fastpath);
 	return r;
 
 cancel_injection:
 	if (req_immediate_exit)
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
-	static_call(kvm_x86_cancel_injection)(vcpu);
+	kvm_x86_call(cancel_injection)(vcpu);
 	if (unlikely(vcpu->arch.apic_attention))
 		kvm_lapic_sync_from_vapic(vcpu);
 out:
@@ -11527,7 +11533,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	r = static_call(kvm_x86_vcpu_pre_run)(vcpu);
+	r = kvm_x86_call(vcpu_pre_run)(vcpu);
 	if (r <= 0)
 		goto out;
@@ -11655,10 +11661,10 @@ static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
 	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
-	static_call(kvm_x86_get_idt)(vcpu, &dt);
+	kvm_x86_call(get_idt)(vcpu, &dt);
 	sregs->idt.limit = dt.size;
 	sregs->idt.base = dt.address;
-	static_call(kvm_x86_get_gdt)(vcpu, &dt);
+	kvm_x86_call(get_gdt)(vcpu, &dt);
 	sregs->gdt.limit = dt.size;
 	sregs->gdt.base = dt.address;
@@ -11864,27 +11870,27 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
 
 	dt.size = sregs->idt.limit;
 	dt.address = sregs->idt.base;
-	static_call(kvm_x86_set_idt)(vcpu, &dt);
+	kvm_x86_call(set_idt)(vcpu, &dt);
 	dt.size = sregs->gdt.limit;
 	dt.address = sregs->gdt.base;
-	static_call(kvm_x86_set_gdt)(vcpu, &dt);
+	kvm_x86_call(set_gdt)(vcpu, &dt);
 
 	vcpu->arch.cr2 = sregs->cr2;
 	*mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
 	vcpu->arch.cr3 = sregs->cr3;
 	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
-	static_call(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
+	kvm_x86_call(post_set_cr3)(vcpu, sregs->cr3);
 
 	kvm_set_cr8(vcpu, sregs->cr8);
 
 	*mmu_reset_needed |= vcpu->arch.efer != sregs->efer;