// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"
#ifdef CONFIG_X86_64
static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
#endif

static bool is_tdp_mmu_enabled(void)
{
#ifdef CONFIG_X86_64
	return tdp_enabled && READ_ONCE(tdp_mmu_enabled);
#else
	return false;
#endif /* CONFIG_X86_64 */
}

/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
	if (!is_tdp_mmu_enabled())
		return;

	/* This should not be changed for the lifetime of the VM. */
	kvm->arch.tdp_mmu_enabled = true;

	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
	if (!kvm->arch.tdp_mmu_enabled)
		return;

	WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
}
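
/*
 * Drop a reference to @root; if kvm_mmu_put_root() reports that this was the
 * last reference, tear the root down and free it.
 */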
static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	if (kvm_mmu_put_root(kvm, root))
		kvm_tdp_mmu_free_root(kvm, root);
}
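
/*
 * Helpers for for_each_tdp_mmu_root_yield_safe(): tdp_mmu_next_root_valid()
 * stops the walk once the list head is reached and takes a reference on the
 * current root, while tdp_mmu_next_root() advances to the next root and drops
 * the reference on the root that was just processed.
 */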
static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
					   struct kvm_mmu_page *root)
{
	lockdep_assert_held(&kvm->mmu_lock);

	if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
		return false;

	kvm_mmu_get_root(kvm, root);
	return true;
}

static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	struct kvm_mmu_page *next_root;

	next_root = list_next_entry(root, link);
	tdp_mmu_put_root(kvm, root);
	return next_root;
}

/*
 * Note: this iterator gets and puts references to the roots it iterates over.
 * This makes it safe to release the MMU lock and yield within the loop, but
 * if exiting the loop early, the caller must drop the reference to the most
 * recent root. (Unless keeping a live reference is desirable.)
 */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)			\
	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
				      typeof(*_root), link);		\
	     tdp_mmu_next_root_valid(_kvm, _root);			\
	     _root = tdp_mmu_next_root(_kvm, _root))

#define for_each_tdp_mmu_root(_kvm, _root)				\
	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
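
/*
 * Illustrative sketch only (not a helper defined here): a typical user of
 * for_each_tdp_mmu_root_yield_safe() zaps a GFN range under every root and
 * accumulates whether a TLB flush is needed, e.g. with the zap_gfn_range()
 * helper declared below:
 *
 *	struct kvm_mmu_page *root;
 *	bool flush = false;
 *
 *	for_each_tdp_mmu_root_yield_safe(kvm, root)
 *		flush = zap_gfn_range(kvm, root, start, end, true, flush);
 *
 * A caller that breaks out of such a loop early must drop the reference taken
 * on the current root, per the note above.
 */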

bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
	struct kvm_mmu_page *sp;

	if (!kvm->arch.tdp_mmu_enabled)
		return false;
	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	sp = to_shadow_page(hpa);
	if (WARN_ON(!sp))
		return false;

	return sp->tdp_mmu_page && sp->root_count;
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
			  gfn_t start, gfn_t end, bool can_yield, bool flush);
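
/*
 * Tear down @root once its last reference has been dropped: unlink it from
 * tdp_mmu_roots, zap every GFN it still maps, and free the page table page
 * and its metadata. The caller must hold mmu_lock.
 */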
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);

	lockdep_assert_held(&kvm->mmu_lock);

	WARN_ON(root->root_count);
	WARN_ON(!root->tdp_mmu_page);

	list_del(&root->link);

	zap_gfn_range(kvm, root, 0, max_gfn, false, false);

	free_page((unsigned long)root->spt);
	kmem_cache_free(mmu_page_header_cache, root);
}
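
/*
 * Build the page role used for TDP MMU pages at @level: TDP MMU pages are
 * always direct-mapped with 8-byte PTEs and full access permissions, with the
 * remaining role bits inherited from the vCPU's base MMU role.
 */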
static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
						   int level)
{
	union kvm_mmu_page_role role;

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = true;
	role.gpte_is_8_bytes = true;
	role.access = ACC_ALL;

	return role;
}
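
/* Allocate a shadow page for the TDP MMU covering @gfn at the given @level. */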
static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
					       int level)
{
	struct kvm_mmu_page *sp;