author     Sean Christopherson <seanjc@google.com>    2024-10-09 08:04:52 -0700
committer  Sean Christopherson <seanjc@google.com>    2024-12-16 14:37:30 -0800
commit     6e2b2358b3ef870d24109083d2e314d04fc72de4 (patch)
tree       98c689e4e3c33fbc70e36db603e23c487dd63408 /virt
parent     0664dc74e9d004c36b4400081811df795169809a (diff)
KVM: Grab vcpu->mutex across installing the vCPU's fd and bumping online_vcpus
During vCPU creation, acquire vcpu->mutex prior to exposing the vCPU to
userspace, and hold the mutex until online_vcpus is bumped, i.e. until the
vCPU is fully online from KVM's perspective.
To ensure asynchronous vCPU ioctls also wait for the vCPU to come online,
explicitly check online_vcpus at the start of kvm_vcpu_ioctl(), and take
the vCPU's mutex to wait if necessary (having to wait for any ioctl should
be exceedingly rare, i.e. not worth optimizing).
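
The window being closed is easiest to see from userspace. Below is a hypothetical, minimal reproducer sketch, not the reproducer from the linked reports; the sequential-fd guess and all names are assumptions. One thread hammers ioctl()s on the fd number the new vCPU is expected to receive while another thread invokes KVM_CREATE_VCPU, so an ioctl() can arrive after create_vcpu_fd() installs the fd but before online_vcpus is bumped:

#include <fcntl.h>
#include <pthread.h>
#include <stdatomic.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vcpu_fd_guess;
static atomic_bool stop;

/* Spam vCPU ioctl()s on the predicted fd.  Before this fix, a call
 * could land in the window between fd_install() and the online_vcpus
 * increment; with the fix, the ioctl() blocks on vcpu->mutex until
 * the vCPU is fully online. */
static void *racer(void *arg)
{
        (void)arg;
        while (!atomic_load(&stop))
                ioctl(vcpu_fd_guess, KVM_RUN, 0);
        return NULL;
}

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        pthread_t t;

        /* Assumes fds are handed out sequentially, so the vCPU fd
         * will be vm + 1; a real reproducer must win this guess. */
        vcpu_fd_guess = vm + 1;
        pthread_create(&t, NULL, racer, NULL);

        ioctl(vm, KVM_CREATE_VCPU, 0);  /* races against the ioctl()s */

        atomic_store(&stop, true);
        pthread_join(t, NULL);
        return 0;
}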
Reported-by: Will Deacon <will@kernel.org>
Reported-by: Michal Luczaj <mhal@rbox.co>
Link: https://lore.kernel.org/all/20240730155646.1687-1-will@kernel.org
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20241009150455.1057573-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c  47
1 file changed, 46 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index de2c11dae231..fb117af473e5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4132,7 +4132,14 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
 	if (r)
 		goto unlock_vcpu_destroy;
 
-	/* Now it's all set up, let userspace reach it */
+	/*
+	 * Now it's all set up, let userspace reach it.  Grab the vCPU's mutex
+	 * so that userspace can't invoke vCPU ioctl()s until the vCPU is fully
+	 * visible (per online_vcpus), e.g. so that KVM doesn't get tricked
+	 * into a NULL-pointer dereference because KVM thinks the _current_
+	 * vCPU doesn't exist.
+	 */
+	mutex_lock(&vcpu->mutex);
 	kvm_get_kvm(kvm);
 	r = create_vcpu_fd(vcpu);
 	if (r < 0)
@@ -4149,6 +4156,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
 	 */
 	smp_wmb();
 	atomic_inc(&kvm->online_vcpus);
+	mutex_unlock(&vcpu->mutex);
 
 	mutex_unlock(&kvm->lock);
 	kvm_arch_vcpu_postcreate(vcpu);
@@ -4156,6 +4164,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
 		return r;
 
 kvm_put_xa_release:
+	mutex_unlock(&vcpu->mutex);
 	kvm_put_kvm_no_destroy(kvm);
 	xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
 unlock_vcpu_destroy:
@@ -4282,6 +4291,33 @@ static int kvm_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
 }
 #endif
 
+static int kvm_wait_for_vcpu_online(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+
+	/*
+	 * In practice, this happy path will always be taken, as a well-behaved
+	 * VMM will never invoke a vCPU ioctl() before KVM_CREATE_VCPU returns.
+	 */
+	if (likely(vcpu->vcpu_idx < atomic_read(&kvm->online_vcpus)))
+		return 0;
+
+	/*
+	 * Acquire and release the vCPU's mutex to wait for vCPU creation to
+	 * complete (kvm_vm_ioctl_create_vcpu() holds the mutex until the vCPU
+	 * is fully online).
+	 */
+	if (mutex_lock_killable(&vcpu->mutex))
+		return -EINTR;
+
+	mutex_unlock(&vcpu->mutex);
+
+	if (WARN_ON_ONCE(!kvm_get_vcpu(kvm, vcpu->vcpu_idx)))
+		return -EIO;
+
+	return 0;
+}
+
 static long kvm_vcpu_ioctl(struct file *filp,
 			   unsigned int ioctl, unsigned long arg)
 {
@@ -4298,6 +4334,15 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		return -EINVAL;
 
 	/*
+	 * Wait for the vCPU to be online before handling the ioctl(), as KVM
+	 * assumes the vCPU is reachable via vcpu_array, i.e. may dereference
+	 * a NULL pointer if userspace invokes an ioctl() before KVM is ready.
+	 */
+	r = kvm_wait_for_vcpu_online(vcpu);
+	if (r)
+		return r;
+
+	/*
 	 * Some architectures have vcpu ioctls that are asynchronous to vcpu
 	 * execution; mutex_lock() would break them.
 	 */
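
For readers applying the idea elsewhere, the diff boils down to using a mutex as a completion barrier: the creator publishes the object only while holding its lock, and late arrivals acquire and immediately release that lock to wait out an in-flight creation. A minimal userspace sketch of the same idiom follows; all names are illustrative, with pthreads and C11 atomics standing in for the kernel primitives:

#include <pthread.h>
#include <stdatomic.h>

struct obj {
        pthread_mutex_t lock;
        atomic_bool online;     /* stands in for the online_vcpus check */
};

/* Creator: make the object reachable only while holding its lock, and
 * flag it online before dropping the lock (mirrors holding vcpu->mutex
 * across create_vcpu_fd() and the online_vcpus bump). */
static void publish(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        /* ... install fd / make the object reachable ... */
        atomic_store(&o->online, 1);
        pthread_mutex_unlock(&o->lock);
}

/* Consumer: if the object isn't online yet, acquiring and immediately
 * releasing its lock waits for any in-flight creation to complete
 * (mirrors kvm_wait_for_vcpu_online()). */
static int wait_online(struct obj *o)
{
        if (atomic_load(&o->online))    /* happy path */
                return 0;

        pthread_mutex_lock(&o->lock);
        pthread_mutex_unlock(&o->lock);

        return atomic_load(&o->online) ? 0 : -1;  /* -1: creation failed */
}

The recheck after the lock/unlock pair matters for the same reason kvm_get_vcpu() is rechecked in the patch: reaching the lock proves creation is no longer in flight, not that it succeeded.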