Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c  26
1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b7675a58d663..5fcd401a5897 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -49,6 +49,7 @@
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>
+#include <linux/rseq.h>
#include <asm/processor.h>
#include <asm/ioctl.h>
@@ -4026,7 +4027,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
yielded = kvm_vcpu_yield_to(vcpu);
if (yielded > 0) {
- WRITE_ONCE(kvm->last_boosted_vcpu, i);
+ WRITE_ONCE(kvm->last_boosted_vcpu, idx);
break;
} else if (yielded < 0 && !--try) {
break;
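
The fix above records the wrapped vCPU index (idx) rather than the raw loop counter (i), so the next directed-yield pass resumes from the vCPU that actually yielded. A minimal standalone sketch of the wrap-around scan; try_boost() and NR_VCPUS are invented stand-ins for the real kvm_vcpu_yield_to()/kvm_for_each_vcpu() machinery:

    #include <stdio.h>

    #define NR_VCPUS 4

    /* Hypothetical stand-in for kvm_vcpu_yield_to() succeeding on vCPU 2. */
    static int try_boost(int idx)
    {
        return idx == 2;
    }

    int main(void)
    {
        int last_boosted = 1;
        int i;

        /* Scan every vCPU starting just past the last boosted one,
         * wrapping around the end of the array. */
        for (i = 0; i < NR_VCPUS; i++) {
            int idx = (last_boosted + i + 1) % NR_VCPUS;

            if (try_boost(idx)) {
                /* Record the wrapped index: here idx == 2 while the
                 * loop counter i == 0. Storing i (the pre-fix bug)
                 * would skew where the next scan starts. */
                last_boosted = idx;
                break;
            }
        }
        printf("last_boosted = %d\n", last_boosted);
        return 0;
    }
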
@@ -4434,10 +4435,10 @@ static long kvm_vcpu_ioctl(struct file *filp,
return r;
/*
- * Some architectures have vcpu ioctls that are asynchronous to vcpu
- * execution; mutex_lock() would break them.
+ * Let arch code handle select vCPU ioctls without holding vcpu->mutex,
+ * e.g. to support ioctls that can run asynchronous to vCPU execution.
*/
- r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
+ r = kvm_arch_vcpu_unlocked_ioctl(filp, ioctl, arg);
if (r != -ENOIOCTLCMD)
return r;
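
kvm_arch_vcpu_unlocked_ioctl() is the renamed arch hook formerly called kvm_arch_vcpu_async_ioctl(); architectures with nothing to handle outside vcpu->mutex simply decline. A hedged sketch of what the no-op default plausibly looks like, with the signature inferred from the call site and the config symbol carried over from the old async-ioctl guard (both assumptions, not the actual stub):

    #ifndef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL	/* config symbol assumed */
    static inline long kvm_arch_vcpu_unlocked_ioctl(struct file *filp,
                                                    unsigned int ioctl,
                                                    unsigned long arg)
    {
        /* Decline everything: the caller then takes vcpu->mutex and
         * dispatches through the normal locked path. */
        return -ENOIOCTLCMD;
    }
    #endif
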
@@ -4476,6 +4477,12 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = kvm_arch_vcpu_ioctl_run(vcpu);
vcpu->wants_to_run = false;
+ /*
+ * FIXME: Remove this hack once all KVM architectures
+ * support the generic TIF bits, i.e. a dedicated TIF_RSEQ.
+ */
+ rseq_virt_userspace_exit();
+
trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;
}
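
The rseq_virt_userspace_exit() call papers over architectures whose exit-to-user path does not yet use the generic TIF bits, so the task's rseq state is reconciled after KVM_RUN. For context, rseq-registered userspace (glibc 2.35+ registers automatically) expects fields like cpu_id in its struct rseq area to be current the moment ioctl(KVM_RUN) returns. A hedged userspace illustration, assuming glibc's __rseq_offset export and a compiler providing __builtin_thread_pointer():

    #include <stdio.h>
    #include <sys/rseq.h>	/* glibc >= 2.35: struct rseq, __rseq_offset */

    int main(void)
    {
        /* glibc registers the thread's rseq area at __rseq_offset
         * bytes from the thread pointer. */
        struct rseq *rs = (struct rseq *)
            ((char *)__builtin_thread_pointer() + __rseq_offset);

        /* The kernel must refresh this before any return to userspace,
         * including returns from ioctl(vcpu_fd, KVM_RUN, 0). */
        printf("running on cpu %u\n", rs->cpu_id);
        return 0;
    }
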
@@ -4928,8 +4935,8 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#ifdef CONFIG_KVM_GUEST_MEMFD
case KVM_CAP_GUEST_MEMFD:
return 1;
- case KVM_CAP_GUEST_MEMFD_MMAP:
- return !kvm || kvm_arch_supports_gmem_mmap(kvm);
+ case KVM_CAP_GUEST_MEMFD_FLAGS:
+ return kvm_gmem_get_supported_flags(kvm);
#endif
default:
break;
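
With this change the capability's return value is no longer a 0/1 boolean: userspace should read it as the set of guest_memfd flags the host supports, as returned by kvm_gmem_get_supported_flags(). A short sketch of the query, assuming kernel headers new enough to define KVM_CAP_GUEST_MEMFD_FLAGS:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);

        if (kvm < 0) {
            perror("open /dev/kvm");
            return 1;
        }
        /* A non-zero result is a mask of supported guest_memfd flags,
         * not a simple boolean. */
        int flags = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_MEMFD_FLAGS);

        printf("guest_memfd flags: %#x\n", flags);
        close(kvm);
        return 0;
    }
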
@@ -6521,7 +6528,9 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
if (WARN_ON_ONCE(r))
goto err_vfio;
- kvm_gmem_init(module);
+ r = kvm_gmem_init(module);
+ if (r)
+ goto err_gmem;
r = kvm_init_virtualization();
if (r)
@@ -6542,6 +6551,8 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
err_register:
kvm_uninit_virtualization();
err_virt:
+ kvm_gmem_exit();
+err_gmem:
kvm_vfio_ops_exit();
err_vfio:
kvm_async_pf_deinit();
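
The new err_gmem label follows the kernel's usual goto-unwind idiom: each init step's undo sits directly above the label that the *next* step jumps to on failure, so cleanup runs in LIFO order. A generic, self-contained illustration of the pattern (all names invented):

    #include <stdio.h>

    static int  step_a(void) { return 0; }
    static int  step_b(void) { return -1; }	/* pretend this fails */
    static void undo_a(void) { puts("undo_a"); }

    static int init_all(void)
    {
        int r;

        r = step_a();
        if (r)
            goto err_a;
        r = step_b();
        if (r)
            goto err_b;
        return 0;
    err_b:
        undo_a();	/* unwind in reverse order of setup */
    err_a:
        return r;
    }

    int main(void)
    {
        return init_all() ? 1 : 0;
    }
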
@@ -6573,6 +6584,7 @@ void kvm_exit(void)
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
+ kvm_gmem_exit();
kvm_vfio_ops_exit();
kvm_async_pf_deinit();
kvm_irqfd_exit();