After the vcpu_load/vcpu_put pushdown, the handling of asynchronous VCPU
ioctls is already much clearer: it is now obvious that they bypass
vcpu_load and vcpu_put.

However, the result is still not perfect: whether the VCPU mutex is held
when kvm_arch_vcpu_ioctl runs depends on the ioctl, and that distinction
is hidden in the generic caller.  Separate the asynchronous ioctls into a
new function, kvm_arch_vcpu_async_ioctl, which returns -ENOIOCTLCMD for
the more "traditional" synchronous ioctls.

Cc: James Hogan <jho...@kernel.org>
Cc: Paul Mackerras <pau...@ozlabs.org>
Cc: Christian Borntraeger <borntrae...@de.ibm.com>
Reviewed-by: Christoffer Dall <christoffer.d...@linaro.org>
Reviewed-by: Cornelia Huck <coh...@redhat.com>
Suggested-by: Cornelia Huck <coh...@redhat.com>
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 arch/mips/kvm/Kconfig      |  1 +
 arch/mips/kvm/mips.c       | 15 ++++++++++++---
 arch/powerpc/kvm/Kconfig   |  1 +
 arch/powerpc/kvm/powerpc.c | 14 +++++++++++---
 arch/s390/kvm/Kconfig      |  1 +
 arch/s390/kvm/kvm-s390.c   | 16 ++++++++++++----
 include/linux/kvm_host.h   | 11 +++++++++++
 virt/kvm/kvm_main.c        | 12 +++++-------
 8 files changed, 54 insertions(+), 17 deletions(-)

diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index b17447ce8873..76b93a9c8c9b 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -22,6 +22,7 @@ config KVM
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+       select HAVE_KVM_VCPU_ASYNC_IOCTL
        select KVM_MMIO
        select MMU_NOTIFIER
        select SRCU
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 9200b3def440..2549fdd27ee1 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -903,12 +903,11 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
        return r;
 }
 
-long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
-                        unsigned long arg)
+long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
+                              unsigned long arg)
 {
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
-       long r;
 
        if (ioctl == KVM_INTERRUPT) {
                struct kvm_mips_interrupt irq;
@@ -921,6 +920,16 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
                return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
        }
 
+       return -ENOIOCTLCMD;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+                        unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
+
        vcpu_load(vcpu);
 
        switch (ioctl) {
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index b12b8eb39c29..f884a0529dfe 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -22,6 +22,7 @@ config KVM
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select HAVE_KVM_EVENTFD
+       select HAVE_KVM_VCPU_ASYNC_IOCTL
        select SRCU
        select KVM_VFIO
        select IRQ_BYPASS_MANAGER
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index ba8134a989c1..66a310779de5 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1607,12 +1607,11 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
        return -EINVAL;
 }
 
-long kvm_arch_vcpu_ioctl(struct file *filp,
-                         unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+                              unsigned int ioctl, unsigned long arg)
 {
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
-       long r;
 
        if (ioctl == KVM_INTERRUPT) {
                struct kvm_interrupt irq;
@@ -1620,6 +1619,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                        return -EFAULT;
                return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
        }
+       return -ENOIOCTLCMD;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+                         unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
 
        vcpu_load(vcpu);
 
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 9a4594e0a1ff..a3dbd459cce9 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -23,6 +23,7 @@ config KVM
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select HAVE_KVM_CPU_RELAX_INTERCEPT
+       select HAVE_KVM_VCPU_ASYNC_IOCTL
        select HAVE_KVM_EVENTFD
        select KVM_ASYNC_PF
        select KVM_ASYNC_PF_SYNC
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 9700d71cb691..40f0ae5a883f 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3725,13 +3725,11 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
        return r;
 }
 
-long kvm_arch_vcpu_ioctl(struct file *filp,
-                        unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+                              unsigned int ioctl, unsigned long arg)
 {
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
-       int idx;
-       long r;
 
        switch (ioctl) {
        case KVM_S390_IRQ: {
@@ -3752,6 +3750,16 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                return kvm_s390_inject_vcpu(vcpu, &s390irq);
        }
        }
+       return -ENOIOCTLCMD;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+                        unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       int idx;
+       long r;
 
        vcpu_load(vcpu);
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 09de0ff3d677..23a680b4c932 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1260,4 +1260,15 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
 }
 #endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
 
+#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+                              unsigned int ioctl, unsigned long arg);
+#else
+static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
+                                            unsigned int ioctl, unsigned long arg)
+{
+       return -ENOIOCTLCMD;
+}
+#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
+
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 19c184fa1839..b4414842b023 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2544,15 +2544,13 @@ static long kvm_vcpu_ioctl(struct file *filp,
        if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
                return -EINVAL;
 
-#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
        /*
-        * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
-        * so vcpu_load() would break it.
+        * Some architectures have vcpu ioctls that are asynchronous to vcpu
+        * execution; mutex_lock() would break them.
         */
-       if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
-               return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
-#endif
-
+       r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
+       if (r != -ENOIOCTLCMD)
+               return r;
 
        if (mutex_lock_killable(&vcpu->mutex))
                return -EINTR;
-- 
1.8.3.1
