In order to differentiate between architectures that require no extra
synchronisation when accessing the dirty ring and those that do,
add a new capability (KVM_CAP_DIRTY_LOG_RING_ORDERED) that identifies
the latter sort. TSO architectures can obviously advertise both, while
relaxed architectures must only advertise the ORDERED version.

Suggested-by: Paolo Bonzini <pbonz...@redhat.com>
Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 include/linux/kvm_dirty_ring.h |  6 +++---
 include/uapi/linux/kvm.h       |  1 +
 virt/kvm/Kconfig               | 14 ++++++++++++++
 virt/kvm/Makefile.kvm          |  2 +-
 virt/kvm/kvm_main.c            | 11 +++++++++--
 5 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
index 906f899813dc..7a0c90ae9a3f 100644
--- a/include/linux/kvm_dirty_ring.h
+++ b/include/linux/kvm_dirty_ring.h
@@ -27,7 +27,7 @@ struct kvm_dirty_ring {
        int index;
 };
 
-#ifndef CONFIG_HAVE_KVM_DIRTY_RING
+#ifndef CONFIG_HAVE_KVM_DIRTY_LOG
 /*
  * If CONFIG_HAVE_HVM_DIRTY_RING not defined, kvm_dirty_ring.o should
  * not be included as well, so define these nop functions for the arch.
@@ -69,7 +69,7 @@ static inline bool kvm_dirty_ring_soft_full(struct 
kvm_dirty_ring *ring)
        return true;
 }
 
-#else /* CONFIG_HAVE_KVM_DIRTY_RING */
+#else /* CONFIG_HAVE_KVM_DIRTY_LOG */
 
 u32 kvm_dirty_ring_get_rsvd_entries(void);
 int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);
@@ -92,6 +92,6 @@ struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring 
*ring, u32 offset);
 void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
 bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring);
 
-#endif /* CONFIG_HAVE_KVM_DIRTY_RING */
+#endif /* CONFIG_HAVE_KVM_DIRTY_LOG */
 
 #endif /* KVM_DIRTY_RING_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index eed0315a77a6..c1c9c0c8be5c 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1177,6 +1177,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220
 #define KVM_CAP_S390_ZPCI_OP 221
 #define KVM_CAP_S390_CPU_TOPOLOGY 222
+#define KVM_CAP_DIRTY_LOG_RING_ORDERED 223
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index a8c5c9f06b3c..1023426bf7dd 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -16,8 +16,22 @@ config HAVE_KVM_IRQFD
 config HAVE_KVM_IRQ_ROUTING
        bool
 
+config HAVE_KVM_DIRTY_LOG
+       bool
+
+# Only strongly ordered architectures can select this, as it doesn't
+# put any constraint on userspace ordering. They also can select the
+# _ORDERED version.
 config HAVE_KVM_DIRTY_RING
        bool
+       select HAVE_KVM_DIRTY_LOG
+       depends on X86
+
+# Weakly ordered architectures can only select this, advertising
+# to userspace the additional ordering requirements.
+config HAVE_KVM_DIRTY_RING_ORDERED
+       bool
+       select HAVE_KVM_DIRTY_LOG
 
 config HAVE_KVM_EVENTFD
        bool
diff --git a/virt/kvm/Makefile.kvm b/virt/kvm/Makefile.kvm
index 2c27d5d0c367..2bc6d0bb5e5c 100644
--- a/virt/kvm/Makefile.kvm
+++ b/virt/kvm/Makefile.kvm
@@ -10,5 +10,5 @@ kvm-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
 kvm-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
 kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
 kvm-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
-kvm-$(CONFIG_HAVE_KVM_DIRTY_RING) += $(KVM)/dirty_ring.o
+kvm-$(CONFIG_HAVE_KVM_DIRTY_LOG) += $(KVM)/dirty_ring.o
 kvm-$(CONFIG_HAVE_KVM_PFNCACHE) += $(KVM)/pfncache.o
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 584a5bab3af3..cb1c103e2018 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3304,7 +3304,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm,
 {
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 
-#ifdef CONFIG_HAVE_KVM_DIRTY_RING
+#ifdef CONFIG_HAVE_KVM_DIRTY_LOG
        if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm))
                return;
 #endif
@@ -3758,7 +3758,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
 
 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
 {
-#ifdef CONFIG_HAVE_KVM_DIRTY_RING
+#ifdef CONFIG_HAVE_KVM_DIRTY_LOG
        return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
            (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
             kvm->dirty_ring_size / PAGE_SIZE);
@@ -4479,6 +4479,12 @@ static long kvm_vm_ioctl_check_extension_generic(struct 
kvm *kvm, long arg)
                return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct 
kvm_dirty_gfn);
 #else
                return 0;
+#endif
+       case KVM_CAP_DIRTY_LOG_RING_ORDERED:
+#ifdef CONFIG_HAVE_KVM_DIRTY_RING_ORDERED
+               return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct 
kvm_dirty_gfn);
+#else
+               return 0;
 #endif
        case KVM_CAP_BINARY_STATS_FD:
        case KVM_CAP_SYSTEM_EVENT_DATA:
@@ -4580,6 +4586,7 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm 
*kvm,
                return 0;
        }
        case KVM_CAP_DIRTY_LOG_RING:
+       case KVM_CAP_DIRTY_LOG_RING_ORDERED:
                return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
        default:
                return kvm_vm_ioctl_enable_cap(kvm, cap);
-- 
2.34.1

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to