KVM memslots can change after they have been created, so new memslots
have to be mapped into the KVM page table when they are created.

TODO: we currently don't unmap old memslots; they should be unmapped
when they are freed.

Signed-off-by: Alexandre Chartre <alexandre.char...@oracle.com>
---
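For reference: the generic memslot update path swaps in a newly
allocated kvm_memslots structure and frees the old one, which is why
kvm_isolation_check_memslots() is called from
kvm_arch_memslots_updated() and kvm_arch_commit_memory_region().
Rough sketch, paraphrased and abbreviated from virt/kvm/kvm_main.c
(generic code, not touched by this patch):

    static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
                    int as_id, struct kvm_memslots *slots)
    {
            struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id);
            u64 gen = old_memslots->generation;

            /* Publish the new memslots and wait for readers to finish. */
            rcu_assign_pointer(kvm->memslots[as_id], slots);
            synchronize_srcu_expedited(&kvm->srcu);

            /* Arch hook: this is where the new structure gets remapped. */
            kvm_arch_memslots_updated(kvm, gen);

            return old_memslots;    /* the caller frees the old structure */
    }
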
 arch/x86/kvm/isolation.c |   44 ++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/isolation.h |    1 +
 arch/x86/kvm/x86.c       |    3 +++
 3 files changed, 48 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/isolation.c b/arch/x86/kvm/isolation.c
index b0c789f..255b2da 100644
--- a/arch/x86/kvm/isolation.c
+++ b/arch/x86/kvm/isolation.c
@@ -1593,13 +1593,50 @@ static void kvm_isolation_clear_handlers(void)
        kvm_page_fault_handler = NULL;
 }
 
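+/*
+ * Memslots can be re-allocated when they are updated: the old
+ * kvm_memslots structure is freed and replaced with a new one,
+ * which then has to be mapped into the KVM page table. Map any
+ * memslots structure which is not already mapped.
+ */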
+void kvm_isolation_check_memslots(struct kvm *kvm)
+{
+       struct kvm_range_mapping *rmapping;
+       int i, err;
+
+       if (!kvm_isolation())
+               return;
+
+       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+               rmapping = kvm_get_range_mapping(kvm->memslots[i], NULL);
+               if (rmapping)
+                       continue;
+               pr_debug("remapping kvm memslots[%d]\n", i);
+               err = kvm_copy_ptes(kvm->memslots[i],
+                   sizeof(struct kvm_memslots));
+               if (err)
+                       pr_debug("failed to map kvm memslots[%d]\n", i);
+       }
+}
+
 int kvm_isolation_init_vm(struct kvm *kvm)
 {
+       int err, i;
+
        if (!kvm_isolation())
                return 0;
 
        kvm_clear_page_fault();
 
+       pr_debug("mapping kvm memslots\n");
+
+       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+               err = kvm_copy_ptes(kvm->memslots[i],
+                   sizeof(struct kvm_memslots));
+               if (err)
+                       return err;
+       }
+
        pr_debug("mapping kvm srcu sda\n");
 
        return (kvm_copy_percpu_mapping(kvm->srcu.sda,
@@ -1608,9 +1645,16 @@ int kvm_isolation_init_vm(struct kvm *kvm)
 
 void kvm_isolation_destroy_vm(struct kvm *kvm)
 {
+       int i;
+
        if (!kvm_isolation())
                return;
 
+       pr_debug("unmapping kvm memslots\n");
+
+       for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+               kvm_clear_range_mapping(kvm->memslots[i]);
+
        pr_debug("unmapping kvm srcu sda\n");
 
        kvm_clear_percpu_mapping(kvm->srcu.sda);
diff --git a/arch/x86/kvm/isolation.h b/arch/x86/kvm/isolation.h
index 2d7d016..1e55799 100644
--- a/arch/x86/kvm/isolation.h
+++ b/arch/x86/kvm/isolation.h
@@ -32,6 +32,7 @@ static inline bool kvm_isolation(void)
 extern void kvm_clear_range_mapping(void *ptr);
 extern int kvm_copy_percpu_mapping(void *percpu_ptr, size_t size);
 extern void kvm_clear_percpu_mapping(void *percpu_ptr);
+extern void kvm_isolation_check_memslots(struct kvm *kvm);
 extern int kvm_add_task_mapping(struct task_struct *tsk);
 extern void kvm_cleanup_task_mapping(struct task_struct *tsk);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e1cc3a6..7d98e9f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9438,6 +9438,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
         * mmio generation may have reached its maximum value.
         */
        kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+       kvm_isolation_check_memslots(kvm);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -9537,6 +9538,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
         */
        if (change != KVM_MR_DELETE)
                kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
+
+       kvm_isolation_check_memslots(kvm);
 }
 
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
-- 
1.7.1
