Grab kvm->lock before pinning memory when registering an encrypted
region; sev_pin_memory() relies on kvm->lock being held to ensure
correctness when checking and updating the number of pinned pages.

Add a lockdep assertion to help prevent future regressions.
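
For reference, the check-then-update that needs serialization looks
roughly like this (a minimal sketch of the pinned-page accounting in
sev_pin_memory(); field and helper names follow upstream sev.c, but the
snippet is illustrative rather than the verbatim code):

        /* Inside sev_pin_memory(), called with kvm->lock held after this patch. */
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        unsigned long locked, lock_limit;

        locked = sev->pages_locked + npages;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                return ERR_PTR(-ENOMEM);

        /*
         * Without kvm->lock, two concurrent ioctls can read the same
         * sev->pages_locked, both pass the limit check, and both write
         * it back, losing one update and overshooting RLIMIT_MEMLOCK.
         */
        sev->pages_locked = locked;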

Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: Joerg Roedel <j...@8bytes.org>
Cc: Tom Lendacky <thomas.lenda...@amd.com>
Cc: Brijesh Singh <brijesh.si...@amd.com>
Cc: Sean Christopherson <sea...@google.com>
Cc: x...@kernel.org
Cc: k...@vger.kernel.org
Cc: sta...@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Fixes: 1e80fdc09d12 ("KVM: SVM: Pin guest memory when SEV is active")
Signed-off-by: Peter Gonda <pgo...@google.com>

V2
 - Fix up patch description
 - Correct file paths svm.c -> sev.c
 - Add unlock of kvm->lock on sev_pin_memory error

V1
 - https://lore.kernel.org/kvm/20210126185431.1824530-1-pgo...@google.com/

---
 arch/x86/kvm/svm/sev.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index c8ffdbc81709..b80e9bf0a31b 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -342,6 +342,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
        unsigned long first, last;
        int ret;
 
+       lockdep_assert_held(&kvm->lock);
+
        if (ulen == 0 || uaddr + ulen < uaddr)
                return ERR_PTR(-EINVAL);
 
@@ -1119,12 +1121,20 @@ int svm_register_enc_region(struct kvm *kvm,
        if (!region)
                return -ENOMEM;
 
+       mutex_lock(&kvm->lock);
        region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
        if (IS_ERR(region->pages)) {
                ret = PTR_ERR(region->pages);
+               mutex_unlock(&kvm->lock);
                goto e_free;
        }
 
+       region->uaddr = range->addr;
+       region->size = range->size;
+
+       list_add_tail(&region->list, &sev->regions_list);
+       mutex_unlock(&kvm->lock);
+
        /*
         * The guest may change the memory encryption attribute from C=0 -> C=1
         * or vice versa for this memory range. Lets make sure caches are
@@ -1133,13 +1143,6 @@ int svm_register_enc_region(struct kvm *kvm,
         */
        sev_clflush_pages(region->pages, region->npages);
 
-       region->uaddr = range->addr;
-       region->size = range->size;
-
-       mutex_lock(&kvm->lock);
-       list_add_tail(&region->list, &sev->regions_list);
-       mutex_unlock(&kvm->lock);
-
        return ret;
 
 e_free:
-- 
2.30.0.280.ga3ce27912f-goog