From: Ackerley Tng <[email protected]>

When memory in guest_memfd is converted from private to shared, the
platform-specific state associated with the guest-private pages must be
invalidated or cleaned up.

Iterate over the folios in the affected range and call the
kvm_arch_gmem_invalidate() hook for each PFN range. This allows
architectures to perform necessary teardown, such as updating hardware
metadata or encryption states, before the pages are transitioned to the
shared state.

Invoke this helper after indicating to KVM's mmu code that an invalidation
is in progress to stop in-flight page faults from succeeding.

Signed-off-by: Ackerley Tng <[email protected]>
---
 virt/kvm/guest_memfd.c | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index e87a2b72ff802..d563d80d4accb 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -615,6 +615,42 @@ static bool kvm_gmem_is_safe_for_conversion(struct inode *inode, pgoff_t start,
        return safe;
 }
 
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
+static void kvm_gmem_invalidate(struct inode *inode, pgoff_t start, pgoff_t end)
+{
+       struct folio_batch fbatch;
+       pgoff_t next = start;
+       int i;
+
+       folio_batch_init(&fbatch);
+       while (filemap_get_folios(inode->i_mapping, &next, end - 1, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); ++i) {
+                       struct folio *folio = fbatch.folios[i];
+                       pgoff_t start_index, end_index;
+                       kvm_pfn_t start_pfn, end_pfn;
+
+                       start_index = max(start, folio->index);
+                       end_index = min(end, folio_next_index(folio));
+                       /*
+                        * end_index is either in folio or points to
+                        * the first page of the next folio. Hence,
+                        * all pages in range [start_index, end_index)
+                        * are contiguous.
+                        */
+                       start_pfn = folio_file_pfn(folio, start_index);
+                       end_pfn = start_pfn + end_index - start_index;
+
+                       kvm_arch_gmem_invalidate(start_pfn, end_pfn);
+               }
+
+               folio_batch_release(&fbatch);
+               cond_resched();
+       }
+}
+#else
+static void kvm_gmem_invalidate(struct inode *inode, pgoff_t start, pgoff_t end) {}
+#endif
+
 static int __kvm_gmem_set_attributes(struct inode *inode, pgoff_t start,
                                     size_t nr_pages, uint64_t attrs,
                                     pgoff_t *err_index)
@@ -655,7 +691,12 @@ static int __kvm_gmem_set_attributes(struct inode *inode, pgoff_t start,
         */
 
        kvm_gmem_invalidate_begin(inode, start, end);
+
+       if (!to_private)
+               kvm_gmem_invalidate(inode, start, end);
+
        mas_store_prealloc(&mas, xa_mk_value(attrs));
+
        kvm_gmem_invalidate_end(inode, start, end);
 out:
        filemap_invalidate_unlock(mapping);

-- 
2.54.0.545.g6539524ca2-goog



Reply via email to