[PATCH v8 5/8] KVM: x86/mmu: Don't pass FOLL_GET to __kvm_follow_pfn

2023-08-24 Thread David Stevens
From: David Stevens 

Stop passing FOLL_GET to __kvm_follow_pfn. This allows the host to map
memory into the guest that is backed by un-refcounted struct pages - for
example, the tail pages of higher order non-compound pages allocated by
the amdgpu driver via ttm_pool_alloc_page.

The bulk of this change is tracking the is_refcounted_page flag so that
non-refcounted pages don't trigger page_count() == 0 warnings. This is
done by storing the flag in an unused bit in the SPTEs. This bit is not
available in PAE SPTEs, so FOLL_GET is only omitted for TDP on x86-64.

Signed-off-by: David Stevens 
---
 arch/x86/kvm/mmu/mmu.c          | 55 +++--
 arch/x86/kvm/mmu/mmu_internal.h |  1 +
 arch/x86/kvm/mmu/paging_tmpl.h  |  8 +++--
 arch/x86/kvm/mmu/spte.c         |  4 ++-
 arch/x86/kvm/mmu/spte.h         | 12 ++-
 arch/x86/kvm/mmu/tdp_mmu.c      | 22 +++--
 include/linux/kvm_host.h        |  3 ++
 virt/kvm/kvm_main.c             |  6 ++--
 8 files changed, 79 insertions(+), 32 deletions(-)
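
A note on the motivation: with a non-compound higher-order allocation, only
the head page carries a reference count; the tail pages sit at
page_count() == 0, which is what used to trip the sanity check in
mmu_spte_clear_track_bits(). A minimal sketch of that situation
(illustrative only - refcount_demo() and the order-3 size are made up for
the example and are not part of this patch):

static void refcount_demo(void)
{
	/* A non-compound order-3 allocation (no __GFP_COMP), similar in
	 * spirit to the pages ttm_pool_alloc_page() can hand out. */
	struct page *head = alloc_pages(GFP_KERNEL, 3);

	if (!head)
		return;

	/* Only the head page is refcounted; the tail pages are not. */
	WARN_ON(page_count(head) != 1);
	WARN_ON(page_count(head + 1) != 0);

	__free_pages(head, 3);
}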

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index dabae67f198b..4f5d33e95c6e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -553,12 +553,14 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 
if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
flush = true;
-   kvm_set_pfn_accessed(spte_to_pfn(old_spte));
+   if (is_refcounted_page_pte(old_spte))
+   kvm_set_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
}
 
if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
flush = true;
-   kvm_set_pfn_dirty(spte_to_pfn(old_spte));
+   if (is_refcounted_page_pte(old_spte))
+   kvm_set_page_dirty(pfn_to_page(spte_to_pfn(old_spte)));
}
 
return flush;
@@ -596,14 +598,18 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
 * before they are reclaimed.  Sanity check that, if the pfn is backed
 * by a refcounted page, the refcount is elevated.
 */
-   page = kvm_pfn_to_refcounted_page(pfn);
-   WARN_ON(page && !page_count(page));
+   if (is_refcounted_page_pte(old_spte)) {
+   page = kvm_pfn_to_refcounted_page(pfn);
+   WARN_ON(!page || !page_count(page));
+   }
 
-   if (is_accessed_spte(old_spte))
-   kvm_set_pfn_accessed(pfn);
+   if (is_refcounted_page_pte(old_spte)) {
+   if (is_accessed_spte(old_spte))
+   kvm_set_page_accessed(pfn_to_page(pfn));
 
-   if (is_dirty_spte(old_spte))
-   kvm_set_pfn_dirty(pfn);
+   if (is_dirty_spte(old_spte))
+   kvm_set_page_dirty(pfn_to_page(pfn));
+   }
 
return old_spte;
 }
@@ -639,8 +645,8 @@ static bool mmu_spte_age(u64 *sptep)
 * Capture the dirty status of the page, so that it doesn't get
 * lost when the SPTE is marked for access tracking.
 */
-   if (is_writable_pte(spte))
-   kvm_set_pfn_dirty(spte_to_pfn(spte));
+   if (is_writable_pte(spte) && is_refcounted_page_pte(spte))
+   kvm_set_page_dirty(pfn_to_page(spte_to_pfn(spte)));
 
spte = mark_spte_for_access_track(spte);
mmu_spte_update_no_track(sptep, spte);
@@ -1278,8 +1284,8 @@ static bool spte_wrprot_for_clear_dirty(u64 *sptep)
 {
bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
   (unsigned long *)sptep);
-   if (was_writable && !spte_ad_enabled(*sptep))
-   kvm_set_pfn_dirty(spte_to_pfn(*sptep));
+   if (was_writable && !spte_ad_enabled(*sptep) && is_refcounted_page_pte(*sptep))
+   kvm_set_page_dirty(pfn_to_page(spte_to_pfn(*sptep)));
 
return was_writable;
 }
@@ -2937,6 +2943,11 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
bool host_writable = !fault || fault->map_writable;
bool prefetch = !fault || fault->prefetch;
bool write_fault = fault && fault->write;
+   /*
+* Prefetching uses gfn_to_page_many_atomic, which never gets
+* non-refcounted pages.
+*/
+   bool is_refcounted = !fault || fault->is_refcounted_page;
 
pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
 *sptep, write_fault, gfn);
@@ -2969,7 +2980,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
}
 
	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
-  true, host_writable, &spte);
+  true, host_writable, is_refcounted, &spte);
 
if (*sptep == spte) {
ret = RET_PF_SPURIOUS;
@@ -4296,11 +4307,19 @@ void
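
For reference, the is_refcounted_page_pte() checks above test the
software-available SPTE bit described in the commit message. A sketch of
the helper's likely shape, assuming a free bit in the 64-bit SPTE format
(the SPTE_MMU_PAGE_REFCOUNTED name and the bit position are assumptions,
since the spte.h hunk is not shown above):

/* Assumed bit position: any software-available bit above the PFN works,
 * but the equivalent bits are reserved in PAE SPTEs, hence the
 * restriction to TDP on x86-64 noted in the commit message. */
#define SPTE_MMU_PAGE_REFCOUNTED	BIT_ULL(59)

static inline bool is_refcounted_page_pte(u64 spte)
{
	return !!(spte & SPTE_MMU_PAGE_REFCOUNTED);
}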