Update the PML table without mapping and unmapping the page. This also
avoids using kvm_vcpu_gpa_to_page(), which assumes that guest memory is
backed by a "struct page".

As a side effect of using kvm_write_guest_page(), the page is also
properly marked as dirty.
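
For reference, below is a minimal userspace sketch of the address math the
new code relies on. The real gpa_to_gfn()/offset_in_page() helpers live in
the kernel and are reimplemented here only for illustration; the example
pml_address value and the 4 KiB PAGE_SHIFT are assumptions. The PML buffer
itself holds 512 u64 entries and guest_pml_index counts down from 511.

    /* Illustrative only; not kernel code. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    typedef uint64_t gpa_t;
    typedef uint64_t gfn_t;

    /* Same semantics as the kernel helpers of the same names. */
    static gfn_t gpa_to_gfn(gpa_t gpa)      { return gpa >> PAGE_SHIFT; }
    static uint64_t offset_in_page(gpa_t a) { return a & (PAGE_SIZE - 1); }

    int main(void)
    {
            gpa_t pml_address = 0x12345000; /* hypothetical vmcs12->pml_address */
            uint16_t guest_pml_index = 511; /* PML index counts down from 511 */

            /* Byte address of the PML slot, same formula as the patch. */
            gpa_t dst = pml_address + sizeof(uint64_t) * guest_pml_index;

            /* kvm_write_guest_page() takes the frame and in-page offset. */
            printf("write 8 bytes at gfn %#llx, offset %#llx\n",
                   (unsigned long long)gpa_to_gfn(dst),
                   (unsigned long long)offset_in_page(dst));
            return 0;
    }

Since the 512-entry PML buffer is exactly one page, the computed slot can
never cross a page boundary, so a single kvm_write_guest_page() call always
suffices.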

Signed-off-by: KarimAllah Ahmed <karah...@amazon.de>
Reviewed-by: David Hildenbrand <da...@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.w...@oracle.com>

---
v1 -> v2:
- Use kvm_write_guest_page instead of kvm_write_guest (pbonzini)
- Do not use pointer arithmetic for pml_address (pbonzini)
---
 arch/x86/kvm/vmx/vmx.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4341175..a3da75a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7206,9 +7206,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
 {
        struct vmcs12 *vmcs12;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       gpa_t gpa;
-       struct page *page = NULL;
-       u64 *pml_address;
+       gpa_t gpa, dst;
 
        if (is_guest_mode(vcpu)) {
                WARN_ON_ONCE(vmx->nested.pml_full);
@@ -7228,15 +7226,13 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
                }
 
                gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+               dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
 
-               page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
-               if (is_error_page(page))
+               if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
+                                        offset_in_page(dst), sizeof(gpa)))
                        return 0;
 
-               pml_address = kmap(page);
-               pml_address[vmcs12->guest_pml_index--] = gpa;
-               kunmap(page);
-               kvm_release_page_clean(page);
+               vmcs12->guest_pml_index--;
        }
 
        return 0;
-- 
2.7.4
