Read the data directly from guest memory instead of going through the
map->read->unmap sequence. This also avoids kvm_vcpu_gpa_to_page() and
kmap(), which assume that a "struct page" exists for the guest memory.
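
For illustration, a minimal sketch of the two access patterns is below.
It is not part of the patch; the helper names read_revision_via_map()
and read_revision_direct() are made up for this example.

/*
 * Old pattern: resolve the GPA to a struct page, map it, read the
 * revision id, then unmap and release the page. This only works when
 * the guest memory is backed by a struct page.
 */
static int read_revision_via_map(struct kvm_vcpu *vcpu, gpa_t vmptr, u32 *rev)
{
	struct page *page = kvm_vcpu_gpa_to_page(vcpu, vmptr);

	if (is_error_page(page))
		return -EFAULT;
	*rev = *(u32 *)kmap(page);
	kunmap(page);
	kvm_release_page_clean(page);
	return 0;
}

/*
 * New pattern: copy the bytes straight out of guest memory. No struct
 * page, kmap() or page refcount handling is needed.
 */
static int read_revision_direct(struct kvm_vcpu *vcpu, gpa_t vmptr, u32 *rev)
{
	return kvm_read_guest(vcpu->kvm, vmptr, rev, sizeof(*rev));
}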

Suggested-by: Jim Mattson <jmatt...@google.com>
Signed-off-by: KarimAllah Ahmed <karah...@amazon.de>
---
v1 -> v2:
- Massage commit message a bit.
---
 arch/x86/kvm/vmx.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4f5d4bd..358759a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8321,7 +8321,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 {
        int ret;
        gpa_t vmptr;
-       struct page *page;
+       uint32_t revision;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
                | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -8373,19 +8373,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
                return kvm_skip_emulated_instruction(vcpu);
        }
 
-       page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
-       if (is_error_page(page)) {
+       if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
+           revision != VMCS12_REVISION) {
                nested_vmx_failInvalid(vcpu);
                return kvm_skip_emulated_instruction(vcpu);
        }
-       if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-               kunmap(page);
-               kvm_release_page_clean(page);
-               nested_vmx_failInvalid(vcpu);
-               return kvm_skip_emulated_instruction(vcpu);
-       }
-       kunmap(page);
-       kvm_release_page_clean(page);
 
        vmx->nested.vmxon_ptr = vmptr;
        ret = enter_vmx_operation(vcpu);
-- 
2.7.4
