Re: [PATCH v4 01/14] X86/nVMX: handle_vmon: Read 4 bytes from guest memory

2018-12-03 Thread David Hildenbrand
On 03.12.18 10:30, KarimAllah Ahmed wrote:
> Read the data directly from guest memory instead of the map->read->unmap
> sequence. This also avoids using kvm_vcpu_gpa_to_page() and kmap(), which
> assume that there is a "struct page" for guest memory.
> 
> Suggested-by: Jim Mattson 
> Signed-off-by: KarimAllah Ahmed 
> Reviewed-by: Jim Mattson 
> 
> ---
> v1 -> v2:
> - Massage commit message a bit.
> ---
>  arch/x86/kvm/vmx.c | 14 +++-----------
>  1 file changed, 3 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 02edd99..b84f230 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -8358,7 +8358,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>  {
>  	int ret;
>  	gpa_t vmptr;
> -	struct page *page;
> +	uint32_t revision;
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
>  		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
> @@ -8407,18 +8407,10 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
>  	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
>  		return nested_vmx_failInvalid(vcpu);
>  
> -	page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
> -	if (is_error_page(page))
> +	if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
> +	    revision != VMCS12_REVISION)
>  		return nested_vmx_failInvalid(vcpu);
>  
> -	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
> -		kunmap(page);
> -		kvm_release_page_clean(page);
> -		return nested_vmx_failInvalid(vcpu);
> -	}
> -	kunmap(page);
> -	kvm_release_page_clean(page);
> -
>  	vmx->nested.vmxon_ptr = vmptr;
>  	ret = enter_vmx_operation(vcpu);
>  	if (ret)
> 

Reviewed-by: David Hildenbrand 

-- 

Thanks,

David / dhildenb


[PATCH v4 01/14] X86/nVMX: handle_vmon: Read 4 bytes from guest memory

2018-12-03 Thread KarimAllah Ahmed
Read the data directly from guest memory instead of the map->read->unmap
sequence. This also avoids using kvm_vcpu_gpa_to_page() and kmap(), which
assume that there is a "struct page" for guest memory.

Suggested-by: Jim Mattson 
Signed-off-by: KarimAllah Ahmed 
Reviewed-by: Jim Mattson 

---
v1 -> v2:
- Massage commit message a bit.
---
 arch/x86/kvm/vmx.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 02edd99..b84f230 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8358,7 +8358,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 {
 	int ret;
 	gpa_t vmptr;
-	struct page *page;
+	uint32_t revision;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
 		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -8407,18 +8407,10 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
 		return nested_vmx_failInvalid(vcpu);
 
-	page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
-	if (is_error_page(page))
+	if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
+	    revision != VMCS12_REVISION)
 		return nested_vmx_failInvalid(vcpu);
 
-	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-		kunmap(page);
-		kvm_release_page_clean(page);
-		return nested_vmx_failInvalid(vcpu);
-	}
-	kunmap(page);
-	kvm_release_page_clean(page);
-
 	vmx->nested.vmxon_ptr = vmptr;
 	ret = enter_vmx_operation(vcpu);
 	if (ret)
-- 
2.7.4
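
For quick reference, the pattern change boils down to the before/after excerpt
below. This is only the revision check from handle_vmon() as shown in the diff
above, not a standalone example; all identifiers come from the patch itself.

	/* Old: map the GPA to a struct page, kmap() it, read, then unmap and
	 * release -- this only works when the guest memory is backed by a
	 * struct page. */
	page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
	if (is_error_page(page))
		return nested_vmx_failInvalid(vcpu);
	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
		kunmap(page);
		kvm_release_page_clean(page);
		return nested_vmx_failInvalid(vcpu);
	}
	kunmap(page);
	kvm_release_page_clean(page);

	/* New: read the 4-byte revision id straight from guest memory;
	 * kvm_read_guest() does not depend on a struct page backing the
	 * VMXON region, and the error/compare logic collapses into one
	 * condition. */
	if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
	    revision != VMCS12_REVISION)
		return nested_vmx_failInvalid(vcpu);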