Re: [PATCH v5 10/13] KVM/nSVM: Use the new mapping API for mapping guest memory

2019-01-23 Thread Konrad Rzeszutek Wilk
On Wed, Jan 09, 2019 at 10:42:10AM +0100, KarimAllah Ahmed wrote:
> Use the new mapping API for mapping guest memory to avoid depending on
> "struct page".
> 
> Signed-off-by: KarimAllah Ahmed 

Reviewed-by: Konrad Rzeszutek Wilk 


[PATCH v5 10/13] KVM/nSVM: Use the new mapping API for mapping guest memory

2019-01-09 Thread KarimAllah Ahmed
Use the new mapping API for mapping guest memory to avoid depending on
"struct page".

Signed-off-by: KarimAllah Ahmed 
---
v4 -> v5:
- unmap with dirty flag
---
 arch/x86/kvm/svm.c | 97 +++---
 1 file changed, 49 insertions(+), 48 deletions(-)
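
For reference, the guest-memory access pattern this series converts to
looks roughly like the sketch below. It is an illustrative sketch only,
not part of the patch: the helper touch_guest_page() and its arguments
are hypothetical, but the kvm_vcpu_map()/map.hva/kvm_vcpu_unmap() steps
mirror the ones in the diff, and the 'true' passed to kvm_vcpu_unmap()
marks the page dirty (the v4 -> v5 change noted above).

	/* Hypothetical helper, for illustration only. */
	static int touch_guest_page(struct kvm_vcpu *vcpu, gpa_t gpa)
	{
		struct kvm_host_map map;
		void *hva;

		/*
		 * Map a single guest page by gfn.  Unlike kmap() on a
		 * kvm_vcpu_gfn_to_page() result, this does not depend on
		 * the guest memory being backed by "struct page".
		 */
		if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
			return -EINVAL;

		hva = map.hva;	/* host virtual address of the guest page */
		/* ... read or modify guest memory through hva ... */

		/* Unmap; 'true' marks the page dirty. */
		kvm_vcpu_unmap(&map, true);
		return 0;
	}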

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 307e5bd..d886664 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3062,32 +3062,6 @@ static inline bool nested_svm_nmi(struct vcpu_svm *svm)
return false;
 }
 
-static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
-{
-   struct page *page;
-
-   might_sleep();
-
-   page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
-   if (is_error_page(page))
-   goto error;
-
-   *_page = page;
-
-   return kmap(page);
-
-error:
-   kvm_inject_gp(&svm->vcpu, 0);
-
-   return NULL;
-}
-
-static void nested_svm_unmap(struct page *page)
-{
-   kunmap(page);
-   kvm_release_page_dirty(page);
-}
-
 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
 {
unsigned port, size, iopm_len;
@@ -3290,10 +3264,11 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
 
 static int nested_svm_vmexit(struct vcpu_svm *svm)
 {
+   int rc;
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
-   struct page *page;
+   struct kvm_host_map map;
 
trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
   vmcb->control.exit_info_1,
@@ -3302,9 +3277,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
   vmcb->control.exit_int_info_err,
   KVM_ISA_SVM);
 
-   nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
-   if (!nested_vmcb)
+   rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
+   if (rc) {
+   if (rc == -EINVAL)
+   kvm_inject_gp(&svm->vcpu, 0);
return 1;
+   }
+
+   nested_vmcb = map.hva;
 
/* Exit Guest-Mode */
leave_guest_mode(&svm->vcpu);
@@ -3408,7 +3388,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 
mark_all_dirty(svm->vmcb);
 
-   nested_svm_unmap(page);
+   kvm_vcpu_unmap(&map, true);
 
nested_svm_uninit_mmu_context(&svm->vcpu);
kvm_mmu_reset_context(&svm->vcpu);
@@ -3466,7 +3446,7 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
 }
 
 static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-struct vmcb *nested_vmcb, struct page *page)
+struct vmcb *nested_vmcb, struct kvm_host_map *map)
 {
if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
svm->vcpu.arch.hflags |= HF_HIF_MASK;
@@ -3550,7 +3530,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
svm->vmcb->control.pause_filter_thresh =
nested_vmcb->control.pause_filter_thresh;
 
-   nested_svm_unmap(page);
+   kvm_vcpu_unmap(map, true);
 
/* Enter Guest-Mode */
enter_guest_mode(&svm->vcpu);
@@ -3570,17 +3550,23 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 
 static bool nested_svm_vmrun(struct vcpu_svm *svm)
 {
+   int rc;
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
-   struct page *page;
+   struct kvm_host_map map;
u64 vmcb_gpa;
 
vmcb_gpa = svm->vmcb->save.rax;
 
-   nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
-   if (!nested_vmcb)
+   rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
+   if (rc) {
+   if (rc == -EINVAL)
+   kvm_inject_gp(&svm->vcpu, 0);
return false;
+   }
+
+   nested_vmcb = map.hva;
 
if (!nested_vmcb_checks(nested_vmcb)) {
nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
@@ -3588,7 +3574,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
nested_vmcb->control.exit_info_1  = 0;
nested_vmcb->control.exit_info_2  = 0;
 
-   nested_svm_unmap(page);
+   kvm_vcpu_unmap(&map, true);
 
return false;
}
@@ -3632,7 +3618,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 
copy_vmcb_control_area(hsave, vmcb);
 
-   enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);
+   enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);
 
return true;
 }
@@ -3656,21 +3642,26 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
 static int vmload_interception(struct vcpu_svm *svm)
 {
struct vmcb *nested_vmcb;
-   struct page *page;
+   struct kvm_host_map map;