Use kvm_vcpu_map when mapping the virtual APIC page since using
kvm_vcpu_gpa_to_page() and kmap() will only work for guest memory that has
a "struct page".

One additional semantic change is that the lifecycle of the host-side
mapping is now tied to the pinning of the virtual APIC page: the mapping
is created when the page is pinned and released when it is unpinned.

Signed-off-by: KarimAllah Ahmed <karah...@amazon.de>
---
v1 -> v2:
- Do not change the lifecycle of the mapping (pbonzini)
- Use pfn_to_hpa instead of gfn_to_gpa
---
 arch/x86/kvm/vmx.c | 39 +++++++++++++--------------------------
 1 file changed, 13 insertions(+), 26 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5b15ca2..9683923 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -845,9 +845,8 @@ struct nested_vmx {
         * pointers, so we must keep them pinned while L2 runs.
         */
        struct page *apic_access_page;
-       struct page *virtual_apic_page;
+       struct kvm_host_map virtual_apic_map;
        struct page *pi_desc_page;
-
        struct kvm_host_map msr_bitmap_map;
 
        struct pi_desc *pi_desc;
@@ -6152,11 +6151,12 @@ static void vmx_complete_nested_posted_interrupt(struct 
kvm_vcpu *vcpu)
 
        max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
        if (max_irr != 256) {
-               vapic_page = kmap(vmx->nested.virtual_apic_page);
+               vapic_page = vmx->nested.virtual_apic_map.hva;
+               if (!vapic_page)
+                       return;
+
                __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
                        vapic_page, &max_irr);
-               kunmap(vmx->nested.virtual_apic_page);
-
                status = vmcs_read16(GUEST_INTR_STATUS);
                if ((u8)max_irr > ((u8)status & 0xff)) {
                        status &= ~0xff;
@@ -6182,14 +6182,13 @@ static bool vmx_guest_apic_has_interrupt(struct 
kvm_vcpu *vcpu)
 
        if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
                !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
-               WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
+               WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
                return false;
 
        rvi = vmx_get_rvi();
 
-       vapic_page = kmap(vmx->nested.virtual_apic_page);
+       vapic_page = vmx->nested.virtual_apic_map.hva;
        vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
-       kunmap(vmx->nested.virtual_apic_page);
 
        return ((rvi & 0xf0) > (vppr & 0xf0));
 }
@@ -8468,10 +8467,7 @@ static void free_nested(struct vcpu_vmx *vmx)
                kvm_release_page_dirty(vmx->nested.apic_access_page);
                vmx->nested.apic_access_page = NULL;
        }
-       if (vmx->nested.virtual_apic_page) {
-               kvm_release_page_dirty(vmx->nested.virtual_apic_page);
-               vmx->nested.virtual_apic_page = NULL;
-       }
+       kvm_vcpu_unmap(&vmx->nested.virtual_apic_map);
        if (vmx->nested.pi_desc_page) {
                kunmap(vmx->nested.pi_desc_page);
                kvm_release_page_dirty(vmx->nested.pi_desc_page);
@@ -11394,6 +11390,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu 
*vcpu)
 {
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct kvm_host_map *map;
        struct page *page;
        u64 hpa;
 
@@ -11426,11 +11423,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu 
*vcpu)
        }
 
        if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
-               if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
-                       kvm_release_page_dirty(vmx->nested.virtual_apic_page);
-                       vmx->nested.virtual_apic_page = NULL;
-               }
-               page = kvm_vcpu_gpa_to_page(vcpu, 
vmcs12->virtual_apic_page_addr);
+               map = &vmx->nested.virtual_apic_map;
 
                /*
                 * If translation failed, VM entry will fail because
@@ -11445,11 +11438,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu 
*vcpu)
                 * control.  But such a configuration is useless, so
                 * let's keep the code simple.
                 */
-               if (!is_error_page(page)) {
-                       vmx->nested.virtual_apic_page = page;
-                       hpa = page_to_phys(vmx->nested.virtual_apic_page);
-                       vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
-               }
+               if (!kvm_vcpu_map(vcpu, 
gpa_to_gfn(vmcs12->virtual_apic_page_addr), map))
+                       vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 
pfn_to_hpa(map->pfn));
        }
 
        if (nested_cpu_has_posted_intr(vmcs12)) {
@@ -13353,10 +13343,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, 
u32 exit_reason,
                kvm_release_page_dirty(vmx->nested.apic_access_page);
                vmx->nested.apic_access_page = NULL;
        }
-       if (vmx->nested.virtual_apic_page) {
-               kvm_release_page_dirty(vmx->nested.virtual_apic_page);
-               vmx->nested.virtual_apic_page = NULL;
-       }
+       kvm_vcpu_unmap(&vmx->nested.virtual_apic_map);
        if (vmx->nested.pi_desc_page) {
                kunmap(vmx->nested.pi_desc_page);
                kvm_release_page_dirty(vmx->nested.pi_desc_page);
-- 
2.7.4

Reply via email to