Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya...@lab.ntt.co.jp>
---
 arch/x86/kvm/mmu.c         | 20 +++++++-------------
 arch/x86/kvm/paging_tmpl.h |  4 ++--
 2 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 204c7d4..a1a3d19 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2071,8 +2071,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gva_t gaddr,
                                             unsigned level,
                                             int direct,
-                                            unsigned access,
-                                            u64 *parent_pte)
+                                            unsigned access)
 {
        union kvm_mmu_page_role role;
        unsigned quadrant;
@@ -2720,8 +2719,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
                        base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
                        pseudo_gfn = base_addr >> PAGE_SHIFT;
                        sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
-                                             iterator.level - 1,
-                                             1, ACC_ALL, iterator.sptep);
+                                             iterator.level - 1, 1, ACC_ALL);
 
                        link_shadow_page(vcpu, iterator.sptep, sp);
                }
@@ -3078,8 +3076,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                spin_lock(&vcpu->kvm->mmu_lock);
                make_mmu_pages_available(vcpu);
-               sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
-                                     1, ACC_ALL, NULL);
+               sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, 1, ACC_ALL);
                ++sp->root_count;
                spin_unlock(&vcpu->kvm->mmu_lock);
                vcpu->arch.mmu.root_hpa = __pa(sp->spt);
@@ -3091,9 +3088,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                        spin_lock(&vcpu->kvm->mmu_lock);
                        make_mmu_pages_available(vcpu);
                        sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
-                                             i << 30,
-                                             PT32_ROOT_LEVEL, 1, ACC_ALL,
-                                             NULL);
+                                       i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
                        root = __pa(sp->spt);
                        ++sp->root_count;
                        spin_unlock(&vcpu->kvm->mmu_lock);
@@ -3130,7 +3125,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                spin_lock(&vcpu->kvm->mmu_lock);
                make_mmu_pages_available(vcpu);
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
-                                     0, ACC_ALL, NULL);
+                                     0, ACC_ALL);
                root = __pa(sp->spt);
                ++sp->root_count;
                spin_unlock(&vcpu->kvm->mmu_lock);
@@ -3163,9 +3158,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                }
                spin_lock(&vcpu->kvm->mmu_lock);
                make_mmu_pages_available(vcpu);
-               sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
-                                     PT32_ROOT_LEVEL, 0,
-                                     ACC_ALL, NULL);
+               sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
+                                     0, ACC_ALL);
                root = __pa(sp->spt);
                ++sp->root_count;
                spin_unlock(&vcpu->kvm->mmu_lock);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 0dcf9c8..91e939b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -587,7 +587,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
                        sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
-                                             false, access, it.sptep);
+                                             false, access);
                }
 
                /*
@@ -617,7 +617,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 
                sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
-                                     true, direct_access, it.sptep);
+                                     true, direct_access);
                link_shadow_page(vcpu, it.sptep, sp);
        }
 
-- 
2.1.0
