From: Lai Jiangshan <la...@cn.fujitsu.com>

Rename the 'reset_host_protection' parameter to 'host_writable': the flag
indicates whether the host mapping of the page is writable (and hence whether
SPTE_HOST_WRITEABLE should be set on the spte), so the new name reflects its
meaning better.
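
For reference, set_spte() consults the flag only to decide whether to mark the
shadow PTE as host-writable.  A minimal standalone sketch of that gating (an
illustration only, not kernel code; the bit position below is chosen
arbitrarily):

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative stand-in for the kernel's SPTE_HOST_WRITEABLE bit */
    #define SPTE_HOST_WRITEABLE (1ULL << 62)

    /* mirrors the renamed parameter: the spte is marked host-writable
     * only when the host mapping itself allows writes */
    static uint64_t build_spte(uint64_t spte, int host_writable)
    {
            if (host_writable)
                    spte |= SPTE_HOST_WRITEABLE;
            return spte;
    }

    int main(void)
    {
            printf("spte = %#llx\n", (unsigned long long)build_spte(0, 1));
            return 0;
    }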

Signed-off-by: Lai Jiangshan <la...@cn.fujitsu.com>
Signed-off-by: Xiao Guangrong <xiaoguangr...@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosa...@redhat.com>

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 59b5bd2..dc64cd6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1958,7 +1958,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                    unsigned pte_access, int user_fault,
                    int write_fault, int dirty, int level,
                    gfn_t gfn, pfn_t pfn, bool speculative,
-                   bool can_unsync, bool reset_host_protection)
+                   bool can_unsync, bool host_writable)
 {
        u64 spte, entry = *sptep;
        int ret = 0;
@@ -1985,7 +1985,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
                        kvm_is_mmio_pfn(pfn));
 
-       if (reset_host_protection)
+       if (host_writable)
                spte |= SPTE_HOST_WRITEABLE;
 
        spte |= (u64)pfn << PAGE_SHIFT;
@@ -2048,7 +2048,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                         int user_fault, int write_fault, int dirty,
                         int *ptwrite, int level, gfn_t gfn,
                         pfn_t pfn, bool speculative,
-                        bool reset_host_protection)
+                        bool host_writable)
 {
        int was_rmapped = 0;
        int rmap_count;
@@ -2083,7 +2083,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
        if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
                      dirty, level, gfn, pfn, speculative, true,
-                     reset_host_protection)) {
+                     host_writable)) {
                if (write_fault)
                        *ptwrite = 1;
                kvm_mmu_flush_tlb(vcpu);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ca0e5e8..57619ed 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -329,7 +329,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                return;
        kvm_get_pfn(pfn);
        /*
-        * we call mmu_set_spte() with reset_host_protection = true beacuse that
+        * we call mmu_set_spte() with host_writable = true because
         * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
         */
        mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
@@ -744,7 +744,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            bool clear_unsync)
 {
        int i, offset, nr_present;
-       bool reset_host_protection;
+       bool host_writable;
        gpa_t first_pte_gpa;
 
        offset = nr_present = 0;
@@ -794,14 +794,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
                if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
                        pte_access &= ~ACC_WRITE_MASK;
-                       reset_host_protection = 0;
+                       host_writable = 0;
                } else {
-                       reset_host_protection = 1;
+                       host_writable = 1;
                }
                set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
                         is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
                         spte_to_pfn(sp->spt[i]), true, false,
-                        reset_host_protection);
+                        host_writable);
        }
 
        return !nr_present;
--