Replace PAGE_SHIFT, PAGE_SIZE, and PAGE_MASK with HV_HYP_PAGE_SHIFT,
HV_HYP_PAGE_SIZE, and HV_HYP_PAGE_MASK, respectively, because the guest
page size and the hypervisor page size are distinct concepts, even
though they happen to have the same value on x86.

Signed-off-by: Maya Nakamura <m.maya.nakam...@gmail.com>
---
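For context, a minimal sketch of the hypervisor page-size macros this
patch switches to (shown here only for illustration; the exact header
they are defined in is an assumption, and BIT() is the usual kernel
helper from <linux/bits.h>):

/* Illustrative sketch, not part of this patch: the Hyper-V hypervisor
 * page is architecturally 4K, independent of the guest PAGE_SIZE, which
 * is why the guest-side PAGE_* macros are the wrong thing to use when
 * talking to the hypervisor. */
#define HV_HYP_PAGE_SHIFT      12
#define HV_HYP_PAGE_SIZE       BIT(HV_HYP_PAGE_SHIFT)
#define HV_HYP_PAGE_MASK       (~(HV_HYP_PAGE_SIZE - 1))
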
 arch/x86/hyperv/mmu.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
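
Also for illustration, a hypothetical user-space sketch (not part of the
patch) of the gva_list encoding built by fill_gva_list(): the upper bits
of each 64-bit entry hold the page-aligned GVA, and the low 12 bits hold
the number of additional pages to flush, capped at one HV_TLB_FLUSH_UNIT
per entry. The helper name encode_gva() is made up for this sketch.

#include <stdint.h>
#include <stdio.h>

#define HV_HYP_PAGE_SHIFT	12
#define HV_HYP_PAGE_SIZE	(1ULL << HV_HYP_PAGE_SHIFT)
#define HV_HYP_PAGE_MASK	(~(HV_HYP_PAGE_SIZE - 1))
#define HV_TLB_FLUSH_UNIT	(4096 * HV_HYP_PAGE_SIZE)

/* Illustrative mirror of the encoding step in fill_gva_list(). */
static uint64_t encode_gva(uint64_t cur, uint64_t end)
{
	uint64_t diff = end > cur ? end - cur : 0;
	uint64_t entry = cur & HV_HYP_PAGE_MASK;

	if (diff >= HV_TLB_FLUSH_UNIT)
		entry |= ~HV_HYP_PAGE_MASK;	/* 4095 additional pages */
	else if (diff)
		entry |= (diff - 1) >> HV_HYP_PAGE_SHIFT;

	return entry;
}

int main(void)
{
	/* Flush three pages starting at 0x7f0000001000: low bits become 2. */
	printf("0x%llx\n", (unsigned long long)
	       encode_gva(0x7f0000001000ULL, 0x7f0000004000ULL));
	return 0;
}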

diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index e65d7fe6489f..175f6dcc7362 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -15,7 +15,7 @@
 #include <asm/trace/hyperv.h>
 
 /* Each gva in gva_list encodes up to 4096 pages to flush */
-#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
+#define HV_TLB_FLUSH_UNIT (4096 * HV_HYP_PAGE_SIZE)
 
 static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
                                      const struct flush_tlb_info *info);
@@ -32,15 +32,15 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
        do {
                diff = end > cur ? end - cur : 0;
 
-               gva_list[gva_n] = cur & PAGE_MASK;
+               gva_list[gva_n] = cur & HV_HYP_PAGE_MASK;
                /*
                 * Lower 12 bits encode the number of additional
                 * pages to flush (in addition to the 'cur' page).
                 */
                if (diff >= HV_TLB_FLUSH_UNIT)
-                       gva_list[gva_n] |= ~PAGE_MASK;
+                       gva_list[gva_n] |= ~HV_HYP_PAGE_MASK;
                else if (diff)
-                       gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
+                       gva_list[gva_n] |= (diff - 1) >> HV_HYP_PAGE_SHIFT;
 
                cur += HV_TLB_FLUSH_UNIT;
                gva_n++;
@@ -129,7 +129,8 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
         * We can flush not more than max_gvas with one hypercall. Flush the
         * whole address space if we were asked to do more.
         */
-       max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
+       max_gvas = (HV_HYP_PAGE_SIZE - sizeof(*flush)) /
+                   sizeof(flush->gva_list[0]);
 
        if (info->end == TLB_FLUSH_ALL) {
                flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
@@ -200,9 +201,9 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
         * whole address space if we were asked to do more.
         */
        max_gvas =
-               (PAGE_SIZE - sizeof(*flush) - nr_bank *
+               (HV_HYP_PAGE_SIZE - sizeof(*flush) - nr_bank *
                 sizeof(flush->hv_vp_set.bank_contents[0])) /
-               sizeof(flush->gva_list[0]);
+                sizeof(flush->gva_list[0]);
 
        if (info->end == TLB_FLUSH_ALL) {
                flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
-- 
2.17.1
