We need to hide SMRAM from guests not running in SMM.  Therefore,
all uses of kvm_read_guest* and kvm_write_guest* must be changed to
check whether the VCPU is in system management mode, which in turn
means they need a VCPU rather than a struct kvm.  Introduce a new
family of x86_*_guest* functions for this.

For now, the code is simply copied from virt/kvm/kvm_main.c; the only
change is that calls to other read/write functions are redirected to
the new vcpu-based variants.
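
To illustrate the direction, the point of funneling everything through
x86_gfn_to_memslot() is that the SMM check can later be added in a
single place.  A minimal sketch of such a follow-up, where is_smm()
and smm_gfn_to_memslot() are hypothetical placeholders and not part
of this patch:

    struct kvm_memory_slot *x86_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
    {
            /*
             * Hypothetical follow-up: while the VCPU is in system
             * management mode, resolve the gfn against the SMRAM
             * view of memory; otherwise use the normal memslots.
             */
            if (is_smm(vcpu))
                    return smm_gfn_to_memslot(vcpu->kvm, gfn);
            return gfn_to_memslot(vcpu->kvm, gfn);
    }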

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  20 ++++
 arch/x86/kvm/Makefile           |   2 +-
 arch/x86/kvm/lapic.c            |  22 ++---
 arch/x86/kvm/mmu.c              |   8 +-
 arch/x86/kvm/paging_tmpl.h      |   8 +-
 arch/x86/kvm/smram.c            | 208 ++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm.c              |   8 +-
 arch/x86/kvm/vmx.c              |  10 +-
 arch/x86/kvm/x86.c              |  62 ++++++------
 9 files changed, 288 insertions(+), 60 deletions(-)
 create mode 100644 arch/x86/kvm/smram.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f0db43e80d09..68a09a4394d8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -980,6 +980,26 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gfn_t gfn, void *data, int offset, int len,
                            u32 access);
+
+struct kvm_memory_slot *x86_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long x86_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
+int x86_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+                             gpa_t gpa, unsigned long len);
+int x86_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
+                       int len);
+int x86_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+                         unsigned long len);
+int x86_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+                  unsigned long len);
+int x86_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+                         void *data, unsigned long len);
+int x86_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
+                        int offset, int len);
+int x86_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
+                   unsigned long len);
+int x86_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+                          void *data, unsigned long len);
+
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
 
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 16e8f962eaad..02b40b128490 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -12,7 +12,7 @@ kvm-y                 += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
 kvm-$(CONFIG_KVM_ASYNC_PF)     += $(KVM)/async_pf.o
 
 kvm-y                  += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-                          i8254.o ioapic.o irq_comm.o cpuid.o pmu.o
+                          i8254.o ioapic.o irq_comm.o cpuid.o pmu.o smram.o
 kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT)    += assigned-dev.o iommu.o
 kvm-intel-y            += vmx.o
 kvm-amd-y              += svm.o
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0b2e45e3dc4c..302027217553 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -497,15 +497,15 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
 {
 
-       return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
+       return x86_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
                                      sizeof(val));
 }
 
 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
 {
 
-       return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
-                                     sizeof(*val));
+       return x86_read_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, val,
+                                    sizeof(*val));
 }
 
 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
@@ -1879,8 +1879,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
        if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
                return;
 
-       kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
-                               sizeof(u32));
+       x86_read_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
+                             sizeof(u32));
 
        apic_set_tpr(vcpu->arch.apic, data & 0xff);
 }
@@ -1931,16 +1931,16 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
                max_isr = 0;
        data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-       kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
-                               sizeof(u32));
+       x86_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
+                              sizeof(u32));
 }
 
 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 {
        if (vapic_addr) {
-               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
-                                       &vcpu->arch.apic->vapic_cache,
-                                       vapic_addr, sizeof(u32)))
+               if (x86_gfn_to_hva_cache_init(vcpu->kvm,
+                                             &vcpu->arch.apic->vapic_cache,
+                                             vapic_addr, sizeof(u32)))
                        return -EINVAL;
                __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
        } else {
@@ -2032,7 +2032,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
        vcpu->arch.pv_eoi.msr_val = data;
        if (!pv_eoi_enabled(vcpu))
                return 0;
-       return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+       return x86_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
                                         addr, sizeof(u8));
 }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 371109546382..4694ad42aa8b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -875,7 +875,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 {
        struct kvm_memory_slot *slot;
 
-       slot = gfn_to_memslot(vcpu->kvm, gfn);
+       slot = x86_gfn_to_memslot(vcpu, gfn);
        if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
              (no_dirty_log && slot->dirty_bitmap))
                slot = NULL;
@@ -3460,7 +3460,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
        arch.direct_map = vcpu->arch.mmu.direct_map;
        arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 
-       return kvm_setup_async_pf(vcpu, gva, gfn_to_hva(vcpu->kvm, gfn), &arch);
+       return kvm_setup_async_pf(vcpu, gva, x86_gfn_to_hva(vcpu, gfn), &arch);
 }
 
 static bool can_do_async_pf(struct kvm_vcpu *vcpu)
@@ -3478,7 +3478,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
        struct kvm_memory_slot *slot;
        bool async;
 
-       slot = gfn_to_memslot(vcpu->kvm, gfn);
+       slot = x86_gfn_to_memslot(vcpu, gfn);
        async = false;
        *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
        if (!async)
@@ -4108,7 +4108,7 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
                *gpa &= ~(gpa_t)7;
                *bytes = 8;
-               r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, 8);
+               r = x86_read_guest(vcpu, *gpa, &gentry, 8);
                if (r)
                        gentry = 0;
                new = (const u8 *)&gentry;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index fd49c867b25a..197599c09bbc 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -511,11 +511,11 @@ static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
                base_gpa = pte_gpa & ~mask;
                index = (pte_gpa - base_gpa) / sizeof(pt_element_t);
 
-               r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
+               r = x86_read_guest_atomic(vcpu, base_gpa,
                                gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
                curr_pte = gw->prefetch_ptes[index];
        } else
-               r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
+               r = x86_read_guest_atomic(vcpu, pte_gpa,
                                  &curr_pte, sizeof(curr_pte));
 
        return r || curr_pte != gw->ptes[level - 1];
@@ -862,7 +862,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                        if (!rmap_can_add(vcpu))
                                break;
 
-                       if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+                       if (x86_read_guest_atomic(vcpu, pte_gpa, &gpte,
                                                  sizeof(pt_element_t)))
                                break;
 
@@ -949,7 +949,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
                pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 
-               if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+               if (x86_read_guest_atomic(vcpu, pte_gpa, &gpte,
                                          sizeof(pt_element_t)))
                        return -EINVAL;
 
diff --git a/arch/x86/kvm/smram.c b/arch/x86/kvm/smram.c
new file mode 100644
index 000000000000..73616edab631
--- /dev/null
+++ b/arch/x86/kvm/smram.c
@@ -0,0 +1,208 @@
+/*
+ * Helpers for SMRAM access
+ * Copyright 2015 Red Hat, Inc. and/or its affiliates.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+
+struct kvm_memory_slot *x86_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+       struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn);
+
+       return slot;
+}
+EXPORT_SYMBOL_GPL(x86_gfn_to_memslot);
+
+static unsigned long x86_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn,
+                                        bool *writable)
+{
+       struct kvm_memory_slot *slot = x86_gfn_to_memslot(vcpu, gfn);
+
+       return gfn_to_hva_memslot_prot(slot, gfn, writable);
+}
+
+unsigned long x86_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+       struct kvm_memory_slot *slot = x86_gfn_to_memslot(vcpu, gfn);
+
+       return gfn_to_hva_memslot(slot, gfn);
+}
+EXPORT_SYMBOL_GPL(x86_gfn_to_hva);
+
+static int next_segment(unsigned long len, int offset)
+{
+       if (len > PAGE_SIZE - offset)
+               return PAGE_SIZE - offset;
+       else
+               return len;
+}
+
+int x86_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
+                       int len)
+{
+       int r;
+       unsigned long addr;
+
+       addr = x86_gfn_to_hva_prot(vcpu, gfn, NULL);
+       if (kvm_is_error_hva(addr))
+               return -EFAULT;
+       r = __copy_from_user(data, (void __user *)addr + offset, len);
+       if (r)
+               return -EFAULT;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(x86_read_guest_page);
+
+int x86_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+                         unsigned long len)
+{
+       int r;
+       unsigned long addr;
+       gfn_t gfn = gpa >> PAGE_SHIFT;
+       int offset = offset_in_page(gpa);
+
+       addr = x86_gfn_to_hva_prot(vcpu, gfn, NULL);
+       if (kvm_is_error_hva(addr))
+               return -EFAULT;
+       pagefault_disable();
+       r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
+       pagefault_enable();
+       if (r)
+               return -EFAULT;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(x86_read_guest_atomic);
+
+int x86_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+                  unsigned long len)
+{
+       gfn_t gfn = gpa >> PAGE_SHIFT;
+       int seg;
+       int offset = offset_in_page(gpa);
+       int ret;
+
+       while ((seg = next_segment(len, offset)) != 0) {
+               ret = x86_read_guest_page(vcpu, gfn, data, offset, seg);
+               if (ret < 0)
+                       return ret;
+               offset = 0;
+               len -= seg;
+               data += seg;
+               ++gfn;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(x86_read_guest);
+
+int x86_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+                             gpa_t gpa, unsigned long len)
+{
+       return kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, len);
+}
+EXPORT_SYMBOL_GPL(x86_gfn_to_hva_cache_init);
+
+int x86_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+                         void *data, unsigned long len)
+{
+       struct kvm_memslots *slots = kvm_memslots(vcpu->kvm);
+       int r;
+
+       BUG_ON(len > ghc->len);
+
+       if (slots->generation != ghc->generation)
+               x86_gfn_to_hva_cache_init(vcpu->kvm, ghc, ghc->gpa, ghc->len);
+
+       if (unlikely(!ghc->memslot))
+               return x86_read_guest(vcpu, ghc->gpa, data, len);
+
+       if (kvm_is_error_hva(ghc->hva))
+               return -EFAULT;
+
+       r = __copy_from_user(data, (void __user *)ghc->hva, len);
+       if (r)
+               return -EFAULT;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(x86_read_guest_cached);
+
+int x86_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
+                        int offset, int len)
+{
+       struct kvm_memory_slot *slot;
+       int r;
+       unsigned long addr;
+
+       slot = x86_gfn_to_memslot(vcpu, gfn);
+       addr = gfn_to_hva_memslot(slot, gfn);
+       if (kvm_is_error_hva(addr))
+               return -EFAULT;
+       r = __copy_to_user((void __user *)addr + offset, data, len);
+       if (r)
+               return -EFAULT;
+       mark_page_dirty_in_slot(slot, gfn);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(x86_write_guest_page);
+
+int x86_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
+                   unsigned long len)
+{
+       gfn_t gfn = gpa >> PAGE_SHIFT;
+       int seg;
+       int offset = offset_in_page(gpa);
+       int ret;
+
+       while ((seg = next_segment(len, offset)) != 0) {
+               ret = x86_write_guest_page(vcpu, gfn, data, offset, seg);
+               if (ret < 0)
+                       return ret;
+               offset = 0;
+               len -= seg;
+               data += seg;
+               ++gfn;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(x86_write_guest);
+
+int x86_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+                          void *data, unsigned long len)
+{
+       struct kvm_memslots *slots = kvm_memslots(vcpu->kvm);
+       int r;
+
+       BUG_ON(len > ghc->len);
+
+       if (slots->generation != ghc->generation)
+               x86_gfn_to_hva_cache_init(vcpu->kvm, ghc, ghc->gpa, ghc->len);
+
+       if (unlikely(!ghc->memslot))
+               return x86_write_guest(vcpu, ghc->gpa, data, len);
+
+       if (kvm_is_error_hva(ghc->hva))
+               return -EFAULT;
+
+       r = __copy_to_user((void __user *)ghc->hva, data, len);
+       if (r)
+               return -EFAULT;
+       mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(x86_write_guest_cached);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0cbb49b8d555..b7650c786474 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1953,7 +1953,7 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
        u64 pdpte;
        int ret;
 
-       ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
+       ret = x86_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
                                  offset_in_page(cr3) + index * 8, 8);
        if (ret)
                return 0;
@@ -2151,7 +2151,7 @@ static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
        mask = (0xf >> (4 - size)) << start_bit;
        val = 0;
 
-       if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len))
+       if (x86_read_guest(&svm->vcpu, gpa, &val, iopm_len))
                return NESTED_EXIT_DONE;
 
        return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
@@ -2176,7 +2176,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
        /* Offset is in 32 bit units but need in 8 bit units */
        offset *= 4;
 
-       if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
+       if (x86_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
                return NESTED_EXIT_DONE;
 
        return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
@@ -2447,7 +2447,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
                p      = msrpm_offsets[i];
                offset = svm->nested.vmcb_msrpm + (p * 4);
 
-               if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
+               if (x86_read_guest(&svm->vcpu, offset, &value, 4))
                        return false;
 
                svm->nested.msrpm[p] = svm->msrpm[p] | value;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6d296fcb68f4..a0f5952ed0e9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7328,7 +7328,7 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
                bitmap += (port & 0x7fff) / 8;
 
                if (last_bitmap != bitmap)
-                       if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
+                       if (x86_read_guest(vcpu, bitmap, &b, 1))
                                return true;
                if (b & (1 << (port & 7)))
                        return true;
@@ -7372,7 +7372,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
        /* Then read the msr_index'th bit from this bitmap: */
        if (msr_index < 1024*8) {
                unsigned char b;
-               if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
+               if (x86_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
                        return true;
                return 1 & (b >> (msr_index & 7));
        } else
@@ -9114,7 +9114,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 
        msr.host_initiated = false;
        for (i = 0; i < count; i++) {
-               if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
+               if (x86_read_guest(vcpu, gpa + i * sizeof(e),
                                   &e, sizeof(e))) {
                        pr_warn_ratelimited(
                                "%s cannot read MSR entry (%u, 0x%08llx)\n",
@@ -9148,7 +9148,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 
        for (i = 0; i < count; i++) {
                struct msr_data msr_info;
-               if (kvm_read_guest(vcpu->kvm,
+               if (x86_read_guest(vcpu,
                                   gpa + i * sizeof(e),
                                   &e, 2 * sizeof(u32))) {
                        pr_warn_ratelimited(
@@ -9170,7 +9170,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
                                __func__, i, e.index);
                        return -EINVAL;
                }
-               if (kvm_write_guest(vcpu->kvm,
+               if (x86_write_guest(vcpu,
                                    gpa + i * sizeof(e) +
                                        offsetof(struct vmx_msr_entry, value),
                                    &msr_info.data, sizeof(msr_info.data))) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ab6a38617813..90ab62f54e1c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -475,7 +475,7 @@ EXPORT_SYMBOL_GPL(kvm_require_dr);
 
 /*
  * This function will be used to read from the physical memory of the currently
- * running guest. The difference to kvm_read_guest_page is that this function
+ * running guest. The difference to x86_read_guest_page is that this function
  * can read from guest physical or from the guest's guest physical memory.
  */
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
@@ -493,7 +493,7 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 
        real_gfn = gpa_to_gfn(real_gfn);
 
-       return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
+       return x86_read_guest_page(vcpu, real_gfn, data, offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
 
@@ -1125,7 +1125,7 @@ void kvm_set_pending_timer(struct kvm_vcpu *vcpu)
        kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
 }
 
-static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
+static void kvm_write_wall_clock(struct kvm_vcpu *vcpu, gpa_t wall_clock)
 {
        int version;
        int r;
@@ -1135,7 +1135,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
        if (!wall_clock)
                return;
 
-       r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
+       r = x86_read_guest(vcpu, wall_clock, &version, sizeof(version));
        if (r)
                return;
 
@@ -1144,7 +1144,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 
        ++version;
 
-       kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
+       x86_write_guest(vcpu, wall_clock, &version, sizeof(version));
 
        /*
         * The guest calculates current wall clock time by adding
@@ -1154,18 +1154,18 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
         */
        getboottime(&boot);
 
-       if (kvm->arch.kvmclock_offset) {
-               struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
+       if (vcpu->kvm->arch.kvmclock_offset) {
+               struct timespec ts = ns_to_timespec(vcpu->kvm->arch.kvmclock_offset);
                boot = timespec_sub(boot, ts);
        }
        wc.sec = boot.tv_sec;
        wc.nsec = boot.tv_nsec;
        wc.version = version;
 
-       kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
+       x86_write_guest(vcpu, wall_clock, &wc, sizeof(wc));
 
        version++;
-       kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
+       x86_write_guest(vcpu, wall_clock, &version, sizeof(version));
 }
 
 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
@@ -1681,7 +1681,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
        vcpu->last_guest_tsc = tsc_timestamp;
 
-       if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+       if (unlikely(x86_read_guest_cached(v, &vcpu->pv_time,
                &guest_hv_clock, sizeof(guest_hv_clock))))
                return 0;
 
@@ -1724,7 +1724,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
        trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
 
-       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+       x86_write_guest_cached(v, &vcpu->pv_time,
                                &vcpu->hv_clock,
                                sizeof(vcpu->hv_clock));
 
@@ -1965,7 +1965,7 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
                r = PTR_ERR(page);
                goto out;
        }
-       if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
+       if (x86_write_guest(vcpu, page_addr, page, PAGE_SIZE))
                goto out_free;
        r = 0;
 out_free:
@@ -2018,7 +2018,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
-               addr = gfn_to_hva(kvm, gfn);
+               addr = x86_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
@@ -2037,7 +2037,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                        break;
                gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
-               if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
+               if (x86_write_guest(vcpu, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
                        &tsc_ref, sizeof(tsc_ref)))
                        return 1;
                mark_page_dirty(kvm, gfn);
@@ -2065,7 +2065,7 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                        break;
                }
                gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
-               addr = gfn_to_hva(vcpu->kvm, gfn);
+               addr = x86_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
@@ -2107,8 +2107,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
                return 0;
        }
 
-       if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
-                                       sizeof(u32)))
+       if (x86_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+                                     sizeof(u32)))
                return 1;
 
        vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -2138,7 +2138,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
 
-       if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       if (unlikely(x86_read_guest_cached(vcpu, &vcpu->arch.st.stime,
                &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
                return;
 
@@ -2146,7 +2146,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
        vcpu->arch.st.steal.version += 2;
        vcpu->arch.st.accum_steal = 0;
 
-       kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+       x86_write_guest_cached(vcpu, &vcpu->arch.st.stime,
                &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
 }
 
@@ -2226,7 +2226,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_KVM_WALL_CLOCK_NEW:
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
-               kvm_write_wall_clock(vcpu->kvm, data);
+               kvm_write_wall_clock(vcpu, data);
                break;
        case MSR_KVM_SYSTEM_TIME_NEW:
        case MSR_KVM_SYSTEM_TIME: {
@@ -2254,7 +2254,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
                gpa_offset = data & ~(PAGE_MASK | 1);
 
-               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+               if (x86_gfn_to_hva_cache_init(vcpu->kvm,
                     &vcpu->arch.pv_time, data & ~1ULL,
                     sizeof(struct pvclock_vcpu_time_info)))
                        vcpu->arch.pv_time_enabled = false;
@@ -2275,9 +2275,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (data & KVM_STEAL_RESERVED_MASK)
                        return 1;
 
-               if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-                                               data & KVM_STEAL_VALID_BITS,
-                                               sizeof(struct kvm_steal_time)))
+               if (x86_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+                                             data & KVM_STEAL_VALID_BITS,
+                                             sizeof(struct kvm_steal_time)))
                        return 1;
 
                vcpu->arch.st.msr_val = data;
@@ -4301,7 +4301,7 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 
                if (gpa == UNMAPPED_GVA)
                        return X86EMUL_PROPAGATE_FAULT;
-               ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data,
+               ret = x86_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
                                          offset, toread);
                if (ret < 0) {
                        r = X86EMUL_IO_NEEDED;
@@ -4335,7 +4335,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
        offset = addr & (PAGE_SIZE-1);
        if (WARN_ON(offset + bytes > PAGE_SIZE))
                bytes = (unsigned)PAGE_SIZE - offset;
-       ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val,
+       ret = x86_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
                                  offset, bytes);
        if (unlikely(ret < 0))
                return X86EMUL_IO_NEEDED;
@@ -4382,7 +4382,7 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
 
                if (gpa == UNMAPPED_GVA)
                        return X86EMUL_PROPAGATE_FAULT;
-               ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
+               ret = x86_write_guest(vcpu, gpa, data, towrite);
                if (ret < 0) {
                        r = X86EMUL_IO_NEEDED;
                        goto out;
@@ -4435,7 +4435,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
        int ret;
 
-       ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
+       ret = x86_write_guest(vcpu, gpa, val, bytes);
        if (ret < 0)
                return 0;
        kvm_mmu_pte_write(vcpu, gpa, val, bytes);
@@ -4469,7 +4469,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
                        void *val, int bytes)
 {
-       return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
+       return !x86_read_guest(vcpu, gpa, val, bytes);
 }
 
 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -6406,7 +6406,7 @@ static void process_smi(struct kvm_vcpu *vcpu)
        else
                process_smi_save_state_32(vcpu, buf);
 
-       r = kvm_write_guest(vcpu->kvm, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
+       r = x86_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
        if (r < 0)
                return;
 
@@ -8175,7 +8175,7 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
 {
 
-       return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+       return x86_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
                                      sizeof(val));
 }
 
-- 
1.8.3.1

