From: Dor Laor <[EMAIL PROTECTED]>
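
Add a dynamic hypercall registration mechanism.  Hypercall handlers are
kept in a per-index table: kernel modules claim an index with
kvm_register_hypercall() and release it with kvm_unregister_hypercall(),
and kvm_hypercall() dispatches guest hypercalls to the registered handler
while holding a reference on the handler's module for the duration of the
call.  Hypercall numbers without a registered handler are forwarded to
userspace as KVM_EXIT_HYPERCALL.  The now-unused para_state bookkeeping is
dropped from struct kvm_vcpu, and SVM's vmmcall handler injects #UD when
the instruction is issued from CPL > 0.

As a rough, hypothetical sketch of how a module might hook an index with
the new API (the handler, variable names and index value below are
illustrative only and not part of this patch):

static unsigned long example_handler(struct kvm_vcpu *vcpu,
                                     unsigned long args[])
{
        /* args[0]..args[5] carry the guest's hypercall arguments */
        return 0;
}

static struct kvm_hypercall example_hypercall = {
        .hypercall = example_handler,
        .idx       = 0,                 /* illustrative index */
};

static int __init example_init(void)
{
        return kvm_register_hypercall(THIS_MODULE, &example_hypercall);
}

static void __exit example_exit(void)
{
        kvm_unregister_hypercall(&example_hypercall);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
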

Signed-off-by: Gregory Haskins <[EMAIL PROTECTED]>
---

 drivers/kvm/kvm.h      |   11 +++
 drivers/kvm/kvm_main.c |  153 +++++++++++++++++++++++++++++++++++++++---------
 drivers/kvm/svm.c      |   11 +++
 drivers/kvm/svm.h      |    2 -
 drivers/kvm/vmx.c      |    6 ++
 5 files changed, 148 insertions(+), 35 deletions(-)
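
For reference, the dispatcher in kvm_main.c expects the hypercall number
in RAX and the arguments in RDI, RSI, RDX, RCX, R8, R9 for 64-bit guests
(EAX and EBX, ECX, EDX, ESI, EDI, EBP for 32-bit guests), with the return
value placed back in RAX.  A hypothetical 64-bit guest-side wrapper for
two arguments, issuing the Intel "vmcall" instruction directly (an AMD
guest would use "vmmcall"), might look roughly like this; it is not part
of this patch:

static inline unsigned long example_vmcall2(unsigned long nr,
                                            unsigned long a0,
                                            unsigned long a1)
{
        unsigned long ret;

        /* nr in RAX, args in RDI/RSI; return value comes back in RAX */
        asm volatile("vmcall"
                     : "=a" (ret)
                     : "a" (nr), "D" (a0), "S" (a1)
                     : "memory");
        return ret;
}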

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index a42a6f3..839e11c 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -316,9 +316,6 @@ struct kvm_vcpu {
        unsigned long cr0;
        unsigned long cr2;
        unsigned long cr3;
-       gpa_t para_state_gpa;
-       struct page *para_state_page;
-       gpa_t hypercall_gpa;
        unsigned long cr4;
        unsigned long cr8;
        u64 pdptrs[4]; /* pae */
@@ -388,6 +385,12 @@ struct kvm_memory_slot {
        unsigned long *dirty_bitmap;
 };
 
+struct kvm_hypercall {
+       unsigned long (*hypercall)(struct kvm_vcpu *vcpu, unsigned long args[]);
+       struct module *module;
+       int idx;
+};
+
 struct kvm {
        struct mutex lock; /* protects everything except vcpus */
        int naliases;
@@ -588,6 +591,8 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 
 int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_register_hypercall(struct module *module, struct kvm_hypercall *hypercall);
+int kvm_unregister_hypercall(struct kvm_hypercall *hypercall);
 
 static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                     u32 error_code)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index d154487..6428746 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -84,6 +84,8 @@ static struct kvm_stats_debugfs_item {
 
 static struct dentry *debugfs_dir;
 
+static struct kvm_hypercall hypercalls[KVM_NR_HYPERCALLS];
+
 #define MAX_IO_MSRS 256
 
 #define CR0_RESERVED_BITS                                              \
@@ -1263,53 +1265,150 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 
        vcpu->run->exit_reason = KVM_EXIT_HLT;
        ++vcpu->stat.halt_exits;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
+int kvm_register_hypercall(struct module *module,
+                          struct kvm_hypercall *hypercall)
+{
+       int r = 0;
+
+       if (hypercall->idx >= KVM_NR_HYPERCALLS ||
+           hypercall->idx < 0) {
+               printk(KERN_DEBUG "%s: invalid hypercall idx(%d)\n",
+                       __FUNCTION__, hypercall->idx);
+               return -EINVAL;
+       }
+
+       spin_lock(&kvm_lock);
+
+       if (hypercalls[hypercall->idx].hypercall) {
+               printk(KERN_DEBUG "%s:hypercall idx(%d) already taken\n",
+                       __FUNCTION__, hypercall->idx);
+               r = -EEXIST;
+               goto out;
+       }
+
+       if (!try_module_get(module)) {
+               printk(KERN_DEBUG "%s: module reference count++ failed\n",
+                       __FUNCTION__);
+               r = -EINVAL;
+               goto out;
+       }
+
+       hypercalls[hypercall->idx].hypercall = hypercall->hypercall;
+       hypercalls[hypercall->idx].module = module;
+
+out:
+       spin_unlock(&kvm_lock);
+
+       return r;
+}
+EXPORT_SYMBOL_GPL(kvm_register_hypercall);
+
+int kvm_unregister_hypercall(struct kvm_hypercall *hypercall)
+{
+       if (hypercall->idx >= KVM_NR_HYPERCALLS ||
+           hypercall->idx < 0) {
+               printk(KERN_DEBUG "%s: invalid hypercall idx(%d)\n",
+                       __FUNCTION__, hypercall->idx);
+               return -EINVAL;
+       }
+
+       spin_lock(&kvm_lock);
+       if (!hypercalls[hypercall->idx].hypercall) {
+               printk(KERN_DEBUG "%s:hypercall idx(%d) was not registered\n",
+                       __FUNCTION__, hypercall->idx);
+               spin_unlock(&kvm_lock);
+               return -ENOENT;
+       }
+
+       hypercalls[hypercall->idx].hypercall = NULL;
+       module_put(hypercalls[hypercall->idx].module);
+       spin_unlock(&kvm_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_unregister_hypercall);
+
+/*
+ * Generic hypercall dispatcher.  Returns 1 if the hypercall was handled
+ * in the kernel, 0 if it must be completed in userspace.
+ */
 int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-       unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
+       unsigned long nr, ret;
+       unsigned long args[6];
+       int res = 1;
 
        kvm_arch_ops->cache_regs(vcpu);
        ret = -KVM_EINVAL;
 #ifdef CONFIG_X86_64
        if (is_long_mode(vcpu)) {
                nr = vcpu->regs[VCPU_REGS_RAX];
-               a0 = vcpu->regs[VCPU_REGS_RDI];
-               a1 = vcpu->regs[VCPU_REGS_RSI];
-               a2 = vcpu->regs[VCPU_REGS_RDX];
-               a3 = vcpu->regs[VCPU_REGS_RCX];
-               a4 = vcpu->regs[VCPU_REGS_R8];
-               a5 = vcpu->regs[VCPU_REGS_R9];
+               args[0] = vcpu->regs[VCPU_REGS_RDI];
+               args[1] = vcpu->regs[VCPU_REGS_RSI];
+               args[2] = vcpu->regs[VCPU_REGS_RDX];
+               args[3] = vcpu->regs[VCPU_REGS_RCX];
+               args[4] = vcpu->regs[VCPU_REGS_R8];
+               args[5] = vcpu->regs[VCPU_REGS_R9];
        } else
 #endif
        {
-               nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
-               a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
-               a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
-               a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
-               a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
-               a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
-               a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
-       }
-       switch (nr) {
-       default:
+               nr = vcpu->regs[VCPU_REGS_RAX] & -1u;
+               args[0] = vcpu->regs[VCPU_REGS_RBX] & -1u;
+               args[1] = vcpu->regs[VCPU_REGS_RCX] & -1u;
+               args[2] = vcpu->regs[VCPU_REGS_RDX] & -1u;
+               args[3] = vcpu->regs[VCPU_REGS_RSI] & -1u;
+               args[4] = vcpu->regs[VCPU_REGS_RDI] & -1u;
+               args[5] = vcpu->regs[VCPU_REGS_RBP] & -1u;
+       }
+
+       if (nr >= KVM_NR_HYPERCALLS) {
                run->hypercall.nr = nr;
-               run->hypercall.args[0] = a0;
-               run->hypercall.args[1] = a1;
-               run->hypercall.args[2] = a2;
-               run->hypercall.args[3] = a3;
-               run->hypercall.args[4] = a4;
-               run->hypercall.args[5] = a5;
-               run->hypercall.ret = ret;
+               memcpy(run->hypercall.args, args, sizeof(args));
+               run->hypercall.ret = 0;
                run->hypercall.longmode = is_long_mode(vcpu);
                kvm_arch_ops->decache_regs(vcpu);
                return 0;
        }
+
+       /* The hypercall might block or do intensive work */
+       vcpu_put(vcpu);
+
+       /*
+        * Taking a reference on the hypercall's module ensures that it
+        * cannot be removed while the hypercall is executing.
+        * Strictly speaking the lock is needed for a coherent
+        * module/hypercall view, but in practice this mechanism has
+        * almost no users yet.
+        */
+       if (!try_module_get(hypercalls[nr].module)) {
+               printk(KERN_DEBUG "%s: module reference count++ failed\n",
+                       __FUNCTION__);
+               res = 0;
+               goto out;
+       }
+
+       if (!hypercalls[nr].hypercall) {
+               printk(KERN_ERR "%s: hypercall nr(%lx) was not yet registered\n",
+                      __FUNCTION__, nr);
+               res = 0;
+               goto out_put;
+       }
+
+       ret = hypercalls[nr].hypercall(vcpu, args);
+
+out_put:
+       module_put(hypercalls[nr].module);
+out:
+       vcpu_load(vcpu);
+
        vcpu->regs[VCPU_REGS_RAX] = ret;
        kvm_arch_ops->decache_regs(vcpu);
-       return 1;
+       return res;
 }
 EXPORT_SYMBOL_GPL(kvm_hypercall);
 
@@ -1430,10 +1529,6 @@ static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
        }
 
        printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
-       vcpu->para_state_page = para_state_page;
-       vcpu->para_state_gpa = para_state_gpa;
-       vcpu->hypercall_gpa = hypercall_gpa;
-
        mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
        hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
                                KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index cc674bf..ce70c5d 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -1022,9 +1022,18 @@ static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 
 static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
+       if (svm->vmcb->save.cpl != 0) {
+               inject_ud(&svm->vcpu);
+               return 1;
+       }
+
        svm->next_rip = svm->vmcb->save.rip + 3;
        skip_emulated_instruction(&svm->vcpu);
-       return kvm_hypercall(&svm->vcpu, kvm_run);
+       if (kvm_hypercall(&svm->vcpu, kvm_run))
+               return 1;
+
+       kvm_run->exit_reason = KVM_EXIT_HYPERCALL;
+       return 0;
 }
 
 static int invalid_op_interception(struct vcpu_svm *svm,
diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h
index 3b1b0f3..005a9c5 100644
--- a/drivers/kvm/svm.h
+++ b/drivers/kvm/svm.h
@@ -175,7 +175,7 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_CPUID_FUNC 0x8000000a
 
 #define MSR_EFER_SVME_MASK (1ULL << 12)
-#define MSR_VM_CR       0xc0010114
+#define MSR_VM_CR       0xc0010114ULL
 #define MSR_VM_HSAVE_PA 0xc0010117ULL
 
 #define SVM_VM_CR_SVM_DISABLE 4
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index b400668..2187061 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1981,7 +1981,11 @@ static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        skip_emulated_instruction(vcpu);
-       return kvm_hypercall(vcpu, kvm_run);
+       if (kvm_hypercall(vcpu, kvm_run))
+               return 1;
+
+       kvm_run->exit_reason = KVM_EXIT_HYPERCALL;
+       return 0;
 }
 
 /*

