Re: [PATCH 06/30] nVMX: Decoding memory operands of VMX instructions

2011-05-09 Thread Avi Kivity

On 05/08/2011 11:18 AM, Nadav Har'El wrote:

This patch adds a utility function for decoding the pointer operands of VMX
instructions issued by L1 (a guest hypervisor).

+	/*
+	 * TODO: throw #GP (and return 1) in various cases that the VM*
+	 * instructions require it - e.g., offset beyond segment limit,
+	 * unusable or unreadable/unwritable segment, non-canonical 64-bit
+	 * address, and so on. Currently these are not checked.
+	 */
+	return 0;
+}
+


Note: emulate.c now contains a function (linearize()) which does these
calculations.  We need to generalize it and expose it so that nVMX can
make use of it.


There is no real security concern since these instructions are only
allowed from CPL 0 anyway.
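
For reference, these are roughly the checks a generalized linearize() would
have to apply to the decoded operand address: segment usability and limit
outside 64-bit mode, canonicality in 64-bit mode. A stand-alone sketch of
them follows; struct seg, is_canonical() and operand_ok() are made up for
illustration and are not the emulate.c interface:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented, simplified segment cache entry; not KVM's struct kvm_segment. */
struct seg {
	uint64_t base;
	uint32_t limit;      /* byte-granular, for simplicity */
	bool     usable;
	bool     writable;
};

/* Canonical for 48-bit virtual addresses: bits 63:47 all equal. */
static bool is_canonical(uint64_t addr)
{
	uint64_t top = addr >> 47;
	return top == 0 || top == 0x1ffff;
}

/*
 * The kind of checks the TODO in get_vmx_mem_address() refers to: in 64-bit
 * mode only canonicality matters; otherwise the segment must be usable (and
 * writable for stores) and the access must fit within the limit.
 * Expand-down segments and offset wrap-around are ignored here.
 */
static bool operand_ok(const struct seg *s, uint64_t offset,
		       unsigned int size, bool write, bool long_mode)
{
	if (long_mode)
		return is_canonical(s->base + offset);
	if (!s->usable || (write && !s->writable))
		return false;
	return offset + size - 1 <= s->limit;
}

int main(void)
{
	struct seg ds = { .base = 0, .limit = 0xffff,
			  .usable = true, .writable = true };

	printf("within limit:  %d\n", operand_ok(&ds, 0x1000, 8, true, false));
	printf("beyond limit:  %d\n", operand_ok(&ds, 0xfff9, 8, true, false));
	printf("non-canonical: %d\n",
	       operand_ok(&ds, 0x0000800000000000ull, 8, true, true));
	return 0;
}

A real implementation would of course consult the segment cache KVM already
keeps for the guest rather than an ad-hoc struct.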


--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.



[PATCH 06/30] nVMX: Decoding memory operands of VMX instructions

2011-05-08 Thread Nadav Har'El
This patch adds a utility function for decoding the pointer operands of VMX
instructions issued by L1 (a guest hypervisor).

Signed-off-by: Nadav Har'El n...@il.ibm.com
---
 arch/x86/kvm/vmx.c |   53 +++
 arch/x86/kvm/x86.c |3 +-
 arch/x86/kvm/x86.h |4 +++
 3 files changed, 59 insertions(+), 1 deletion(-)

--- .before/arch/x86/kvm/x86.c  2011-05-08 10:43:18.0 +0300
+++ .after/arch/x86/kvm/x86.c   2011-05-08 10:43:18.0 +0300
@@ -3815,7 +3815,7 @@ static int kvm_fetch_guest_virt(struct x
  exception);
 }
 
-static int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
   gva_t addr, void *val, unsigned int bytes,
   struct x86_exception *exception)
 {
@@ -3825,6 +3825,7 @@ static int kvm_read_guest_virt(struct x8
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
  exception);
 }
+EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
 
 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
  gva_t addr, void *val, unsigned int bytes,
--- .before/arch/x86/kvm/x86.h  2011-05-08 10:43:18.0 +0300
+++ .after/arch/x86/kvm/x86.h   2011-05-08 10:43:18.0 +0300
@@ -81,4 +81,8 @@ int kvm_inject_realmode_interrupt(struct
 
 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
 
+int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+   gva_t addr, void *val, unsigned int bytes,
+   struct x86_exception *exception);
+
 #endif
--- .before/arch/x86/kvm/vmx.c  2011-05-08 10:43:18.0 +0300
+++ .after/arch/x86/kvm/vmx.c   2011-05-08 10:43:18.0 +0300
@@ -4254,6 +4254,59 @@ static int handle_vmoff(struct kvm_vcpu 
 }
 
 /*
+ * Decode the memory-address operand of a vmx instruction, as recorded on an
+ * exit caused by such an instruction (run by a guest hypervisor).
+ * On success, returns 0. When the operand is invalid, returns 1 and throws
+ * #UD or #GP.
+ */
+static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
+				 unsigned long exit_qualification,
+				 u32 vmx_instruction_info, gva_t *ret)
+{
+	/*
+	 * According to Vol. 3B, "Information for VM Exits Due to Instruction
+	 * Execution", on an exit, vmx_instruction_info holds most of the
+	 * addressing components of the operand. Only the displacement part
+	 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
+	 * For how an actual address is calculated from all these components,
+	 * refer to Vol. 1, "Operand Addressing".
+	 */
+	int  scaling = vmx_instruction_info & 3;
+	int  addr_size = (vmx_instruction_info >> 7) & 7;
+	bool is_reg = vmx_instruction_info & (1u << 10);
+	int  seg_reg = (vmx_instruction_info >> 15) & 7;
+	int  index_reg = (vmx_instruction_info >> 18) & 0xf;
+	bool index_is_valid = !(vmx_instruction_info & (1u << 22));
+	int  base_reg       = (vmx_instruction_info >> 23) & 0xf;
+	bool base_is_valid  = !(vmx_instruction_info & (1u << 27));
+
+	if (is_reg) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
+	}
+
+	/* Addr = segment_base + offset */
+	/* offset = base + [index * scale] + displacement */
+	*ret = vmx_get_segment_base(vcpu, seg_reg);
+	if (base_is_valid)
+		*ret += kvm_register_read(vcpu, base_reg);
+	if (index_is_valid)
+		*ret += kvm_register_read(vcpu, index_reg) << scaling;
+	*ret += exit_qualification; /* holds the displacement */
+
+	if (addr_size == 1) /* 32 bit */
+		*ret &= 0xffffffff;
+
+	/*
+	 * TODO: throw #GP (and return 1) in various cases that the VM*
+	 * instructions require it - e.g., offset beyond segment limit,
+	 * unusable or unreadable/unwritable segment, non-canonical 64-bit
+	 * address, and so on. Currently these are not checked.
+	 */
+	return 0;
+}
+
+/*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
  * to be done to userspace and return 0.
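
To make the field layout above concrete, here is a small stand-alone program
that mirrors the bit extraction in get_vmx_mem_address() and applies
Addr = segment_base + base + (index << scaling) + displacement. The sample
vmx_instruction_info encoding, the exit qualification, and the seg_base/regs
arrays are invented stand-ins for vmx_get_segment_base()/kvm_register_read(),
not real guest state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/*
	 * Invented example: memory operand using a base register and an
	 * index register scaled by 4, DS segment, 64-bit address size.
	 * The field layout matches the decoding in get_vmx_mem_address().
	 */
	uint32_t info = (2u << 0)    /* scaling: index * 4 */
		      | (2u << 7)    /* address size: 2 = 64 bit */
		      | (0u << 10)   /* memory operand, not register */
		      | (3u << 15)   /* segment register: 3 = DS */
		      | (1u << 18)   /* index register = 1 */
		      | (0u << 22)   /* index register valid */
		      | (5u << 23)   /* base register = 5 */
		      | (0u << 27);  /* base register valid */
	uint64_t exit_qualification = 0x10;  /* displacement */

	/* Stand-ins for vmx_get_segment_base() / kvm_register_read(). */
	uint64_t seg_base[8] = { 0 };
	uint64_t regs[16] = { [1] = 0x20, [5] = 0x1000 };

	int  scaling        = info & 3;
	int  addr_size      = (info >> 7) & 7;
	bool is_reg         = info & (1u << 10);
	int  seg_reg        = (info >> 15) & 7;
	int  index_reg      = (info >> 18) & 0xf;
	bool index_is_valid = !(info & (1u << 22));
	int  base_reg       = (info >> 23) & 0xf;
	bool base_is_valid  = !(info & (1u << 27));

	if (is_reg) {
		printf("register operand: would queue #UD\n");
		return 1;
	}

	/* Addr = segment_base + base + (index << scaling) + displacement */
	uint64_t addr = seg_base[seg_reg];
	if (base_is_valid)
		addr += regs[base_reg];
	if (index_is_valid)
		addr += regs[index_reg] << scaling;
	addr += exit_qualification;
	if (addr_size == 1)              /* 32-bit address size */
		addr &= 0xffffffff;

	/* 0x1000 + (0x20 << 2) + 0x10 = 0x1090 */
	printf("decoded gva = 0x%llx\n", (unsigned long long)addr);
	return 0;
}

In the exit handlers later in the series, the gva decoded this way would
presumably be handed to the newly exported kvm_read_guest_virt() to copy the
operand out of L1's memory.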