Re: [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB

2010-12-13 Thread Avi Kivity

On 12/10/2010 03:51 PM, Andre Przywara wrote:

In case of a nested page fault or an intercepted #PF, newer SVM
implementations provide a copy of the faulting instruction bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.



+static int svm_prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+   struct vcpu_svm *svm = to_svm(vcpu);
+   uint8_t len;
+   struct fetch_cache *fetch;
+
+   len = svm->vmcb->control.insn_len & 0x0F;
+   if (len == 0)
+   return 1;
+
+   fetch = &svm->vcpu.arch.emulate_ctxt.decode.fetch;
+   fetch->start = kvm_rip_read(&svm->vcpu);
+   fetch->end = fetch->start + len;
+   memcpy(fetch->data, svm->vmcb->control.insn_bytes, len);
+
+   return 0;
+}


This reaching into the emulator internals from svm code is not very
good.  It also assumes ->prefetch_instruction() is called immediately
after an exit; that isn't true for vmx, and emulating multiple
instructions during the nsvm vmexit sequence was at least considered
for svm.


Alternatives are:
- add the insn data to emulate_instruction() and friends (my first 
suggestion)
- add x86_decode_insn_init(), which initializes the decode cache, and 
x86_decode_insn_prefill_cache(), which is called only if we have the 
insn data (rough sketch below)
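
A rough sketch of that second variant (the decode_cache/fetch_cache
layout is the one the patch already touches; the bodies and the exact
split are assumptions, not a final interface):

static void x86_decode_insn_init(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	/* reset the fetch window to an empty range at the current rip */
	c->eip = ctxt->eip;
	c->fetch.start = c->fetch.end = c->eip;
}

static void x86_decode_insn_prefill_cache(struct x86_emulate_ctxt *ctxt,
					  const u8 *insn, unsigned int len)
{
	struct fetch_cache *fetch = &ctxt->decode.fetch;

	/* called only when the caller already has the instruction bytes */
	memcpy(fetch->data, insn, len);
	fetch->end = fetch->start + len;
}

svm would call the prefill helper from its exit path when insn_len is
nonzero, and the decoder would never need to touch guest memory for
the fetch.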


Another one: teach kvm_fetch_guest_virt() to check if addr/bytes 
intersects with csbase+rip/len; if so, use that instead of doing the 
page table dance.
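
Rough sketch of that one, assuming the existing kvm_fetch_guest_virt()
shape and a hypothetical vcpu->arch.insn_bytes/insn_len cache that svm
would fill at exit time:

static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
				struct kvm_vcpu *vcpu, u32 *error)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	gva_t insn_start = ctxt->cs_base + ctxt->eip;
	u8 insn_len = vcpu->arch.insn_len;	/* hypothetical cache */
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;

	if (insn_len && addr >= insn_start &&
	    addr + bytes <= insn_start + insn_len) {
		/* bytes are in the snapshot, skip the page table walk */
		memcpy(val, vcpu->arch.insn_bytes + (addr - insn_start), bytes);
		return X86EMUL_CONTINUE;
	}

	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
					  access | PFERR_FETCH_MASK, error);
}

That way the emulator itself doesn't need a new hook; svm only has to
stash the bytes at exit time.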


--
error compiling committee.c: too many arguments to function



[PATCH 5/5] kvm/svm: copy instruction bytes from VMCB

2010-12-10 Thread Andre Przywara
In case of a nested page fault or an intercepted #PF, newer SVM
implementations provide a copy of the faulting instruction bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.

Signed-off-by: Andre Przywara 
---
 arch/x86/include/asm/kvm_host.h |3 +++
 arch/x86/include/asm/svm.h  |4 +++-
 arch/x86/kvm/emulate.c  |1 +
 arch/x86/kvm/svm.c  |   20 
 arch/x86/kvm/vmx.c  |7 +++
 5 files changed, 34 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2b89195..baaf063 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -586,6 +586,9 @@ struct kvm_x86_ops {
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+   int (*prefetch_instruction)(struct kvm_vcpu *vcpu);
+
const struct trace_print_flags *exit_reasons_str;
 };
 
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 589fc25..6d64b1d 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -81,7 +81,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u64 lbr_ctl;
u64 reserved_5;
u64 next_rip;
-   u8 reserved_6[816];
+   u8 insn_len;
+   u8 insn_bytes[15];
+   u8 reserved_6[800];
 };
 
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6366735..70385ee 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2623,6 +2623,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt)
c->eip = ctxt->eip;
c->fetch.start = c->fetch.end = c->eip;
ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
+   kvm_x86_ops->prefetch_instruction(ctxt->vcpu);
 
switch (mode) {
case X86EMUL_MODE_REAL:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 73f1a6d..685b264 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -464,6 +464,24 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
svm_set_interrupt_shadow(vcpu, 0);
 }
 
+static int svm_prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+   struct vcpu_svm *svm = to_svm(vcpu);
+   uint8_t len;
+   struct fetch_cache *fetch;
+
+   len = svm->vmcb->control.insn_len & 0x0F;
+   if (len == 0)
+   return 1;
+
+   fetch = &svm->vcpu.arch.emulate_ctxt.decode.fetch;
+   fetch->start = kvm_rip_read(&svm->vcpu);
+   fetch->end = fetch->start + len;
+   memcpy(fetch->data, svm->vmcb->control.insn_bytes, len);
+
+   return 0;
+}
+
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject)
@@ -3848,6 +3866,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.adjust_tsc_offset = svm_adjust_tsc_offset,
 
.set_tdp_cr3 = set_tdp_cr3,
+
+   .prefetch_instruction = svm_prefetch_instruction,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e5ef924..7572751 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1009,6 +1009,11 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
vmx_set_interrupt_shadow(vcpu, 0);
 }
 
+static int vmx_prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+   return 1;
+}
+
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject)
@@ -4362,6 +4367,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.adjust_tsc_offset = vmx_adjust_tsc_offset,
 
.set_tdp_cr3 = vmx_set_cr3,
+
+   .prefetch_instruction = vmx_prefetch_instruction,
 };
 
 static int __init vmx_init(void)
-- 
1.6.4




Re: [PATCH 5/5] kvm/svm: copy instruction bytes from VMCB

2010-12-07 Thread Avi Kivity

On 12/07/2010 12:59 PM, Andre Przywara wrote:

In case of a nested page fault or an intercepted #PF, newer SVM
implementations provide a copy of the faulting instruction bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.



diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cfbcbfa..3e3a67e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -586,6 +586,9 @@ struct kvm_x86_ops {
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+   int (*prefetch_instruction)(struct kvm_vcpu *vcpu);
+
const struct trace_print_flags *exit_reasons_str;
  };


How about adding a byte array/len parameter to x86_decode_insn()?  It 
could be used to prefill the buffer instead of invoking a callback.
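
Something like this, perhaps (parameter names and the helper are only
an illustration of the idea; callers without the bytes would pass
NULL/0 and get today's behaviour):

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);

/* called from x86_decode_insn() right after the fetch cache is reset */
static void prefill_fetch_cache(struct x86_emulate_ctxt *ctxt,
				const void *insn, int insn_len)
{
	struct fetch_cache *fetch = &ctxt->decode.fetch;

	if (!insn || insn_len <= 0)
		return;		/* nothing provided, fetch from the guest */

	memcpy(fetch->data, insn, insn_len);
	fetch->end = fetch->start + insn_len;
}

svm would then pass vmcb->control.insn_bytes and the masked insn_len
straight from its intercept handlers.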




diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6366735..abff8ff 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -525,6 +525,7 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
/* x86 instructions are limited to 15 bytes. */
if (eip + size - ctxt->eip > 15)
return X86EMUL_UNHANDLEABLE;
+   kvm_x86_ops->prefetch_instruction(ctxt->vcpu);


Even with the callback, this belongs in x86_decode_insn(), not on every 
fetch.



while (size--) {
rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
if (rc != X86EMUL_CONTINUE)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3cf2cef..ed94e9a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -464,6 +464,24 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
svm_set_interrupt_shadow(vcpu, 0);
  }

+static int prefetch_instruction(struct kvm_vcpu *vcpu)


svm_prefetch_instruction()


+{
+   struct vcpu_svm *svm = to_svm(vcpu);
+   uint8_t len;
+   struct fetch_cache *fetch;
+
+   len = svm->vmcb->control.insn_len & 0x0F;
+   if (len == 0)
+   return 1;
+
+   fetch = &svm->vcpu.arch.emulate_ctxt.decode.fetch;
+   fetch->start = kvm_rip_read(&svm->vcpu);
+   fetch->end = fetch->start + len;
+   memcpy(fetch->data, svm->vmcb->control.insn_bytes, len);
+
+   return 0;
+}


svm code shouldn't reach so deep into the emulator's privates.

--
error compiling committee.c: too many arguments to function



[PATCH 5/5] kvm/svm: copy instruction bytes from VMCB

2010-12-07 Thread Andre Przywara
In case of a nested page fault or an intercepted #PF, newer SVM
implementations provide a copy of the faulting instruction bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.

Signed-off-by: Andre Przywara 
---
 arch/x86/include/asm/kvm_host.h |3 +++
 arch/x86/include/asm/svm.h  |4 +++-
 arch/x86/kvm/emulate.c  |1 +
 arch/x86/kvm/svm.c  |   20 
 arch/x86/kvm/vmx.c  |7 +++
 5 files changed, 34 insertions(+), 1 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cfbcbfa..3e3a67e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -586,6 +586,9 @@ struct kvm_x86_ops {
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+
+   int (*prefetch_instruction)(struct kvm_vcpu *vcpu);
+
const struct trace_print_flags *exit_reasons_str;
 };
 
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 589fc25..6d64b1d 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -81,7 +81,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u64 lbr_ctl;
u64 reserved_5;
u64 next_rip;
-   u8 reserved_6[816];
+   u8 insn_len;
+   u8 insn_bytes[15];
+   u8 reserved_6[800];
 };
 
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 6366735..abff8ff 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -525,6 +525,7 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
/* x86 instructions are limited to 15 bytes. */
if (eip + size - ctxt->eip > 15)
return X86EMUL_UNHANDLEABLE;
+   kvm_x86_ops->prefetch_instruction(ctxt->vcpu);
while (size--) {
rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
if (rc != X86EMUL_CONTINUE)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3cf2cef..ed94e9a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -464,6 +464,24 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
svm_set_interrupt_shadow(vcpu, 0);
 }
 
+static int prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+   struct vcpu_svm *svm = to_svm(vcpu);
+   uint8_t len;
+   struct fetch_cache *fetch;
+
+   len = svm->vmcb->control.insn_len & 0x0F;
+   if (len == 0)
+   return 1;
+
+   fetch = &svm->vcpu.arch.emulate_ctxt.decode.fetch;
+   fetch->start = kvm_rip_read(&svm->vcpu);
+   fetch->end = fetch->start + len;
+   memcpy(fetch->data, svm->vmcb->control.insn_bytes, len);
+
+   return 0;
+}
+
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject)
@@ -3830,6 +3848,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.adjust_tsc_offset = svm_adjust_tsc_offset,
 
.set_tdp_cr3 = set_tdp_cr3,
+
+   .prefetch_instruction = prefetch_instruction,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 72cfdb7..4825545 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1009,6 +1009,11 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
vmx_set_interrupt_shadow(vcpu, 0);
 }
 
+static int prefetch_instruction(struct kvm_vcpu *vcpu)
+{
+   return 1;
+}
+
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject)
@@ -4362,6 +4367,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.adjust_tsc_offset = vmx_adjust_tsc_offset,
 
.set_tdp_cr3 = vmx_set_cr3,
+
+   .prefetch_instruction = prefetch_instruction,
 };
 
 static int __init vmx_init(void)
-- 
1.6.4

