Re: [PATCH 4/6 v5] KVM: PPC: exit to user space on "ehpriv" instruction

2013-06-25 Thread tiejun.chen

On 06/26/2013 01:42 PM, Bharat Bhushan wrote:

"ehpriv" instruction is used for setting software breakpoints
by user space. This patch adds support to exit to user space
with "run->debug" have relevant information.

As this is the first point we are using run->debug, also defined
the run->debug structure.

Signed-off-by: Bharat Bhushan 
---
  arch/powerpc/include/asm/disassemble.h |4 
  arch/powerpc/include/uapi/asm/kvm.h|   21 +
  arch/powerpc/kvm/e500_emulate.c|   27 +++
  3 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/disassemble.h 
b/arch/powerpc/include/asm/disassemble.h
index 9b198d1..856f8de 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -77,4 +77,8 @@ static inline unsigned int get_d(u32 inst)
return inst & 0xffff;
  }

+static inline unsigned int get_oc(u32 inst)
+{
+   return (inst >> 11) & 0x7fff;
+}
  #endif /* __ASM_PPC_DISASSEMBLE_H__ */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e..ded0607 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -269,7 +269,24 @@ struct kvm_fpu {
__u64 fpr[32];
  };

+/*
+ * Defines for h/w breakpoint, watchpoint (read, write or both) and
+ * software breakpoint.
+ * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
+ * for KVM_DEBUG_EXIT.
+ */
+#define KVMPPC_DEBUG_NONE  0x0
+#define KVMPPC_DEBUG_BREAKPOINT    (1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE   (1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ    (1UL << 3)
  struct kvm_debug_exit_arch {
+   __u64 address;
+   /*
+* exiting to userspace because of h/w breakpoint, watchpoint
+* (read, write or both) and software breakpoint.
+*/
+   __u32 status;
+   __u32 reserved;
  };

  /* for KVM_SET_GUEST_DEBUG */
@@ -281,10 +298,6 @@ struct kvm_guest_debug_arch {
 * Type denotes h/w breakpoint, read watchpoint, write
 * watchpoint or watchpoint (both read and write).
 */
-#define KVMPPC_DEBUG_NONE  0x0
-#define KVMPPC_DEBUG_BREAKPOINT    (1UL << 1)
-#define KVMPPC_DEBUG_WATCH_WRITE   (1UL << 2)
-#define KVMPPC_DEBUG_WATCH_READ    (1UL << 3)
__u32 type;
__u32 reserved;
} bp[16];
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index b10a012..dab9d07 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -26,6 +26,8 @@
  #define XOP_TLBRE   946
  #define XOP_TLBWE   978
  #define XOP_TLBILX  18
+#define XOP_EHPRIV  270
+#define EHPRIV_OC_DEBUG 0


As I think the case, "OC = 0", is a bit specific since IIRC, if the OC
operand is omitted, its equal 0 by default. So I think we should start this OC 
value from 1 or other magic number.


And if possible, we'd better add a comment describing this to make the OC
definition readable.
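
For example, something like the following; the value 1 and the comment wording
are only an illustration, not taken from the patch:

        /*
         * EHPRIV OC values used by KVM.  An "ehpriv" with the OC operand
         * omitted encodes OC = 0, so reserve a non-zero value for the
         * software-breakpoint exit (example value only).
         */
        #define XOP_EHPRIV         270
        #define EHPRIV_OC_DEBUG    1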


Tiejun



  #ifdef CONFIG_KVM_E500MC
  static int dbell2prio(ulong param)
@@ -82,6 +84,26 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, 
int rb)
  }
  #endif

+static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+  unsigned int inst, int *advance)
+{
+   int emulated = EMULATE_DONE;
+
+   switch (get_oc(inst)) {
+   case EHPRIV_OC_DEBUG:
+   run->exit_reason = KVM_EXIT_DEBUG;
+   run->debug.arch.address = vcpu->arch.pc;
+   run->debug.arch.status = 0;
+   kvmppc_account_exit(vcpu, DEBUG_EXITS);
+   emulated = EMULATE_EXIT_USER;
+   *advance = 0;
+   break;
+   default:
+   emulated = EMULATE_FAIL;
+   }
+   return emulated;
+}
+
  int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 unsigned int inst, int *advance)
  {
@@ -130,6 +152,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
break;

+   case XOP_EHPRIV:
+   emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
+  advance);
+   break;
+
default:
emulated = EMULATE_FAIL;
}





Re: [nVMX w/ Haswell] KVM unit-tests in L1 - eventinj test fails trying to send NMI

2013-06-25 Thread Jan Kiszka
On 2013-06-05 11:06, Kashyap Chamarthy wrote:
> Adding Jan, Jun, to see if they have any inputs here.

Thanks for the note, it's very helpful! This test actually fails on
older CPUs as well, and I can finally reproduce the issue that Jay also
reported. I'm not able to "cure" it by going back to 3b656cf764^, just
alter the error report. Anyway, a start. Now I just need time to debug it...

Jan

> 
> /kashyap
> 
> On Tue, Jun 4, 2013 at 6:14 PM, Kashyap Chamarthy  
> wrote:
>> Heya,
>>
>> So, I invoked this in L1 with:
>> ===
>> [test@foo kvm-unit-tests]$ time qemu-system-x86_64 -enable-kvm -device
>> pc-testdev -serial stdio -nographic -no-user-config -nodefaults
>> -device
>> isa-debug-exit,iobase=0xf4,iosize=0x4 -kernel ./x86/eventinj.flat |
>> tee /var/tmp/eventinj-test.txt
>> enabling apic
>> paging enabled
>> cr0 = 80010011
>> cr3 = 7fff000
>> cr4 = 20
>> Try to divide by 0
>> DE isr running divider is 0
>> Result is 150
>> DE exception: PASS
>> Try int 3
>> BP isr running
>> After int 3
>> BP exception: PASS
>> Try send vec 33 to itself
>> irq1 running
>> After vec 33 to itself
>> vec 33: PASS
>> Try int $33
>> irq1 running
>> After int $33
>> int $33: PASS
>> Try send vec 32 and 33 to itself
>> irq1 running
>> irq0 running
>> After vec 32 and 33 to itself
>> vec 32/33: PASS
>> Try send vec 32 and int $33
>> irq1 running
>> irq0 running
>> After vec 32 and int $33
>> vec 32/int $33: PASS
>> Try send vec 33 and 62 and mask one with TPR
>> irq1 running
>> After 33/62 TPR test
>> TPR: PASS
>> irq0 running
>> Try send NMI to itself
>> After NMI to itself
>> NMI: FAIL
>> Try int 33 with shadowed stack
>> irq1 running
>> After int 33 with shadowed stack
>> int 33 with shadowed stack: PASS
>>
>> summary: 9 tests, 1 failures
>>
>> real0m0.647s
>> user0m0.164s
>> sys 0m0.146s
>> [test@foo kvm-unit-tests]$
>> ===
>>
>> Any hints on further debugging this ?
>>
>>
>> Other info:
>> --
>>
>> - L1's qemu-kvm CLI
>> ===
>> # ps -ef | grep -i qemu
>> qemu  5455 1 94 Jun02 ?1-07:14:29
>> /usr/bin/qemu-system-x86_64 -machine accel=kvm -name regular-guest -S
>> -machine pc-i440fx-1.4,accel=kvm,usb=off -cpu Haswell,+vmx -m 10240
>> -smp 4,sockets=4,cores=1,threads=1 -uuid
>> 4ed9ac0b-7f72-dfcf-68b3-e6fe2ac588b2 -nographic -no-user-config
>> -nodefaults -chardev
>> socket,id=charmonitor,path=/var/lib/libvirt/qemu/regular-guest.monitor,server,nowait
>> -mon chardev=charmonitor,id=monitor,mode=control -rtc base=utc
>> -no-shutdown -device piix3-usb-uhci,id=usb,bus=pci.0,addr=0x1.0x2
>> -drive 
>> file=/home/test/vmimages/regular-guest.qcow2,if=none,id=drive-virtio-disk0,format=qcow2,cache=none
>> -device 
>> virtio-blk-pci,scsi=off,bus=pci.0,addr=0x4,drive=drive-virtio-disk0,id=virtio-disk0,bootindex=1
>> -netdev tap,fd=23,id=hostnet0,vhost=on,vhostfd=24 -device
>> virtio-net-pci,netdev=hostnet0,id=net0,mac=52:54:00:80:c1:34,bus=pci.0,addr=0x3
>> -chardev pty,id=charserial0 -device
>> isa-serial,chardev=charserial0,id=serial0 -device usb-tablet,id=input0
>> -device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x5
>> root 12255  5419  0 08:41 pts/200:00:00 grep --color=auto -i qemu
>> ===
>>
>> - Setup details --
>> https://github.com/kashyapc/nvmx-haswell/blob/master/SETUP-nVMX.rst
>>
>> /kashyap





Re: [PATCH] KVM: Fix RTC interrupt coalescing tracking

2013-06-25 Thread Jan Kiszka
On 2013-06-26 08:15, Gleb Natapov wrote:
> On Wed, Jun 26, 2013 at 07:49:37AM +0200, Jan Kiszka wrote:
>> On 2013-06-24 14:19, Gleb Natapov wrote:
>>> This reverts most of the f1ed0450a5fac7067590317cbf027f566b6ccbca. After
>>> the commit kvm_apic_set_irq() no longer returns accurate information
>>> about interrupt injection status if injection is done into disabled
>>> APIC. RTC interrupt coalescing tracking relies on the information to be
>>> accurate and cannot recover if it is not.
>>>
>>> Signed-off-by: Gleb Natapov 
>>> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
>>> index 9d75193..9f4bea8 100644
>>> --- a/arch/x86/kvm/lapic.c
>>> +++ b/arch/x86/kvm/lapic.c
>>> @@ -405,17 +405,17 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
>>> return highest_irr;
>>>  }
>>>  
>>> -static void __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
>>> - int vector, int level, int trig_mode,
>>> - unsigned long *dest_map);
>>> +static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
>>> +int vector, int level, int trig_mode,
>>> +unsigned long *dest_map);
>>
>> I still think __apic_accept_irq should unconditionally inject, and the
>> test for apic_enabled belongs into kvm_apic_set_irq. Why should
>> __apic_accept_irq accept non-APIC_DM_FIXED messages if the APIC is off?
>> See below for another reason to refactor this part of the interface.
>>
> 10.4.7.2 Local APIC State After It Has Been Software Disabled
> 
> The local APIC will respond normally to INIT, NMI, SMI, and SIPI
> messages.

OK, I see.

> 
>>>  
>>> -void kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
>>> - unsigned long *dest_map)
>>> +int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
>>> +   unsigned long *dest_map)
>>>  {
>>> struct kvm_lapic *apic = vcpu->arch.apic;
>>>  
>>> -   __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
>>> - irq->level, irq->trig_mode, dest_map);
>>> +   return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
>>> +   irq->level, irq->trig_mode, dest_map);
>>>  }
>>>  
>>>  static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
>>> @@ -608,8 +608,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, 
>>> struct kvm_lapic *src,
>>> *r = -1;
>>>  
>>> if (irq->shorthand == APIC_DEST_SELF) {
>>> -   kvm_apic_set_irq(src->vcpu, irq, dest_map);
>>> -   *r = 1;
>>> +   *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
>>> return true;
>>> }
>>>  
>>> @@ -654,8 +653,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, 
>>> struct kvm_lapic *src,
>>> continue;
>>> if (*r < 0)
>>> *r = 0;
>>> -   kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
>>> -   *r += 1;
>>> +   *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
>>> }
>>>  
>>> ret = true;
>>> @@ -664,11 +662,15 @@ out:
>>> return ret;
>>>  }
>>>  
>>> -/* Set an IRQ pending in the lapic. */
>>> -static void __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
>>> - int vector, int level, int trig_mode,
>>> - unsigned long *dest_map)
>>> +/*
>>> + * Add a pending IRQ into lapic.
>>> + * Return 1 if successfully added and 0 if discarded.
>>> + */
>>> +static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
>>> +int vector, int level, int trig_mode,
>>> +unsigned long *dest_map)
>>>  {
>>> +   int result = 0;
>>> struct kvm_vcpu *vcpu = apic->vcpu;
>>>  
>>> switch (delivery_mode) {
>>> @@ -682,10 +684,13 @@ static void __apic_accept_irq(struct kvm_lapic *apic, 
>>> int delivery_mode,
>>> if (dest_map)
>>> __set_bit(vcpu->vcpu_id, dest_map);
>>>  
>>> -   if (kvm_x86_ops->deliver_posted_interrupt)
>>> +   if (kvm_x86_ops->deliver_posted_interrupt) {
>>> +   result = 1;
>>> kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
>>> -   else {
>>> -   if (apic_test_and_set_irr(vector, apic)) {
>>> +   } else {
>>> +   result = !apic_test_and_set_irr(vector, apic);
>>
>> This part of the revert makes no sense. If deliver_posted_interrupt is
>> on, we don't have this feedback anymore, thus we decided to remove it, no?
>>
> Agreed, but I wanted to do a clean revert and fix it on top.

Fine with me, let's write a separate fix.

Jan






Re: [PATCH] KVM: Fix RTC interrupt coalescing tracking

2013-06-25 Thread Gleb Natapov
On Wed, Jun 26, 2013 at 07:49:37AM +0200, Jan Kiszka wrote:
> On 2013-06-24 14:19, Gleb Natapov wrote:
> > This reverts most of the f1ed0450a5fac7067590317cbf027f566b6ccbca. After
> > the commit kvm_apic_set_irq() no longer returns accurate information
> > about interrupt injection status if injection is done into disabled
> > APIC. RTC interrupt coalescing tracking relies on the information to be
> > accurate and cannot recover if it is not.
> > 
> > Signed-off-by: Gleb Natapov 
> > diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> > index 9d75193..9f4bea8 100644
> > --- a/arch/x86/kvm/lapic.c
> > +++ b/arch/x86/kvm/lapic.c
> > @@ -405,17 +405,17 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
> > return highest_irr;
> >  }
> >  
> > -static void __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
> > - int vector, int level, int trig_mode,
> > - unsigned long *dest_map);
> > +static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
> > +int vector, int level, int trig_mode,
> > +unsigned long *dest_map);
> 
> I still think __apic_accept_irq should unconditionally inject, and the
> test for apic_enabled belongs into kvm_apic_set_irq. Why should
> __apic_accept_irq accept non-APIC_DM_FIXED messages if the APIC is off?
> See below for another reason to refactor this part of the interface.
> 
10.4.7.2 Local APIC State After It Has Been Software Disabled

The local APIC will respond normally to INIT, NMI, SMI, and SIPI
messages.

> >  
> > -void kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
> > - unsigned long *dest_map)
> > +int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
> > +   unsigned long *dest_map)
> >  {
> > struct kvm_lapic *apic = vcpu->arch.apic;
> >  
> > -   __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
> > - irq->level, irq->trig_mode, dest_map);
> > +   return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
> > +   irq->level, irq->trig_mode, dest_map);
> >  }
> >  
> >  static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
> > @@ -608,8 +608,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, 
> > struct kvm_lapic *src,
> > *r = -1;
> >  
> > if (irq->shorthand == APIC_DEST_SELF) {
> > -   kvm_apic_set_irq(src->vcpu, irq, dest_map);
> > -   *r = 1;
> > +   *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
> > return true;
> > }
> >  
> > @@ -654,8 +653,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, 
> > struct kvm_lapic *src,
> > continue;
> > if (*r < 0)
> > *r = 0;
> > -   kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
> > -   *r += 1;
> > +   *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
> > }
> >  
> > ret = true;
> > @@ -664,11 +662,15 @@ out:
> > return ret;
> >  }
> >  
> > -/* Set an IRQ pending in the lapic. */
> > -static void __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
> > - int vector, int level, int trig_mode,
> > - unsigned long *dest_map)
> > +/*
> > + * Add a pending IRQ into lapic.
> > + * Return 1 if successfully added and 0 if discarded.
> > + */
> > +static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
> > +int vector, int level, int trig_mode,
> > +unsigned long *dest_map)
> >  {
> > +   int result = 0;
> > struct kvm_vcpu *vcpu = apic->vcpu;
> >  
> > switch (delivery_mode) {
> > @@ -682,10 +684,13 @@ static void __apic_accept_irq(struct kvm_lapic *apic, 
> > int delivery_mode,
> > if (dest_map)
> > __set_bit(vcpu->vcpu_id, dest_map);
> >  
> > -   if (kvm_x86_ops->deliver_posted_interrupt)
> > +   if (kvm_x86_ops->deliver_posted_interrupt) {
> > +   result = 1;
> > kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
> > -   else {
> > -   if (apic_test_and_set_irr(vector, apic)) {
> > +   } else {
> > +   result = !apic_test_and_set_irr(vector, apic);
> 
> This part of the revert makes no sense. If deliver_posted_interrupt is
> on, we don't have this feedback anymore, thus we decided to remove it, no?
> 
Agreed, but I wanted to do a clean revert and fix it on top.

> Jan
> 
> > +
> > +   if (!result) {
> > if (trig_mode)
> > apic_debug("level trig mode repeatedly "
> > "for vector %d", vector);
> > @@ -697,7 +702,7 @@ static void __apic_accept_irq(struct kvm_lapic *apic, 
> > int delivery_mode,
> > }
> >  out:
> > trace_kvm_apic_accept_irq(v

[PATCH][kvm-unit-test] Keep gui off when running test cases

2013-06-25 Thread Jan Kiszka
From: Jan Kiszka 

Signed-off-by: Jan Kiszka 
---
 x86-run |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/x86-run b/x86-run
index 14ff331..646c577 100755
--- a/x86-run
+++ b/x86-run
@@ -33,7 +33,7 @@ else
pc_testdev="-device testdev,chardev=testlog -chardev 
file,id=testlog,path=msr.out"
 fi
 
-command="${qemu} -enable-kvm $pc_testdev -serial stdio $pci_testdev -kernel"
+command="${qemu} -enable-kvm $pc_testdev -display none -serial stdio 
$pci_testdev -kernel"
 echo ${command} "$@"
 ${command} "$@"
 ret=$?
-- 
1.7.3.4


Re: [PATCH] KVM: Fix RTC interrupt coalescing tracking

2013-06-25 Thread Jan Kiszka
On 2013-06-24 14:19, Gleb Natapov wrote:
> This reverts most of the f1ed0450a5fac7067590317cbf027f566b6ccbca. After
> the commit kvm_apic_set_irq() no longer returns accurate information
> about interrupt injection status if injection is done into disabled
> APIC. RTC interrupt coalescing tracking relies on the information to be
> accurate and cannot recover if it is not.
> 
> Signed-off-by: Gleb Natapov 
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index 9d75193..9f4bea8 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -405,17 +405,17 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
>   return highest_irr;
>  }
>  
> -static void __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
> -   int vector, int level, int trig_mode,
> -   unsigned long *dest_map);
> +static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
> +  int vector, int level, int trig_mode,
> +  unsigned long *dest_map);

I still think __apic_accept_irq should unconditionally inject, and the
test for apic_enabled belongs into kvm_apic_set_irq. Why should
__apic_accept_irq accept non-APIC_DM_FIXED messages if the APIC is off?
See below for another reason to refactor this part of the interface.

>  
> -void kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
> -   unsigned long *dest_map)
> +int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
> + unsigned long *dest_map)
>  {
>   struct kvm_lapic *apic = vcpu->arch.apic;
>  
> - __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
> -   irq->level, irq->trig_mode, dest_map);
> + return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
> + irq->level, irq->trig_mode, dest_map);
>  }
>  
>  static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
> @@ -608,8 +608,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, 
> struct kvm_lapic *src,
>   *r = -1;
>  
>   if (irq->shorthand == APIC_DEST_SELF) {
> - kvm_apic_set_irq(src->vcpu, irq, dest_map);
> - *r = 1;
> + *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
>   return true;
>   }
>  
> @@ -654,8 +653,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, 
> struct kvm_lapic *src,
>   continue;
>   if (*r < 0)
>   *r = 0;
> - kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
> - *r += 1;
> + *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
>   }
>  
>   ret = true;
> @@ -664,11 +662,15 @@ out:
>   return ret;
>  }
>  
> -/* Set an IRQ pending in the lapic. */
> -static void __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
> -   int vector, int level, int trig_mode,
> -   unsigned long *dest_map)
> +/*
> + * Add a pending IRQ into lapic.
> + * Return 1 if successfully added and 0 if discarded.
> + */
> +static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
> +  int vector, int level, int trig_mode,
> +  unsigned long *dest_map)
>  {
> + int result = 0;
>   struct kvm_vcpu *vcpu = apic->vcpu;
>  
>   switch (delivery_mode) {
> @@ -682,10 +684,13 @@ static void __apic_accept_irq(struct kvm_lapic *apic, 
> int delivery_mode,
>   if (dest_map)
>   __set_bit(vcpu->vcpu_id, dest_map);
>  
> - if (kvm_x86_ops->deliver_posted_interrupt)
> + if (kvm_x86_ops->deliver_posted_interrupt) {
> + result = 1;
>   kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
> - else {
> - if (apic_test_and_set_irr(vector, apic)) {
> + } else {
> + result = !apic_test_and_set_irr(vector, apic);

This part of the revert makes no sense. If deliver_posted_interrupt is
on, we don't have this feedback anymore, thus we decided to remove it, no?

Jan

> +
> + if (!result) {
>   if (trig_mode)
>   apic_debug("level trig mode repeatedly "
>   "for vector %d", vector);
> @@ -697,7 +702,7 @@ static void __apic_accept_irq(struct kvm_lapic *apic, int 
> delivery_mode,
>   }
>  out:
>   trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
> -   trig_mode, vector, false);
> + trig_mode, vector, !result);
>   break;
>  
>   case APIC_DM_REMRD:
> @@ -709,12 +714,14 @@ out:
>   break;
>  
>   case APIC_DM_NMI:
> + result = 1;
>   kvm_inject_nmi(vcpu);
>   kvm

[PATCH 1/6 v5] powerpc: remove unnecessary line continuations

2013-06-25 Thread Bharat Bhushan
Signed-off-by: Bharat Bhushan 
---
 arch/powerpc/kernel/process.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ceb4e7b..639a8de 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -325,7 +325,7 @@ static void set_debug_reg_defaults(struct thread_struct 
*thread)
/*
 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
 */
-   thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |   \
+   thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
DBCR1_IAC3US | DBCR1_IAC4US;
/*
 * Force Data Address Compare User/Supervisor bits to be User-only
-- 
1.7.0.4




[PATCH 3/6 v5] powerpc: export debug registers save function for KVM

2013-06-25 Thread Bharat Bhushan
KVM needs this function when switching from the vcpu to the user-space
thread. A subsequent patch will use this function.

Signed-off-by: Bharat Bhushan 
---
 arch/powerpc/include/asm/switch_to.h |4 
 arch/powerpc/kernel/process.c|3 ++-
 2 files changed, 6 insertions(+), 1 deletions(-)

diff --git a/arch/powerpc/include/asm/switch_to.h 
b/arch/powerpc/include/asm/switch_to.h
index 200d763..50b357f 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -30,6 +30,10 @@ extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+#endif
+
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
 #else
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 01ff496..da586aa 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -362,12 +362,13 @@ static void prime_debug_regs(struct thread_struct *thread)
  * debug registers, set the debug registers from the values
  * stored in the new thread.
  */
-static void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct thread_struct *new_thread)
 {
if ((current->thread.debug.dbcr0 & DBCR0_IDM)
|| (new_thread->debug.dbcr0 & DBCR0_IDM))
prime_debug_regs(new_thread);
 }
+EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else  /* !CONFIG_PPC_ADV_DEBUG_REGS */
 #ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
-- 
1.7.0.4




[PATCH 5/6 v5] KVM: PPC: Using "struct debug_reg"

2013-06-25 Thread Bharat Bhushan
For KVM, also use the "struct debug_reg" defined in asm/processor.h.

Signed-off-by: Bharat Bhushan 
---
 arch/powerpc/include/asm/kvm_host.h |   13 +
 arch/powerpc/kvm/booke.c|   34 --
 2 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index af326cd..838a577 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -381,17 +381,6 @@ struct kvmppc_slb {
#define KVMPPC_EPR_USER    1 /* exit to userspace to fill EPR */
 #define KVMPPC_EPR_KERNEL  2 /* in-kernel irqchip */
 
-struct kvmppc_booke_debug_reg {
-   u32 dbcr0;
-   u32 dbcr1;
-   u32 dbcr2;
-#ifdef CONFIG_KVM_E500MC
-   u32 dbcr4;
-#endif
-   u64 iac[KVMPPC_BOOKE_MAX_IAC];
-   u64 dac[KVMPPC_BOOKE_MAX_DAC];
-};
-
 #define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC    1
#define KVMPPC_IRQ_XICS    2
@@ -535,7 +524,7 @@ struct kvm_vcpu_arch {
u32 eptcfg;
u32 epr;
u32 crit_save;
-   struct kvmppc_booke_debug_reg dbg_reg;
+   struct debug_reg dbg_reg;
 #endif
gpa_t paddr_accessed;
gva_t vaddr_accessed;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 62d4ece..3e9fc1d 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1424,7 +1424,6 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
int r = 0;
union kvmppc_one_reg val;
int size;
-   long int i;
 
size = one_reg_size(reg->id);
if (size > sizeof(val))
@@ -1432,16 +1431,24 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
 
switch (reg->id) {
case KVM_REG_PPC_IAC1:
+   val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
+   break;
case KVM_REG_PPC_IAC2:
+   val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
+   break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case KVM_REG_PPC_IAC3:
+   val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
+   break;
case KVM_REG_PPC_IAC4:
-   i = reg->id - KVM_REG_PPC_IAC1;
-   val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac[i]);
+   val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
break;
+#endif
case KVM_REG_PPC_DAC1:
+   val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
+   break;
case KVM_REG_PPC_DAC2:
-   i = reg->id - KVM_REG_PPC_DAC1;
-   val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac[i]);
+   val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
break;
case KVM_REG_PPC_EPR: {
u32 epr = get_guest_epr(vcpu);
@@ -1481,7 +1488,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
int r = 0;
union kvmppc_one_reg val;
int size;
-   long int i;
 
size = one_reg_size(reg->id);
if (size > sizeof(val))
@@ -1492,16 +1498,24 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, 
struct kvm_one_reg *reg)
 
switch (reg->id) {
case KVM_REG_PPC_IAC1:
+   vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
+   break;
case KVM_REG_PPC_IAC2:
+   vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
+   break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case KVM_REG_PPC_IAC3:
+   vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
+   break;
case KVM_REG_PPC_IAC4:
-   i = reg->id - KVM_REG_PPC_IAC1;
-   vcpu->arch.dbg_reg.iac[i] = set_reg_val(reg->id, val);
+   vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
break;
+#endif
case KVM_REG_PPC_DAC1:
+   vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
+   break;
case KVM_REG_PPC_DAC2:
-   i = reg->id - KVM_REG_PPC_DAC1;
-   vcpu->arch.dbg_reg.dac[i] = set_reg_val(reg->id, val);
+   vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_EPR: {
u32 new_epr = set_reg_val(reg->id, val);
-- 
1.7.0.4




[PATCH 4/6 v5] KVM: PPC: exit to user space on "ehpriv" instruction

2013-06-25 Thread Bharat Bhushan
"ehpriv" instruction is used for setting software breakpoints
by user space. This patch adds support to exit to user space
with "run->debug" have relevant information.

As this is the first point we are using run->debug, also defined
the run->debug structure.

Signed-off-by: Bharat Bhushan 
---
 arch/powerpc/include/asm/disassemble.h |4 
 arch/powerpc/include/uapi/asm/kvm.h|   21 +
 arch/powerpc/kvm/e500_emulate.c|   27 +++
 3 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/disassemble.h 
b/arch/powerpc/include/asm/disassemble.h
index 9b198d1..856f8de 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -77,4 +77,8 @@ static inline unsigned int get_d(u32 inst)
return inst & 0xffff;
 }
 
+static inline unsigned int get_oc(u32 inst)
+{
+   return (inst >> 11) & 0x7fff;
+}
 #endif /* __ASM_PPC_DISASSEMBLE_H__ */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index 0fb1a6e..ded0607 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -269,7 +269,24 @@ struct kvm_fpu {
__u64 fpr[32];
 };
 
+/*
+ * Defines for h/w breakpoint, watchpoint (read, write or both) and
+ * software breakpoint.
+ * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
+ * for KVM_DEBUG_EXIT.
+ */
+#define KVMPPC_DEBUG_NONE  0x0
+#define KVMPPC_DEBUG_BREAKPOINT    (1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE   (1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ    (1UL << 3)
 struct kvm_debug_exit_arch {
+   __u64 address;
+   /*
+* exiting to userspace because of h/w breakpoint, watchpoint
+* (read, write or both) and software breakpoint.
+*/
+   __u32 status;
+   __u32 reserved;
 };
 
 /* for KVM_SET_GUEST_DEBUG */
@@ -281,10 +298,6 @@ struct kvm_guest_debug_arch {
 * Type denotes h/w breakpoint, read watchpoint, write
 * watchpoint or watchpoint (both read and write).
 */
-#define KVMPPC_DEBUG_NONE  0x0
-#define KVMPPC_DEBUG_BREAKPOINT    (1UL << 1)
-#define KVMPPC_DEBUG_WATCH_WRITE   (1UL << 2)
-#define KVMPPC_DEBUG_WATCH_READ    (1UL << 3)
__u32 type;
__u32 reserved;
} bp[16];
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index b10a012..dab9d07 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -26,6 +26,8 @@
 #define XOP_TLBRE   946
 #define XOP_TLBWE   978
 #define XOP_TLBILX  18
+#define XOP_EHPRIV  270
+#define EHPRIV_OC_DEBUG 0
 
 #ifdef CONFIG_KVM_E500MC
 static int dbell2prio(ulong param)
@@ -82,6 +84,26 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, 
int rb)
 }
 #endif
 
+static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+  unsigned int inst, int *advance)
+{
+   int emulated = EMULATE_DONE;
+
+   switch (get_oc(inst)) {
+   case EHPRIV_OC_DEBUG:
+   run->exit_reason = KVM_EXIT_DEBUG;
+   run->debug.arch.address = vcpu->arch.pc;
+   run->debug.arch.status = 0;
+   kvmppc_account_exit(vcpu, DEBUG_EXITS);
+   emulated = EMULATE_EXIT_USER;
+   *advance = 0;
+   break;
+   default:
+   emulated = EMULATE_FAIL;
+   }
+   return emulated;
+}
+
 int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
 {
@@ -130,6 +152,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
break;
 
+   case XOP_EHPRIV:
+   emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
+  advance);
+   break;
+
default:
emulated = EMULATE_FAIL;
}
-- 
1.7.0.4
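
For reference, a minimal sketch of how a user-space debug stub might consume
this exit; the run->debug member names follow the uapi header above, while the
surrounding logic and helper names are illustrative only:

        /* after KVM_RUN returns */
        if (run->exit_reason == KVM_EXIT_DEBUG) {
                __u64 pc     = run->debug.arch.address;
                __u32 status = run->debug.arch.status;

                if (status == KVMPPC_DEBUG_NONE) {
                        /* software breakpoint via "ehpriv": the patch sets
                         * status to 0 and address to the trapping PC */
                        handle_sw_breakpoint(pc);        /* hypothetical helper */
                } else if (status & KVMPPC_DEBUG_BREAKPOINT) {
                        handle_hw_breakpoint(pc);        /* hypothetical helper */
                } else if (status & (KVMPPC_DEBUG_WATCH_READ |
                                     KVMPPC_DEBUG_WATCH_WRITE)) {
                        handle_watchpoint(pc, status);   /* hypothetical helper */
                }
        }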




[PATCH 6/6 v5] KVM: PPC: Add userspace debug stub support

2013-06-25 Thread Bharat Bhushan
This patch adds debug stub support on booke/bookehv.
The QEMU debug stub can now use hardware breakpoints, watchpoints and
software breakpoints to debug the guest.

This is how we save/restore debug register context when switching
between guest, userspace and kernel user-process:

When QEMU is running
 -> thread->debug_reg == QEMU debug register context.
 -> Kernel will handle switching the debug register on context switch.
 -> no vcpu_load() called

QEMU makes ioctls (except RUN)
 -> This will call vcpu_load()
 -> should not change context.
 -> Some ioctls can change vcpu debug register, context saved in 
vcpu->debug_regs

QEMU Makes RUN ioctl
 -> Save thread->debug_reg on STACK
 -> Store thread->debug_reg == vcpu->debug_reg
 -> load thread->debug_reg
 -> RUN VCPU ( So thread points to vcpu context )

Context switch happens while VCPU is running
 -> vcpu_load() should not load any debug context
 -> kernel loads the vcpu context, as thread->debug_regs points to the vcpu context.

On heavyweight_exit
 -> Load the context saved on stack in thread->debug_reg
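
In code, the RUN path above is roughly the following. The save half matches
the booke.c hunk quoted below; the restore half is cut off in the quoted diff,
so that part is my reconstruction and only an assumption:

        /* kvmppc_vcpu_run(), simplified sketch */
        struct thread_struct thread;

        /* switch to the guest debug context */
        thread.debug = vcpu->arch.shadow_dbg_reg;       /* stage the vcpu context   */
        switch_booke_debug_regs(&thread);               /* load it into the hardware */
        thread.debug = current->thread.debug;           /* save QEMU's context       */
        current->thread.debug = vcpu->arch.shadow_dbg_reg;

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

        /* switch back to the user-space (QEMU) debug context */
        switch_booke_debug_regs(&thread);
        current->thread.debug = thread.debug;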

Currently we do not support debug resource emulation for the guest.
On a debug exception we always exit to user space, irrespective of
whether user space is expecting the debug exception or not. If this is
an unexpected exception (a breakpoint/watchpoint event not set by
userspace), then we leave the action up to user space. This is similar
to the behaviour before this patch; the only difference is that now the
proper exit state is available to user space.

Signed-off-by: Bharat Bhushan 
---
 arch/powerpc/include/asm/kvm_host.h |3 +
 arch/powerpc/include/uapi/asm/kvm.h |1 +
 arch/powerpc/kvm/booke.c|  239 ---
 arch/powerpc/kvm/booke.h|5 +
 4 files changed, 230 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 838a577..aeb490d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -524,7 +524,10 @@ struct kvm_vcpu_arch {
u32 eptcfg;
u32 epr;
u32 crit_save;
+   /* guest debug registers*/
struct debug_reg dbg_reg;
+   /* hardware visible debug registers when in guest state */
+   struct debug_reg shadow_dbg_reg;
 #endif
gpa_t paddr_accessed;
gva_t vaddr_accessed;
diff --git a/arch/powerpc/include/uapi/asm/kvm.h 
b/arch/powerpc/include/uapi/asm/kvm.h
index ded0607..f5077c2 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -27,6 +27,7 @@
 #define __KVM_HAVE_PPC_SMT
 #define __KVM_HAVE_IRQCHIP
 #define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_GUEST_DEBUG
 
 struct kvm_regs {
__u64 pc;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 3e9fc1d..8cd8d41 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -133,6 +133,29 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
+{
+   /* Synchronize guest's desire to get debug interrupts into shadow MSR */
+#ifndef CONFIG_KVM_BOOKE_HV
+   vcpu->arch.shadow_msr &= ~MSR_DE;
+   vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
+#endif
+
+   /* Force enable debug interrupts when user space wants to debug */
+   if (vcpu->guest_debug) {
+#ifdef CONFIG_KVM_BOOKE_HV
+   /*
+* Since there is no shadow MSR, sync MSR_DE into the guest
+* visible MSR.
+*/
+   vcpu->arch.shared->msr |= MSR_DE;
+#else
+   vcpu->arch.shadow_msr |= MSR_DE;
+   vcpu->arch.shared->msr &= ~MSR_DE;
+#endif
+   }
+}
+
 /*
  * Helper function for "full" MSR writes.  No need to call this if only
  * EE/CE/ME/DE/RI are changing.
@@ -150,6 +173,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
kvmppc_mmu_msr_notify(vcpu, old_msr);
kvmppc_vcpu_sync_spe(vcpu);
kvmppc_vcpu_sync_fpu(vcpu);
+   kvmppc_vcpu_sync_debug(vcpu);
 }
 
 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
@@ -655,6 +679,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
int ret, s;
+   struct thread_struct thread;
 #ifdef CONFIG_PPC_FPU
unsigned int fpscr;
int fpexc_mode;
@@ -698,12 +723,21 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct 
kvm_vcpu *vcpu)
 
kvmppc_load_guest_fp(vcpu);
 #endif
+   /* Switch to guest debug context */
+   thread.debug = vcpu->arch.shadow_dbg_reg;
+   switch_booke_debug_regs(&thread);
+   thread.debug = current->thread.debug;
+   current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
/* No need for kvm_guest_exit. It's done in handle_exit.
   We also get here with interrupts enabled. */
 
+   /* Switch back to user space debug context */
+

[PATCH 0/6 v5] KVM :PPC: Userspace Debug support

2013-06-25 Thread Bharat Bhushan
From: Bharat Bhushan 

Note: These patches are based on http://github.com/agraf/linux-2.6.git queue

This patchset adds userspace debug support for booke/bookehv.
It has been tested on powerpc e500v2/e500mc devices.

We are now assuming that the debug resources will not be used by the kernel
for its own debugging; they will be used only for debugging kernel user
processes. So the kernel debug-load interface invoked during context switch
is used to load the debug context for the selected process.

v4->v5
 - Some comments reworded and other cleanup (like change of function name etc)
 - Added a function for setting MSRP rather than inline

v3->v4
 - 4 out of 7 patches of the initial patchset were applied.
   This patchset is on top of those 4 patches.
 - KVM local "struct kvmppc_booke_debug_reg" is replaced by
   powerpc global "struct debug_reg"
 - use switch_booke_debug_regs() for debug register context switch.
 - Save DBSR before kernel pre-emption is enabled.
 - Some more cleanup

v2->v3
 - We are now assuming that the debug resources will not be used by
   the kernel for its own debugging; they will be used only for
   debugging kernel user processes.
   So the kernel debug-load interface invoked during context switch
   is used to load the debug context for the selected process.

v1->v2
 - Debug registers are saved/restored in vcpu_put/vcpu_get.
   Earlier, the debug registers were saved/restored on guest entry/exit.

Bharat Bhushan (6):
  powerpc: remove unnecessary line continuations
  powerpc: move debug registers in a structure
  powerpc: export debug registers save function for KVM
  KVM: PPC: exit to user space on "ehpriv" instruction
  KVM: PPC: Using "struct debug_reg"
  KVM: PPC: Add userspace debug stub support

 arch/powerpc/include/asm/disassemble.h |4 +
 arch/powerpc/include/asm/kvm_host.h|   16 +--
 arch/powerpc/include/asm/processor.h   |   38 +++--
 arch/powerpc/include/asm/reg_booke.h   |8 +-
 arch/powerpc/include/asm/switch_to.h   |4 +
 arch/powerpc/include/uapi/asm/kvm.h|   22 ++-
 arch/powerpc/kernel/asm-offsets.c  |2 +-
 arch/powerpc/kernel/process.c  |   45 +++---
 arch/powerpc/kernel/ptrace.c   |  154 +-
 arch/powerpc/kernel/signal_32.c|6 +-
 arch/powerpc/kernel/traps.c|   35 ++--
 arch/powerpc/kvm/booke.c   |  273 
 arch/powerpc/kvm/booke.h   |5 +
 arch/powerpc/kvm/e500_emulate.c|   27 +++
 14 files changed, 455 insertions(+), 184 deletions(-)




[PATCH 2/6 v5] powerpc: move debug registers in a structure

2013-06-25 Thread Bharat Bhushan
This way we can use the same data type/struct with KVM, and it
also helps in using other debug-related functions.

Signed-off-by: Bharat Bhushan 
---
 arch/powerpc/include/asm/processor.h |   38 +
 arch/powerpc/include/asm/reg_booke.h |8 +-
 arch/powerpc/kernel/asm-offsets.c|2 +-
 arch/powerpc/kernel/process.c|   42 +-
 arch/powerpc/kernel/ptrace.c |  154 +-
 arch/powerpc/kernel/signal_32.c  |6 +-
 arch/powerpc/kernel/traps.c  |   35 
 7 files changed, 146 insertions(+), 139 deletions(-)

diff --git a/arch/powerpc/include/asm/processor.h 
b/arch/powerpc/include/asm/processor.h
index d7e67ca..5b8a7f1 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -147,22 +147,7 @@ typedef struct {
 #define TS_FPR(i) fpr[i][TS_FPROFFSET]
 #define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
 
-struct thread_struct {
-   unsigned long   ksp;/* Kernel stack pointer */
-   unsigned long   ksp_limit;  /* if ksp <= ksp_limit stack overflow */
-
-#ifdef CONFIG_PPC64
-   unsigned long   ksp_vsid;
-#endif
-   struct pt_regs  *regs;  /* Pointer to saved register state */
-   mm_segment_tfs; /* for get_fs() validation */
-#ifdef CONFIG_BOOKE
-   /* BookE base exception scratch space; align on cacheline */
-   unsigned long   normsave[8] cacheline_aligned;
-#endif
-#ifdef CONFIG_PPC32
-   void*pgdir; /* root of page-table tree */
-#endif
+struct debug_reg {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * The following help to manage the use of Debug Control Registers
@@ -199,6 +184,27 @@ struct thread_struct {
unsigned long   dvc2;
 #endif
 #endif
+};
+
+struct thread_struct {
+   unsigned long   ksp;/* Kernel stack pointer */
+   unsigned long   ksp_limit;  /* if ksp <= ksp_limit stack overflow */
+
+#ifdef CONFIG_PPC64
+   unsigned long   ksp_vsid;
+#endif
+   struct pt_regs  *regs;  /* Pointer to saved register state */
+   mm_segment_tfs; /* for get_fs() validation */
+#ifdef CONFIG_BOOKE
+   /* BookE base exception scratch space; align on cacheline */
+   unsigned long   normsave[8] cacheline_aligned;
+#endif
+#ifdef CONFIG_PPC32
+   void*pgdir; /* root of page-table tree */
+#endif
+   /* Debug Registers */
+   struct debug_reg debug;
+
/* FP and VSX 0-31 register set */
double  fpr[32][TS_FPRWIDTH];
struct {
diff --git a/arch/powerpc/include/asm/reg_booke.h 
b/arch/powerpc/include/asm/reg_booke.h
index b417de3..455dc89 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -381,7 +381,7 @@
#define DBCR0_IA34T    0x4000  /* Instr Addr 3-4 range Toggle */
 #define DBCR0_FT   0x0001  /* Freeze Timers on debug event */
 
-#define dbcr_iac_range(task)   ((task)->thread.dbcr0)
+#define dbcr_iac_range(task)   ((task)->thread.debug.dbcr0)
#define DBCR_IAC12I    DBCR0_IA12  /* Range Inclusive */
#define DBCR_IAC12X    (DBCR0_IA12 | DBCR0_IA12X)  /* Range Exclusive */
 #define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X)  /* IAC 1-2 Mode Bits */
@@ -395,7 +395,7 @@
#define DBCR1_DAC1W    0x20000000  /* DAC1 Write Debug Event */
#define DBCR1_DAC2W    0x10000000  /* DAC2 Write Debug Event */
 
-#define dbcr_dac(task) ((task)->thread.dbcr1)
+#define dbcr_dac(task) ((task)->thread.debug.dbcr1)
 #define DBCR_DAC1R DBCR1_DAC1R
 #define DBCR_DAC1W DBCR1_DAC1W
 #define DBCR_DAC2R DBCR1_DAC2R
@@ -441,7 +441,7 @@
 #define DBCR0_CRET 0x0020  /* Critical Return Debug Event */
 #define DBCR0_FT   0x0001  /* Freeze Timers on debug event */
 
-#define dbcr_dac(task) ((task)->thread.dbcr0)
+#define dbcr_dac(task) ((task)->thread.debug.dbcr0)
 #define DBCR_DAC1R DBCR0_DAC1R
 #define DBCR_DAC1W DBCR0_DAC1W
 #define DBCR_DAC2R DBCR0_DAC2R
@@ -475,7 +475,7 @@
 #define DBCR1_IAC34MX  0x00C0  /* Instr Addr 3-4 range eXclusive */
 #define DBCR1_IAC34AT  0x0001  /* Instr Addr 3-4 range Toggle */
 
-#define dbcr_iac_range(task)   ((task)->thread.dbcr1)
+#define dbcr_iac_range(task)   ((task)->thread.debug.dbcr1)
#define DBCR_IAC12I    DBCR1_IAC12M    /* Range Inclusive */
#define DBCR_IAC12X    DBCR1_IAC12MX   /* Range Exclusive */
 #define DBCR_IAC12MODE DBCR1_IAC12MX   /* IAC 1-2 Mode Bits */
diff --git a/arch/powerpc/kernel/asm-offsets.c 
b/arch/powerpc/kernel/asm-offsets.c
index b51a97c..c241c60 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -106,7 +106,7 @@ int main(void)
 #else /* CONFIG_PPC64 */
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-   DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)

Re: [PATCH RFC] pci: ACS quirk for AMD southbridge

2013-06-25 Thread Bjorn Helgaas
[fix Joerg's email address]

On Tue, Jun 25, 2013 at 10:15 PM, Bjorn Helgaas  wrote:
> On Wed, Jul 11, 2012 at 11:18 PM, Alex Williamson
>  wrote:
>> We've confirmed that peer-to-peer between these devices is
>> not possible.  We can therefore claim that they support a
>> subset of ACS.
>>
>> Signed-off-by: Alex Williamson 
>> Cc: Joerg Roedel 
>> ---
>>
>> Two things about this patch make me a little nervous.  The
>> first is that I'd really like to have a pci_is_pcie() test
>> in pci_mf_no_p2p_acs_enabled(), but these devices don't
>> have a PCIe capability.  That means that if there was a
>> topology where these devices sit on a legacy PCI bus,
>> we incorrectly return that we're ACS safe here.  That leads
>> to my second problem: pciids seems to suggest that some of
>> these functions have been around for a while.  Is it just
>> this package that's peer-to-peer safe, or is it safe to
>> assume that any previous assembly of these functions is
>> also p2p safe?  Maybe we need to factor in device revs if
>> that uniquely identifies this package?
>>
>> Looks like another useful device to potentially quirk
>> would be:
>>
>> 00:15.0 PCI bridge: Advanced Micro Devices [AMD] nee ATI SB700/SB800/SB900 
>> PCI to PCI bridge (PCIE port 0)
>> 00:15.1 PCI bridge: Advanced Micro Devices [AMD] nee ATI SB700/SB800/SB900 
>> PCI to PCI bridge (PCIE port 1)
>> 00:15.2 PCI bridge: Advanced Micro Devices [AMD] nee ATI SB900 PCI to PCI 
>> bridge (PCIE port 2)
>> 00:15.3 PCI bridge: Advanced Micro Devices [AMD] nee ATI SB900 PCI to PCI 
>> bridge (PCIE port 3)
>>
>> 00:15.0 0604: 1002:43a0
>> 00:15.1 0604: 1002:43a1
>> 00:15.2 0604: 1002:43a2
>> 00:15.3 0604: 1002:43a3
>>
>>  drivers/pci/quirks.c |   29 +
>>  1 file changed, 29 insertions(+)
>>
>> diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
>> index 4ebc865..2c84961 100644
>> --- a/drivers/pci/quirks.c
>> +++ b/drivers/pci/quirks.c
>> @@ -3271,11 +3271,40 @@ struct pci_dev *pci_get_dma_source(struct pci_dev 
>> *dev)
>> return pci_dev_get(dev);
>>  }
>>
>> +/*
>> + * Multifunction devices that do not support peer-to-peer between
>> + * functions can claim to support a subset of ACS.  Such devices
>> + * effectively enable request redirect (RR) and completion redirect (CR)
>> + * since all transactions are redirected to the upstream root complex.
>> + */
>> +static int pci_mf_no_p2p_acs_enabled(struct pci_dev *dev, u16 acs_flags)
>> +{
>> +   if (!dev->multifunction)
>> +   return -ENODEV;
>> +
>> +   /* Filter out flags not applicable to multifunction */
>> +   acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
>> +
>> +   return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
>> +}
>> +
>>  static const struct pci_dev_acs_enabled {
>> u16 vendor;
>> u16 device;
>> int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
>>  } pci_dev_acs_enabled[] = {
>> +   /*
>> +* AMD/ATI multifunction southbridge devices.  AMD has confirmed
>> +* that peer-to-peer between these devices is not possible, so
>> +* they do support a subset of ACS even though the capability is
>> +* not exposed in config space.
>> +*/
>> +   { PCI_VENDOR_ID_ATI, 0x4385, pci_mf_no_p2p_acs_enabled },
>> +   { PCI_VENDOR_ID_ATI, 0x439c, pci_mf_no_p2p_acs_enabled },
>> +   { PCI_VENDOR_ID_ATI, 0x4383, pci_mf_no_p2p_acs_enabled },
>> +   { PCI_VENDOR_ID_ATI, 0x439d, pci_mf_no_p2p_acs_enabled },
>> +   { PCI_VENDOR_ID_ATI, 0x4384, pci_mf_no_p2p_acs_enabled },
>> +   { PCI_VENDOR_ID_ATI, 0x4399, pci_mf_no_p2p_acs_enabled },
>> { 0 }
>>  };
>>
>>
>
> I was looking for something else and found this old email.  This patch
> hasn't been applied and I haven't seen any discussion about it.  Is it
> still of interest?  It seems relevant to the current ACS discussion
> [1].
>
> If it's relevant, what's the topology?  Apparently they don't have a
> PCIe capability.  Is the upstream device a PCIe device (a downstream
> port or a root port)?  I assume anything downstream from these AMD
> devices (0x4385, 0x439c, etc.) is plain PCI (not PCIe)?
>
> Bjorn
>
> [1] https://lkml.kernel.org/r/20130607163441.7733.23221.st...@bling.home


Re: [PATCH RFC] pci: ACS quirk for AMD southbridge

2013-06-25 Thread Bjorn Helgaas
On Wed, Jul 11, 2012 at 11:18 PM, Alex Williamson
 wrote:
> We've confirmed that peer-to-peer between these devices is
> not possible.  We can therefore claim that they support a
> subset of ACS.
>
> Signed-off-by: Alex Williamson 
> Cc: Joerg Roedel 
> ---
>
> Two things about this patch make me a little nervous.  The
> first is that I'd really like to have a pci_is_pcie() test
> in pci_mf_no_p2p_acs_enabled(), but these devices don't
> have a PCIe capability.  That means that if there was a
> topology where these devices sit on a legacy PCI bus,
> we incorrectly return that we're ACS safe here.  That leads
> to my second problem: pciids seems to suggest that some of
> these functions have been around for a while.  Is it just
> this package that's peer-to-peer safe, or is it safe to
> assume that any previous assembly of these functions is
> also p2p safe?  Maybe we need to factor in device revs if
> that uniquely identifies this package?
>
> Looks like another useful device to potentially quirk
> would be:
>
> 00:15.0 PCI bridge: Advanced Micro Devices [AMD] nee ATI SB700/SB800/SB900 
> PCI to PCI bridge (PCIE port 0)
> 00:15.1 PCI bridge: Advanced Micro Devices [AMD] nee ATI SB700/SB800/SB900 
> PCI to PCI bridge (PCIE port 1)
> 00:15.2 PCI bridge: Advanced Micro Devices [AMD] nee ATI SB900 PCI to PCI 
> bridge (PCIE port 2)
> 00:15.3 PCI bridge: Advanced Micro Devices [AMD] nee ATI SB900 PCI to PCI 
> bridge (PCIE port 3)
>
> 00:15.0 0604: 1002:43a0
> 00:15.1 0604: 1002:43a1
> 00:15.2 0604: 1002:43a2
> 00:15.3 0604: 1002:43a3
>
>  drivers/pci/quirks.c |   29 +
>  1 file changed, 29 insertions(+)
>
> diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
> index 4ebc865..2c84961 100644
> --- a/drivers/pci/quirks.c
> +++ b/drivers/pci/quirks.c
> @@ -3271,11 +3271,40 @@ struct pci_dev *pci_get_dma_source(struct pci_dev 
> *dev)
> return pci_dev_get(dev);
>  }
>
> +/*
> + * Multifunction devices that do not support peer-to-peer between
> + * functions can claim to support a subset of ACS.  Such devices
> + * effectively enable request redirect (RR) and completion redirect (CR)
> + * since all transactions are redirected to the upstream root complex.
> + */
> +static int pci_mf_no_p2p_acs_enabled(struct pci_dev *dev, u16 acs_flags)
> +{
> +   if (!dev->multifunction)
> +   return -ENODEV;
> +
> +   /* Filter out flags not applicable to multifunction */
> +   acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
> +
> +   return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
> +}
> +
>  static const struct pci_dev_acs_enabled {
> u16 vendor;
> u16 device;
> int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
>  } pci_dev_acs_enabled[] = {
> +   /*
> +* AMD/ATI multifunction southbridge devices.  AMD has confirmed
> +* that peer-to-peer between these devices is not possible, so
> +* they do support a subset of ACS even though the capability is
> +* not exposed in config space.
> +*/
> +   { PCI_VENDOR_ID_ATI, 0x4385, pci_mf_no_p2p_acs_enabled },
> +   { PCI_VENDOR_ID_ATI, 0x439c, pci_mf_no_p2p_acs_enabled },
> +   { PCI_VENDOR_ID_ATI, 0x4383, pci_mf_no_p2p_acs_enabled },
> +   { PCI_VENDOR_ID_ATI, 0x439d, pci_mf_no_p2p_acs_enabled },
> +   { PCI_VENDOR_ID_ATI, 0x4384, pci_mf_no_p2p_acs_enabled },
> +   { PCI_VENDOR_ID_ATI, 0x4399, pci_mf_no_p2p_acs_enabled },
> { 0 }
>  };
>
>

I was looking for something else and found this old email.  This patch
hasn't been applied and I haven't seen any discussion about it.  Is it
still of interest?  It seems relevant to the current ACS discussion
[1].

If it's relevant, what's the topology?  Apparently they don't have a
PCIe capability.  Is the upstream device a PCIe device (a downstream
port or a root port)?  I assume anything downstream from these AMD
devices (0x4385, 0x439c, etc.) is plain PCI (not PCIe)?

Bjorn

[1] https://lkml.kernel.org/r/20130607163441.7733.23221.st...@bling.home


Re: [PATCHv2] vhost-net: fix use-after-free in vhost_net_flush

2013-06-25 Thread David Miller
From: "Michael S. Tsirkin" 
Date: Tue, 25 Jun 2013 17:29:46 +0300

> vhost_net_ubuf_put_and_wait has a confusing name:
> it will actually also free its argument.
> Thus, since commit 1280c27f8e29acf4af2da914e80ec27c3dbd5c01
> "vhost-net: flush outstanding DMAs on memory change",
> vhost_net_flush tries to use the argument after passing it
> to vhost_net_ubuf_put_and_wait, which results
> in a use after free.
> To fix, don't free the argument in vhost_net_ubuf_put_and_wait;
> add a new API for callers that want to free ubufs.
> 
> Acked-by: Asias He 
> Acked-by: Jason Wang 
> Signed-off-by: Michael S. Tsirkin 

This doesn't apply cleanly to the 'net' tree, please fix this up
and resubmit.

Thanks.
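
For context, a minimal sketch of the split the quoted commit message describes.
The _wait_and_free name and the bodies are an assumption; the real helpers live
in drivers/vhost/net.c and may differ in detail:

        /* waits for outstanding ubufs, but no longer frees its argument */
        static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
        {
                vhost_net_ubuf_put(ubufs);
                wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
        }

        /* new helper for callers that really are done with the ubufs */
        static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
        {
                vhost_net_ubuf_put_and_wait(ubufs);
                kfree(ubufs);
        }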


[PATCH-next] kvm: don't try to take mmu_lock while holding the main raw kvm_lock

2013-06-25 Thread Paul Gortmaker
In commit e935b8372cf8 ("KVM: Convert kvm_lock to raw_spinlock"),
the kvm_lock was made a raw lock.  However, the kvm mmu_shrink()
function tries to grab the (non-raw) mmu_lock within the scope of
the raw locked kvm_lock being held.  This leads to the following:

BUG: sleeping function called from invalid context at kernel/rtmutex.c:659
in_atomic(): 1, irqs_disabled(): 0, pid: 55, name: kswapd0
Preemption disabled at:[] mmu_shrink+0x5c/0x1b0 [kvm]

Pid: 55, comm: kswapd0 Not tainted 3.4.34_preempt-rt
Call Trace:
 [] __might_sleep+0xfd/0x160
 [] rt_spin_lock+0x24/0x50
 [] mmu_shrink+0xec/0x1b0 [kvm]
 [] shrink_slab+0x17d/0x3a0
 [] ? mem_cgroup_iter+0x130/0x260
 [] balance_pgdat+0x54a/0x730
 [] ? set_pgdat_percpu_threshold+0xa7/0xd0
 [] kswapd+0x18f/0x490
 [] ? get_parent_ip+0x11/0x50
 [] ? __init_waitqueue_head+0x50/0x50
 [] ? balance_pgdat+0x730/0x730
 [] kthread+0xdb/0xe0
 [] ? finish_task_switch+0x52/0x100
 [] kernel_thread_helper+0x4/0x10
 [] ? __init_kthread_worker+0x

Since we only use the lock for protecting the vm_list, once we've
found the instance we want, we can shuffle it to the end of the
list and then drop the kvm_lock before taking the mmu_lock.  We
can do this because after the mmu operations are completed, we
break -- i.e. we don't continue list processing, so it doesn't
matter if the list changed around us.
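
In outline, the resulting mmu_shrink_scan() flow is the following (simplified
pseudocode of the change below; "nothing_to_zap" stands in for the existing
skip checks and is not a real helper):

        raw_spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                if (nothing_to_zap(kvm))
                        continue;

                idx = srcu_read_lock(&kvm->srcu);

                /* rotate this VM to the tail while kvm_lock is still held,
                 * then drop the raw lock before taking the sleeping mmu_lock */
                list_move_tail(&kvm->vm_list, &vm_list);
                found = 1;
                raw_spin_unlock(&kvm_lock);

                spin_lock(&kvm->mmu_lock);
                /* ... zap obsolete/unused pages ... */
                spin_unlock(&kvm->mmu_lock);
                srcu_read_unlock(&kvm->srcu, idx);
                break;          /* list iteration never resumes */
        }

        if (!found)
                raw_spin_unlock(&kvm_lock);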

Signed-off-by: Paul Gortmaker 
---

[Note1: please double-check that this solution makes sense for the
 mainline kernel; consider this an RFC patch that wants a
 review from people in the know.]

[Note2: you'll need to be running a preempt-rt kernel to actually
 see this.  Also note that the above patch is against linux-next.
 Alternate solutions welcome ; this seemed to me the obvious fix.]

 arch/x86/kvm/mmu.c | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 748e0d8..db93a70 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4322,6 +4322,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct 
shrink_control *sc)
 {
struct kvm *kvm;
int nr_to_scan = sc->nr_to_scan;
+   int found = 0;
unsigned long freed = 0;
 
raw_spin_lock(&kvm_lock);
@@ -4349,6 +4350,12 @@ mmu_shrink_scan(struct shrinker *shrink, struct 
shrink_control *sc)
continue;
 
idx = srcu_read_lock(&kvm->srcu);
+
+   list_move_tail(&kvm->vm_list, &vm_list);
+   found = 1;
+   /* We can't be holding a raw lock and take non-raw mmu_lock */
+   raw_spin_unlock(&kvm_lock);
+
spin_lock(&kvm->mmu_lock);
 
if (kvm_has_zapped_obsolete_pages(kvm)) {
@@ -4370,11 +4377,12 @@ unlock:
 * per-vm shrinkers cry out
 * sadness comes quickly
 */
-   list_move_tail(&kvm->vm_list, &vm_list);
break;
}
 
-   raw_spin_unlock(&kvm_lock);
+   if (!found)
+   raw_spin_unlock(&kvm_lock);
+
return freed;
 
 }
-- 
1.8.1.2



[PATCH v2 41/45] powerpc: Use get/put_online_cpus_atomic() to prevent CPU offline

2013-06-25 Thread Srivatsa S. Bhat
Once stop_machine() is gone from the CPU offline path, we won't be able
to depend on disabling preemption to prevent CPUs from going offline
from under us.

Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going
offline while running in atomic context.
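
In short, the conversion is the following pattern (illustrative only; see the
hunks below for the real call sites):

        /* before: relied on preempt_disable() keeping CPUs online */
        cpu = get_cpu();
        /* ... work that must not race with CPU offline ... */
        put_cpu();

        /* after: explicitly block CPU offline from atomic context */
        cpu = get_online_cpus_atomic();
        /* ... same work ... */
        put_online_cpus_atomic();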

Cc: Benjamin Herrenschmidt 
Cc: Gleb Natapov 
Cc: Alexander Graf 
Cc: Rob Herring 
Cc: Grant Likely 
Cc: Kumar Gala 
Cc: Zhao Chenhui 
Cc: linuxppc-...@lists.ozlabs.org
Cc: kvm@vger.kernel.org
Cc: kvm-...@vger.kernel.org
Cc: oprofile-l...@lists.sf.net
Cc: cbe-oss-...@lists.ozlabs.org
Signed-off-by: Srivatsa S. Bhat 
---

 arch/powerpc/kernel/irq.c  |7 ++-
 arch/powerpc/kernel/machine_kexec_64.c |4 ++--
 arch/powerpc/kernel/smp.c  |2 ++
 arch/powerpc/kvm/book3s_hv.c   |5 +++--
 arch/powerpc/mm/mmu_context_nohash.c   |3 +++
 arch/powerpc/oprofile/cell/spu_profiler.c  |3 +++
 arch/powerpc/oprofile/cell/spu_task_sync.c |4 
 arch/powerpc/oprofile/op_model_cell.c  |6 ++
 8 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ca39bac..41e9961 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -45,6 +45,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -410,7 +411,10 @@ void migrate_irqs(void)
unsigned int irq;
static int warned;
cpumask_var_t mask;
-   const struct cpumask *map = cpu_online_mask;
+   const struct cpumask *map;
+
+   get_online_cpus_atomic();
+   map = cpu_online_mask;
 
alloc_cpumask_var(&mask, GFP_ATOMIC);
 
@@ -436,6 +440,7 @@ void migrate_irqs(void)
}
 
free_cpumask_var(mask);
+   put_online_cpus_atomic();
 
local_irq_enable();
mdelay(1);
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 611acdf..38f6d75 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -187,7 +187,7 @@ static void kexec_prepare_cpus_wait(int wait_state)
int my_cpu, i, notified=-1;
 
hw_breakpoint_disable();
-   my_cpu = get_cpu();
+   my_cpu = get_online_cpus_atomic();
/* Make sure each CPU has at least made it to the state we need.
 *
 * FIXME: There is a (slim) chance of a problem if not all of the CPUs
@@ -266,7 +266,7 @@ static void kexec_prepare_cpus(void)
 */
kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
 
-   put_cpu();
+   put_online_cpus_atomic();
 }
 
 #else /* ! SMP */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ee7ac5e..2123bec 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -277,9 +277,11 @@ void smp_send_debugger_break(void)
if (unlikely(!smp_ops))
return;
 
+   get_online_cpus_atomic();
for_each_online_cpu(cpu)
if (cpu != me)
do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+   put_online_cpus_atomic();
 }
 #endif
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2efa9dd..9d8a973 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -28,6 +28,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -78,7 +79,7 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
++vcpu->stat.halt_wakeup;
}
 
-   me = get_cpu();
+   me = get_online_cpus_atomic();
 
/* CPU points to the first thread of the core */
if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
@@ -88,7 +89,7 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
else if (cpu_online(cpu))
smp_send_reschedule(cpu);
}
-   put_cpu();
+   put_online_cpus_atomic();
 }
 
 /*
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index e779642..c7bdcb4 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -194,6 +194,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
unsigned int i, id, cpu = smp_processor_id();
unsigned long *map;
 
+   get_online_cpus_atomic();
+
/* No lockless fast path .. yet */
raw_spin_lock(&context_lock);
 
@@ -280,6 +282,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
pr_hardcont(" -> %d\n", id);
set_context(id, next->pgd);
raw_spin_unlock(&context_lock);
+   put_online_cpus_atomic();
 }
 
 /*
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index b129d00..ab6e6c1 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -14,6 +14,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #incl

[PATCH v2 28/45] KVM: Use get/put_online_cpus_atomic() to prevent CPU offline

2013-06-25 Thread Srivatsa S. Bhat
Once stop_machine() is gone from the CPU offline path, we won't be able
to depend on disabling preemption to prevent CPUs from going offline
from under us.

Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going
offline while we are in atomic context.

Cc: Gleb Natapov 
Cc: Paolo Bonzini 
Cc: kvm@vger.kernel.org
Signed-off-by: Srivatsa S. Bhat 
---

 virt/kvm/kvm_main.c |8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 302681c..5bbfa30 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -174,7 +174,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-   me = get_cpu();
+   me = get_online_cpus_atomic();
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_make_request(req, vcpu);
cpu = vcpu->cpu;
@@ -192,7 +192,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
smp_call_function_many(cpus, ack_flush, NULL, 1);
else
called = false;
-   put_cpu();
+   put_online_cpus_atomic();
free_cpumask_var(cpus);
return called;
 }
@@ -1707,11 +1707,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
++vcpu->stat.halt_wakeup;
}
 
-   me = get_cpu();
+   me = get_online_cpus_atomic();
if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
if (kvm_arch_vcpu_should_kick(vcpu))
smp_send_reschedule(cpu);
-   put_cpu();
+   put_online_cpus_atomic();
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
 #endif /* !CONFIG_S390 */



[PATCH v2 29/45] kvm/vmx: Use get/put_online_cpus_atomic() to prevent CPU offline

2013-06-25 Thread Srivatsa S. Bhat
Once stop_machine() is gone from the CPU offline path, we won't be able
to depend on disabling preemption to prevent CPUs from going offline
from under us.

Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going
offline while we are in atomic context.

Cc: Gleb Natapov 
Cc: Paolo Bonzini 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Cc: x...@kernel.org
Cc: kvm@vger.kernel.org
Signed-off-by: Srivatsa S. Bhat 
---

 arch/x86/kvm/vmx.c |   13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 260a919..4e1e966 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -26,6 +26,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -7164,12 +7165,12 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (!vmm_exclusive)
kvm_cpu_vmxoff();
 
-   cpu = get_cpu();
+   cpu = get_online_cpus_atomic();
vmx_vcpu_load(&vmx->vcpu, cpu);
vmx->vcpu.cpu = cpu;
err = vmx_vcpu_setup(vmx);
vmx_vcpu_put(&vmx->vcpu);
-   put_cpu();
+   put_online_cpus_atomic();
if (err)
goto free_vmcs;
if (vm_need_virtualize_apic_accesses(kvm)) {
@@ -7706,12 +7707,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 
vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
 
-   cpu = get_cpu();
+   cpu = get_online_cpus_atomic();
vmx->loaded_vmcs = vmcs02;
vmx_vcpu_put(vcpu);
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
-   put_cpu();
+   put_online_cpus_atomic();
 
vmx_segment_cache_clear(vmx);
 
@@ -8023,12 +8024,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
leave_guest_mode(vcpu);
prepare_vmcs12(vcpu, vmcs12);
 
-   cpu = get_cpu();
+   cpu = get_online_cpus_atomic();
vmx->loaded_vmcs = &vmx->vmcs01;
vmx_vcpu_put(vcpu);
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
-   put_cpu();
+   put_online_cpus_atomic();
 
vmx_segment_cache_clear(vmx);
 



Hot plugging PCIe device in Qemu Q35 chipset

2013-06-25 Thread Sudhir Rustogi
Hi all,

Was wondering if anyone can help me with a sample qemu command line showing
how to pass a virtual root port device, an upstream port device and a
downstream port device (and their virtual topology) to qemu when using the
Q35 chipset (-M q35), and then how to hotplug another PCIe endpoint device
that, say, plugs into the downstream port's bus.

I am trying this with qemu 1.4.1 and I am sure I am not doing it right, as I
keep running into errors. So it would be nice to have a good starting point,
from the qemu command line point of view, on how to do this.

I didn't have a problem using the i440fx chipset (without -M q35) and
hotplugging the same device, but I am not able to do the same when using q35
and its associated topology.

Tx

Sudhir

Sent from my iPhone


Re: [PATCH] kvm tools: fix boot of guests with more than 4gb of ram

2013-06-25 Thread Sasha Levin

On 06/24/2013 08:58 PM, Michael Ellerman wrote:

On Sun, 2013-06-23 at 21:23 -0400, Sasha Levin wrote:

Commit "kvm tools: virtio: remove hardcoded assumptions
about guest page size" has introduced a bug that prevented
guests with more than 4gb of ram from booting.

The issue is that 'pfn' is a 32-bit integer, so multiplying it by the
page size to get the actual page will cause an overflow if the pfn
refers to a memory area above 4gb.


Couldn't we just make pfn 64 bit?


pfn is passed to us by the guest virtio driver, and is 32 bit.
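
For anyone curious why a 32-bit pfn breaks above 4GB, a standalone
illustration of the overflow (not kvm tools code; the constants are
just an example):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t pfn = 0x100000;	/* first pfn at the 4GB boundary, 4K pages */
		uint32_t page_size = 4096;

		uint64_t wrong = pfn * page_size;		/* 32-bit multiply wraps to 0 */
		uint64_t right = (uint64_t)pfn * page_size;	/* widen first: 0x100000000 */

		printf("wrong=%#llx right=%#llx\n",
		       (unsigned long long)wrong, (unsigned long long)right);
		return 0;
	}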


Thanks,
Sasha



[PULL v2 07/21] kvm: skip system call when msi route is unchanged

2013-06-25 Thread Michael S. Tsirkin
Some guests do a large number of mask/unmask
calls which currently trigger expensive route update
system calls.
Detect that the route is unchanged and skip the system call.

Reported-by: "Zhanghaoyu (A)" 
Signed-off-by: Michael S. Tsirkin 
---
 kvm-all.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/kvm-all.c b/kvm-all.c
index f119ce1..891722b 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -1026,6 +1026,10 @@ static int kvm_update_routing_entry(KVMState *s,
 continue;
 }
 
+if(!memcmp(entry, new_entry, sizeof *entry)) {
+return 0;
+}
+
 *entry = *new_entry;
 
 kvm_irqchip_commit_routes(s);
-- 
MST



[PULL v2 06/21] kvm: zero-initialize KVM_SET_GSI_ROUTING input

2013-06-25 Thread Michael S. Tsirkin
kvm_add_routing_entry makes an attempt to
zero-initialize any new routing entry.
However, it fails to initialize padding
within the u field of the structure
kvm_irq_routing_entry.

Other functions like kvm_irqchip_update_msi_route
also fail to initialize the padding field in
kvm_irq_routing_entry.

While mostly harmless, this would prevent us from
reusing these fields for something useful in
the future.

It's better to just make sure all input is initialized.

Once it is, we can also drop the complex field-by-field assignment and
just do a simple *a = *b to update a route entry.
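
As a standalone illustration of the pattern the patch moves to (the
struct below is a stand-in, not the real kvm_irq_routing_entry):

	#include <stdio.h>

	struct route_entry {
		unsigned int gsi;
		unsigned int type;
		unsigned int flags;
		unsigned int pad;	/* explicit stand-in for implicit padding */
		union { unsigned int irqchip_pin; unsigned long long msi_addr; } u;
	};

	static void update_entry(struct route_entry *entry,
				 const struct route_entry *new_entry)
	{
		/* whole-struct assignment is only safe because callers build
		 * new_entry from a zero-initialized object, so nothing in it
		 * (not even the padding stand-in) is left indeterminate */
		*entry = *new_entry;
	}

	int main(void)
	{
		struct route_entry installed = {};
		struct route_entry e = {};	/* the "= {}" pattern from the patch */

		e.gsi = 5;
		e.type = 1;
		e.u.irqchip_pin = 3;
		update_entry(&installed, &e);
		printf("gsi=%u type=%u pad=%u\n",
		       installed.gsi, installed.type, installed.pad);
		return 0;
	}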

Signed-off-by: Michael S. Tsirkin 
---
 kvm-all.c | 19 +++
 1 file changed, 7 insertions(+), 12 deletions(-)

diff --git a/kvm-all.c b/kvm-all.c
index 405480e..f119ce1 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -1006,11 +1006,8 @@ static void kvm_add_routing_entry(KVMState *s,
 }
 n = s->irq_routes->nr++;
 new = &s->irq_routes->entries[n];
-memset(new, 0, sizeof(*new));
-new->gsi = entry->gsi;
-new->type = entry->type;
-new->flags = entry->flags;
-new->u = entry->u;
+
+*new = *entry;
 
 set_gsi(s, entry->gsi);
 
@@ -1029,9 +1026,7 @@ static int kvm_update_routing_entry(KVMState *s,
 continue;
 }
 
-entry->type = new_entry->type;
-entry->flags = new_entry->flags;
-entry->u = new_entry->u;
+*entry = *new_entry;
 
 kvm_irqchip_commit_routes(s);
 
@@ -1043,7 +1038,7 @@ static int kvm_update_routing_entry(KVMState *s,
 
 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
 {
-struct kvm_irq_routing_entry e;
+struct kvm_irq_routing_entry e = {};
 
 assert(pin < s->gsi_count);
 
@@ -1156,7 +1151,7 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
 return virq;
 }
 
-route = g_malloc(sizeof(KVMMSIRoute));
+route = g_malloc0(sizeof(KVMMSIRoute));
 route->kroute.gsi = virq;
 route->kroute.type = KVM_IRQ_ROUTING_MSI;
 route->kroute.flags = 0;
@@ -1177,7 +1172,7 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
 
 int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
 {
-struct kvm_irq_routing_entry kroute;
+struct kvm_irq_routing_entry kroute = {};
 int virq;
 
 if (!kvm_gsi_routing_enabled()) {
@@ -1203,7 +1198,7 @@ int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
 
 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
 {
-struct kvm_irq_routing_entry kroute;
+struct kvm_irq_routing_entry kroute = {};
 
 if (!kvm_irqchip_in_kernel()) {
 return -ENOSYS;
-- 
MST



Re: [PATCH RFC V9 0/19] Paravirtualized ticket spinlocks

2013-06-25 Thread Andrew Theurer
On Sun, 2013-06-02 at 00:51 +0530, Raghavendra K T wrote:
> This series replaces the existing paravirtualized spinlock mechanism
> with a paravirtualized ticketlock mechanism. The series provides
> implementation for both Xen and KVM.
> 
> Changes in V9:
> - Changed spin_threshold to 32k to avoid excess halt exits that are
>causing undercommit degradation (after PLE handler improvement).
> - Added  kvm_irq_delivery_to_apic (suggested by Gleb)
> - Optimized halt exit path to use PLE handler
> 
> V8 of PVspinlock was posted last year. After Avi's suggestions to look
> at PLE handler's improvements, various optimizations in PLE handling
> have been tried.

Sorry for not posting this sooner.  I have tested the v9 pv-ticketlock
patches in 1x and 2x over-commit with 10-vcpu and 20-vcpu VMs.  I have
tested these patches with and without PLE, as PLE is still not scalable
with large VMs.

System: x3850X5, 40 cores, 80 threads


1x over-commit with 10-vCPU VMs (8 VMs) all running dbench:
------------------------------------------------------------
Configuration           Total Throughput (MB/s)   Notes

3.10-default-ple_on     22945    5% CPU in host kernel, 2% spin_lock in guests
3.10-default-ple_off    23184    5% CPU in host kernel, 2% spin_lock in guests
3.10-pvticket-ple_on    22895    5% CPU in host kernel, 2% spin_lock in guests
3.10-pvticket-ple_off   23051    5% CPU in host kernel, 2% spin_lock in guests
[all 1x results look good here]


2x over-commit with 10-vCPU VMs (16 VMs) all running dbench:
------------------------------------------------------------
Configuration           Total Throughput          Notes

3.10-default-ple_on      6287    55% CPU in host kernel, 17% spin_lock in guests
3.10-default-ple_off     1849    2% CPU in host kernel, 95% spin_lock in guests
3.10-pvticket-ple_on     6691    50% CPU in host kernel, 15% spin_lock in guests
3.10-pvticket-ple_off   16464    8% CPU in host kernel, 33% spin_lock in guests
[PLE hinders pv-ticket improvements, but even with PLE off,
 we are still off from ideal throughput (somewhere >2)]


1x over-commit with 20-vCPU VMs (4 VMs) all running dbench:
------------------------------------------------------------
Configuration           Total Throughput          Notes

3.10-default-ple_on     22736    6% CPU in host kernel, 3% spin_lock in guests
3.10-default-ple_off    23377    5% CPU in host kernel, 3% spin_lock in guests
3.10-pvticket-ple_on    22471    6% CPU in host kernel, 3% spin_lock in guests
3.10-pvticket-ple_off   23445    5% CPU in host kernel, 3% spin_lock in guests
[1x looking fine here]


2x over-commit with 20-vCPU VMs (8 VMs) all running dbench:
------------------------------------------------------------
Configuration           Total Throughput          Notes

3.10-default-ple_on      1965    70% CPU in host kernel, 34% spin_lock in guests
3.10-default-ple_off      226    2% CPU in host kernel, 94% spin_lock in guests
3.10-pvticket-ple_on     1942    70% CPU in host kernel, 35% spin_lock in guests
3.10-pvticket-ple_off    8003    11% CPU in host kernel, 70% spin_lock in guests
[quite bad all around, but pv-tickets with PLE off the best so far.
 Still quite a bit off from ideal throughput]

In summary, I would state that the pv-ticket is an overall win, but the
current PLE handler tends to "get in the way" on these larger guests.

-Andrew



[PATCHv2] vhost-net: fix use-after-free in vhost_net_flush

2013-06-25 Thread Michael S. Tsirkin
vhost_net_ubuf_put_and_wait has a confusing name:
it will actually also free its argument.
Thus, since commit 1280c27f8e29acf4af2da914e80ec27c3dbd5c01
"vhost-net: flush outstanding DMAs on memory change",
vhost_net_flush tries to use the argument after passing it
to vhost_net_ubuf_put_and_wait, which results
in a use-after-free.
To fix, don't free the argument in vhost_net_ubuf_put_and_wait;
add a new API for callers that want to free ubufs.

Acked-by: Asias He 
Acked-by: Jason Wang 
Signed-off-by: Michael S. Tsirkin 

---

Please review, and queue for 3.10 and stable.
Changes since v1:
- no functional change, tweaked the commit message

 drivers/vhost/net.c | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 5c77d6a..534adb0 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -149,6 +149,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
 {
kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+}
+
+static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
+{
+   vhost_net_ubuf_put_and_wait(ubufs);
kfree(ubufs);
 }
 
@@ -1073,7 +1078,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
mutex_unlock(&vq->mutex);
 
if (oldubufs) {
-   vhost_net_ubuf_put_and_wait(oldubufs);
+   vhost_net_ubuf_put_wait_and_free(oldubufs);
mutex_lock(&vq->mutex);
vhost_zerocopy_signal_used(n, vq);
mutex_unlock(&vq->mutex);
@@ -1091,7 +1096,7 @@ err_used:
vq->private_data = oldsock;
vhost_net_enable_vq(n, vq);
if (ubufs)
-   vhost_net_ubuf_put_and_wait(ubufs);
+   vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
fput(sock->file);
 err_vq:
-- 
MST


Re: [Qemu-devel] KVM call agenda for 2013-06-25

2013-06-25 Thread Igor Mammedov
On Tue, 11 Jun 2013 17:52:53 +0200
Juan Quintela  wrote:

> 
> Hi
> 
> Now we have moved to one call each other week.
> Please, send any topic that you are interested in covering.
> 
> Thanks, Juan.
> 
> PD.  If you want to attend and you don't have the call details,
>   contact me.
> 

Using static vs. dynamic properties vs. globals, in the particular case
of CPU feature properties. Anthony suggested on IRC to use static
properties for this, but recently it was questioned (by afaerber) why
dynamic properties are not used instead:

1. static properties:

* using default values in static properties to define CPU models at
  class_init() time (a minimal sketch follows at the end of this list)

* static properties could eventually evolve onto class properties,
  probably without much effort required.

* allows to simplify x86_cpu_initfn() and replace several custom
  property setters with a generic static property setters as result
  reducing code base and duplication.

2. introduce post_initfn() hook and move setting defaults of
  static properties and globals into it. So that property setters
  won't have to operate on partially initialized object instance.
  It also would allow to use dynamic properties in globals and
  compat_props.

3. using globals to simplify cpu_model parsing, possibly getting rid of it
   in favor of -device FOO_CPU, and eventually replacing cpu_init(cpu_model)
   with all its complexity by a simple generic device_add() sequence.

   As one of the steps towards this, the cpu_model string
   "-cpu FOO_CPU,feat1=x,feat2=y" -- which is the template for the N CPUs
   created in machine_init() and which is parsed by a target-specific parser
   in cpu_*_init() -- could be internally transformed into a set of global
   properties, like:
      FOO_CPU.feat1=x FOO_CPU.feat2=y
   Then the target-specific parsers could be turned into a CPU_CLASS hook
   that keeps the compatibility code and performs this remapping, instead of
   parsing the cpu_model string for each CPU and operating directly on the
   CPU object instance. That would allow treating CPUs as usual devices
   during hot-plug, using device_add, and would open the possibility of
   creating individual CPUs on the QEMU CLI with -device (which is important
   for migration and arbitrary CPU hot-add/remove) and of heterogeneous-CPU
   machines.
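
A minimal sketch of what option 1 looks like in code (FooCPU/FooCPUState,
the feature names and the header path are placeholders, not existing QEMU
code):

    #include "hw/qdev-properties.h"     /* header location varies across QEMU versions */

    typedef struct FooCPUState {
        DeviceState parent_obj;
        bool feat1;
        uint32_t feat2;
    } FooCPUState;

    static Property foo_cpu_properties[] = {
        DEFINE_PROP_BOOL("feat1", FooCPUState, feat1, true),    /* model default */
        DEFINE_PROP_UINT32("feat2", FooCPUState, feat2, 4),
        DEFINE_PROP_END_OF_LIST(),
    };

    static void foo_cpu_class_init(ObjectClass *oc, void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(oc);

        /* the defaults above define the CPU model at class_init() time;
         * -global, compat_props and "-device foo-cpu,feat1=off" can then
         * override them through the generic static-property setters */
        dc->props = foo_cpu_properties;
    }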


Re: [PATCH] kvmclock: clock should count only if vm is running (v2)

2013-06-25 Thread Gleb Natapov
On Tue, Jun 18, 2013 at 08:38:25PM -0300, Marcelo Tosatti wrote:
> 
> v2: remove unnecessary runstate_is_running() usage (Paolo)
> 
> --
> 
> kvmclock should not count while vm is paused, because:
> 
> 1) if the vm is paused for long periods, timekeeping
> math can overflow while converting the (large) clocksource
> delta to nanoseconds.
> 
> 2) Users rely on CLOCK_MONOTONIC to count run time, that is,
> time which OS has been in a runnable state (see CLOCK_BOOTTIME).
> 
> Change the kvmclock driver so as to save the clock value when the vm
> transitions from runnable to stopped state, and to restore it on the
> stopped-to-runnable transition.
> 
> Signed-off-by: Marcelo Tosatti 
> 
Applied, thanks.

> diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c
> index 87d4d0f..98e5ca5 100644
> --- a/hw/i386/kvm/clock.c
> +++ b/hw/i386/kvm/clock.c
> @@ -28,38 +28,6 @@ typedef struct KVMClockState {
>  bool clock_valid;
>  } KVMClockState;
>  
> -static void kvmclock_pre_save(void *opaque)
> -{
> -KVMClockState *s = opaque;
> -struct kvm_clock_data data;
> -int ret;
> -
> -if (s->clock_valid) {
> -return;
> -}
> -ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
> -if (ret < 0) {
> -fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(ret));
> -data.clock = 0;
> -}
> -s->clock = data.clock;
> -/*
> - * If the VM is stopped, declare the clock state valid to avoid 
> re-reading
> - * it on next vmsave (which would return a different value). Will be 
> reset
> - * when the VM is continued.
> - */
> -s->clock_valid = !runstate_is_running();
> -}
> -
> -static int kvmclock_post_load(void *opaque, int version_id)
> -{
> -KVMClockState *s = opaque;
> -struct kvm_clock_data data;
> -
> -data.clock = s->clock;
> -data.flags = 0;
> -return kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, &data);
> -}
>  
>  static void kvmclock_vm_state_change(void *opaque, int running,
>   RunState state)
> @@ -70,8 +38,18 @@ static void kvmclock_vm_state_change(void *opaque, int 
> running,
>  int ret;
>  
>  if (running) {
> +struct kvm_clock_data data;
> +
>  s->clock_valid = false;
>  
> +data.clock = s->clock;
> +data.flags = 0;
> +ret = kvm_vm_ioctl(kvm_state, KVM_SET_CLOCK, &data);
> +if (ret < 0) {
> +fprintf(stderr, "KVM_SET_CLOCK failed: %s\n", strerror(ret));
> +abort();
> +}
> +
>  if (!cap_clock_ctrl) {
>  return;
>  }
> @@ -84,6 +62,26 @@ static void kvmclock_vm_state_change(void *opaque, int 
> running,
>  return;
>  }
>  }
> +} else {
> +struct kvm_clock_data data;
> +int ret;
> +
> +if (s->clock_valid) {
> +return;
> +}
> +ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
> +if (ret < 0) {
> +fprintf(stderr, "KVM_GET_CLOCK failed: %s\n", strerror(ret));
> +abort();
> +}
> +s->clock = data.clock;
> +
> +/*
> + * If the VM is stopped, declare the clock state valid to
> + * avoid re-reading it on next vmsave (which would return
> + * a different value). Will be reset when the VM is continued.
> + */
> +s->clock_valid = true;
>  }
>  }
>  
> @@ -100,8 +98,6 @@ static const VMStateDescription kvmclock_vmsd = {
>  .version_id = 1,
>  .minimum_version_id = 1,
>  .minimum_version_id_old = 1,
> -.pre_save = kvmclock_pre_save,
> -.post_load = kvmclock_post_load,
>  .fields = (VMStateField[]) {
>  VMSTATE_UINT64(clock, KVMClockState),
>  VMSTATE_END_OF_LIST()

--
Gleb.


Re: [PATCH 3/3] emulator: Add spl/bpl/sil/dil access via modrm

2013-06-25 Thread Gleb Natapov
On Tue, Jun 25, 2013 at 04:51:31PM +0800, Arthur Chunqi Li wrote:
> Add test case of accessing spl/bpl/sil/dil via modrm in emulator.
> 
> Signed-off-by: Arthur Chunqi Li 
> ---
>  x86/emulator.c |   26 ++
>  1 file changed, 26 insertions(+)
> 
> diff --git a/x86/emulator.c b/x86/emulator.c
> index bd02d5c..bea9513 100755
> --- a/x86/emulator.c
> +++ b/x86/emulator.c
> @@ -840,6 +840,31 @@ static void test_nopl(uint64_t *mem, void *alt_insn_page)
>  report("nopl", 1);
>  }
>  
> +static void test_modrm(uint64_t *mem, void *alt_insn_page)
> +{
> +
> +MK_INSN(modrm_spl,"mov %al, %spl\n\t");
> +MK_INSN(modrm_bpl,"mov %cl, %bpl\n\t");
> +MK_INSN(modrm_sil,"mov %dl, %sil\n\t");
> +MK_INSN(modrm_dil,"mov %bl, %dil\n\t");
> +
The test can be compiled for 64-bit only, so we need to put it in an
#ifdef. The previous patches that introduced the testing infrastructure
are 64-bit specific too, so right now emulator.c can be compiled for
64-bit only; we should fix that one day.

> +inregs = (struct regs){ .rax = 0x1234 };
> +trap_emulator(mem, alt_insn_page, &insn_modrm_spl);
> +report("access spl via modr/m", outregs.rax == 0x1234);
I'd prefer to write a report variant like the one in realmode.c that
checks all the registers for consistency. Then the report would look
like this:
report("access spl via modr/m", R_SP, outregs.rsp == 0x34);

> +
> +inregs = (struct regs){ .rcx = 0x1234 };
> +trap_emulator(mem, alt_insn_page, &insn_modrm_bpl);
> +report("access bpl via modr/m", outregs.rcx == 0x1234);
> +
> +inregs = (struct regs){ .rdx = 0x1234 };
> +trap_emulator(mem, alt_insn_page, &insn_modrm_sil);
> +report("access sil via modr/m", outregs.rdx == 0x1234);
> +
> +inregs = (struct regs){ .rbx = 0x1234 };
> +trap_emulator(mem, alt_insn_page, &insn_modrm_dil);
> +report("access dil via modr/m", outregs.rbx == 0x1234);
> +}
> +
>  static void test_crosspage_mmio(volatile uint8_t *mem)
>  {
>  volatile uint16_t w, *pw;
> @@ -1037,6 +1062,7 @@ int main()
>   test_mmx_movq_mf(mem, alt_insn_page);
>   test_movabs(mem, alt_insn_page);
>   test_nopl(mem, alt_insn_page);
> + test_modrm(mem, alt_insn_page);
>  
>   test_crosspage_mmio(mem);
>  
> -- 
> 1.7.9.5

--
Gleb.


Re: [PATCH 3/4] kvm, emulator: Rename VendorSpecific flag

2013-06-25 Thread Gleb Natapov
On Thu, Jun 20, 2013 at 11:30:45AM +0200, Borislav Petkov wrote:
> From: Borislav Petkov 
> 
> Call it EmulateOnUD which is exactly what we're trying to do with
> vendor-specific instructions.
> 
> Signed-off-by: Borislav Petkov 
> ---
>  arch/x86/kvm/emulate.c | 14 +++---
>  1 file changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index d9aa9f8772f0..7872d9871fb7 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -126,7 +126,7 @@
>  #define Mov (1<<20)
>  /* Misc flags */
>  #define Prot(1<<21) /* instruction generates #UD if not in prot-mode 
> */
> -#define VendorSpecific (1<<22) /* Vendor specific instruction */
> +#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
>  #define NoAccess(1<<23) /* Don't access memory (lea/invlpg/verr etc) */
>  #define Op3264  (1<<24) /* Operand is 64b in long mode, 32b otherwise */
>  #define Undefined   (1<<25) /* No Such Instruction */
> @@ -3688,7 +3688,7 @@ static const struct opcode group7_rm1[] = {
>  
>  static const struct opcode group7_rm3[] = {
>   DIP(SrcNone | Prot | Priv,  vmrun,  check_svme_pa),
> - II(SrcNone  | Prot | VendorSpecific,em_vmmcall, vmmcall),
> + II(SrcNone  | Prot | EmulateOnUD,   em_vmmcall, vmmcall),
>   DIP(SrcNone | Prot | Priv,  vmload, check_svme_pa),
>   DIP(SrcNone | Prot | Priv,  vmsave, check_svme_pa),
>   DIP(SrcNone | Prot | Priv,  stgi,   check_svme),
> @@ -3773,7 +3773,7 @@ static const struct group_dual group7 = { {
>   II(SrcMem16 | Mov | Priv,   em_lmsw, lmsw),
>   II(SrcMem | ByteOp | Priv | NoAccess,   em_invlpg, invlpg),
>  }, {
> - I(SrcNone | Priv | VendorSpecific,  em_vmcall),
> + I(SrcNone | Priv | EmulateOnUD, em_vmcall),
>   EXT(0, group7_rm1),
>   N, EXT(0, group7_rm3),
>   II(SrcNone | DstMem | Mov,  em_smsw, smsw), N,
> @@ -3995,7 +3995,7 @@ static const struct opcode opcode_table[256] = {
>  static const struct opcode twobyte_table[256] = {
>   /* 0x00 - 0x0F */
>   G(0, group6), GD(0, &group7), N, N,
> - N, I(ImplicitOps | VendorSpecific, em_syscall),
> + N, I(ImplicitOps | EmulateOnUD, em_syscall),
>   II(ImplicitOps | Priv, em_clts, clts), N,
>   DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
>   N, D(ImplicitOps | ModRM), N, N,
> @@ -4015,8 +4015,8 @@ static const struct opcode twobyte_table[256] = {
>   IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
>   II(ImplicitOps | Priv, em_rdmsr, rdmsr),
>   IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
> - I(ImplicitOps | VendorSpecific, em_sysenter),
> - I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
> + I(ImplicitOps | EmulateOnUD, em_sysenter),
> + I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
>   N, N,
>   N, N, N, N, N, N, N, N,
>   /* 0x40 - 0x4F */
> @@ -4435,7 +4435,7 @@ done_prefixes:
>   if (ctxt->d == 0 || (ctxt->d & NotImpl))
>   return EMULATION_FAILED;
>  
> - if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
> + if (!(ctxt->d & EmulateOnUD) && ctxt->only_vendor_specific_insn)
Let's rename only_vendor_specific_insn to something like ->ud too.

>   return EMULATION_FAILED;
>  
>   if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
> -- 
> 1.8.3

--
Gleb.


Re: [PATCH 1/3] emulator: fix confused param list

2013-06-25 Thread Arthur Chunqi Li
These patches are some unfinished work and bug fixes related to
Paolo's exercise. I think I'd better finish these simple jobs.

Arthur

On Tue, Jun 25, 2013 at 4:51 PM, Arthur Chunqi Li  wrote:
> Fix param list of test_mmx_movq_mf and test_movabs. The previous
> version uses "insn_page" and "insn_ram" which are not used afterwards.
> There are also two variants named "insn_page" and "insn_ram", which
> has no relation with these two functions.
>
> Signed-off-by: Arthur Chunqi Li 
> ---
>  x86/emulator.c |   10 --
>  1 file changed, 4 insertions(+), 6 deletions(-)
>  mode change 100644 => 100755 x86/emulator.c
>
> diff --git a/x86/emulator.c b/x86/emulator.c
> old mode 100644
> new mode 100755
> index 68d2b93..6972334
> --- a/x86/emulator.c
> +++ b/x86/emulator.c
> @@ -786,8 +786,7 @@ static void advance_rip_by_3_and_note_exception(struct 
> ex_regs *regs)
>  regs->rip += 3;
>  }
>
> -static void test_mmx_movq_mf(uint64_t *mem, uint8_t *insn_page,
> -uint8_t *alt_insn_page, void *insn_ram)
> +static void test_mmx_movq_mf(uint64_t *mem, uint8_t *alt_insn_page)
>  {
>  uint16_t fcw = 0;  /* all exceptions unmasked */
>  /* movq %mm0, (%rax) */
> @@ -808,8 +807,7 @@ static void test_mmx_movq_mf(uint64_t *mem, uint8_t 
> *insn_page,
>  handle_exception(MF_VECTOR, 0);
>  }
>
> -static void test_movabs(uint64_t *mem, uint8_t *insn_page,
> -  uint8_t *alt_insn_page, void *insn_ram)
> +static void test_movabs(uint64_t *mem, uint8_t *alt_insn_page)
>  {
>  /* mov $0x9090909090909090, %rcx */
>  MK_INSN(movabs, "mov $0x9090909090909090, %rcx\n\t");
> @@ -1012,8 +1010,8 @@ int main()
> test_lldt(mem);
> test_ltr(mem);
>
> -   test_mmx_movq_mf(mem, insn_page, alt_insn_page, insn_ram);
> -   test_movabs(mem, insn_page, alt_insn_page, insn_ram);
> +   test_mmx_movq_mf(mem, alt_insn_page);
> +   test_movabs(mem, alt_insn_page);
>
> test_crosspage_mmio(mem);
>
> --
> 1.7.9.5
>



-- 
Arthur Chunqi Li
Department of Computer Science
School of EECS
Peking University
Beijing, China


[PATCH 2/3] emulator: Add multibyte nopl test case

2013-06-25 Thread Arthur Chunqi Li
Test multi-byte nopl (from 1-byte nopl to 9-byte nopl) in
64-bit mode.

Signed-off-by: Arthur Chunqi Li 
---
 x86/emulator.c |   25 +
 1 file changed, 25 insertions(+)

diff --git a/x86/emulator.c b/x86/emulator.c
index 6972334..bd02d5c 100755
--- a/x86/emulator.c
+++ b/x86/emulator.c
@@ -816,6 +816,30 @@ static void test_movabs(uint64_t *mem, uint8_t *alt_insn_page)
 report("64-bit mov imm2", outregs.rcx == 0x9090909090909090);
 }
 
+static void test_nopl(uint64_t *mem, void *alt_insn_page)
+{
+MK_INSN(nopl1, ".byte 0x90\n\r"); /* 1 byte nop */
+MK_INSN(nopl2, ".byte 0x66, 0x90\n\r"); /* 2 byte nop */
+MK_INSN(nopl3, ".byte 0x0f, 0x1f, 0x00\n\r"); /* 3 byte nop */
+MK_INSN(nopl4, ".byte 0x0f, 0x1f, 0x40, 0x00\n\r"); /* 4 byte nop */
+MK_INSN(nopl5, ".byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n\r"); /* 5 byte nop */
+MK_INSN(nopl6, ".byte 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00\n\r"); /* 6 byte nop */
+MK_INSN(nopl7, ".byte 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00\n\r"); /* 7 byte nop */
+MK_INSN(nopl8, ".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00\n\r"); /* 8 byte nop */
+MK_INSN(nopl9, ".byte 0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00\n\r"); /* 9 byte nop */
+
+trap_emulator(mem, alt_insn_page, &insn_nopl1);
+trap_emulator(mem, alt_insn_page, &insn_nopl2);
+trap_emulator(mem, alt_insn_page, &insn_nopl3);
+trap_emulator(mem, alt_insn_page, &insn_nopl4);
+trap_emulator(mem, alt_insn_page, &insn_nopl5);
+trap_emulator(mem, alt_insn_page, &insn_nopl6);
+trap_emulator(mem, alt_insn_page, &insn_nopl7);
+trap_emulator(mem, alt_insn_page, &insn_nopl8);
+trap_emulator(mem, alt_insn_page, &insn_nopl9);
+report("nopl", 1);
+}
+
 static void test_crosspage_mmio(volatile uint8_t *mem)
 {
 volatile uint16_t w, *pw;
@@ -1012,6 +1036,7 @@ int main()
 
test_mmx_movq_mf(mem, alt_insn_page);
test_movabs(mem, alt_insn_page);
+   test_nopl(mem, alt_insn_page);
 
test_crosspage_mmio(mem);
 
-- 
1.7.9.5



[PATCH 3/3] emulator: Add spl/bpl/sil/dil access via modrm

2013-06-25 Thread Arthur Chunqi Li
Add a test case for accessing spl/bpl/sil/dil via modrm in the emulator.

Signed-off-by: Arthur Chunqi Li 
---
 x86/emulator.c |   26 ++
 1 file changed, 26 insertions(+)

diff --git a/x86/emulator.c b/x86/emulator.c
index bd02d5c..bea9513 100755
--- a/x86/emulator.c
+++ b/x86/emulator.c
@@ -840,6 +840,31 @@ static void test_nopl(uint64_t *mem, void *alt_insn_page)
 report("nopl", 1);
 }
 
+static void test_modrm(uint64_t *mem, void *alt_insn_page)
+{
+
+MK_INSN(modrm_spl,"mov %al, %spl\n\t");
+MK_INSN(modrm_bpl,"mov %cl, %bpl\n\t");
+MK_INSN(modrm_sil,"mov %dl, %sil\n\t");
+MK_INSN(modrm_dil,"mov %bl, %dil\n\t");
+
+inregs = (struct regs){ .rax = 0x1234 };
+trap_emulator(mem, alt_insn_page, &insn_modrm_spl);
+report("access spl via modr/m", outregs.rax == 0x1234);
+
+inregs = (struct regs){ .rcx = 0x1234 };
+trap_emulator(mem, alt_insn_page, &insn_modrm_bpl);
+report("access bpl via modr/m", outregs.rcx == 0x1234);
+
+inregs = (struct regs){ .rdx = 0x1234 };
+trap_emulator(mem, alt_insn_page, &insn_modrm_sil);
+report("access sil via modr/m", outregs.rdx == 0x1234);
+
+inregs = (struct regs){ .rbx = 0x1234 };
+trap_emulator(mem, alt_insn_page, &insn_modrm_dil);
+report("access dil via modr/m", outregs.rbx == 0x1234);
+}
+
 static void test_crosspage_mmio(volatile uint8_t *mem)
 {
 volatile uint16_t w, *pw;
@@ -1037,6 +1062,7 @@ int main()
test_mmx_movq_mf(mem, alt_insn_page);
test_movabs(mem, alt_insn_page);
test_nopl(mem, alt_insn_page);
+   test_modrm(mem, alt_insn_page);
 
test_crosspage_mmio(mem);
 
-- 
1.7.9.5



[PATCH 1/3] emulator: fix confused param list

2013-06-25 Thread Arthur Chunqi Li
Fix the param lists of test_mmx_movq_mf and test_movabs. The previous
version takes "insn_page" and "insn_ram" parameters which are not used
afterwards. There are also two variables named "insn_page" and
"insn_ram", which have no relation to these two functions.

Signed-off-by: Arthur Chunqi Li 
---
 x86/emulator.c |   10 --
 1 file changed, 4 insertions(+), 6 deletions(-)
 mode change 100644 => 100755 x86/emulator.c

diff --git a/x86/emulator.c b/x86/emulator.c
old mode 100644
new mode 100755
index 68d2b93..6972334
--- a/x86/emulator.c
+++ b/x86/emulator.c
@@ -786,8 +786,7 @@ static void advance_rip_by_3_and_note_exception(struct ex_regs *regs)
 regs->rip += 3;
 }
 
-static void test_mmx_movq_mf(uint64_t *mem, uint8_t *insn_page,
-uint8_t *alt_insn_page, void *insn_ram)
+static void test_mmx_movq_mf(uint64_t *mem, uint8_t *alt_insn_page)
 {
 uint16_t fcw = 0;  /* all exceptions unmasked */
 /* movq %mm0, (%rax) */
@@ -808,8 +807,7 @@ static void test_mmx_movq_mf(uint64_t *mem, uint8_t *insn_page,
 handle_exception(MF_VECTOR, 0);
 }
 
-static void test_movabs(uint64_t *mem, uint8_t *insn_page,
-  uint8_t *alt_insn_page, void *insn_ram)
+static void test_movabs(uint64_t *mem, uint8_t *alt_insn_page)
 {
 /* mov $0x9090909090909090, %rcx */
 MK_INSN(movabs, "mov $0x9090909090909090, %rcx\n\t");
@@ -1012,8 +1010,8 @@ int main()
test_lldt(mem);
test_ltr(mem);
 
-   test_mmx_movq_mf(mem, insn_page, alt_insn_page, insn_ram);
-   test_movabs(mem, insn_page, alt_insn_page, insn_ram);
+   test_mmx_movq_mf(mem, alt_insn_page);
+   test_movabs(mem, alt_insn_page);
 
test_crosspage_mmio(mem);
 
-- 
1.7.9.5
