On Monday, March 26, 2012 07:52:49 PM Gleb Natapov wrote:
> On Mon, Mar 26, 2012 at 07:46:03PM +0200, Vadim Rozenfeld wrote:
> > On Monday, March 26, 2012 07:00:32 PM Peter Lieven wrote:
> > > On 22.03.2012 10:38, Vadim Rozenfeld wrote:
> > > > On Thursday, March 22, 2012 10:52:42 AM Peter Lieven wrote:
> > > >> On 22.03.2012 09:48, Vadim Rozenfeld wrote:
> > > >>> On Thursday, March 22, 2012 09:53:45 AM Gleb Natapov wrote:
> > > >>>> On Wed, Mar 21, 2012 at 06:31:02PM +0100, Peter Lieven wrote:
> > > >>>>> On 21.03.2012 12:10, David Cure wrote:
> > > >>>>>> hello,
> > > >>>>>>
> > > >>>>>> Le Tue, Mar 20, 2012 at 02:38:22PM +0200, Gleb Natapov ecrivait :
> > > >>>>>>> Try to add <feature policy='disable' name='hypervisor'/> to
> > > >>>>>>> cpu definition in XML and check command line.
> > > >>>>>>>
> > > >>>>>> ok I try this but I can't use <cpu model> to map the host cpu
> > > >>>>>>
> > > >>>>>> (my libvirt is 0.9.8) so I use :
> > > >>>>>> <cpu match='exact'>
> > > >>>>>>
> > > >>>>>> <model>Opteron_G3</model>
> > > >>>>>> <feature policy='disable' name='hypervisor'/>
> > > >>>>>>
> > > >>>>>> </cpu>
> > > >>>>>>
> > > >>>>>> (the physical server use Opteron CPU).
> > > >>>>>>
> > > >>>>>> The log is here :
> > > >>>>>> http://www.roullier.net/Report/report-3.2-vhost-net-1vcpu-cpu.txt.gz
> > > >>>>>>
> > > >>>>>> And now with only 1 vcpu, the response time is 8.5s, great
> > > >>>>>>
> > > >>>>>> improvement. We keep this configuration for production : we check
> > > >>>>>> the response time when some other users are connected.
> > > >>>>>
> > > >>>>> please keep in mind, that setting -hypervisor, disabling hpet and
> > > >>>>> only one vcpu
> > > >>>>> makes windows use tsc as clocksource. you have to make sure, that
> > > >>>>> your vm is not switching between physical sockets on your system
> > > >>>>> and that you have constant_tsc feature to have a stable tsc
> > > >>>>> between the cores in the same socket. it's also likely that the
> > > >>>>> vm will crash when live migrated.
> > > >>>>
> > > >>>> All true. I asked to try -hypervisor only to verify where we lose
> > > >>>> performance. Since you get good result with it frequent access to
> > > >>>> PM timer is probably the reason. I do not recommend using
> > > >>>> -hypervisor for production!
> > > >>>>
> > > >>>>> @gleb: do you know what's the state of in-kernel hyper-v timers?
> > > >>>>
> > > >>>> Vadim is working on it. I'll let him answer.
> > > >>>
> > > >>> It would be nice to have synthetic timers supported. But, at the
> > > >>> moment, I'm only researching this feature.
> > > >>
> > > >> So it will take months at least?
> > > >
> > > > I would say weeks.
> > >
> > > Is there a way, we could contribute and help you with this?
> >
> > Hi Peter,
> > You are welcome to add an appropriate handler.
>
> I think Vadim refers to this HV MSR
> http://msdn.microsoft.com/en-us/library/windows/hardware/ff542633%28v=vs.85%29.aspx
This one is pretty simple to support. Please see attachments for more details.
I was thinking about synthetic timers http://msdn.microsoft.com/en-us/library/windows/hardware/ff542758(v=vs.85).aspx
>
> --
> Gleb.
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 74c9edf..fafc8ff 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -535,6 +535,7 @@ struct kvm_arch {
/* fields used by HYPER-V emulation */
u64 hv_guest_os_id;
u64 hv_hypercall;
+ u64 hv_ref_count;
atomic_t reader_counter;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c9d99e5..4562581 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1393,6 +1393,7 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
case HV_X64_MSR_HYPERCALL:
+ case HV_X64_MSR_TIME_REF_COUNT:
r = true;
break;
}
@@ -1432,6 +1433,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
kvm->arch.hv_hypercall = data;
+ kvm->arch.hv_ref_count = get_kernel_ns();
break;
}
default:
@@ -1467,6 +1469,9 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
case HV_X64_MSR_TPR:
return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
+ case 0x40000021:
+ break;
default:
pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
"data 0x%llx\n", msr, data);
@@ -1842,6 +1847,10 @@ static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case HV_X64_MSR_HYPERCALL:
data = kvm->arch.hv_hypercall;
break;
+ case HV_X64_MSR_TIME_REF_COUNT:
+ data = get_kernel_ns() - kvm->arch.hv_ref_count;
+ break;
default:
pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1;
@@ -1873,6 +1882,9 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case HV_X64_MSR_APIC_ASSIST_PAGE:
data = vcpu->arch.hv_vapic;
break;
default:
pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1;
diff --git a/target-i386/cpuid.c b/target-i386/cpuid.c
index c2edb64..5c85492 100644
--- a/target-i386/cpuid.c
+++ b/target-i386/cpuid.c
@@ -778,6 +778,9 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
hyperv_enable_relaxed_timing(true);
} else if (!strcmp(featurestr, "hv_vapic")) {
hyperv_enable_vapic_recommended(true);
+ } else if (!strcmp(featurestr, "hv_refcnt")) {
+ hyperv_enable_ref_count(true);
} else {
fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
goto error;
diff --git a/target-i386/hyperv.c b/target-i386/hyperv.c
index f284e99..13a1cb7 100644
--- a/target-i386/hyperv.c
+++ b/target-i386/hyperv.c
@@ -15,6 +15,7 @@
static bool hyperv_vapic;
static bool hyperv_relaxed_timing;
static int hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
+static bool hyperv_ref_count;
void hyperv_enable_vapic_recommended(bool val)
{
@@ -34,9 +35,17 @@ void hyperv_set_spinlock_retries(int val)
}
}
+void hyperv_enable_ref_count(bool val)
+{
+ hyperv_ref_count = val;
+}
+
bool hyperv_enabled(void)
{
- return hyperv_hypercall_available() || hyperv_relaxed_timing_enabled();
+ return hyperv_hypercall_available() ||
+ hyperv_relaxed_timing_enabled() ||
+ hyperv_ref_counter_enabled();
}
bool hyperv_hypercall_available(void)
@@ -62,3 +71,9 @@ int hyperv_get_spinlock_retries(void)
{
return hyperv_spinlock_attempts;
}
+
+bool hyperv_ref_counter_enabled(void)
+{
+ return hyperv_ref_count;
+}
diff --git a/target-i386/hyperv.h b/target-i386/hyperv.h
index bacb1d4..a65aa5f 100644
--- a/target-i386/hyperv.h
+++ b/target-i386/hyperv.h
@@ -30,10 +30,12 @@
void hyperv_enable_vapic_recommended(bool val);
void hyperv_enable_relaxed_timing(bool val);
void hyperv_set_spinlock_retries(int val);
+void hyperv_enable_ref_count(bool val);
#else
static inline void hyperv_enable_vapic_recommended(bool val) { }
static inline void hyperv_enable_relaxed_timing(bool val) { }
static inline void hyperv_set_spinlock_retries(int val) { }
+static inline void hyperv_enable_ref_count(bool val) {}
#endif
bool hyperv_enabled(void);
@@ -41,5 +43,5 @@ bool hyperv_hypercall_available(void);
bool hyperv_vapic_recommended(void);
bool hyperv_relaxed_timing_enabled(void);
int hyperv_get_spinlock_retries(void);
-
+bool hyperv_ref_counter_enabled(void);
#endif /* QEMU_HW_HYPERV_H */
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 9a73207..31ca04a 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -414,7 +414,12 @@ int kvm_arch_init_vcpu(CPUState *env)
c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
}
-
+ if (hyperv_ref_counter_enabled()) {
+ c->eax |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
+ c->eax |= 0x200;
+ }
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;