[PATCH v5] i386: Introduce ARAT CPU feature

2015-06-07 Thread Jan Kiszka
From: Jan Kiszka 

ARAT signals that the APIC timer does not stop in power saving states.
As our APICs are emulated, it's fine to expose this feature to guests,
at least when asking for KVM host features or with CPU types that
include the flag. The exact model that introduced the feature is not
known, but reports indicate it has been available since at least
Sandy Bridge.
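
For reference, ARAT is reported in CPUID leaf 06H, EAX bit 2. A minimal
guest-side check might look like the sketch below (illustrative only, not
part of this patch; assumes GCC/clang and <cpuid.h>):

#include <cpuid.h>
#include <stdbool.h>

/* Return true if CPUID.06H:EAX[2] (ARAT) is reported. */
static bool cpu_has_arat(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(6, &eax, &ebx, &ecx, &edx))
        return false;               /* leaf 6 not available */
    return (eax >> 2) & 1;
}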

Signed-off-by: Jan Kiszka 
---

Changes in v5:
 - rebased over master

 include/hw/i386/pc.h |  7 ++-
 target-i386/cpu.c| 33 -
 target-i386/cpu.h|  3 +++
 target-i386/kvm.c|  2 ++
 4 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index bec6de1..3b0b30f 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -294,7 +294,12 @@ int e820_get_num_entries(void);
 bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
 
 #define PC_COMPAT_2_3 \
-HW_COMPAT_2_3
+HW_COMPAT_2_3 \
+{\
+.driver   = TYPE_X86_CPU,\
+.property = "arat",\
+.value= "off",\
+},
 
 #define PC_COMPAT_2_2 \
 PC_COMPAT_2_3 \
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 99ad551..b5b9fc2 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -284,6 +284,17 @@ static const char *cpuid_xsave_feature_name[] = {
 NULL, NULL, NULL, NULL,
 };
 
+static const char *cpuid_6_feature_name[] = {
+NULL, NULL, "arat", NULL,
+NULL, NULL, NULL, NULL,
+NULL, NULL, NULL, NULL,
+NULL, NULL, NULL, NULL,
+NULL, NULL, NULL, NULL,
+NULL, NULL, NULL, NULL,
+NULL, NULL, NULL, NULL,
+NULL, NULL, NULL, NULL,
+};
+
 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
   CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
@@ -339,6 +350,7 @@ static const char *cpuid_xsave_feature_name[] = {
   CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
   CPUID_7_0_EBX_RDSEED */
 #define TCG_APM_FEATURES 0
+#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
 
 
 typedef struct FeatureWordInfo {
@@ -408,6 +420,11 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
 .cpuid_reg = R_EAX,
 .tcg_features = 0,
 },
+[FEAT_6_EAX] = {
+.feat_names = cpuid_6_feature_name,
+.cpuid_eax = 6, .cpuid_reg = R_EAX,
+.tcg_features = TCG_6_EAX_FEATURES,
+},
 };
 
 typedef struct X86RegisterInfo32 {
@@ -1001,6 +1018,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
 .features[FEAT_8000_0001_ECX] =
 CPUID_EXT3_LAHF_LM,
+.features[FEAT_6_EAX] =
+CPUID_6_EAX_ARAT,
 .xlevel = 0x8000000A,
 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
 },
@@ -1030,6 +1049,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
 CPUID_EXT3_LAHF_LM,
 .features[FEAT_XSAVE] =
 CPUID_XSAVE_XSAVEOPT,
+.features[FEAT_6_EAX] =
+CPUID_6_EAX_ARAT,
 .xlevel = 0x8000000A,
 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
 },
@@ -1062,6 +1083,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
 CPUID_EXT3_LAHF_LM,
 .features[FEAT_XSAVE] =
 CPUID_XSAVE_XSAVEOPT,
+.features[FEAT_6_EAX] =
+CPUID_6_EAX_ARAT,
 .xlevel = 0x8000000A,
 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
 },
@@ -1096,6 +1119,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
 .features[FEAT_XSAVE] =
 CPUID_XSAVE_XSAVEOPT,
+.features[FEAT_6_EAX] =
+CPUID_6_EAX_ARAT,
 .xlevel = 0x8000000A,
 .model_id = "Intel Core Processor (Haswell, no TSX)",
 },{
@@ -1130,6 +1155,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
 CPUID_7_0_EBX_RTM,
 .features[FEAT_XSAVE] =
 CPUID_XSAVE_XSAVEOPT,
+.features[FEAT_6_EAX] =
+CPUID_6_EAX_ARAT,
 .xlevel = 0x8000000A,
 .model_id = "Intel Core Processor (Haswell)",
 },
@@ -1166,6 +1193,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
 CPUID_7_0_EBX_SMAP,
 .features[FEAT_XSAVE] =
 CPUID_XSAVE_XSAVEOPT,
+.features[FEAT_6_EAX] =
+CPUID_6_EAX_ARAT,
 .xlevel = 0x8000000A,
 .model_id = "Intel Core Processor (Broadwell, no TSX)",
 },
@@ -1202,6 +1231,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
 CPUID_7_0_EBX_SMAP,
 .features[FEAT_XSAVE] =
 CPUID_XSAVE_XSAVEOPT,
+.features[FEAT_6_EAX] =
+CPUID_6_EAX_ARAT,
 .xlevel = 0x8000000A,
 .model_id = "Intel Core Processor (Broadwell)",
 },
@@ -2358,7 +2389,7 @@ void cpu_x86_cpuid(

Re: [PATCH v2 01/11] KVM: arm: plug guest debug exploit

2015-06-07 Thread zichao
Hi, Marc,

On 2015/6/1 18:56, Marc Zyngier wrote:
> Hi Zhichao,
> 
> On 31/05/15 05:27, Zhichao Huang wrote:
>> Hardware debugging in guests is currently not intercepted, which means
>> that a malicious guest can bring down the entire machine by writing
>> to the debug registers.
>>
>> This patch enables trapping of all debug register accesses, preventing
>> guests from messing with the host state.
>>
>> It is also a precursor for later patches, which will need to do more to
>> world-switch the debug state when necessary.
>>
>> Cc: 
>> Signed-off-by: Zhichao Huang 
>> ---
>>  arch/arm/include/asm/kvm_coproc.h |  3 +-
>>  arch/arm/kvm/coproc.c | 60 +++
>>  arch/arm/kvm/handle_exit.c|  4 +--
>>  arch/arm/kvm/interrupts_head.S|  2 +-
>>  4 files changed, 59 insertions(+), 10 deletions(-)
>>
>> diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
>> index 4917c2f..e74ab0f 100644
>> --- a/arch/arm/include/asm/kvm_coproc.h
>> +++ b/arch/arm/include/asm/kvm_coproc.h
>> @@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
>>  int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
>>  int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
>>  int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
>> -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
>> +int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
>> +int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
>>  int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
>>  int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
>>  
>> diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
>> index f3d88dc..2e12760 100644
>> --- a/arch/arm/kvm/coproc.c
>> +++ b/arch/arm/kvm/coproc.c
>> @@ -91,12 +91,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
>>  return 1;
>>  }
>>  
>> -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
>> -{
>> -kvm_inject_undefined(vcpu);
>> -return 1;
>> -}
>> -
>>  static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
>>  {
>>  /*
>> @@ -519,6 +513,60 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
>>  return emulate_cp15(vcpu, &params);
>>  }
>>  
>> +/**
>> + * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
>> + * @vcpu: The VCPU pointer
>> + * @run:  The kvm_run struct
>> + */
>> +int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
>> +{
>> +struct coproc_params params;
>> +
>> +params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
>> +params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
>> +params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
>> +params.is_64bit = true;
>> +
>> +params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
>> +params.Op2 = 0;
>> +params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
>> +params.CRm = 0;
>> +
>> +/* raz_wi */
>> +(void)pm_fake(vcpu, &params, NULL);
>> +
>> +/* handled */
>> +kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
>> +return 1;
>> +}
>> +
>> +/**
>> + * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
>> + * @vcpu: The VCPU pointer
>> + * @run:  The kvm_run struct
>> + */
>> +int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
>> +{
>> +struct coproc_params params;
>> +
>> +params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
>> +params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
>> +params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
>> +params.is_64bit = false;
>> +
>> +params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
>> +params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
>> +params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
>> +params.Rt2 = 0;
>> +
>> +/* raz_wi */
>> +(void)pm_fake(vcpu, &params, NULL);
>> +
>> +/* handled */
>> +kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
>> +return 1;
>> +}
>> +
>>  
>> /*****************************************************************
>>  * Userspace API
>>  *****************************************************************/
>> diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
>> index 95f12b2..357ad1b 100644
>> --- a/arch/arm/kvm/handle_exit.c
>> +++ b/arch/arm/kvm/handle_exit.c
>> @@ -104,9 +104,9 @@ static exit_handle_fn arm_exit_handlers[] = {
>>  [HSR_EC_WFI]= kvm_handle_wfx,
>>  [HSR_EC_CP15_32]= kvm_handle_cp15_32,
>>  [HSR_EC_CP15_64]= kvm_handle_cp15_64,
>> -[HSR_EC_CP14_MR]= kvm_handle_cp14_access,
>> +[HSR_EC_CP14_MR]= kvm_handle_cp14_32,
>>  [HSR_EC_CP14_LS]= kvm_handle_cp14_load_store,
>> -[HSR_EC_CP14_64]= kvm_handle_cp14_access,
>> +[HSR_EC_CP14_64]= kvm_handle_cp14_64,

Re: [PATCH v2 09/11] KVM: arm: disable debug mode if we don't actually need it.

2015-06-07 Thread zichao
Hi, Will,

On 2015/6/1 18:16, Will Deacon wrote:
> On Sun, May 31, 2015 at 05:27:10AM +0100, Zhichao Huang wrote:
>> Until now we enable debug mode all the time even if we don't
>> actually need it.
>>
>> Inspired by the implementation in arm64, disable debug mode if
>> we don't need it. Then we are able to reduce unnecessary
>> register saving/restoring when debug mode is disabled.
> 
> I'm terrified about this patch. Enabling monitor mode has proven to be
> *extremely* fragile in practice on 32-bit ARM SoCs, so trying to do this
> more often makes me very nervous.
> 
>> Signed-off-by: Zhichao Huang 
>> ---
>>  arch/arm/kernel/hw_breakpoint.c | 55 ++---
>>  1 file changed, 46 insertions(+), 9 deletions(-)
>>
>> diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
>> index dc7d0a9..1d27563 100644
>> --- a/arch/arm/kernel/hw_breakpoint.c
>> +++ b/arch/arm/kernel/hw_breakpoint.c
>> @@ -266,8 +266,7 @@ static int enable_monitor_mode(void)
>>  }
>>  
>>  /* Check that the write made it through. */
>> -ARM_DBG_READ(c0, c1, 0, dscr);
>> -if (!(dscr & ARM_DSCR_MDBGEN)) {
>> +if (!monitor_mode_enabled()) {
>>  pr_warn_once("Failed to enable monitor mode on CPU %d.\n",
>>  smp_processor_id());
>>  return -EPERM;
> 
> Ok, this hunk is harmless :)
> 
>> @@ -277,6 +276,43 @@ out:
>>  return 0;
>>  }
>>  
>> +static int disable_monitor_mode(void)
>> +{
>> +u32 dscr;
>> +
>> +ARM_DBG_READ(c0, c1, 0, dscr);
>> +
>> +/* If monitor mode is already disabled, just return. */
>> +if (!(dscr & ARM_DSCR_MDBGEN))
>> +goto out;
>> +
>> +/* Write to the corresponding DSCR. */
>> +switch (get_debug_arch()) {
>> +case ARM_DEBUG_ARCH_V6:
>> +case ARM_DEBUG_ARCH_V6_1:
>> +ARM_DBG_WRITE(c0, c1, 0, (dscr & ~ARM_DSCR_MDBGEN));
>> +break;
>> +case ARM_DEBUG_ARCH_V7_ECP14:
>> +case ARM_DEBUG_ARCH_V7_1:
>> +case ARM_DEBUG_ARCH_V8:
>> +ARM_DBG_WRITE(c0, c2, 2, (dscr & ~ARM_DSCR_MDBGEN));
>> +isb();
>> +break;
>> +default:
>> +return -ENODEV;
>> +}
>> +
>> +/* Check that the write made it through. */
>> +if (monitor_mode_enabled()) {
>> +pr_warn_once("Failed to disable monitor mode on CPU %d.\n",
>> +smp_processor_id());
>> +return -EPERM;
>> +}
>> +
>> +out:
>> +return 0;
>> +}
> 
> I'm not comfortable with this. enable_monitor_mode has precisely one caller
> [reset_ctrl_regs] which goes to some lengths to get the system into a
> well-defined state. On top of that, the whole thing is run with an undef
> hook registered because there isn't an architected way to discover whether
> or not DBGSWENABLE is driven low.
> 
> Why exactly do you need this? Can you not trap guest debug accesses some
> other way?

OK, I shall look for some other ways to reduce the overhead of the world switch.

Another problem might be that, when we start an ARMv7 VM (specifying the CPU
to be Cortex-A57) on an ARMv8 platform, the debug registers will always be
saved/restored, because debug mode is always detected as enabled even though
we don't actually use it.

> 
>>  int hw_breakpoint_slots(int type)
>>  {
>>  if (!debug_arch_supported())
>> @@ -338,6 +374,8 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
>>  int i, max_slots, ctrl_base, val_base;
>>  u32 addr, ctrl;
>>  
>> +enable_monitor_mode();
>> +
>>  addr = info->address;
>>  ctrl = encode_ctrl_reg(info->ctrl) | 0x1;
>>  
>> @@ -430,6 +468,8 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
>>  
>>  /* Reset the control register. */
>>  write_wb_reg(base + i, 0);
>> +
>> +disable_monitor_mode();
> 
> My previous concerns notwithstanding, shouldn't this be refcounted?

Maybe it shouldn't be, in my opinion, because we install/uninstall breakpoints
only when we do a context switch, and then we always install/uninstall all of
the breakpoints.
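
For completeness, a refcounted variant would look roughly like the sketch
below (illustrative only, not part of this series; the per-CPU counter name
is made up):

#include <linux/percpu.h>

/* Sketch: first user per CPU enables monitor mode, last user disables it. */
static DEFINE_PER_CPU(int, mdbgen_refcnt);

static int get_monitor_mode(void)
{
    if (this_cpu_inc_return(mdbgen_refcnt) == 1)
        return enable_monitor_mode();
    return 0;
}

static void put_monitor_mode(void)
{
    if (this_cpu_dec_return(mdbgen_refcnt) == 0)
        disable_monitor_mode();
}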

> 
> Will
> 


RE: [v3 24/26] KVM: Update Posted-Interrupts Descriptor when vCPU is blocked

2015-06-07 Thread Wu, Feng


> -Original Message-
> From: Marcelo Tosatti [mailto:mtosa...@redhat.com]
> Sent: Saturday, June 06, 2015 5:59 AM
> To: Wu, Feng
> Cc: h...@zytor.com; t...@linutronix.de; mi...@redhat.com; x...@kernel.org;
> g...@kernel.org; pbonz...@redhat.com; dw...@infradead.org;
> j...@8bytes.org; alex.william...@redhat.com; jiang@linux.intel.com;
> eric.au...@linaro.org; linux-ker...@vger.kernel.org;
> io...@lists.linux-foundation.org; kvm@vger.kernel.org
> Subject: Re: [v3 24/26] KVM: Update Posted-Interrupts Descriptor when vCPU
> is blocked
> 
> On Tue, Apr 14, 2015 at 07:37:44AM +, Wu, Feng wrote:
> >
> >
> > > -Original Message-
> > > From: Marcelo Tosatti [mailto:mtosa...@redhat.com]
> > > Sent: Tuesday, March 31, 2015 7:56 AM
> > > To: Wu, Feng
> > > Cc: h...@zytor.com; t...@linutronix.de; mi...@redhat.com;
> x...@kernel.org;
> > > g...@kernel.org; pbonz...@redhat.com; dw...@infradead.org;
> > > j...@8bytes.org; alex.william...@redhat.com; jiang@linux.intel.com;
> > > eric.au...@linaro.org; linux-ker...@vger.kernel.org;
> > > io...@lists.linux-foundation.org; kvm@vger.kernel.org
> > > Subject: Re: [v3 24/26] KVM: Update Posted-Interrupts Descriptor when
> vCPU
> > > is blocked
> > >
> > > On Mon, Mar 30, 2015 at 04:46:55AM +, Wu, Feng wrote:
> > > >
> > > >
> > > > > -Original Message-
> > > > > From: Marcelo Tosatti [mailto:mtosa...@redhat.com]
> > > > > Sent: Saturday, March 28, 2015 3:30 AM
> > > > > To: Wu, Feng
> > > > > Cc: h...@zytor.com; t...@linutronix.de; mi...@redhat.com;
> > > x...@kernel.org;
> > > > > g...@kernel.org; pbonz...@redhat.com; dw...@infradead.org;
> > > > > j...@8bytes.org; alex.william...@redhat.com;
> jiang@linux.intel.com;
> > > > > eric.au...@linaro.org; linux-ker...@vger.kernel.org;
> > > > > io...@lists.linux-foundation.org; kvm@vger.kernel.org
> > > > > Subject: Re: [v3 24/26] KVM: Update Posted-Interrupts Descriptor when
> > > vCPU
> > > > > is blocked
> > > > >
> > > > > On Fri, Mar 27, 2015 at 06:34:14AM +, Wu, Feng wrote:
> > > > > > > > Currently, the following code is executed before local_irq_disable() is
> > > > > > > > called, so do you mean 1) moving local_irq_disable() to the place before
> > > > > > > > it, or 2) after interrupt is disabled, setting KVM_REQ_EVENT in case the
> > > > > > > > ON bit is set?
> > > > > > >
> > > > > > > 2) after interrupt is disabled, set KVM_REQ_EVENT in case the ON bit
> > > > > > > is set.
> > > > > >
> > > > > > Here is my understanding about your comments here:
> > > > > > - Disable interrupts
> > > > > > - Check 'ON'
> > > > > > - Set KVM_REQ_EVENT if 'ON' is set
> > > > > >
> > > > > > Then we can put the above code inside "if
> > > > > > (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win)" just like it
> > > > > > used to be. However, I still have some questions about this comment:
> > > > > >
> > > > > > 1. Where should I set KVM_REQ_EVENT? In function vcpu_enter_guest(), or
> > > > > > other places?
> > > > >
> > > > > See below:
> > > > >
> > > > > > If in vcpu_enter_guest(), since currently local_irq_disable() is called
> > > > > > after 'KVM_REQ_EVENT' is checked, is it helpful to set KVM_REQ_EVENT
> > > > > > after local_irq_disable() is called?
> > > > >
> > > > > local_irq_disable();
> > > > >
> > > > >   *** add code here ***
> > > >
> > > > So we need to add code like the following here, right?
> > > >
> > > >   if ('ON' is set)
> > > >   kvm_make_request(KVM_REQ_EVENT, vcpu);
> > >
> >
> > Hi Marcelo,
> >
> > I changed the code as above, and then I found that the ping latency was
> > extremely high (70ms - 400ms).
> > I dug into it and found the root cause: we cannot use "checking ON" as the
> > judgment, since 'ON' can be cleared by hypervisor software in lots of
> > places. In that case, KVM_REQ_EVENT cannot be set when we check the 'ON'
> > bit, hence the interrupts are not injected into the guest in time.
> >
> > Please refer to the following call chain, in which the 'ON' bit can be
> > cleared:
> >
> > apic_find_highest_irr() --> vmx_sync_pir_to_irr() --> pi_test_and_clear_on()
> >
> > Stepping through the code, apic_find_highest_irr() can be called from many
> > other places.
> >
> > Thanks,
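
(For reference, the variant discussed above amounts to roughly the sketch
below, as a fragment in vcpu_enter_guest()-style context; pi_test_on() is a
stand-in name for whatever reads the ON bit of the posted-interrupt
descriptor, and as explained this check turned out to be insufficient.)

local_irq_disable();

/*
 * Sketch only, not the final code.  ON may already have been cleared by
 * apic_find_highest_irr() -> vmx_sync_pir_to_irr() ->
 * pi_test_and_clear_on() before we get here, so a pending posted
 * interrupt can be missed by this check.
 */
if (pi_test_on(&vmx->pi_desc))
    kvm_make_request(KVM_REQ_EVENT, vcpu);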
> 
> Ok then, ignore my suggestion.
> 
> Can you resend the latest version please ?

Thanks for your review, I will send the new version soon.

Thanks,
Feng

> 



Re: [PATCH 0/6] x86: reduce paravirtualized spinlock overhead

2015-06-07 Thread Juergen Gross

Ping?

Anything missing from my side?

On 04/30/2015 12:53 PM, Juergen Gross wrote:

Paravirtualized spinlocks produce some overhead even if the kernel is
running on bare metal. The main reason is the more complex locking
and unlocking functions. Unlocking in particular is no longer just one
instruction, but so complex that it is no longer inlined.

This patch series addresses this issue by adding two more pvops
functions to reduce the size of the inlined spinlock functions. When
running on bare metal unlocking is again basically one instruction.
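
The rough idea is sketched below (illustrative only; names and details
differ from the actual patches): the unlock fast path becomes a pvops call
that the paravirt patching machinery replaces with the native ticket-head
increment when not running virtualized, so it can be inlined again.

/* Sketch of an inlinable pv unlock hook; names are illustrative. */
struct pv_lock_ops {
    /* ... existing ops ... */
    void (*unlock)(arch_spinlock_t *lock);
};

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
    /*
     * On bare metal the patching machinery replaces this call site with
     * the few bytes of the native unlock (an add on the ticket head);
     * under Xen/KVM it stays a call into the hypervisor-aware slow path.
     */
    PVOP_VCALL1(pv_lock_ops.unlock, lock);
}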

Compile tested with CONFIG_PARAVIRT_SPINLOCKS on and off, 32 and 64
bits.

Functional testing on bare metal and as Xen dom0.

Correct patching verified by disassembly of active kernel.

Juergen Gross (6):
   x86: use macro instead of "0" for setting TICKET_SLOWPATH_FLAG
   x86: move decision about clearing slowpath flag into arch_spin_lock()
   x86: introduce new pvops function clear_slowpath
   x86: introduce new pvops function spin_unlock
   x86: switch config from UNINLINE_SPIN_UNLOCK to INLINE_SPIN_UNLOCK
   x86: remove no longer needed paravirt_ticketlocks_enabled

  arch/x86/Kconfig  |  1 -
  arch/x86/include/asm/paravirt.h   | 13 +
  arch/x86/include/asm/paravirt_types.h | 12 
  arch/x86/include/asm/spinlock.h   | 53 ---
  arch/x86/include/asm/spinlock_types.h |  3 +-
  arch/x86/kernel/kvm.c | 14 +
  arch/x86/kernel/paravirt-spinlocks.c  | 42 +--
  arch/x86/kernel/paravirt.c| 12 
  arch/x86/kernel/paravirt_patch_32.c   | 25 +
  arch/x86/kernel/paravirt_patch_64.c   | 24 
  arch/x86/xen/spinlock.c   | 23 +--
  include/linux/spinlock_api_smp.h  |  2 +-
  kernel/Kconfig.locks  |  7 +++--
  kernel/Kconfig.preempt|  3 +-
  kernel/locking/spinlock.c |  2 +-
  lib/Kconfig.debug |  1 -
  16 files changed, 154 insertions(+), 83 deletions(-)


RE: [PATCH 00/13] arm64: KVM: GICv3 ITS emulation

2015-06-07 Thread Pavel Fedin
 Hello everybody!

> The GICv3 ITS (Interrupt Translation Service) is a part of the
> ARM GICv3 interrupt controller used for implementing MSIs.
> It specifies a new kind of interrupts (LPIs), which are mapped to
> establish a connection between a device, its MSI payload value and
> the target processor the IRQ is eventually delivered to.
> In order to allow using MSIs in an ARM64 KVM guest, we emulate this
> ITS widget in the kernel.

 I have tested the patch and got some more ideas for future extension...

 First of all, it would be nice to have the possibility to directly inject
LPIs by number. This will be useful for irqfd support in QEMU.
 Next, irqfd support currently poses a problem: we need to somehow derive the
IRQ number from the MSI-X data (device ID plus event ID). The ITS has all of
this information, so it would be nice to be able to query for the translation
from userspace. The question is how to do it -- should we add some ioctl for
this purpose? Currently I am experimenting with an extra KVM_TRANSLATE_MSI
ioctl which, given the MSI data, would return the LPI number.
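Something along these lines (purely illustrative -- this is the experiment
mentioned above, not an existing kernel API; the field layout and the ioctl
number are made up):

/* Hypothetical sketch of the experimental translation interface. */
struct kvm_msi_translation {
    __u64 address;      /* MSI doorbell address (GITS_TRANSLATER) */
    __u32 devid;        /* device ID of the requester */
    __u32 data;         /* MSI data, i.e. the event ID */
    __u32 lpi;          /* out: translated LPI number */
    __u32 pad;
};

/* The ioctl number below is only a placeholder. */
#define KVM_TRANSLATE_MSI _IOWR(KVMIO, 0xff, struct kvm_msi_translation)

Userspace would fill in the doorbell address, device ID and event ID, issue
the ioctl on the VM file descriptor, and get back the LPI number to wire up
to an irqfd.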
 Actually, before your patch came out I had almost done the same thing, but
instead I decided to implement the ITS in QEMU while leaving LPI handling to
the kernel. In that case my QEMU would have everything it needs.
 By the way, why did you decide to put everything into the kernel? Yes,
in-kernel emulation is faster, but the ITS is not accessed frequently.

Kind regards,
Pavel Fedin
Expert Engineer
Samsung Electronics Research center Russia
