Re: [PATCH 3/3] KVM/arm64: enable enhanced armv8 fp/simd lazy switch

2015-11-14 Thread Mario Smarduch


On 11/10/2015 3:18 AM, Christoffer Dall wrote:
> On Mon, Nov 09, 2015 at 03:13:15PM -0800, Mario Smarduch wrote:
>>
>>
>> On 11/5/2015 7:02 AM, Christoffer Dall wrote:
>>> On Fri, Oct 30, 2015 at 02:56:33PM -0700, Mario Smarduch wrote:
[]
>> kern_hyp_va x0
>> add x2, x0, #VCPU_CONTEXT
>> mrs x1, fpexec32_el2
>> str x1, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
>> ret
>>
>> Of course each hyp call has additional overhead, at a high exit to
>> vcpu_put ratio hyp call appears better. But all this is very
>> highly dependent on exit rate and fp/simd usage. IMO hyp call
>> works better under extreme loads and should be pretty close
>> for general loads.
>>
>> Any thoughts?
>>
> I think the typical case will be lots of exits and few
> vcpu_load/vcpu_put, and I think it's reasonable to write the code that
> way.

Yes, especially for RT guests where vCPU is pinned.

Thanks.
> 
> That should also be much better for VHE.
> 
> So I would go that direction.
> 
> Thanks,
> -Christoffer
> 
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 1/3] KVM/arm/arm64: add hooks for armv7 fp/simd lazy switch support

2015-11-14 Thread Mario Smarduch
This patch adds vcpu fields to track lazy state, save host FPEXC, and
offsets to fields.

Signed-off-by: Mario Smarduch 
---
 arch/arm/include/asm/kvm_host.h | 6 ++
 arch/arm/kernel/asm-offsets.c   | 2 ++
 2 files changed, 8 insertions(+)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 3df1e97..f1bf551 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -107,6 +107,12 @@ struct kvm_vcpu_arch {
/* Interrupt related fields */
u32 irq_lines;  /* IRQ and FIQ levels */
 
+   /* fp/simd dirty flag true if guest accessed register file */
+   boolvfp_dirty;
+
+   /* Save host FPEXC register to later restore on vcpu put */
+   u32 host_fpexc;
+
/* Exception Information */
struct kvm_vcpu_fault_info fault;
 
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 871b826..9f79712 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -186,6 +186,8 @@ int main(void)
   DEFINE(VCPU_CPSR,offsetof(struct kvm_vcpu, 
arch.regs.usr_regs.ARM_cpsr));
   DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr));
   DEFINE(VCPU_IRQ_LINES,   offsetof(struct kvm_vcpu, arch.irq_lines));
+  DEFINE(VCPU_VFP_DIRTY,   offsetof(struct kvm_vcpu, arch.vfp_dirty));
+  DEFINE(VCPU_VFP_HOST_FPEXC,  offsetof(struct kvm_vcpu, arch.host_fpexc));
   DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
   DEFINE(VCPU_HxFAR,   offsetof(struct kvm_vcpu, arch.fault.hxfar));
   DEFINE(VCPU_HPFAR,   offsetof(struct kvm_vcpu, arch.fault.hpfar));
-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 3/3] KVM/arm/arm64: enable enhanced armv8 fp/simd lazy switch

2015-11-14 Thread Mario Smarduch
This patch tracks armv7 and armv8 fp/simd hardware state with a vcpu lazy flag.
On vcpu_load for 32 bit guests enable FP access, and later enable fp/simd
trapping for 32 and 64 bit guests if lazy flag is not set. On first fp/simd 
access trap to handler to save host and restore guest context, disable 
trapping and set vcpu lazy flag. On vcpu_put if flag is set save guest and 
restore host context and also save guest fpexc register.

Signed-off-by: Mario Smarduch 
---
 arch/arm/include/asm/kvm_host.h   |  3 ++
 arch/arm/kvm/arm.c| 18 +++--
 arch/arm64/include/asm/kvm_asm.h  |  2 +
 arch/arm64/include/asm/kvm_host.h | 17 +++-
 arch/arm64/kernel/asm-offsets.c   |  1 +
 arch/arm64/kvm/hyp.S  | 83 +--
 6 files changed, 89 insertions(+), 35 deletions(-)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 8fc7a59..6960ff2 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -40,6 +40,8 @@
 
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
 
+#define kvm_guest_is32bit(vcpu)true
+
 /*
  * Reads the host FPEXC register, saves to vcpu context and enables the
  * FPEXC.
@@ -260,6 +262,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 void kvm_restore_host_vfp_state(struct kvm_vcpu *);
+static inline void kvm_save_guest_fpexc(struct kvm_vcpu *vcpu) {}
 
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index cfc348a..7a20530 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -292,8 +292,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
kvm_arm_set_running_vcpu(vcpu);
 
-   /* Save and enable FPEXC before we load guest context */
-   kvm_enable_fpexc(vcpu);
+   /*
+* For 32bit guest executing on arm64, enable fp/simd access in
+* EL2. On arm32 save host fpexc and then enable fp/simd access.
+*/
+   if (kvm_guest_is32bit(vcpu))
+   kvm_enable_fpexc(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -301,10 +305,18 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
/* If the fp/simd registers are dirty save guest, restore host. */
if (vcpu->arch.vfp_dirty) {
kvm_restore_host_vfp_state(vcpu);
+
+   /*
+* For 32bit guest on arm64 save the guest fpexc register
+* in EL2 mode.
+*/
+   if (kvm_guest_is32bit(vcpu))
+   kvm_save_guest_fpexc(vcpu);
+
vcpu->arch.vfp_dirty = 0;
}
 
-   /* Restore host FPEXC trashed in vcpu_load */
+   /* For arm32 restore host FPEXC trashed in vcpu_load. */
kvm_restore_host_fpexc(vcpu);
 
/*
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 5e37710..c589ca9 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -117,6 +117,8 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_enable_fpexc32(void);
+extern void __kvm_save_fpexc32(struct kvm_vcpu *vcpu);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index 83e65dd..6e2d6b5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -41,6 +41,8 @@
 
 #define KVM_VCPU_MAX_FEATURES 3
 
+#define kvm_guest_is32bit(vcpu)(!(vcpu->arch.hcr_el2 & HCR_RW))
+
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(long ext);
@@ -251,9 +253,20 @@ static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_enable_fpexc(struct kvm_vcpu *vcpu) {}
-static inline void kvm_restore_host_vfp_state(struct kvm_vcpu *vcpu) {}
+
+static inline void kvm_enable_fpexc(struct kvm_vcpu *vcpu)
+{
+   /* Enable FP/SIMD access from EL2 mode*/
+   kvm_call_hyp(__kvm_enable_fpexc32);
+}
+
+static inline void kvm_save_guest_fpexc(struct kvm_vcpu *vcpu)
+{
+   /* Save FPEXEC32_EL2 in EL2 mode */
+   kvm_call_hyp(__kvm_save_fpexc32, vcpu);
+}
 static inline void kvm_restore_host_fpexc(struct kvm_vcpu *vcpu) {}
+void kvm_restore_host_vfp_state(struct kvm_vcpu *vcpu);
 
 void kvm_arm_init_debug(void);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c

[PATCH v4 0/3] KVM/arm/arm64: enhance armv7/8 fp/simd lazy switch

2015-11-14 Thread Mario Smarduch
This patch series combines the previous armv7 and armv8 versions.
For an FP and lmbench load it reduces fp/simd context switch from 30-50% down
to 2%. Results will vary with load but it is no worse than the current
approach.

In summary current lazy vfp/simd implementation switches hardware context only
on guest access and again on exit to host, otherwise hardware context is
skipped. This patch set builds on that functionality and executes a hardware
context switch only when the vCPU is scheduled out or returns to user space.

Patches were tested on FVP and Foundation Model sw platforms running floating 
point applications comparing outcome against known results. A bad FP/SIMD context
switch should result in FP errors. Artificially skipping a fp/simd context switch
(1 in 1000) causes the applications to report errors.

The test can be found here, https://github.com/mjsmar/arm-arm64-fpsimd-test

Tests Ran:
armv7:
- On host executed 12 fp applications - evenly pinned to CPUs
- Two guests - with 12 fp crunching processes - also pinned to vCPUs.
- half ran with 1ms sleep, remaining with no sleep

armv8:
- same as above except used mix of armv7 and armv8 guests.

These patches are based on earlier arm64 fp/simd optimization work -
https://lists.cs.columbia.edu/pipermail/kvmarm/2015-July/015748.html

And subsequent fixes by Marc and Christoffer at KVM Forum hackathon to handle
32-bit guest on 64 bit host - 
https://lists.cs.columbia.edu/pipermail/kvmarm/2015-August/016128.html

Changes since v3->v4:
- Followup on Christoffer's comments 
  - Move fpexc handling to vcpu_load and vcpu_put
  - Enable and restore fpexc in EL2 mode when running a 32 bit guest on 
64bit EL2
  - rework hcptr handling

Changes since v2->v3:
- combined arm v7 and v8 into one short patch series
- moved access to fpexec_el2 back to EL2
- Move host restore to EL1 from EL2 and call directly from host
- optimize trap enable code 
- renamed some variables to match usage

Changes since v1->v2:
- Fixed vfp/simd trap configuration to enable trace trapping
- Removed set_hcptr branch label
- Fixed handling of FPEXC to restore guest and host versions on vcpu_put
- Tested arm32/arm64
- rebased to 4.3-rc2
- changed a couple register accesses from 64 to 32 bit


Mario Smarduch (3):
  add hooks for armv7 fp/simd lazy switch support
  enable enhanced armv7 fp/simd lazy switch
  enable enhanced armv8 fp/simd lazy switch

 arch/arm/include/asm/kvm_host.h   | 42 
 arch/arm/kernel/asm-offsets.c |  2 +
 arch/arm/kvm/arm.c| 24 +++
 arch/arm/kvm/interrupts.S | 58 ---
 arch/arm/kvm/interrupts_head.S| 26 
 arch/arm64/include/asm/kvm_asm.h  |  2 +
 arch/arm64/include/asm/kvm_host.h | 19 +
 arch/arm64/kernel/asm-offsets.c   |  1 +
 arch/arm64/kvm/hyp.S  | 83 +--
 9 files changed, 196 insertions(+), 61 deletions(-)

-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH v4 2/3] KVM/arm/arm64: enable enhanced armv7 fp/simd lazy switch

2015-11-14 Thread Mario Smarduch
This patch tracks armv7 fp/simd hardware state with a vcpu lazy flag.
On vcpu_load saves host fpexc and enables FP access, and later enables fp/simd
trapping if lazy flag is not set. On first fp/simd access trap to handler 
to save host and restore guest context, disable trapping and set vcpu lazy 
flag. On vcpu_put if flag is set save guest and restore host context and 
always restore host fpexc.

Signed-off-by: Mario Smarduch 
---
 arch/arm/include/asm/kvm_host.h   | 33 ++
 arch/arm/kvm/arm.c| 12 
 arch/arm/kvm/interrupts.S | 58 +++
 arch/arm/kvm/interrupts_head.S| 26 +-
 arch/arm64/include/asm/kvm_host.h |  6 
 5 files changed, 104 insertions(+), 31 deletions(-)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index f1bf551..8fc7a59 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -40,6 +40,38 @@
 
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
 
+/*
+ * Reads the host FPEXC register, saves it to vcpu context and enables the
+ * FP/SIMD unit.
+ */
+#ifdef CONFIG_VFPv3
+#define kvm_enable_fpexc(vcpu) {   \
+   u32 fpexc = 0;  \
+   asm volatile(   \
+   "mrc p10, 7, %0, cr8, cr0, 0\n" \
+   "str %0, [%1]\n"\
+   "orr %0, %0, #(1 << 30)\n"  \
+   "mcr p10, 7, %0, cr8, cr0, 0\n" \
+   : "+r" (fpexc)  \
+   : "r" (&vcpu->arch.host_fpexc)  \
+   );  \
+}
+#else
+#define kvm_enable_fpexc(vcpu)
+#endif
+
+/* Restores host FPEXC register */
+#ifdef CONFIG_VFPv3
+#define kvm_restore_host_fpexc(vcpu) { \
+   asm volatile(   \
+   "mcr p10, 7, %0, cr8, cr0, 0\n" \
+   : : "r" (vcpu->arch.host_fpexc) \
+   );  \
+}
+#else
+#define kvm_restore_host_fpexc(vcpu)
+#endif
+
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -227,6 +259,7 @@ int kvm_perf_teardown(void);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+void kvm_restore_host_vfp_state(struct kvm_vcpu *);
 
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index dc017ad..cfc348a 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -291,10 +291,22 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
kvm_arm_set_running_vcpu(vcpu);
+
+   /* Save and enable FPEXC before we load guest context */
+   kvm_enable_fpexc(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+   /* If the fp/simd registers are dirty save guest, restore host. */
+   if (vcpu->arch.vfp_dirty) {
+   kvm_restore_host_vfp_state(vcpu);
+   vcpu->arch.vfp_dirty = 0;
+   }
+
+   /* Restore host FPEXC trashed in vcpu_load */
+   kvm_restore_host_fpexc(vcpu);
+
/*
 * The arch-generic KVM code expects the cpu field of a vcpu to be -1
 * if the vcpu is no longer assigned to a cpu.  This is used for the
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 900ef6d..1ddaa89 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -28,6 +28,26 @@
 #include "interrupts_head.S"
 
.text
+/**
+ * void kvm_restore_host_vfp_state(struct vcpu *vcpu) -
+ * This function is called from host to save the guest, and restore host
+ * fp/simd hardware context. It's placed outside of hyp start/end region.
+ */
+ENTRY(kvm_restore_host_vfp_state)
+#ifdef CONFIG_VFPv3
+   push{r4-r7}
+
+   add r7, vcpu, #VCPU_VFP_GUEST
+   store_vfp_state r7
+
+   add r7, vcpu, #VCPU_VFP_HOST
+   ldr r7, [r7]
+   restore_vfp_state r7
+
+   pop {r4-r7}
+#endif
+   bx  lr
+ENDPROC(kvm_restore_host_vfp_state)
 
 __kvm_hyp_code_start:
.globl __kvm_hyp_code_start
@@ -116,22 +136,22 @@ ENTRY(__kvm_vcpu_run)
read_cp15_state store_to_vcpu = 0
write_cp15_state read_from_vcpu = 1
 
+   set_hcptr_bits set, r4, (HCPTR_TTA)
@ If the host kernel has not been configured with VFPv3 support,
@ then it is safer if we deny guests from using it as well.
 #ifdef CONFIG_VFPv3
-   @ Set FPEXC_EN so the guest doesn't trap floating point instructions
-   VFPFMRX r2, FPEXC   @ VMRS
-   push{r2}
-   orr

[RFC PATCH 2/3] kvm: Add accessors for guest CPU's family, model, stepping

2015-11-14 Thread Borislav Petkov
From: Borislav Petkov 

Those give the family, model and stepping of the guest vcpu.

Signed-off-by: Borislav Petkov 
Cc: Paolo Bonzini 
---
 arch/x86/kvm/cpuid.h | 34 ++
 1 file changed, 34 insertions(+)

diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 06332cb7e7d1..5d47e0d95ef1 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -2,6 +2,7 @@
 #define ARCH_X86_KVM_CPUID_H
 
 #include "x86.h"
+#include 
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
@@ -170,4 +171,37 @@ static inline bool guest_cpuid_has_nrips(struct kvm_vcpu 
*vcpu)
 }
 #undef BIT_NRIPS
 
+static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
+{
+   struct kvm_cpuid_entry2 *best;
+
+   best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+   if (!best)
+   return -1;
+
+   return x86_family(best->eax);
+}
+
+static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
+{
+   struct kvm_cpuid_entry2 *best;
+
+   best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+   if (!best)
+   return -1;
+
+   return x86_model(best->eax);
+}
+
+static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
+{
+   struct kvm_cpuid_entry2 *best;
+
+   best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+   if (!best)
+   return -1;
+
+   return x86_stepping(best->eax);
+}
+
 #endif
-- 
2.3.5

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH 0/3] x86, kvm: Unify CPUID computation and fix MSR accessing

2015-11-14 Thread Borislav Petkov
From: Borislav Petkov 

Hi all,

so this is something which should help fixing the MSR access to IC_CFG
on AMD.

The usefulness should come, however, from the unification and the
additional family, model, stepping helpers in kvm in case one wants to
know those of the guest CPU. Who knows, might be much more useful in the
future.

Quick testing shows it works, I'll hammer on it more after rc1 is out.

Thanks.

Borislav Petkov (3):
  x86/cpu: Unify CPU family, model, stepping calculation
  kvm: Add accessors for guest CPU's family, model, stepping
  x86/cpu/amd, kvm: Satisfy guest kernel reads of IC_CFG MSR

 arch/x86/include/asm/cpu.h|  3 +++
 arch/x86/include/asm/microcode.h  | 31 +++
 arch/x86/include/asm/msr-index.h  |  1 +
 arch/x86/kernel/cpu/amd.c |  4 ++--
 arch/x86/kernel/cpu/common.c  | 11 +++
 arch/x86/kernel/cpu/microcode/core.c  |  6 +++---
 arch/x86/kernel/cpu/microcode/intel.c | 16 ++--
 arch/x86/kvm/cpuid.h  | 34 ++
 arch/x86/kvm/svm.c| 17 +
 arch/x86/lib/Makefile |  2 +-
 arch/x86/lib/cpu.c| 35 +++
 11 files changed, 108 insertions(+), 52 deletions(-)
 create mode 100644 arch/x86/lib/cpu.c

-- 
2.3.5

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC PATCH 1/3] x86/cpu: Unify CPU family, model, stepping calculation

2015-11-14 Thread Borislav Petkov
From: Borislav Petkov 

Add generic functions which calc family, model and stepping from the
CPUID_1.EAX leaf and stick them into the library we have.

No functionality change.

Signed-off-by: Borislav Petkov 
---
 arch/x86/include/asm/cpu.h|  3 +++
 arch/x86/include/asm/microcode.h  | 31 +++
 arch/x86/kernel/cpu/common.c  | 11 +++
 arch/x86/kernel/cpu/microcode/core.c  |  6 +++---
 arch/x86/kernel/cpu/microcode/intel.c | 16 ++--
 arch/x86/lib/Makefile |  2 +-
 arch/x86/lib/cpu.c| 35 +++
 7 files changed, 54 insertions(+), 50 deletions(-)
 create mode 100644 arch/x86/lib/cpu.c

diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index bf2caa1dedc5..678637ad7476 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -36,4 +36,7 @@ extern int _debug_hotplug_cpu(int cpu, int action);
 
 int mwait_usable(const struct cpuinfo_x86 *);
 
+unsigned int x86_family(unsigned int sig);
+unsigned int x86_model(unsigned int sig);
+unsigned int x86_stepping(unsigned int sig);
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 34e62b1dcfce..0419cc4ab4fd 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_MICROCODE_H
 #define _ASM_X86_MICROCODE_H
 
+#include 
 #include 
 
 #define native_rdmsr(msr, val1, val2)  \
@@ -118,40 +119,14 @@ static inline int x86_vendor(void)
return X86_VENDOR_UNKNOWN;
 }
 
-static inline unsigned int __x86_family(unsigned int sig)
-{
-   unsigned int x86;
-
-   x86 = (sig >> 8) & 0xf;
-
-   if (x86 == 0xf)
-   x86 += (sig >> 20) & 0xff;
-
-   return x86;
-}
-
-static inline unsigned int x86_family(void)
+static inline unsigned int x86_family_cpuid(void)
 {
u32 eax = 0x0001;
u32 ebx, ecx = 0, edx;
 
native_cpuid(&eax, &ebx, &ecx, &edx);
 
-   return __x86_family(eax);
-}
-
-static inline unsigned int x86_model(unsigned int sig)
-{
-   unsigned int x86, model;
-
-   x86 = __x86_family(sig);
-
-   model = (sig >> 4) & 0xf;
-
-   if (x86 == 0x6 || x86 == 0xf)
-   model += ((sig >> 16) & 0xf) << 4;
-
-   return model;
+   return x86_family(eax);
 }
 
 #ifdef CONFIG_MICROCODE
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4ddd780aeac9..c311b51efe15 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -582,14 +582,9 @@ void cpu_detect(struct cpuinfo_x86 *c)
u32 junk, tfms, cap0, misc;
 
cpuid(0x0001, &tfms, &misc, &junk, &cap0);
-   c->x86 = (tfms >> 8) & 0xf;
-   c->x86_model = (tfms >> 4) & 0xf;
-   c->x86_mask = tfms & 0xf;
-
-   if (c->x86 == 0xf)
-   c->x86 += (tfms >> 20) & 0xff;
-   if (c->x86 >= 0x6)
-   c->x86_model += ((tfms >> 16) & 0xf) << 4;
+   c->x86  = x86_family(tfms);
+   c->x86_model= x86_model(tfms);
+   c->x86_mask = x86_stepping(tfms);
 
if (cap0 & (1<<19)) {
c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
diff --git a/arch/x86/kernel/cpu/microcode/core.c 
b/arch/x86/kernel/cpu/microcode/core.c
index 7fc27f1cca58..e98c3487c9d3 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -130,7 +130,7 @@ void __init load_ucode_bsp(void)
return;
 
vendor = x86_vendor();
-   family = x86_family();
+   family = x86_family_cpuid();
 
switch (vendor) {
case X86_VENDOR_INTEL:
@@ -166,7 +166,7 @@ void load_ucode_ap(void)
return;
 
vendor = x86_vendor();
-   family = x86_family();
+   family = x86_family_cpuid();
 
switch (vendor) {
case X86_VENDOR_INTEL:
@@ -207,7 +207,7 @@ void reload_early_microcode(void)
int vendor, family;
 
vendor = x86_vendor();
-   family = x86_family();
+   family = x86_family_cpuid();
 
switch (vendor) {
case X86_VENDOR_INTEL:
diff --git a/arch/x86/kernel/cpu/microcode/intel.c 
b/arch/x86/kernel/cpu/microcode/intel.c
index ce47402eb2f9..ee81c544ee0d 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -145,10 +145,10 @@ matching_model_microcode(struct microcode_header_intel 
*mc_header,
int ext_sigcount, i;
struct extended_signature *ext_sig;
 
-   fam   = __x86_family(sig);
+   fam   = x86_family(sig);
model = x86_model(sig);
 
-   fam_ucode   = __x86_family(mc_header->sig);
+   fam_ucode   = x86_family(mc_header->sig);
model_ucode = x86_model(mc_header->sig);
 
if (fam == fam_ucode && model 

[RFC PATCH 3/3] x86/cpu/amd, kvm: Satisfy guest kernel reads of IC_CFG MSR

2015-11-14 Thread Borislav Petkov
From: Borislav Petkov 

The kernel accesses IC_CFG MSR (0xc0011021) on AMD because it checks
whether the way access filter is enabled on some F15h models, and, if
so, disables it.

kvm doesn't handle that MSR access and complains about it, which can
get really noisy in dmesg when one starts kvm guests all the time for
testing. And it is useless anyway - guest kernel shouldn't be doing such
changes anyway so tell it that that filter is disabled.

Signed-off-by: Borislav Petkov 
Cc: Paolo Bonzini 
---
 arch/x86/include/asm/msr-index.h |  1 +
 arch/x86/kernel/cpu/amd.c|  4 ++--
 arch/x86/kvm/svm.c   | 17 +
 3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9f3905697f12..5384485f8569 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -322,6 +322,7 @@
 #define MSR_F15H_PERF_CTR  0xc0010201
 #define MSR_F15H_NB_PERF_CTL   0xc0010240
 #define MSR_F15H_NB_PERF_CTR   0xc0010241
+#define MSR_F15H_IC_CFG0xc0011021
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE  0xc0010058
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 4a70fc6d400a..1d76dcdf7e55 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -665,9 +665,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 * Disable it on the affected CPUs.
 */
if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
-   if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
+   if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
value |= 0x1E;
-   wrmsrl_safe(0xc0011021, value);
+   wrmsrl_safe(MSR_F15H_IC_CFG, value);
}
}
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 83a1c643f9a5..58b64c17c4a8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3053,6 +3053,23 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct 
msr_data *msr_info)
case MSR_IA32_UCODE_REV:
msr_info->data = 0x0165;
break;
+   case MSR_F15H_IC_CFG: {
+
+   int family, model;
+
+   family = guest_cpuid_family(vcpu);
+   model  = guest_cpuid_model(vcpu);
+
+   if (family < 0 || model < 0)
+   return kvm_get_msr_common(vcpu, msr_info);
+
+   msr_info->data = 0;
+
+   if (family == 0x15 &&
+   (model >= 0x2 && model < 0x20))
+   msr_info->data = 0x1E;
+   }
+   break;
default:
return kvm_get_msr_common(vcpu, msr_info);
}
-- 
2.3.5

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 0/9] constify pci_error_handlers structures

2015-11-14 Thread Julia Lawall
Constify never-modified pci_error_handlers structures.

---

 drivers/crypto/qat/qat_common/adf_aer.c |2 +-
 drivers/misc/genwqe/card_base.c |2 +-
 drivers/net/ethernet/cavium/liquidio/lio_main.c |2 +-
 drivers/net/ethernet/sfc/efx.c  |2 +-
 drivers/scsi/be2iscsi/be_main.c |2 +-
 drivers/scsi/bfa/bfad.c |2 +-
 drivers/scsi/csiostor/csio_init.c   |2 +-
 drivers/scsi/mpt3sas/mpt3sas_scsih.c|2 +-
 drivers/vfio/pci/vfio_pci.c |2 +-
 9 files changed, 9 insertions(+), 9 deletions(-)
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 9/9] vfio-pci: constify pci_error_handlers structures

2015-11-14 Thread Julia Lawall
This pci_error_handlers structure is never modified, like all the other
pci_error_handlers structures, so declare it as const.

Done with the help of Coccinelle.

Signed-off-by: Julia Lawall 

---
There are no dependencies between these patches.

 drivers/vfio/pci/vfio_pci.c |2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 32b88bd..2760a7b 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1035,7 +1035,7 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct 
pci_dev *pdev,
return PCI_ERS_RESULT_CAN_RECOVER;
 }
 
-static struct pci_error_handlers vfio_err_handlers = {
+static const struct pci_error_handlers vfio_err_handlers = {
.error_detected = vfio_pci_aer_err_detected,
 };
 

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/2] KVM: kvm_is_visible_gfn can be boolean

2015-11-14 Thread Amos Jianjun Kong
On Sat, Nov 14, 2015 at 11:21 AM, Yaowei Bai
 wrote:
> This patch makes kvm_is_visible_gfn return bool due to this particular
> function only using either one or zero as its return value.
>
> No functional change.
>
> Signed-off-by: Yaowei Bai 

Hi Yaowei,

> ---
>  include/linux/kvm_host.h | 2 +-
>  virt/kvm/kvm_main.c  | 6 +++---
>  2 files changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 5706a21..4436539 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -623,7 +623,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct 
> gfn_to_hva_cache *ghc,
>  int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
>  int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
>  struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
> -int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
> +bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
>  unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
>  void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
>
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 484079e..73cbb41 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -1164,15 +1164,15 @@ struct kvm_memory_slot 
> *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
> return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
>  }
>
> -int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
> +bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
>  {
> struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
>
> if (!memslot || memslot->id >= KVM_USER_MEM_SLOTS ||
>   memslot->flags & KVM_MEMSLOT_INVALID)
> -   return 0;
> +   return false;
>
> -   return 1;
> +   return true;
>  }
>  EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);


kvm_is_visible_gfn() is also used in arch/powerpc/kvm/book3s_pr.c:

static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
..
if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
return 1;
}

return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

Do we still need to update that function?

Thanks, Amos
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html