RE: [PATCH 1/2] x86/vmx: Calculate model-specific LBRs once at start of day

2023-01-11 Thread Tian, Kevin
> From: Andrew Cooper 
> Sent: Monday, January 9, 2023 8:08 PM
> 
> There is no point repeating this calculation at runtime, especially as it is
> in the fallback path of the WRMSR/RDMSR handlers.
> 
> Move the infrastructure higher in vmx.c to avoid forward declarations,
> renaming last_branch_msr_get() to get_model_specific_lbr() to highlight that
> these are model-specific only.
> 
> No practical change.
> 
> Signed-off-by: Andrew Cooper 

Reviewed-by: Kevin Tian 


Re: [PATCH 1/2] x86/vmx: Calculate model-specific LBRs once at start of day

2023-01-10 Thread Andrew Cooper
On 10/01/2023 4:26 pm, Jan Beulich wrote:
> On 09.01.2023 13:08, Andrew Cooper wrote:
>> There is no point repeating this calculation at runtime, especially as it is
>> in the fallback path of the WRMSR/RDMSR handlers.
>>
>> Move the infrastructure higher in vmx.c to avoid forward declarations,
>> renaming last_branch_msr_get() to get_model_specific_lbr() to highlight that
>> these are model-specific only.
>>
>> No practical change.
>>
>> Signed-off-by: Andrew Cooper 
> Reviewed-by: Jan Beulich 
> with one nit:
>
>> --- a/xen/arch/x86/hvm/vmx/vmx.c
>> +++ b/xen/arch/x86/hvm/vmx/vmx.c
>> @@ -396,6 +396,142 @@ void vmx_pi_hooks_deassign(struct domain *d)
>>  domain_unpause(d);
>>  }
>>  
>> +static const struct lbr_info {
>> +u32 base, count;
>> +} p4_lbr[] = {
>> +{ MSR_P4_LER_FROM_LIP,  1 },
>> +{ MSR_P4_LER_TO_LIP,1 },
>> +{ MSR_P4_LASTBRANCH_TOS,1 },
>> +{ MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
>> +{ MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
>> +{ 0, 0 }
>> +}, c2_lbr[] = {
>> +{ MSR_IA32_LASTINTFROMIP,   1 },
>> +{ MSR_IA32_LASTINTTOIP, 1 },
>> +{ MSR_C2_LASTBRANCH_TOS,1 },
>> +{ MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_C2_LASTBRANCH_FROM_TO },
>> +{ MSR_C2_LASTBRANCH_0_TO_IP,NUM_MSR_C2_LASTBRANCH_FROM_TO },
>> +{ 0, 0 }
>> +}, nh_lbr[] = {
>> +{ MSR_IA32_LASTINTFROMIP,   1 },
>> +{ MSR_IA32_LASTINTTOIP, 1 },
>> +{ MSR_NHL_LBR_SELECT,   1 },
>> +{ MSR_NHL_LASTBRANCH_TOS,   1 },
>> +{ MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
>> +{ MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
>> +{ 0, 0 }
>> +}, sk_lbr[] = {
>> +{ MSR_IA32_LASTINTFROMIP,   1 },
>> +{ MSR_IA32_LASTINTTOIP, 1 },
>> +{ MSR_NHL_LBR_SELECT,   1 },
>> +{ MSR_NHL_LASTBRANCH_TOS,   1 },
>> +{ MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH },
>> +{ MSR_SKL_LASTBRANCH_0_TO_IP,   NUM_MSR_SKL_LASTBRANCH },
>> +{ MSR_SKL_LASTBRANCH_0_INFO,NUM_MSR_SKL_LASTBRANCH },
>> +{ 0, 0 }
>> +}, at_lbr[] = {
>> +{ MSR_IA32_LASTINTFROMIP,   1 },
>> +{ MSR_IA32_LASTINTTOIP, 1 },
>> +{ MSR_C2_LASTBRANCH_TOS,1 },
>> +{ MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
>> +{ MSR_C2_LASTBRANCH_0_TO_IP,NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
>> +{ 0, 0 }
>> +}, sm_lbr[] = {
>> +{ MSR_IA32_LASTINTFROMIP,   1 },
>> +{ MSR_IA32_LASTINTTOIP, 1 },
>> +{ MSR_SM_LBR_SELECT,1 },
>> +{ MSR_SM_LASTBRANCH_TOS,1 },
>> +{ MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
>> +{ MSR_C2_LASTBRANCH_0_TO_IP,NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
>> +{ 0, 0 }
>> +}, gm_lbr[] = {
>> +{ MSR_IA32_LASTINTFROMIP,   1 },
>> +{ MSR_IA32_LASTINTTOIP, 1 },
>> +{ MSR_SM_LBR_SELECT,1 },
>> +{ MSR_SM_LASTBRANCH_TOS,1 },
>> +{ MSR_GM_LASTBRANCH_0_FROM_IP,  NUM_MSR_GM_LASTBRANCH_FROM_TO },
>> +{ MSR_GM_LASTBRANCH_0_TO_IP,NUM_MSR_GM_LASTBRANCH_FROM_TO },
>> +{ 0, 0 }
>> +};
>> +static const struct lbr_info * __ro_after_init model_specific_lbr;
>> +
>> +static const struct __init lbr_info *get_model_specific_lbr(void)
> Please move __init:
>
> static const struct lbr_info *__init get_model_specific_lbr(void)

Yeah, I noticed and fixed both style errors here.  Also an extraneous
space before __ro_after_init.

~Andrew


Re: [PATCH 1/2] x86/vmx: Calculate model-specific LBRs once at start of day

2023-01-10 Thread Jan Beulich
On 09.01.2023 13:08, Andrew Cooper wrote:
> There is no point repeating this calculation at runtime, especially as it is
> in the fallback path of the WRMSR/RDMSR handlers.
> 
> Move the infrastructure higher in vmx.c to avoid forward declarations,
> renaming last_branch_msr_get() to get_model_specific_lbr() to highlight that
> these are model-specific only.
> 
> No practical change.
> 
> Signed-off-by: Andrew Cooper 

Reviewed-by: Jan Beulich 
with one nit:

> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -396,6 +396,142 @@ void vmx_pi_hooks_deassign(struct domain *d)
>  domain_unpause(d);
>  }
>  
> +static const struct lbr_info {
> +u32 base, count;
> +} p4_lbr[] = {
> +{ MSR_P4_LER_FROM_LIP,  1 },
> +{ MSR_P4_LER_TO_LIP,1 },
> +{ MSR_P4_LASTBRANCH_TOS,1 },
> +{ MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
> +{ MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
> +{ 0, 0 }
> +}, c2_lbr[] = {
> +{ MSR_IA32_LASTINTFROMIP,   1 },
> +{ MSR_IA32_LASTINTTOIP, 1 },
> +{ MSR_C2_LASTBRANCH_TOS,1 },
> +{ MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_C2_LASTBRANCH_FROM_TO },
> +{ MSR_C2_LASTBRANCH_0_TO_IP,NUM_MSR_C2_LASTBRANCH_FROM_TO },
> +{ 0, 0 }
> +}, nh_lbr[] = {
> +{ MSR_IA32_LASTINTFROMIP,   1 },
> +{ MSR_IA32_LASTINTTOIP, 1 },
> +{ MSR_NHL_LBR_SELECT,   1 },
> +{ MSR_NHL_LASTBRANCH_TOS,   1 },
> +{ MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
> +{ MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
> +{ 0, 0 }
> +}, sk_lbr[] = {
> +{ MSR_IA32_LASTINTFROMIP,   1 },
> +{ MSR_IA32_LASTINTTOIP, 1 },
> +{ MSR_NHL_LBR_SELECT,   1 },
> +{ MSR_NHL_LASTBRANCH_TOS,   1 },
> +{ MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH },
> +{ MSR_SKL_LASTBRANCH_0_TO_IP,   NUM_MSR_SKL_LASTBRANCH },
> +{ MSR_SKL_LASTBRANCH_0_INFO,NUM_MSR_SKL_LASTBRANCH },
> +{ 0, 0 }
> +}, at_lbr[] = {
> +{ MSR_IA32_LASTINTFROMIP,   1 },
> +{ MSR_IA32_LASTINTTOIP, 1 },
> +{ MSR_C2_LASTBRANCH_TOS,1 },
> +{ MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
> +{ MSR_C2_LASTBRANCH_0_TO_IP,NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
> +{ 0, 0 }
> +}, sm_lbr[] = {
> +{ MSR_IA32_LASTINTFROMIP,   1 },
> +{ MSR_IA32_LASTINTTOIP, 1 },
> +{ MSR_SM_LBR_SELECT,1 },
> +{ MSR_SM_LASTBRANCH_TOS,1 },
> +{ MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
> +{ MSR_C2_LASTBRANCH_0_TO_IP,NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
> +{ 0, 0 }
> +}, gm_lbr[] = {
> +{ MSR_IA32_LASTINTFROMIP,   1 },
> +{ MSR_IA32_LASTINTTOIP, 1 },
> +{ MSR_SM_LBR_SELECT,1 },
> +{ MSR_SM_LASTBRANCH_TOS,1 },
> +{ MSR_GM_LASTBRANCH_0_FROM_IP,  NUM_MSR_GM_LASTBRANCH_FROM_TO },
> +{ MSR_GM_LASTBRANCH_0_TO_IP,NUM_MSR_GM_LASTBRANCH_FROM_TO },
> +{ 0, 0 }
> +};
> +static const struct lbr_info * __ro_after_init model_specific_lbr;
> +
> +static const struct __init lbr_info *get_model_specific_lbr(void)

Please move __init:

static const struct lbr_info *__init get_model_specific_lbr(void)

Jan



[PATCH 1/2] x86/vmx: Calculate model-specific LBRs once at start of day

2023-01-09 Thread Andrew Cooper
There is no point repeating this calculation at runtime, especially as it is
in the fallback path of the WRMSR/RDMSR handlers.

Move the infrastructure higher in vmx.c to avoid forward declarations,
renaming last_branch_msr_get() to get_model_specific_lbr() to highlight that
these are model-specific only.

No practical change.

Signed-off-by: Andrew Cooper 
---
CC: Jan Beulich 
CC: Roger Pau Monné 
CC: Wei Liu 
CC: Jun Nakajima 
CC: Kevin Tian 
---
 xen/arch/x86/hvm/vmx/vmx.c | 276 +++--
 1 file changed, 139 insertions(+), 137 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 43a4865d1c76..17320f9fb267 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -396,6 +396,142 @@ void vmx_pi_hooks_deassign(struct domain *d)
 domain_unpause(d);
 }
 
+static const struct lbr_info {
+u32 base, count;
+} p4_lbr[] = {
+{ MSR_P4_LER_FROM_LIP,  1 },
+{ MSR_P4_LER_TO_LIP,1 },
+{ MSR_P4_LASTBRANCH_TOS,1 },
+{ MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
+{ MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
+{ 0, 0 }
+}, c2_lbr[] = {
+{ MSR_IA32_LASTINTFROMIP,   1 },
+{ MSR_IA32_LASTINTTOIP, 1 },
+{ MSR_C2_LASTBRANCH_TOS,1 },
+{ MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_C2_LASTBRANCH_FROM_TO },
+{ MSR_C2_LASTBRANCH_0_TO_IP,NUM_MSR_C2_LASTBRANCH_FROM_TO },
+{ 0, 0 }
+}, nh_lbr[] = {
+{ MSR_IA32_LASTINTFROMIP,   1 },
+{ MSR_IA32_LASTINTTOIP, 1 },
+{ MSR_NHL_LBR_SELECT,   1 },
+{ MSR_NHL_LASTBRANCH_TOS,   1 },
+{ MSR_P4_LASTBRANCH_0_FROM_LIP, NUM_MSR_P4_LASTBRANCH_FROM_TO },
+{ MSR_P4_LASTBRANCH_0_TO_LIP,   NUM_MSR_P4_LASTBRANCH_FROM_TO },
+{ 0, 0 }
+}, sk_lbr[] = {
+{ MSR_IA32_LASTINTFROMIP,   1 },
+{ MSR_IA32_LASTINTTOIP, 1 },
+{ MSR_NHL_LBR_SELECT,   1 },
+{ MSR_NHL_LASTBRANCH_TOS,   1 },
+{ MSR_SKL_LASTBRANCH_0_FROM_IP, NUM_MSR_SKL_LASTBRANCH },
+{ MSR_SKL_LASTBRANCH_0_TO_IP,   NUM_MSR_SKL_LASTBRANCH },
+{ MSR_SKL_LASTBRANCH_0_INFO,NUM_MSR_SKL_LASTBRANCH },
+{ 0, 0 }
+}, at_lbr[] = {
+{ MSR_IA32_LASTINTFROMIP,   1 },
+{ MSR_IA32_LASTINTTOIP, 1 },
+{ MSR_C2_LASTBRANCH_TOS,1 },
+{ MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+{ MSR_C2_LASTBRANCH_0_TO_IP,NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+{ 0, 0 }
+}, sm_lbr[] = {
+{ MSR_IA32_LASTINTFROMIP,   1 },
+{ MSR_IA32_LASTINTTOIP, 1 },
+{ MSR_SM_LBR_SELECT,1 },
+{ MSR_SM_LASTBRANCH_TOS,1 },
+{ MSR_C2_LASTBRANCH_0_FROM_IP,  NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+{ MSR_C2_LASTBRANCH_0_TO_IP,NUM_MSR_ATOM_LASTBRANCH_FROM_TO },
+{ 0, 0 }
+}, gm_lbr[] = {
+{ MSR_IA32_LASTINTFROMIP,   1 },
+{ MSR_IA32_LASTINTTOIP, 1 },
+{ MSR_SM_LBR_SELECT,1 },
+{ MSR_SM_LASTBRANCH_TOS,1 },
+{ MSR_GM_LASTBRANCH_0_FROM_IP,  NUM_MSR_GM_LASTBRANCH_FROM_TO },
+{ MSR_GM_LASTBRANCH_0_TO_IP,NUM_MSR_GM_LASTBRANCH_FROM_TO },
+{ 0, 0 }
+};
+static const struct lbr_info * __ro_after_init model_specific_lbr;
+
+static const struct __init lbr_info *get_model_specific_lbr(void)
+{
+switch ( boot_cpu_data.x86 )
+{
+case 6:
+switch ( boot_cpu_data.x86_model )
+{
+/* Core2 Duo */
+case 0x0f:
+/* Enhanced Core */
+case 0x17:
+/* Xeon 7400 */
+case 0x1d:
+return c2_lbr;
+/* Nehalem */
+case 0x1a: case 0x1e: case 0x1f: case 0x2e:
+/* Westmere */
+case 0x25: case 0x2c: case 0x2f:
+/* Sandy Bridge */
+case 0x2a: case 0x2d:
+/* Ivy Bridge */
+case 0x3a: case 0x3e:
+/* Haswell */
+case 0x3c: case 0x3f: case 0x45: case 0x46:
+/* Broadwell */
+case 0x3d: case 0x47: case 0x4f: case 0x56:
+return nh_lbr;
+/* Skylake */
+case 0x4e: case 0x5e:
+/* Xeon Scalable */
+case 0x55:
+/* Cannon Lake */
+case 0x66:
+/* Goldmont Plus */
+case 0x7a:
+/* Ice Lake */
+case 0x6a: case 0x6c: case 0x7d: case 0x7e:
+/* Tiger Lake */
+case 0x8c: case 0x8d:
+/* Tremont */
+case 0x86:
+/* Kaby Lake */
+case 0x8e: case 0x9e:
+/* Comet Lake */
+case 0xa5: case 0xa6:
+return sk_lbr;
+/* Atom */
+case 0x1c: case 0x26: case 0x27: case 0x35: case 0x36:
+return at_lbr;
+/* Silvermont */
+case 0x37: case 0x4a: case 0x4d: case 0x5a: case 0x5d:
+/* Xeon Phi Knights Landing */
+case 0x57:
+/* Xeon Phi Knights Mill */
+case 0x85:
+/* Airmont */
+case 0x4c:
+return sm_lbr;
+/*