Re: [Xen-devel] [PATCH 2/9] x86/vmx: Internal cleanup for MSR load/save infrastructure

2018-05-26 Thread Tian, Kevin
> From: Andrew Cooper [mailto:andrew.coop...@citrix.com]
> Sent: Tuesday, May 22, 2018 7:21 PM
> 
>  * Use an arch_vmx_struct local variable to reduce later code volume.
>  * Use start/total instead of msr_area/msr_count.  This is in preparation for
>    more fine-grained handling with later changes.
>  * Use ent/end pointers (again for preparation), and to make the vmx_add_msr()
>    logic easier to follow.
>  * Make the memory allocation block of vmx_add_msr() unlikely, and calculate
>    virt_to_maddr() just once.
> 
> No practical change to functionality.
> 
> Signed-off-by: Andrew Cooper 

Acked-by: Kevin Tian 

Re: [Xen-devel] [PATCH 2/9] x86/vmx: Internal cleanup for MSR load/save infrastructure

2018-05-24 Thread Jan Beulich
>>> On 22.05.18 at 13:20,  wrote:
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1292,48 +1292,50 @@ static int vmx_msr_entry_key_cmp(const void *key, const void *elt)
>  struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type)
>  {
>  struct vcpu *curr = current;
> -unsigned int msr_count;
> -struct vmx_msr_entry *msr_area = NULL;
> +struct arch_vmx_struct *arch_vmx = &curr->arch.hvm_vmx;

In the interest of code volume reduction - why the arch_ prefix? There's
no arch_-less vmx anywhere afaict.

> +struct vmx_msr_entry *start = NULL;
> +unsigned int total;
>  
>  switch ( type )
>  {
>  case VMX_MSR_HOST:
> -msr_count = curr->arch.hvm_vmx.host_msr_count;
> -msr_area = curr->arch.hvm_vmx.host_msr_area;
> +start= arch_vmx->host_msr_area;
> +total= arch_vmx->host_msr_count;
>  break;
>  
>  case VMX_MSR_GUEST:
> -msr_count = curr->arch.hvm_vmx.msr_count;
> -msr_area = curr->arch.hvm_vmx.msr_area;
> +start= arch_vmx->msr_area;
> +total= arch_vmx->msr_count;
>  break;
>  
>  default:
>  ASSERT_UNREACHABLE();
>  }
>  
> -if ( msr_area == NULL )
> +if ( !start )
>  return NULL;
>  
> -return bsearch(&msr, msr_area, msr_count, sizeof(struct vmx_msr_entry),
> +return bsearch(&msr, start, total, sizeof(struct vmx_msr_entry),

sizeof(*start) ?
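
The point behind the suggestion: sizing the element from the pointed-to
object rather than naming the type keeps the call correct even if the entry
type is later renamed or changed.  A minimal standalone illustration
(generic names, not the Xen code):

#include <stdint.h>
#include <stdlib.h>

struct entry { uint32_t index; uint64_t data; };

static int key_cmp(const void *key, const void *elt)
{
    uint32_t k = *(const uint32_t *)key;
    const struct entry *e = elt;

    return (k > e->index) - (k < e->index);
}

static struct entry *find(uint32_t key, struct entry *start, size_t total)
{
    /* sizeof(*start) follows the element type of the array automatically,
     * whereas sizeof(struct entry) has to be kept in sync by hand. */
    return bsearch(&key, start, total, sizeof(*start), key_cmp);
}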

Jan

Re: [Xen-devel] [PATCH 2/9] x86/vmx: Internal cleanup for MSR load/save infrastructure

2018-05-23 Thread Andrew Cooper
On 23/05/18 17:28, Roger Pau Monné wrote:
> On Tue, May 22, 2018 at 12:20:39PM +0100, Andrew Cooper wrote:
>>  * Use an arch_vmx_struct local variable to reduce later code volume.
>>  * Use start/total instead of msr_area/msr_count.  This is in preparation for
>>    more fine-grained handling with later changes.
>>  * Use ent/end pointers (again for preparation), and to make the vmx_add_msr()
>>    logic easier to follow.
>>  * Make the memory allocation block of vmx_add_msr() unlikely, and calculate
>>    virt_to_maddr() just once.
>>
>> No practical change to functionality.
>>
>> Signed-off-by: Andrew Cooper 
>> ---
>> CC: Jan Beulich 
>> CC: Jun Nakajima 
>> CC: Kevin Tian 
>> CC: Wei Liu 
>> CC: Roger Pau Monné 
>> ---
>>  xen/arch/x86/hvm/vmx/vmcs.c | 74 ++++++++++++++++++++++++++++++++++++++++----------------------------------
>>  1 file changed, 40 insertions(+), 34 deletions(-)
>>
>> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
>> index a5dcf5c..f557857 100644
>> --- a/xen/arch/x86/hvm/vmx/vmcs.c
>> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
>> @@ -1292,48 +1292,50 @@ static int vmx_msr_entry_key_cmp(const void *key, const void *elt)
>>  struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type)
>>  {
>>  struct vcpu *curr = current;
>> -unsigned int msr_count;
>> -struct vmx_msr_entry *msr_area = NULL;
>> +struct arch_vmx_struct *arch_vmx = &curr->arch.hvm_vmx;
> curr is used here only, so you can use current and get rid of the curr
> local variable?

curr disappears in patch 4 "x86/vmx: Support remote access to the MSR
lists", when v starts getting passed in from the outside.
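
For reference, the direction described there is roughly the following shape
(an illustrative sketch of the prototypes only, not the actual patch 4):

/* Hypothetical sketch: the MSR-list helpers gain an explicit vcpu
 * parameter instead of always operating on 'current'. */
struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
                                   enum vmx_msr_list_type type);
int vmx_add_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type);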

>
>> +struct vmx_msr_entry *start = NULL;
>> +unsigned int total;
>>  
>>  switch ( type )
>>  {
>>  case VMX_MSR_HOST:
>> -msr_count = curr->arch.hvm_vmx.host_msr_count;
>> -msr_area = curr->arch.hvm_vmx.host_msr_area;
>> +start    = arch_vmx->host_msr_area;
>> +total    = arch_vmx->host_msr_count;
>>  break;
>>  
>>  case VMX_MSR_GUEST:
>> -msr_count = curr->arch.hvm_vmx.msr_count;
>> -msr_area = curr->arch.hvm_vmx.msr_area;
>> +start    = arch_vmx->msr_area;
>> +total    = arch_vmx->msr_count;
> Not that I think it's wrong, but why are you adding the extra spaces
> after the variable name? Those assignments will already be aligned
> because start and total names have the same length.

There are changes in later patches, for which this is the correct
indentation.

>
>>  break;
>>  
>>  default:
>>  ASSERT_UNREACHABLE();
>>  }
>>  
>> -if ( msr_area == NULL )
>> +if ( !start )
>>  return NULL;
>>  
>> -return bsearch(&msr, msr_area, msr_count, sizeof(struct vmx_msr_entry),
>> +return bsearch(&msr, start, total, sizeof(struct vmx_msr_entry),
>> vmx_msr_entry_key_cmp);
>>  }
>>  
>>  int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type)
>>  {
>>  struct vcpu *curr = current;
> curr seems to be used only once below in order to get hvm_vmx? In
> which case it could be removed.
>
>> -unsigned int idx, *msr_count;
>> -struct vmx_msr_entry **msr_area, *msr_area_elem;
>> +struct arch_vmx_struct *arch_vmx = &curr->arch.hvm_vmx;
>> +struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;
> Do you really need to initialize start here? It seems like it's
> unconditionally set to *ptr before any usage.

Yes, for safety through the default case in release builds.
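
To make the hazard concrete: ASSERT_UNREACHABLE() is compiled out in release
(non-debug) builds, so a bogus type value would fall straight through the
default case.  A minimal standalone reduction of the pattern (simplified
types, not the Xen code):

struct entry { unsigned int index; };

struct entry *find(int type, struct entry *host_area, struct entry *guest_area)
{
    struct entry *start = NULL;   /* The initialisation being discussed. */

    switch ( type )
    {
    case 0:
        start = host_area;
        break;

    case 1:
        start = guest_area;
        break;

    default:
        /*
         * A debug build would assert here.  A release build carries on, so
         * without the '= NULL' above 'start' would be read uninitialised;
         * with it, the caller simply sees NULL.
         */
        break;
    }

    return start;
}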

~Andrew


Re: [Xen-devel] [PATCH 2/9] x86/vmx: Internal cleanup for MSR load/save infrastructure

2018-05-23 Thread Roger Pau Monné
On Tue, May 22, 2018 at 12:20:39PM +0100, Andrew Cooper wrote:
>  * Use an arch_vmx_struct local variable to reduce later code volume.
>  * Use start/total instead of msr_area/msr_count.  This is in preparation for
>    more fine-grained handling with later changes.
>  * Use ent/end pointers (again for preparation), and to make the vmx_add_msr()
>    logic easier to follow.
>  * Make the memory allocation block of vmx_add_msr() unlikely, and calculate
>    virt_to_maddr() just once.
> 
> No practical change to functionality.
> 
> Signed-off-by: Andrew Cooper 
> ---
> CC: Jan Beulich 
> CC: Jun Nakajima 
> CC: Kevin Tian 
> CC: Wei Liu 
> CC: Roger Pau Monné 
> ---
>  xen/arch/x86/hvm/vmx/vmcs.c | 74 ++++++++++++++++++++++++++++++++++++++++----------------------------------
>  1 file changed, 40 insertions(+), 34 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index a5dcf5c..f557857 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1292,48 +1292,50 @@ static int vmx_msr_entry_key_cmp(const void *key, const void *elt)
>  struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type)
>  {
>  struct vcpu *curr = current;
> -unsigned int msr_count;
> -struct vmx_msr_entry *msr_area = NULL;
> +struct arch_vmx_struct *arch_vmx = &curr->arch.hvm_vmx;

curr is used here only, so you can use current and get rid of the curr
local variable?

> +struct vmx_msr_entry *start = NULL;
> +unsigned int total;
>  
>  switch ( type )
>  {
>  case VMX_MSR_HOST:
> -msr_count = curr->arch.hvm_vmx.host_msr_count;
> -msr_area = curr->arch.hvm_vmx.host_msr_area;
> +start    = arch_vmx->host_msr_area;
> +total    = arch_vmx->host_msr_count;
>  break;
>  
>  case VMX_MSR_GUEST:
> -msr_count = curr->arch.hvm_vmx.msr_count;
> -msr_area = curr->arch.hvm_vmx.msr_area;
> +start    = arch_vmx->msr_area;
> +total    = arch_vmx->msr_count;

Not that I think it's wrong, but why are you adding the extra spaces
after the variable name? Those assignments will already be aligned
because start and total names have the same length.

>  break;
>  
>  default:
>  ASSERT_UNREACHABLE();
>  }
>  
> -if ( msr_area == NULL )
> +if ( !start )
>  return NULL;
>  
> -return bsearch(&msr, msr_area, msr_count, sizeof(struct vmx_msr_entry),
> +return bsearch(&msr, start, total, sizeof(struct vmx_msr_entry),
> vmx_msr_entry_key_cmp);
>  }
>  
>  int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type)
>  {
>  struct vcpu *curr = current;

curr seems to be used only once below in order to get hvm_vmx? In
which case it could be removed.

> -unsigned int idx, *msr_count;
> -struct vmx_msr_entry **msr_area, *msr_area_elem;
> +struct arch_vmx_struct *arch_vmx = &curr->arch.hvm_vmx;
> +struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;

Do you really need to initialize start here? It seems like it's
unconditionally set to *ptr before any usage.

Thanks, Roger.


[Xen-devel] [PATCH 2/9] x86/vmx: Internal cleanup for MSR load/save infrastructure

2018-05-22 Thread Andrew Cooper
 * Use an arch_vmx_struct local variable to reduce later code volume.
 * Use start/total instead of msr_area/msr_count.  This is in preparation for
   more fine-grained handling with later changes.
 * Use ent/end pointers (again for preparation), and to make the vmx_add_msr()
   logic easier to follow.
 * Make the memory allocation block of vmx_add_msr() unlikely, and calculate
   virt_to_maddr() just once.

No practical change to functionality.

Signed-off-by: Andrew Cooper 
---
CC: Jan Beulich 
CC: Jun Nakajima 
CC: Kevin Tian 
CC: Wei Liu 
CC: Roger Pau Monné 
---
 xen/arch/x86/hvm/vmx/vmcs.c | 74 ++++++++++++++++++++++++++++++++++++++++----------------------------------
 1 file changed, 40 insertions(+), 34 deletions(-)

diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index a5dcf5c..f557857 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1292,48 +1292,50 @@ static int vmx_msr_entry_key_cmp(const void *key, const void *elt)
 struct vmx_msr_entry *vmx_find_msr(uint32_t msr, enum vmx_msr_list_type type)
 {
     struct vcpu *curr = current;
-    unsigned int msr_count;
-    struct vmx_msr_entry *msr_area = NULL;
+    struct arch_vmx_struct *arch_vmx = &curr->arch.hvm_vmx;
+    struct vmx_msr_entry *start = NULL;
+    unsigned int total;
 
     switch ( type )
     {
     case VMX_MSR_HOST:
-        msr_count = curr->arch.hvm_vmx.host_msr_count;
-        msr_area = curr->arch.hvm_vmx.host_msr_area;
+        start    = arch_vmx->host_msr_area;
+        total    = arch_vmx->host_msr_count;
         break;
 
     case VMX_MSR_GUEST:
-        msr_count = curr->arch.hvm_vmx.msr_count;
-        msr_area = curr->arch.hvm_vmx.msr_area;
+        start    = arch_vmx->msr_area;
+        total    = arch_vmx->msr_count;
         break;
 
     default:
         ASSERT_UNREACHABLE();
     }
 
-    if ( msr_area == NULL )
+    if ( !start )
         return NULL;
 
-    return bsearch(&msr, msr_area, msr_count, sizeof(struct vmx_msr_entry),
+    return bsearch(&msr, start, total, sizeof(struct vmx_msr_entry),
                    vmx_msr_entry_key_cmp);
 }
 
 int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type)
 {
     struct vcpu *curr = current;
-    unsigned int idx, *msr_count;
-    struct vmx_msr_entry **msr_area, *msr_area_elem;
+    struct arch_vmx_struct *arch_vmx = &curr->arch.hvm_vmx;
+    struct vmx_msr_entry **ptr, *start = NULL, *ent, *end;
+    unsigned int total;
 
     switch ( type )
     {
     case VMX_MSR_HOST:
-        msr_count = &curr->arch.hvm_vmx.host_msr_count;
-        msr_area = &curr->arch.hvm_vmx.host_msr_area;
+        ptr      = &arch_vmx->host_msr_area;
+        total    = arch_vmx->host_msr_count;
         break;
 
     case VMX_MSR_GUEST:
-        msr_count = &curr->arch.hvm_vmx.msr_count;
-        msr_area = &curr->arch.hvm_vmx.msr_area;
+        ptr      = &arch_vmx->msr_area;
+        total    = arch_vmx->msr_count;
         break;
 
     default:
@@ -1341,51 +1343,55 @@ int vmx_add_msr(uint32_t msr, enum vmx_msr_list_type type)
         return -EINVAL;
     }
 
-    if ( *msr_area == NULL )
+    /* Allocate memory on first use. */
+    if ( unlikely(!*ptr) )
     {
-        if ( (*msr_area = alloc_xenheap_page()) == NULL )
+        paddr_t addr;
+
+        if ( (*ptr = alloc_xenheap_page()) == NULL )
             return -ENOMEM;
 
+        addr = virt_to_maddr(*ptr);
+
         switch ( type )
         {
         case VMX_MSR_HOST:
-            __vmwrite(VM_EXIT_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+            __vmwrite(VM_EXIT_MSR_LOAD_ADDR, addr);
             break;
 
         case VMX_MSR_GUEST:
-            __vmwrite(VM_EXIT_MSR_STORE_ADDR, virt_to_maddr(*msr_area));
-            __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, virt_to_maddr(*msr_area));
+            __vmwrite(VM_EXIT_MSR_STORE_ADDR, addr);
+            __vmwrite(VM_ENTRY_MSR_LOAD_ADDR, addr);
             break;
         }
     }
 
-    for ( idx = 0; idx < *msr_count && (*msr_area)[idx].index <= msr; idx++ )
-        if ( (*msr_area)[idx].index == msr )
+    start = *ptr;
+    end   = start + total;
+
+    for ( ent = start; ent < end && ent->index <= msr; ++ent )
+        if ( ent->index == msr )
             return 0;
 
-    if ( *msr_count == (PAGE_SIZE / sizeof(struct vmx_msr_entry)) )
+    if ( total == (PAGE_SIZE / sizeof(*ent)) )
        return -ENOSPC;
 
-    memmove(*msr_area + idx + 1, *msr_area + idx,
-            sizeof(*msr_area_elem) * (*msr_count - idx));
-
-    msr_area_elem = *msr_area + idx;
-    msr_area_elem->index = msr;
-    msr_area_elem->mbz = 0;
+    memmove(ent + 1, ent, sizeof(*ent) * (end - ent));
 
-    ++*msr_count;
+    ent->index = msr;
+    ent->mbz = 0;
 
     switch ( type )
     {
     case VMX_MSR_HOST:
-        rdmsrl(msr, msr_area_elem->data);
-        __vmwrite(VM_EXIT_MSR_LOAD_COUNT, *msr_count);
+        rdmsrl(msr, ent->data);
+        __vmwrite(VM_EXIT_MSR_LOAD_COUNT, ++arch_vmx->host_msr_count);
         break;
 
     case VMX_MSR_GUEST:
-