Re: [Xen-devel] [PATCH v1 Altp2m cleanup 3/3] Making altp2m struct dynamically allocated.

2016-06-28 Thread Jan Beulich
>>> On 21.06.16 at 18:04,  wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -5228,7 +5228,7 @@ static int do_altp2m_op(
>  
>  if ( (a.cmd != HVMOP_altp2m_get_domain_state) &&
>   (a.cmd != HVMOP_altp2m_set_domain_state) &&
> - !d->arch.altp2m_active )
> + ! altp2m_active(d) )

Stray blank.

> @@ -5262,11 +5262,11 @@ static int do_altp2m_op(
>  break;
>  }
>  
> -ostate = d->arch.altp2m_active;
> -d->arch.altp2m_active = !!a.u.domain_state.state;
> +ostate = altp2m_active(d);
> + set_altp2m_active(d, !!a.u.domain_state.state);

Bogus tab indentation.

> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -193,12 +193,15 @@ static void p2m_teardown_altp2m(struct domain *d)
>  
>  for ( i = 0; i < MAX_ALTP2M; i++ )
>  {
> -if ( !d->arch.altp2m_p2m[i] )
> +if ( !d->arch.altp2m->altp2m_p2m[i] )
>  continue;
> -p2m = d->arch.altp2m_p2m[i];
> +p2m = d->arch.altp2m->altp2m_p2m[i];
>  p2m_free_one(p2m);
> -d->arch.altp2m_p2m[i] = NULL;
> +d->arch.altp2m->altp2m_p2m[i] = NULL;
>  }
> +
> +if (d->arch.altp2m) 

Missing blanks.

> +xfree(d->arch.altp2m);

But the conditional is pointless anyway.
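(For reference: xfree() is a no-op for a NULL argument, and the loop above
already dereferences d->arch.altp2m unconditionally, so a minimal sketch of
the teardown tail once the check is dropped would simply be:

    xfree(d->arch.altp2m);
    d->arch.altp2m = NULL;

clearing the pointer afterwards being optional but cheap.)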

> @@ -206,10 +209,12 @@ static int p2m_init_altp2m(struct domain *d)
>  unsigned int i;
>  struct p2m_domain *p2m;
>  
> -mm_lock_init(&d->arch.altp2m_list_lock);
> +d->arch.altp2m = xzalloc(struct altp2m_domain);
> +
> +mm_lock_init(&d->arch.altp2m->altp2m_list_lock);

Missing error check.
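(A minimal sketch of the kind of check meant here, assuming -ENOMEM is the
appropriate error to propagate from p2m_init_altp2m() on allocation failure:

    d->arch.altp2m = xzalloc(struct altp2m_domain);
    if ( d->arch.altp2m == NULL )
        return -ENOMEM;

    mm_lock_init(&d->arch.altp2m->altp2m_list_lock);
)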

> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -274,6 +274,13 @@ struct monitor_write_data {
>  uint64_t cr4;
>  };
>  
> +struct altp2m_domain {
> +bool_t altp2m_active;
> +struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
> +mm_lock_t altp2m_list_lock;
> +uint64_t *altp2m_eptp;
> +};

No point prefixing all the fields with altp2m_. And also the structure
now doesn't belong here anymore - it should move to e.g. p2m.h.
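(A sketch of what is being asked for - the structure moved to e.g.
xen/include/asm-x86/p2m.h with the redundant prefixes dropped; the field
names below are only illustrative:

    struct altp2m_domain {
        bool_t active;
        struct p2m_domain *p2m[MAX_ALTP2M];
        mm_lock_t list_lock;
        uint64_t *eptp;
    };
)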

> @@ -320,10 +327,13 @@ struct arch_domain
>  mm_lock_t nested_p2m_lock;
>  
>  /* altp2m: allow multiple copies of host p2m */
> +/*
>  bool_t altp2m_active;
>  struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
>  mm_lock_t altp2m_list_lock;
> -uint64_t *altp2m_eptp;
> +uint64_t *altp2m_eptp; 
> +*/

What's the purpose of this comment?
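(Presumably the old fields are meant to be deleted outright rather than left
behind as a commented-out block. A sketch of the likely intent, assuming the
dynamically allocated struct is reachable via the d->arch.altp2m pointer used
elsewhere in the patch:

     /* altp2m: allow multiple copies of host p2m */
    -bool_t altp2m_active;
    -struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
    -mm_lock_t altp2m_list_lock;
    -uint64_t *altp2m_eptp;
    +struct altp2m_domain *altp2m;
)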

Jan




[Xen-devel] [PATCH v1 Altp2m cleanup 3/3] Making altp2m struct dynamically allocated.

2016-06-21 Thread Paul Lai
Ravi Sahita's dynamically allocated altp2m structs

Signed-off-by: Paul Lai 
---
 xen/arch/x86/hvm/hvm.c   |  8 +++---
 xen/arch/x86/hvm/vmx/vmx.c   |  2 +-
 xen/arch/x86/mm/altp2m.c | 18 +++---
 xen/arch/x86/mm/mm-locks.h   |  4 +--
 xen/arch/x86/mm/p2m-ept.c|  8 +++---
 xen/arch/x86/mm/p2m.c| 59 
 xen/include/asm-x86/altp2m.h |  7 +-
 xen/include/asm-x86/domain.h | 12 -
 xen/include/asm-x86/p2m.h|  2 +-
 9 files changed, 70 insertions(+), 50 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1595b3e..40270d0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5228,7 +5228,7 @@ static int do_altp2m_op(
 
 if ( (a.cmd != HVMOP_altp2m_get_domain_state) &&
  (a.cmd != HVMOP_altp2m_set_domain_state) &&
- !d->arch.altp2m_active )
+ ! altp2m_active(d) )
 {
 rc = -EOPNOTSUPP;
 goto out;
@@ -5262,11 +5262,11 @@ static int do_altp2m_op(
 break;
 }
 
-ostate = d->arch.altp2m_active;
-d->arch.altp2m_active = !!a.u.domain_state.state;
+ostate = altp2m_active(d);
+   set_altp2m_active(d, !!a.u.domain_state.state);
 
 /* If the alternate p2m state has changed, handle appropriately */
-if ( d->arch.altp2m_active != ostate &&
+if ( altp2m_active(d) != ostate &&
  (ostate || !(rc = p2m_init_altp2m_by_id(d, 0))) )
 {
 for_each_vcpu( d, v )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 670d7dc..b522578 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2017,7 +2017,7 @@ static void vmx_vcpu_update_vmfunc_ve(struct vcpu *v)
 {
 v->arch.hvm_vmx.secondary_exec_control |= mask;
 __vmwrite(VM_FUNCTION_CONTROL, VMX_VMFUNC_EPTP_SWITCHING);
-__vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m_eptp));
+__vmwrite(EPTP_LIST_ADDR, virt_to_maddr(d->arch.altp2m->altp2m_eptp));
 
 if ( cpu_has_vmx_virt_exceptions )
 {
diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
index 1caf6b4..77187c9 100644
--- a/xen/arch/x86/mm/altp2m.c
+++ b/xen/arch/x86/mm/altp2m.c
@@ -72,23 +72,23 @@ hvm_altp2m_init( struct domain *d) {
 unsigned int i = 0;
 
 /* Init alternate p2m data */
-if ( (d->arch.altp2m_eptp = alloc_xenheap_page()) == NULL )
+if ( (d->arch.altp2m->altp2m_eptp = alloc_xenheap_page()) == NULL )
 {
 rv = -ENOMEM;
 goto out;
 }
 
 for ( i = 0; i < MAX_EPTP; i++ )
-d->arch.altp2m_eptp[i] = INVALID_MFN;
+d->arch.altp2m->altp2m_eptp[i] = INVALID_MFN;
 
 for ( i = 0; i < MAX_ALTP2M; i++ )
 {
-rv = p2m_alloc_table(d->arch.altp2m_p2m[i]);
+rv = p2m_alloc_table(d->arch.altp2m->altp2m_p2m[i]);
 if ( rv != 0 )
goto out;
 }
 
-d->arch.altp2m_active = 0;
+set_altp2m_active(d, 0);
  out:
 return rv;
 }
@@ -96,16 +96,16 @@ hvm_altp2m_init( struct domain *d) {
 void
 hvm_altp2m_teardown( struct domain *d) {
 unsigned int i = 0;
-d->arch.altp2m_active = 0;
+set_altp2m_active(d, 0);
 
-if ( d->arch.altp2m_eptp )
+if ( d->arch.altp2m->altp2m_eptp )
 {
-free_xenheap_page(d->arch.altp2m_eptp);
-d->arch.altp2m_eptp = NULL;
+free_xenheap_page(d->arch.altp2m->altp2m_eptp);
+d->arch.altp2m->altp2m_eptp = NULL;
 }
 
 for ( i = 0; i < MAX_ALTP2M; i++ )
-p2m_teardown(d->arch.altp2m_p2m[i]);
+p2m_teardown(d->arch.altp2m->altp2m_p2m[i]);
 }
 
 /*
diff --git a/xen/arch/x86/mm/mm-locks.h b/xen/arch/x86/mm/mm-locks.h
index 086c8bb..4d17b0a 100644
--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -251,8 +251,8 @@ declare_mm_rwlock(p2m);
  */
 
 declare_mm_lock(altp2mlist)
-#define altp2m_list_lock(d)   mm_lock(altp2mlist, &(d)->arch.altp2m_list_lock)
-#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m_list_lock)
+#define altp2m_list_lock(d)   mm_lock(altp2mlist, &(d)->arch.altp2m->altp2m_list_lock)
+#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m->altp2m_list_lock)
 
 /* P2M lock (per-altp2m-table)
  *
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index dff34b1..754b660 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1330,14 +1330,14 @@ void setup_ept_dump(void)
 }
 
 void p2m_init_altp2m_helper( struct domain *d, unsigned int i) {
-struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+struct p2m_domain *p2m = d->arch.altp2m->altp2m_p2m[i];
 struct ept_data *ept;
 
 p2m->min_remapped_gfn = INVALID_GFN;
 p2m->max_remapped_gfn = 0;
 ept = &p2m->ept;
 ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
-d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
+d->arch.altp2m->altp2m_eptp[i] = ept_get_eptp(ept);
 }
 
 unsigned int