Re: [Xen-devel] [PATCH v12 6/6] x86/ioreq server: Synchronously reset outstanding p2m_ioreq_server entries when an ioreq server unmaps.

2017-04-30 Thread Yu Zhang



On 4/28/2017 3:45 PM, Zhang, Xiong Y wrote:

> I found this patch doesn't work; the reason is inline. We need to propose
> a fix for this.
> > diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
> > index 7e0da81..d72b7bd 100644
> > --- a/xen/arch/x86/hvm/dm.c
> > +++ b/xen/arch/x86/hvm/dm.c
> > @@ -384,15 +384,50 @@ static int dm_op(domid_t domid,
> > 
> >      case XEN_DMOP_map_mem_type_to_ioreq_server:
> >      {
> > -        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
> > +        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
> >              &op.u.map_mem_type_to_ioreq_server;
> > +        unsigned long first_gfn = data->opaque;
> > +
> > +        const_op = false;
> > 
> >          rc = -EOPNOTSUPP;
> >          if ( !hap_enabled(d) )
> >              break;
> > 
> > -        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> > -                                              data->type, data->flags);
> > +        if ( first_gfn == 0 )
> > +            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> > +                                                  data->type, data->flags);
> > +        else
> > +            rc = 0;
> > +
> > +        /*
> > +         * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server,
> > +         * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw.
> > +         */
> > +        if ( rc == 0 && data->flags == 0 )
> > +        {
> > +            struct p2m_domain *p2m = p2m_get_hostp2m(d);
> > +
> > +            while ( read_atomic(&p2m->ioreq.entry_count) &&
> > +                    first_gfn <= p2m->max_mapped_pfn )
> > +            {
> > +                /* Iterate p2m table for 256 gfns each time. */
> > +                p2m_finish_type_change(d, _gfn(first_gfn), 256,
> > +                                       p2m_ioreq_server, p2m_ram_rw);
> > +
> > +                first_gfn += 256;
> > +
> > +                /* Check for continuation if it's not the last iteration. */
> > +                if ( first_gfn <= p2m->max_mapped_pfn &&
> > +                     hypercall_preempt_check() )
> > +                {
> > +                    rc = -ERESTART;
> > +                    data->opaque = first_gfn;
> > +                    break;
> > +                }
> > +            }
> > +        }
> > +
> >          break;
> >      }
> > 
> > diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> > index 4169d18..1d57e5c 100644
> > --- a/xen/arch/x86/mm/p2m.c
> > +++ b/xen/arch/x86/mm/p2m.c
> > @@ -1011,6 +1011,35 @@ void p2m_change_type_range(struct domain *d,
> >      p2m_unlock(p2m);
> >  }
> > 
> > +/* Synchronously modify the p2m type for a range of gfns from ot to nt. */
> > +void p2m_finish_type_change(struct domain *d,
> > +                            gfn_t first_gfn, unsigned long max_nr,
> > +                            p2m_type_t ot, p2m_type_t nt)
> > +{
> > +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> > +    p2m_type_t t;
> > +    unsigned long gfn = gfn_x(first_gfn);
> > +    unsigned long last_gfn = gfn + max_nr - 1;
> > +
> > +    ASSERT(ot != nt);
> > +    ASSERT(p2m_is_changeable(ot) && p2m_is_changeable(nt));
> > +
> > +    p2m_lock(p2m);
> > +
> > +    last_gfn = min(last_gfn, p2m->max_mapped_pfn);
> > +    while ( gfn <= last_gfn )
> > +    {
> > +        get_gfn_query_unlocked(d, gfn, &t);
> 
> [Zhang, Xiong Y] The previous patch "asynchronously reset outstanding
> p2m_ioreq_server entries" calls ept_change_entry_type_global(), which
> sets ept_entry.recalc=1 and ept_entry.emt=MTRR_NUM_TYPES. So
> get_gfn_query_unlocked(gfn) will recalculate the gfn's mem_type and return
> the new mem_type, not the old one.
> For a pfn whose old mem_type is p2m_ioreq_server, the returned t is p2m_ram_rw.
> Then ( t == ot ) can never be true, and p2m_change_type_one() is never called.
> 
> As a result, a guest VM using this interface cannot reboot.


The root cause is in the latest version of patch 5/6: p2m_ram_rw is
returned for ioreq server pages whenever no ioreq server is mapped.

There's no such problem in version 12 and earlier ones.
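Roughly, the recalculation in that version behaves like the sketch below
(a simplified illustration, not the literal code from patch 5/6; the
p2m->ioreq.server check stands in for whatever condition the real recalc
logic uses):

    /* Simplified sketch: how a pending recalc resolves the effective type. */
    static p2m_type_t effective_type(const struct p2m_domain *p2m, p2m_type_t t)
    {
        /* Assumption for illustration: with no ioreq server mapped,
         * p2m_ioreq_server entries already read back as p2m_ram_rw, so the
         * "t == p2m_ioreq_server" check in p2m_finish_type_change() in
         * patch 6/6 can never match. */
        if ( t == p2m_ioreq_server && p2m->ioreq.server == NULL )
            return p2m_ram_rw;

        return t;
    }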
I have sent Xiong a patch to fix this. Maybe he can send out the fix
patch after the XenGT tests pass.

BTW, thanks Xiong for helping find this error. :)

Thanks
Yu

[snip]



Re: [Xen-devel] [PATCH v12 6/6] x86/ioreq server: Synchronously reset outstanding p2m_ioreq_server entries when an ioreq server unmaps.

2017-04-28 Thread Zhang, Xiong Y
I found this patch doesn't work; the reason is inline. We need to propose
a fix for this.
> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
> index 7e0da81..d72b7bd 100644
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -384,15 +384,50 @@ static int dm_op(domid_t domid,
> 
>      case XEN_DMOP_map_mem_type_to_ioreq_server:
>      {
> -        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
> +        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
>              &op.u.map_mem_type_to_ioreq_server;
> +        unsigned long first_gfn = data->opaque;
> +
> +        const_op = false;
> 
>          rc = -EOPNOTSUPP;
>          if ( !hap_enabled(d) )
>              break;
> 
> -        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> -                                              data->type, data->flags);
> +        if ( first_gfn == 0 )
> +            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
> +                                                  data->type, data->flags);
> +        else
> +            rc = 0;
> +
> +        /*
> +         * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server,
> +         * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw.
> +         */
> +        if ( rc == 0 && data->flags == 0 )
> +        {
> +            struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +
> +            while ( read_atomic(&p2m->ioreq.entry_count) &&
> +                    first_gfn <= p2m->max_mapped_pfn )
> +            {
> +                /* Iterate p2m table for 256 gfns each time. */
> +                p2m_finish_type_change(d, _gfn(first_gfn), 256,
> +                                       p2m_ioreq_server, p2m_ram_rw);
> +
> +                first_gfn += 256;
> +
> +                /* Check for continuation if it's not the last iteration. */
> +                if ( first_gfn <= p2m->max_mapped_pfn &&
> +                     hypercall_preempt_check() )
> +                {
> +                    rc = -ERESTART;
> +                    data->opaque = first_gfn;
> +                    break;
> +                }
> +            }
> +        }
> +
>          break;
>      }
> 
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 4169d18..1d57e5c 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1011,6 +1011,35 @@ void p2m_change_type_range(struct domain *d,
>      p2m_unlock(p2m);
>  }
> 
> +/* Synchronously modify the p2m type for a range of gfns from ot to nt. */
> +void p2m_finish_type_change(struct domain *d,
> +                            gfn_t first_gfn, unsigned long max_nr,
> +                            p2m_type_t ot, p2m_type_t nt)
> +{
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    p2m_type_t t;
> +    unsigned long gfn = gfn_x(first_gfn);
> +    unsigned long last_gfn = gfn + max_nr - 1;
> +
> +    ASSERT(ot != nt);
> +    ASSERT(p2m_is_changeable(ot) && p2m_is_changeable(nt));
> +
> +    p2m_lock(p2m);
> +
> +    last_gfn = min(last_gfn, p2m->max_mapped_pfn);
> +    while ( gfn <= last_gfn )
> +    {
> +        get_gfn_query_unlocked(d, gfn, &t);
[Zhang, Xiong Y] The previous patch "asynchronously reset outstanding
p2m_ioreq_server entries" calls ept_change_entry_type_global(), which
sets ept_entry.recalc=1 and ept_entry.emt=MTRR_NUM_TYPES. So
get_gfn_query_unlocked(gfn) will recalculate the gfn's mem_type and return
the new mem_type, not the old one.
For a pfn whose old mem_type is p2m_ioreq_server, the returned t is p2m_ram_rw.
Then ( t == ot ) can never be true, and p2m_change_type_one() is never called.

As a result, a guest VM using this interface cannot reboot.
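To make the failure concrete, here is a minimal sketch (the value returned
through t is assumed from the behaviour described above; this is an
illustration, not a trace of the actual code):

    /* Minimal illustration of the failure: the reset that patch 6/6 relies
     * on can never fire once a recalc is pending on the entry. */
    static void reset_one_entry(struct domain *d, unsigned long gfn)
    {
        p2m_type_t t;

        /* The entry was written as p2m_ioreq_server, but a recalc is
         * pending, so the query already returns the *new* type ... */
        get_gfn_query_unlocked(d, gfn, &t);

        /* ... i.e. t is already p2m_ram_rw here, this test never succeeds,
         * and p2m_change_type_one() is never reached. */
        if ( t == p2m_ioreq_server )
            p2m_change_type_one(d, gfn, t, p2m_ram_rw);
    }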

thanks
> +
> +        if ( t == ot )
> +            p2m_change_type_one(d, gfn, t, nt);
> +
> +        gfn++;
> +    }
> +
> +    p2m_unlock(p2m);
> +}
> +
>  /*
>   * Returns:
>   *0  for success
> diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
> index e7e390d..0e670af 100644
> --- a/xen/include/asm-x86/p2m.h
> +++ b/xen/include/asm-x86/p2m.h
> @@ -611,6 +611,12 @@ void p2m_change_type_range(struct domain *d,
>  int p2m_change_type_one(struct domain *d, unsigned long gfn,
>                          p2m_type_t ot, p2m_type_t nt);
> 
> +/* Synchronously change the p2m type for a range of gfns */
> +void p2m_finish_type_change(struct domain *d,
> +                            gfn_t first_gfn,
> +                            unsigned long max_nr,
> +                            p2m_type_t ot, p2m_type_t nt);
> +
>  /* Report a change affecting memory types. */
>  void p2m_memory_type_changed(struct domain *d);
> 
> --
> 1.9.1
> 
> 


[Xen-devel] [PATCH v12 6/6] x86/ioreq server: Synchronously reset outstanding p2m_ioreq_server entries when an ioreq server unmaps.

2017-04-06 Thread Yu Zhang
After an ioreq server has unmapped, the remaining p2m_ioreq_server
entries need to be reset back to p2m_ram_rw. This patch does this
synchronously by iterating the p2m table.

The synchronous resetting is necessary because we need to guarantee
the p2m table is clean before another ioreq server is mapped. And
since sweeping the p2m table could be time consuming, it is done
with hypercall continuation.
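For reference, this is roughly how a device model triggers the unmap path
(a caller-side sketch assuming the libxendevicemodel wrapper; the helper
below and its name are illustrative, not part of this patch). The hypercall
continuation is transparent to the caller: Xen restarts the dm_op internally,
with data->opaque carrying the next gfn to process.

    #include <xendevicemodel.h>

    /* Unmap p2m_ioreq_server pages from an ioreq server: flags == 0 asks Xen
     * to stop forwarding accesses and to sweep the p2m table, resetting the
     * remaining p2m_ioreq_server entries back to p2m_ram_rw. */
    static int unmap_ioreq_mem_type(xendevicemodel_handle *dmod, domid_t domid,
                                    ioservid_t id)
    {
        return xendevicemodel_map_mem_type_to_ioreq_server(dmod, domid, id,
                                                           HVMMEM_ioreq_server,
                                                           0 /* flags: unmap */);
    }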

Signed-off-by: Yu Zhang 
Reviewed-by: Paul Durrant 
Reviewed-by: Jan Beulich 
Reviewed-by: George Dunlap 
---
Cc: Paul Durrant 
Cc: Jan Beulich 
Cc: Andrew Cooper 
Cc: George Dunlap 

changes in v4:
  - Added "Reviewed-by: Paul Durrant "
  - Added "Reviewed-by: Jan Beulich "
  - Added "Reviewed-by: George Dunlap "

changes in v3:
  - According to comments from Paul: use max_nr, instead of
    last_gfn, for p2m_finish_type_change().
  - According to comments from Jan: use gfn_t as the type of
    first_gfn in p2m_finish_type_change().
  - According to comments from Jan: simplify the if condition
    before using p2m_finish_type_change().

changes in v2:
  - According to comments from Jan and Andrew: do not use the
    HVMOP type hypercall continuation method. Instead, add
    an opaque field in xen_dm_op_map_mem_type_to_ioreq_server to
    store the gfn.
  - According to comments from Jan: change the routine's comments
    and the names of the parameters of p2m_finish_type_change().

changes in v1:
  - This patch is split from patch 4 of the last version.
  - According to comments from Jan: update gfn_start when using
    hypercall continuation to reset the p2m type.
  - According to comments from Jan: use min() to compare gfn_end
    and the max mapped pfn in p2m_finish_type_change().
---
 xen/arch/x86/hvm/dm.c | 41 ++---
 xen/arch/x86/mm/p2m.c | 29 +
 xen/include/asm-x86/p2m.h |  6 ++
 3 files changed, 73 insertions(+), 3 deletions(-)

diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 7e0da81..d72b7bd 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -384,15 +384,50 @@ static int dm_op(domid_t domid,
 
     case XEN_DMOP_map_mem_type_to_ioreq_server:
     {
-        const struct xen_dm_op_map_mem_type_to_ioreq_server *data =
+        struct xen_dm_op_map_mem_type_to_ioreq_server *data =
             &op.u.map_mem_type_to_ioreq_server;
+        unsigned long first_gfn = data->opaque;
+
+        const_op = false;
 
         rc = -EOPNOTSUPP;
         if ( !hap_enabled(d) )
             break;
 
-        rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
-                                              data->type, data->flags);
+        if ( first_gfn == 0 )
+            rc = hvm_map_mem_type_to_ioreq_server(d, data->id,
+                                                  data->type, data->flags);
+        else
+            rc = 0;
+
+        /*
+         * Iterate p2m table when an ioreq server unmaps from p2m_ioreq_server,
+         * and reset the remaining p2m_ioreq_server entries back to p2m_ram_rw.
+         */
+        if ( rc == 0 && data->flags == 0 )
+        {
+            struct p2m_domain *p2m = p2m_get_hostp2m(d);
+
+            while ( read_atomic(&p2m->ioreq.entry_count) &&
+                    first_gfn <= p2m->max_mapped_pfn )
+            {
+                /* Iterate p2m table for 256 gfns each time. */
+                p2m_finish_type_change(d, _gfn(first_gfn), 256,
+                                       p2m_ioreq_server, p2m_ram_rw);
+
+                first_gfn += 256;
+
+                /* Check for continuation if it's not the last iteration. */
+                if ( first_gfn <= p2m->max_mapped_pfn &&
+                     hypercall_preempt_check() )
+                {
+                    rc = -ERESTART;
+                    data->opaque = first_gfn;
+                    break;
+                }
+            }
+        }
+
         break;
     }
 
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4169d18..1d57e5c 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1011,6 +1011,35 @@ void p2m_change_type_range(struct domain *d,
     p2m_unlock(p2m);
 }
 
+/* Synchronously modify the p2m type for a range of gfns from ot to nt. */
+void p2m_finish_type_change(struct domain *d,
+                            gfn_t first_gfn, unsigned long max_nr,
+                            p2m_type_t ot, p2m_type_t nt)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    p2m_type_t t;
+    unsigned long gfn = gfn_x(first_gfn);
+    unsigned long last_gfn = gfn + max_nr - 1;
+
+    ASSERT(ot != nt);
+    ASSERT(p2m_is_changeable(ot) && p2m_is_changeable(nt));
+
+    p2m_lock(p2m);
+
+last_gfn = min(last_gfn,