Re: [PATCH v3 19/20] xen/privcmd: Add support for Linux 64KB page granularity

2015-09-01 Thread Julien Grall
Hi Stefano,

On 10/08/15 13:03, Stefano Stabellini wrote:
>> +xen_pfn = xen_page_to_pfn(page);
>> +}
>> +fn(pfn_to_gfn(xen_pfn++), data);
> 
> What is the purpose of incrementing xen_pfn here?

Because the Linux page is split into multiple xen_pfn, so we want to get
the next xen_pfn for the next iteration.

Regards,

-- 
Julien Grall
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH v3 19/20] xen/privcmd: Add support for Linux 64KB page granularity

2015-08-10 Thread Stefano Stabellini
On Fri, 7 Aug 2015, Julien Grall wrote:
> The hypercall interface (as well as the toolstack) is always using 4KB
> page granularity. When the toolstack is asking for mapping a series of
> guest PFNs in a batch, it expects to have the pages mapped contiguously in
> its virtual memory.
> 
> When Linux is using 64KB page granularity, the privcmd driver will have
> to map multiple Xen PFN in a single Linux page.
> 
> Note that this solution works on page granularity which is a multiple of
> 4KB.
> 
> Signed-off-by: Julien Grall 
> 
> ---
> Cc: Konrad Rzeszutek Wilk 
> Cc: Boris Ostrovsky 
> Cc: David Vrabel 
> 
> I kept the hypercall arguments in remap_data to avoid allocating them on
> the stack every time that remap_pte_fn is called.
> I will keep it like that unless someone strongly disagrees.
> 
> Changes in v3:
> - The function to split a Linux page into multiple Xen pages has
> been moved internally. It was the only use (not used anymore in
> the balloon) and it's not quite clear what should be the common
> interface. Defer the question until someone needs to use it.
> - s/nr_pfn/numgfns/ to make clear that we are dealing with GFN
> - Use DIV_ROUND_UP rather round_up and fix the usage in
> xen_xlate_unmap_gfn_range
> 
> Changes in v2:
> - Use xen_apply_to_page
> ---
>  drivers/xen/privcmd.c   |   8 ++--
>  drivers/xen/xlate_mmu.c | 124 
> 
>  2 files changed, 89 insertions(+), 43 deletions(-)
> 
> diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
> index c6deb87..c8798ee 100644
> --- a/drivers/xen/privcmd.c
> +++ b/drivers/xen/privcmd.c
> @@ -446,7 +446,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, 
> int version)
>   return -EINVAL;
>   }
>  
> - nr_pages = m.num;
> + nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
>   if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
>   return -EINVAL;
>  
> @@ -494,7 +494,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, 
> int version)
>   goto out_unlock;
>   }
>   if (xen_feature(XENFEAT_auto_translated_physmap)) {
> - ret = alloc_empty_pages(vma, m.num);
> + ret = alloc_empty_pages(vma, nr_pages);
>   if (ret < 0)
>   goto out_unlock;
>   } else
> @@ -518,6 +518,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, 
> int version)
>   state.global_error  = 0;
>   state.version   = version;
>  
> + BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
>   /* mmap_batch_fn guarantees ret == 0 */
>   BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
>   &pagelist, mmap_batch_fn, &state));
> @@ -582,12 +583,13 @@ static void privcmd_close(struct vm_area_struct *vma)
>  {
>   struct page **pages = vma->vm_private_data;
>   int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
> + int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
>   int rc;
>  
>   if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
>   return;
>  
> - rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
> + rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
>   if (rc == 0)
>   free_xenballooned_pages(numpgs, pages);
>   else
> diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
> index cff2387..a1d3904 100644
> --- a/drivers/xen/xlate_mmu.c
> +++ b/drivers/xen/xlate_mmu.c
> @@ -38,31 +38,28 @@
>  #include 
>  #include 
>  
> -/* map fgfn of domid to lpfn in the current domain */
> -static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
> - unsigned int domid)
> -{
> - int rc;
> - struct xen_add_to_physmap_range xatp = {
> - .domid = DOMID_SELF,
> - .foreign_domid = domid,
> - .size = 1,
> - .space = XENMAPSPACE_gmfn_foreign,
> - };
> - xen_ulong_t idx = fgfn;
> - xen_pfn_t gpfn = lpfn;
> - int err = 0;
> +typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
>  
> - set_xen_guest_handle(xatp.idxs, &idx);
> - set_xen_guest_handle(xatp.gpfns, &gpfn);
> - set_xen_guest_handle(xatp.errs, &err);
> +/* Break down the pages in 4KB chunk and call fn for each gfn */
> +static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
> +  xen_gfn_fn_t fn, void *data)
> +{
> + unsigned long xen_pfn = 0;
> + struct page *page;
> + int i;
>  
> - rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
> - return rc < 0 ? rc : err;
> + for (i = 0; i < nr_gfn; i++) {
> + if ((i % XEN_PFN_PER_PAGE) == 0) {
> + page = pages[i / XEN_PFN_PER_PAGE];

If this functi