On Mon, Dec 11, 2023 at 11:38:01PM -0800, Vivek Kasireddy wrote:
> @@ -42,7 +42,7 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
>       if (pgoff >= ubuf->pagecount)
>               return VM_FAULT_SIGBUS;
>  
> -     pfn = page_to_pfn(ubuf->pages[pgoff]);
> +     pfn = page_to_pfn(&ubuf->folios[pgoff]->page);

We have folio_pfn(); this can simply be
`pfn = folio_pfn(ubuf->folios[pgoff]);` instead of reaching through
->page.

>  static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
>  {
>       struct udmabuf *ubuf = buf->priv;
> +     struct page **pages;
>       void *vaddr;
> +     pgoff_t pg;
>  
>       dma_resv_assert_held(buf->resv);
>  
> -     vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
> +     pages = kmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
> +     if (!pages)
> +             return -ENOMEM;
> +
> +     for (pg = 0; pg < ubuf->pagecount; pg++)
> +             pages[pg] = &ubuf->folios[pg]->page;
> +
> +     vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
> +     kfree(pages);

We don't yet have a vm_map_ram() variant that takes an array of
folios, so this temporary page array is unavoidable for now.  We
probably should add one; there was _something_ I was looking at
recently that would have benefited from it as well ...

> @@ -254,31 +262,70 @@ static int handle_shmem_pages(struct udmabuf *ubuf, 
> struct file *memfd,
>                             pgoff_t *pgbuf)
>  {
>       pgoff_t pgidx, pgoff = offset >> PAGE_SHIFT;
> -     struct page *page;
> +     struct folio *folio = NULL;
>  
>       for (pgidx = 0; pgidx < pgcnt; pgidx++) {
> -             page = shmem_read_mapping_page(memfd->f_mapping,
> -                                            pgoff + pgidx);
> -             if (IS_ERR(page))
> -                     return PTR_ERR(page);
> +             folio = shmem_read_folio(memfd->f_mapping,
> +                                      pgoff + pgidx);

You could join these two lines — with the shorter shmem_read_folio()
name the call now fits on a single line.

Reply via email to