On 22/08/2022 23:01, Robin Murphy wrote:
> Convert to io-pgtable's bulk {map,unmap}_pages() APIs, to help the old
> single-page interfaces eventually go away. Unmapping heap BOs still
> wants to be done a page at a time, but everything else can get the full
> benefit of the more efficient interface.
> 
> Signed-off-by: Robin Murphy <robin.mur...@arm.com>

Looks correct to me. Although the voodoo magic does take a little bit of
figuring out ;)

Reviewed-by: Steven Price <steven.pr...@arm.com>

I'll push to drm-misc-next.

Thanks,

Steve

> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 40 +++++++++++++++----------
>  1 file changed, 25 insertions(+), 15 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index b285a8001b1d..e246d914e7f6 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -248,11 +248,15 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev)
>       mmu_write(pfdev, MMU_INT_MASK, ~0);
>  }
>  
> -static size_t get_pgsize(u64 addr, size_t size)
> +static size_t get_pgsize(u64 addr, size_t size, size_t *count)
>  {
> -     if (addr & (SZ_2M - 1) || size < SZ_2M)
> -             return SZ_4K;
> +     size_t blk_offset = -addr % SZ_2M;
>  
> +     if (blk_offset || size < SZ_2M) {
> +             *count = min_not_zero(blk_offset, size) / SZ_4K;
> +             return SZ_4K;
> +     }
> +     *count = size / SZ_2M;
>       return SZ_2M;
>  }
>  
> @@ -287,12 +291,16 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
>               dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
>  
>               while (len) {
> -                     size_t pgsize = get_pgsize(iova | paddr, len);
> +                     size_t pgcount, mapped = 0;
> +                     size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
>  
> -                     ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
> -                     iova += pgsize;
> -                     paddr += pgsize;
> -                     len -= pgsize;
> +                     ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
> +                                    GFP_KERNEL, &mapped);
> +                     /* Don't get stuck if things have gone wrong */
> +                     mapped = max(mapped, pgsize);
> +                     iova += mapped;
> +                     paddr += mapped;
> +                     len -= mapped;
>               }
>       }
>  
> @@ -344,15 +352,17 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
>               mapping->mmu->as, iova, len);
>  
>       while (unmapped_len < len) {
> -             size_t unmapped_page;
> -             size_t pgsize = get_pgsize(iova, len - unmapped_len);
> +             size_t unmapped_page, pgcount;
> +             size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
>  
> -             if (ops->iova_to_phys(ops, iova)) {
> -                     unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
> -                     WARN_ON(unmapped_page != pgsize);
> +             if (bo->is_heap)
> +                     pgcount = 1;
> +             if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
> +                     unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
> +                     WARN_ON(unmapped_page != pgsize * pgcount);
>               }
> -             iova += pgsize;
> -             unmapped_len += pgsize;
> +             iova += pgsize * pgcount;
> +             unmapped_len += pgsize * pgcount;
>       }
>  
>       panfrost_mmu_flush_range(pfdev, mapping->mmu,

Reply via email to