On Mon, 12 Jan 2026 16:41:41 +0000
Steven Price <[email protected]> wrote:

> On 12/01/2026 14:32, Boris Brezillon wrote:
> > On Mon, 12 Jan 2026 12:15:13 +0000
> > Steven Price <[email protected]> wrote:
> >   
> >> On 09/01/2026 13:07, Boris Brezillon wrote:  
> >>> Defer pages allocation until their first access.
> >>>
> >>> Signed-off-by: Boris Brezillon <[email protected]>
> >>> ---
> >>>  drivers/gpu/drm/panthor/panthor_gem.c | 119 ++++++++++++++++----------
> >>>  1 file changed, 75 insertions(+), 44 deletions(-)
> >>>
> >>> diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
> >>> index 0e52c7a07c87..44f05bd957e7 100644
> >>> --- a/drivers/gpu/drm/panthor/panthor_gem.c
> >>> +++ b/drivers/gpu/drm/panthor/panthor_gem.c
> >>> @@ -600,15 +600,6 @@ static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *v
> >>>   if (is_cow_mapping(vma->vm_flags))
> >>>           return -EINVAL;
> >>>  
> >>> - dma_resv_lock(obj->resv, NULL);
> >>> - ret = panthor_gem_backing_get_pages_locked(bo);
> >>> - if (!ret)
> >>> -         ret = panthor_gem_prep_for_cpu_map_locked(bo);
> >>> - dma_resv_unlock(obj->resv);
> >>> -
> >>> - if (ret)
> >>> -         return ret;
> >>> -
> >>>   vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
> >>>   vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
> >>>   if (should_map_wc(bo))
> >>> @@ -628,82 +619,122 @@ static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
> >>>   return res;
> >>>  }
> >>>  
> >>> -static bool try_map_pmd(struct vm_fault *vmf, unsigned long addr, struct page *page)
> >>> +static vm_fault_t insert_page(struct vm_fault *vmf, struct page *page)
> >>>  {
> >>> + struct vm_area_struct *vma = vmf->vma;
> >>> + vm_fault_t ret;
> >>> +
> >>>  #ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
> >>>   unsigned long pfn = page_to_pfn(page);
> >>>   unsigned long paddr = pfn << PAGE_SHIFT;
> >>> - bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
> >>> + bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
> >>>  
> >>>   if (aligned &&
> >>>       pmd_none(*vmf->pmd) &&
> >>>       folio_test_pmd_mappable(page_folio(page))) {
> >>>           pfn &= PMD_MASK >> PAGE_SHIFT;
> >>> -         if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
> >>> -                 return true;
> >>> +         ret = vmf_insert_pfn_pmd(vmf, pfn, false);
> >>> +         if (ret == VM_FAULT_NOPAGE)
> >>> +                 return VM_FAULT_NOPAGE;
> >>>   }
> >>>  #endif
> >>>  
> >>> - return false;
> >>> + return vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
> >>>  }
> >>>  
> >>> -static vm_fault_t panthor_gem_fault(struct vm_fault *vmf)
> >>> +static vm_fault_t nonblocking_page_setup(struct vm_fault *vmf, pgoff_t 
> >>> page_offset)
> >>>  {
> >>>   struct vm_area_struct *vma = vmf->vma;
> >>> - struct drm_gem_object *obj = vma->vm_private_data;
> >>>   struct panthor_gem_object *bo = to_panthor_bo(vma->vm_private_data);
> >>> - loff_t num_pages = obj->size >> PAGE_SHIFT;
> >>>   vm_fault_t ret;
> >>> - pgoff_t page_offset;
> >>> - unsigned long pfn;
> >>>  
> >>> - /* Offset to faulty address in the VMA. */
> >>> - page_offset = vmf->pgoff - vma->vm_pgoff;
> >>> + if (!dma_resv_trylock(bo->base.resv))
> >>> +         return VM_FAULT_RETRY;
> >>>  
> >>> - dma_resv_lock(bo->base.resv, NULL);
> >>> + if (bo->backing.pages)
> >>> +         ret = insert_page(vmf, bo->backing.pages[page_offset]);
> >>> + else
> >>> +         ret = VM_FAULT_RETRY;
> >>>  
> >>> - if (page_offset >= num_pages ||
> >>> -     drm_WARN_ON_ONCE(obj->dev, !bo->backing.pages)) {
> >>> -         ret = VM_FAULT_SIGBUS;
> >>> -         goto out;
> >>> + dma_resv_unlock(bo->base.resv);
> >>> + return ret;
> >>> +}
> >>> +
> >>> +static vm_fault_t blocking_page_setup(struct vm_fault *vmf,
> >>> +                               struct panthor_gem_object *bo,
> >>> +                               pgoff_t page_offset, bool mmap_lock_held)
> >>> +{
> >>> + vm_fault_t ret;
> >>> + int err;
> >>> +
> >>> + if (vmf->flags & FAULT_FLAG_INTERRUPTIBLE) {
> >>> +         err = dma_resv_lock_interruptible(bo->base.resv, NULL);
> >>> +         if (err)
> >>> +                 return mmap_lock_held ? VM_FAULT_NOPAGE : 
> >>> VM_FAULT_RETRY;    
> >>
> >> I'm not sure about this. First FAULT_FLAG_INTERRUPTIBLE is currently
> >> only used by userfaultfd AFAICT.  
> > 
> > And GUP, which admittedly, only seems possible if one tries to map a
> > userpage in kernel space, and we don't support that (yet?).  
> 
> Well unless my grepping is wrong, GUP is the one place that sets it and
> userfaultfd is the one place that consumes it.

Indeed.

> 
> As special as Mali is, I'm not sure why we're doing something that no
> other driver is?

Fair enough, I'll drop this "if (interruptible)" block.

Reply via email to