On Thu, Jan 15, 2026 at 04:28:50PM +0800, Jianping Li wrote:
> From: Ekansh Gupta <[email protected]>
> 
> fastrpc_req_munmap_impl() is called to unmap any buffer. The buffer is
> removed from the list only after it has been unmapped from the DSP. This
> can create a race condition if another thread removes the entry from the
> list while the unmap operation is still in progress. Remove the entry
> before calling the unmap operation.
> 
> Fixes: 2419e55e532de ("misc: fastrpc: add mmap/unmap support")
> Cc: [email protected]
> Co-developed-by: Ekansh Gupta <[email protected]>
> Signed-off-by: Ekansh Gupta <[email protected]>
> Signed-off-by: Jianping Li <[email protected]>
> ---
>  drivers/misc/fastrpc.c | 28 ++++++++++++++++++++--------
>  1 file changed, 20 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
> index 4f12fa5a05aa..833c265add5e 100644
> --- a/drivers/misc/fastrpc.c
> +++ b/drivers/misc/fastrpc.c
> @@ -202,6 +202,8 @@ struct fastrpc_buf {
>       /* mmap support */
>       struct list_head node; /* list of user requested mmaps */
>       uintptr_t raddr;
> +     /* Lock for buf->node */
> +     spinlock_t *list_lock;

Why do you need to lock this? Isn't fl->lock enough?

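Something like the untested sketch below is what I have in mind: both
fastrpc_req_mmap() and fastrpc_req_munmap() already have the fastrpc_user
at hand, so the existing fl->lock can protect buf->node directly and
struct fastrpc_buf does not need to grow a list_lock pointer (the names
are the ones from your patch, just without the new member):

	err = fastrpc_req_munmap_impl(fl, buf);
	if (err) {
		/* DSP unmap failed, put the buffer back on the owner's list */
		spin_lock(&fl->lock);
		list_add_tail(&buf->node, &fl->mmaps);
		spin_unlock(&fl->lock);
	}

	return err;

The same applies to the err_copy path in fastrpc_req_mmap().
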
>  };
>  
>  struct fastrpc_dma_buf_attachment {
> @@ -441,6 +443,7 @@ static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
>       buf->size = size;
>       buf->dev = dev;
>       buf->raddr = 0;
> +     buf->list_lock = &fl->lock;
>  
>       buf->virt = dma_alloc_coherent(dev, buf->size, &buf->dma_addr,
>                                      GFP_KERNEL);
> @@ -1865,9 +1868,6 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *
>                                     &args[0]);
>       if (!err) {
>               dev_dbg(dev, "unmmap\tpt 0x%09lx OK\n", buf->raddr);
> -             spin_lock(&fl->lock);
> -             list_del(&buf->node);
> -             spin_unlock(&fl->lock);
>               fastrpc_buf_free(buf);
>       } else {
>               dev_err(dev, "unmmap\tpt 0x%09lx ERROR\n", buf->raddr);
> @@ -1881,6 +1881,7 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
>       struct fastrpc_buf *buf = NULL, *iter, *b;
>       struct fastrpc_req_munmap req;
>       struct device *dev = fl->sctx->dev;
> +     int err;
>  
>       if (copy_from_user(&req, argp, sizeof(req)))
>               return -EFAULT;
> @@ -1888,6 +1889,7 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
>       spin_lock(&fl->lock);
>       list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
>               if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
> +                     list_del(&iter->node);
>                       buf = iter;
>                       break;
>               }
> @@ -1900,7 +1902,14 @@ static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
>               return -EINVAL;
>       }
>  
> -     return fastrpc_req_munmap_impl(fl, buf);
> +     err = fastrpc_req_munmap_impl(fl, buf);
> +     if (err) {
> +             spin_lock(buf->list_lock);
> +             list_add_tail(&buf->node, &fl->mmaps);
> +             spin_unlock(buf->list_lock);
> +     }
> +
> +     return err;
>  }
>  
>  static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
> @@ -1985,20 +1994,23 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
>               }
>       }
>  
> -     spin_lock(&fl->lock);
> +     spin_lock(buf->list_lock);
>       list_add_tail(&buf->node, &fl->mmaps);
> -     spin_unlock(&fl->lock);
> +     spin_unlock(buf->list_lock);
>  
>       if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
>               err = -EFAULT;
> -             goto err_assign;
> +             goto err_copy;
>       }
>  
>       dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
>               buf->raddr, buf->size);
>  
>       return 0;
> -
> +err_copy:
> +     spin_lock(buf->list_lock);
> +     list_del(&buf->node);
> +     spin_unlock(buf->list_lock);
>  err_assign:
>       fastrpc_req_munmap_impl(fl, buf);
>  
> -- 
> 2.43.0
> 

-- 
With best wishes
Dmitry
