Don't mix the dma fence lock with the active_job lock. The issue was
uncovered when PREEMPT_RT is enabled.

Link: https://github.com/raspberrypi/linux/issues/7035
Fixes: fa6a20c87470 ("drm/v3d: Address race-condition between per-fd GPU stats and fd release")
Signed-off-by: Melissa Wen <[email protected]>
---
 drivers/gpu/drm/v3d/v3d_drv.h   | 1 +
 drivers/gpu/drm/v3d/v3d_fence.c | 2 +-
 drivers/gpu/drm/v3d/v3d_gem.c   | 1 +
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 0317f3d7452a..b1c41af87e17 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -62,6 +62,7 @@ struct v3d_queue_state {
        /* Currently active job for this queue */
        struct v3d_job *active_job;
        spinlock_t queue_lock;
+       spinlock_t fence_lock;
 };
 
 /* Performance monitor object. The perform lifetime is controlled by userspace
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 8f8471adae34..c82500a1df73 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -15,7 +15,7 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q)
        fence->dev = &v3d->drm;
        fence->queue = q;
        fence->seqno = ++queue->emit_seqno;
-       dma_fence_init(&fence->base, &v3d_fence_ops, &queue->queue_lock,
+       dma_fence_init(&fence->base, &v3d_fence_ops, &queue->fence_lock,
                       queue->fence_context, fence->seqno);
 
        return &fence->base;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index c77d90aa9b82..bb110d35f749 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -273,6 +273,7 @@ v3d_gem_init(struct drm_device *dev)
                seqcount_init(&queue->stats.lock);
 
                spin_lock_init(&queue->queue_lock);
+               spin_lock_init(&queue->fence_lock);
        }
 
        spin_lock_init(&v3d->mm_lock);
-- 
2.47.2

Reply via email to