This will ensure we always hold the required lock when calling those
functions.
---
 drivers/gpu/drm/nouveau/nouveau_bo.c      |  2 ++
 drivers/gpu/drm/nouveau/nouveau_display.c | 17 +++++++++++++----
 2 files changed, 15 insertions(+), 4 deletions(-)
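For reviewers, a minimal sketch of the calling pattern the new assertion
enforces: the buffer's reservation lock must be taken around
nouveau_bo_fence(). example_attach_fence() below is a hypothetical helper
used only for illustration and is not part of this patch.

	/* Sketch only: hold the reservation lock around nouveau_bo_fence(),
	 * which the new lockdep_assert_held() now verifies. */
	static void example_attach_fence(struct nouveau_bo *nvbo,
					 struct nouveau_fence *fence)
	{
		int ret;

		/* take the bo's reservation; same arguments as used in this patch */
		ret = ttm_bo_reserve(&nvbo->bo, true, false, false, NULL);
		if (ret)
			return;

		nouveau_bo_fence(nvbo, fence);	/* assertion is satisfied here */

		ttm_bo_unreserve(&nvbo->bo);
	}
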
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b6dc85c614be..33eb7164525a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1431,6 +1431,8 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
 	struct nouveau_fence *old_fence = NULL;
 
+	lockdep_assert_held(&nvbo->bo.resv->lock.base);
+
 	spin_lock(&nvbo->bo.bdev->fence_lock);
 	old_fence = nvbo->bo.sync_obj;
 	nvbo->bo.sync_obj = new_fence;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index da764a4ed958..61b8c3375135 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -716,6 +716,9 @@
 	}
 
 	mutex_lock(&chan->cli->mutex);
+	ret = ttm_bo_reserve(&new_bo->bo, true, false, false, NULL);
+	if (ret)
+		goto fail_unpin;
 
 	/* synchronise rendering channel with the kernel's channel */
 	spin_lock(&new_bo->bo.bdev->fence_lock);
@@ -723,12 +726,18 @@
 	spin_unlock(&new_bo->bo.bdev->fence_lock);
 	ret = nouveau_fence_sync(fence, chan);
 	nouveau_fence_unref(&fence);
-	if (ret)
+	if (ret) {
+		ttm_bo_unreserve(&new_bo->bo);
 		goto fail_unpin;
+	}
 
-	ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
-	if (ret)
-		goto fail_unpin;
+	if (new_bo != old_bo) {
+		ttm_bo_unreserve(&new_bo->bo);
+
+		ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
+		if (ret)
+			goto fail_unpin;
+	}
 
 	/* Initialize a page flip struct */
 	*s = (struct nouveau_page_flip_state)