We are removing obj->mm.lock, and need to take the reservation lock
before we can pin pages. Move the page pinning into the helpers, and
merge the gtt pwrite/pread preparation and cleanup paths.

The lock fence is also removed; it would conflict with the fence
annotations, because of memory allocations done when pagefaulting
inside copy_*_user.

Signed-off-by: Maarten Lankhorst <maarten.lankho...@linux.intel.com>
---
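A note on the locking pattern this converts to, since it recurs in every
path below: pages are pinned only while the reservation (dma_resv) lock is
held, and ww-mutex contention is unwound through the acquire context
instead of obj->mm.lock. A minimal sketch, not part of the patch;
do_locked_work() is a hypothetical stand-in for a caller's body, while the
other helpers are the ones used in the diff:

static int sketch_pin_under_resv(struct drm_i915_gem_object *obj)
{
        struct i915_gem_ww_ctx ww;
        int ret;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        ret = i915_gem_object_lock(obj, &ww);
        if (ret)
                goto err_ww;

        /* Pinning the backing pages now requires the reservation lock. */
        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err_ww;

        ret = do_locked_work(obj); /* hypothetical caller body */

        i915_gem_object_unpin_pages(obj);
err_ww:
        if (ret == -EDEADLK) {
                /* Drop all held locks, block on the contended one, retry. */
                ret = i915_gem_ww_ctx_backoff(&ww);
                if (!ret)
                        goto retry;
        }
        /* i915_gem_ww_ctx_fini() unlocks anything still held via ww. */
        i915_gem_ww_ctx_fini(&ww);
        return ret;
}

i915_gem_gtt_prepare()/i915_gem_gtt_cleanup() below wrap this shape once,
so the gtt pread and pwrite paths share a single setup and unwind.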
 drivers/gpu/drm/i915/Makefile              |   1 -
 drivers/gpu/drm/i915/gem/i915_gem_fence.c  |  95 --------
 drivers/gpu/drm/i915/gem/i915_gem_object.h |   5 -
 drivers/gpu/drm/i915/i915_gem.c            | 247 +++++++++++----------
 4 files changed, 133 insertions(+), 215 deletions(-)
 delete mode 100644 drivers/gpu/drm/i915/gem/i915_gem_fence.c

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e5574e506a5c..58d129b5a65a 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -134,7 +134,6 @@ gem-y += \
 	gem/i915_gem_dmabuf.o \
 	gem/i915_gem_domain.o \
 	gem/i915_gem_execbuffer.o \
-	gem/i915_gem_fence.o \
 	gem/i915_gem_internal.o \
 	gem/i915_gem_object.o \
 	gem/i915_gem_object_blt.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
deleted file mode 100644
index 8ab842c80f99..000000000000
--- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2019 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "i915_gem_object.h"
-
-struct stub_fence {
-	struct dma_fence dma;
-	struct i915_sw_fence chain;
-};
-
-static int __i915_sw_fence_call
-stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
-{
-	struct stub_fence *stub = container_of(fence, typeof(*stub), chain);
-
-	switch (state) {
-	case FENCE_COMPLETE:
-		dma_fence_signal(&stub->dma);
-		break;
-
-	case FENCE_FREE:
-		dma_fence_put(&stub->dma);
-		break;
-	}
-
-	return NOTIFY_DONE;
-}
-
-static const char *stub_driver_name(struct dma_fence *fence)
-{
-	return DRIVER_NAME;
-}
-
-static const char *stub_timeline_name(struct dma_fence *fence)
-{
-	return "object";
-}
-
-static void stub_release(struct dma_fence *fence)
-{
-	struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
-
-	i915_sw_fence_fini(&stub->chain);
-
-	BUILD_BUG_ON(offsetof(typeof(*stub), dma));
-	dma_fence_free(&stub->dma);
-}
-
-static const struct dma_fence_ops stub_fence_ops = {
-	.get_driver_name = stub_driver_name,
-	.get_timeline_name = stub_timeline_name,
-	.release = stub_release,
-};
-
-struct dma_fence *
-i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
-{
-	struct stub_fence *stub;
-
-	assert_object_held(obj);
-
-	stub = kmalloc(sizeof(*stub), GFP_KERNEL);
-	if (!stub)
-		return NULL;
-
-	i915_sw_fence_init(&stub->chain, stub_notify);
-	dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
-		       0, 0);
-
-	if (i915_sw_fence_await_reservation(&stub->chain,
-					    obj->base.resv, NULL, true,
-					    i915_fence_timeout(to_i915(obj->base.dev)),
-					    I915_FENCE_GFP) < 0)
-		goto err;
-
-	dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
-
-	return &stub->dma;
-
-err:
-	stub_release(&stub->dma);
-	return NULL;
-}
-
-void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
-				  struct dma_fence *fence)
-{
-	struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
-
-	i915_sw_fence_commit(&stub->chain);
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index c48df27d66f6..107b98a82e44 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -157,11 +157,6 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 	dma_resv_unlock(obj->base.resv);
 }
 
-struct dma_fence *
-i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
-				  struct dma_fence *fence);
-
 static inline void
 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 929a8f20cca4..4b5afb85efd1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -184,23 +184,38 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_pwrite *args,
 		     struct drm_file *file)
 {
-	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+	void *vaddr;
 	char __user *user_data = u64_to_user_ptr(args->data_ptr);
+	int ret;
+
+	ret = i915_gem_object_lock_interruptible(obj, NULL);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_pin_pages(obj);
+	i915_gem_object_unlock(obj);
+	if (ret)
+		return ret;
+
+	vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
 
 	/*
 	 * We manually control the domain here and pretend that it
 	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
 	 */
 	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
-	if (copy_from_user(vaddr, user_data, args->size))
-		return -EFAULT;
+	if (copy_from_user(vaddr, user_data, args->size)) {
+		ret = -EFAULT;
+		goto err;
+	}
 
 	drm_clflush_virt_range(vaddr, args->size);
 	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
-	return 0;
+err:
+	i915_gem_object_unpin_pages(obj);
+	return ret;
 }
 
 static int
@@ -330,7 +345,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 {
 	unsigned int needs_clflush;
 	unsigned int idx, offset;
-	struct dma_fence *fence;
 	char __user *user_data;
 	u64 remain;
 	int ret;
@@ -339,19 +353,17 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err_unlock;
+
 	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		return ret;
-	}
+	if (ret)
+		goto err_unpin;
 
-	fence = i915_gem_object_lock_fence(obj);
 	i915_gem_object_finish_access(obj);
 	i915_gem_object_unlock(obj);
-	if (!fence)
-		return -ENOMEM;
-
 	remain = args->size;
 	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = offset_in_page(args->offset);
@@ -369,7 +381,13 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 		offset = 0;
 	}
 
-	i915_gem_object_unlock_fence(obj, fence);
+	i915_gem_object_unpin_pages(obj);
+	return ret;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	i915_gem_object_unlock(obj);
 	return ret;
 }
 
@@ -397,52 +415,102 @@ gtt_user_read(struct io_mapping *mapping,
 	return unwritten;
 }
 
-static int
-i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
-		   const struct drm_i915_gem_pread *args)
+static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
+					     struct drm_mm_node *node,
+					     bool write)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_ggtt *ggtt = &i915->ggtt;
-	intel_wakeref_t wakeref;
-	struct drm_mm_node node;
-	struct dma_fence *fence;
-	void __user *user_data;
 	struct i915_vma *vma;
-	u64 remain, offset;
+	struct i915_gem_ww_ctx ww;
 	int ret;
 
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+	i915_gem_ww_ctx_init(&ww, true);
+retry:
 	vma = ERR_PTR(-ENODEV);
+	ret = i915_gem_object_lock(obj, &ww);
+	if (ret)
+		goto err_ww;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto err_ww;
+
 	if (!i915_gem_object_is_tiled(obj))
-		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-					       PIN_MAPPABLE |
-					       PIN_NONBLOCK /* NOWARN */ |
-					       PIN_NOEVICT);
-	if (!IS_ERR(vma)) {
-		node.start = i915_ggtt_offset(vma);
-		node.flags = 0;
+		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
+						  PIN_MAPPABLE |
+						  PIN_NONBLOCK /* NOWARN */ |
+						  PIN_NOEVICT);
+	if (vma == ERR_PTR(-EDEADLK)) {
+		ret = -EDEADLK;
+		goto err_ww;
+	} else if (!IS_ERR(vma)) {
+		node->start = i915_ggtt_offset(vma);
+		node->flags = 0;
 	} else {
-		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
+		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
 		if (ret)
-			goto out_rpm;
-		GEM_BUG_ON(!drm_mm_node_allocated(&node));
+			goto err_ww;
+		GEM_BUG_ON(!drm_mm_node_allocated(node));
+		vma = NULL;
 	}
 
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (ret)
-		goto out_unpin;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	ret = i915_gem_object_pin_pages(obj);
 	if (ret) {
-		i915_gem_object_unlock(obj);
-		goto out_unpin;
+		if (drm_mm_node_allocated(node)) {
+			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+			remove_mappable_node(ggtt, node);
+		} else {
+			i915_vma_unpin(vma);
+		}
 	}
 
-	fence = i915_gem_object_lock_fence(obj);
-	i915_gem_object_unlock(obj);
-	if (!fence) {
-		ret = -ENOMEM;
-		goto out_unpin;
+err_ww:
+	if (ret == -EDEADLK) {
+		ret = i915_gem_ww_ctx_backoff(&ww);
+		if (!ret)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+
+	return ret ? ERR_PTR(ret) : vma;
+}
+
+static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
+				 struct drm_mm_node *node,
+				 struct i915_vma *vma)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+
+	i915_gem_object_unpin_pages(obj);
+	if (drm_mm_node_allocated(node)) {
+		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
+		remove_mappable_node(ggtt, node);
+	} else {
+		i915_vma_unpin(vma);
+	}
+}
+
+static int
+i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
+		   const struct drm_i915_gem_pread *args)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &i915->ggtt;
+	intel_wakeref_t wakeref;
+	struct drm_mm_node node;
+	void __user *user_data;
+	struct i915_vma *vma;
+	u64 remain, offset;
+	int ret = 0;
+
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+	vma = i915_gem_gtt_prepare(obj, &node, false);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto out_rpm;
 	}
 
 	user_data = u64_to_user_ptr(args->data_ptr);
@@ -479,14 +547,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 		offset += page_length;
 	}
 
-	i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
-	if (drm_mm_node_allocated(&node)) {
-		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
-		remove_mappable_node(ggtt, &node);
-	} else {
-		i915_vma_unpin(vma);
-	}
+	i915_gem_gtt_cleanup(obj, &node, vma);
 out_rpm:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	return ret;
@@ -538,15 +599,10 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto out;
-
 	ret = i915_gem_shmem_pread(obj, args);
 	if (ret == -EFAULT || ret == -ENODEV)
 		ret = i915_gem_gtt_pread(obj, args);
 
-	i915_gem_object_unpin_pages(obj);
 out:
 	i915_gem_object_put(obj);
 	return ret;
@@ -594,11 +650,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
 	intel_wakeref_t wakeref;
 	struct drm_mm_node node;
-	struct dma_fence *fence;
 	struct i915_vma *vma;
 	u64 remain, offset;
 	void __user *user_data;
-	int ret;
+	int ret = 0;
 
 	if (i915_gem_object_has_struct_page(obj)) {
 		/*
@@ -616,37 +671,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 		wakeref = intel_runtime_pm_get(rpm);
 	}
 
-	vma = ERR_PTR(-ENODEV);
-	if (!i915_gem_object_is_tiled(obj))
-		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-					       PIN_MAPPABLE |
-					       PIN_NONBLOCK /* NOWARN */ |
-					       PIN_NOEVICT);
-	if (!IS_ERR(vma)) {
-		node.start = i915_ggtt_offset(vma);
-		node.flags = 0;
-	} else {
-		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
-		if (ret)
-			goto out_rpm;
-		GEM_BUG_ON(!drm_mm_node_allocated(&node));
-	}
-
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (ret)
-		goto out_unpin;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		goto out_unpin;
-	}
-
-	fence = i915_gem_object_lock_fence(obj);
-	i915_gem_object_unlock(obj);
-	if (!fence) {
-		ret = -ENOMEM;
-		goto out_unpin;
+	vma = i915_gem_gtt_prepare(obj, &node, true);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto out_rpm;
 	}
 
 	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
@@ -695,14 +723,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
-	i915_gem_object_unlock_fence(obj, fence);
-out_unpin:
-	if (drm_mm_node_allocated(&node)) {
-		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
-		remove_mappable_node(ggtt, &node);
-	} else {
-		i915_vma_unpin(vma);
-	}
+	i915_gem_gtt_cleanup(obj, &node, vma);
 out_rpm:
 	intel_runtime_pm_put(rpm, wakeref);
 	return ret;
@@ -742,7 +763,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	unsigned int partial_cacheline_write;
 	unsigned int needs_clflush;
 	unsigned int offset, idx;
-	struct dma_fence *fence;
 	void __user *user_data;
 	u64 remain;
 	int ret;
@@ -751,19 +771,17 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err_unlock;
+
 	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
-	if (ret) {
-		i915_gem_object_unlock(obj);
-		return ret;
-	}
+	if (ret)
+		goto err_unpin;
 
-	fence = i915_gem_object_lock_fence(obj);
 	i915_gem_object_finish_access(obj);
 	i915_gem_object_unlock(obj);
-	if (!fence)
-		return -ENOMEM;
-
 	/* If we don't overwrite a cacheline completely we need to be
 	 * careful to have up-to-date data by first clflushing. Don't
 	 * overcomplicate things and flush the entire patch.
@@ -791,8 +809,14 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	}
 
 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
-	i915_gem_object_unlock_fence(obj, fence);
+	i915_gem_object_unpin_pages(obj);
+	return ret;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err_unlock:
+	i915_gem_object_unlock(obj);
 
 	return ret;
 }
 
@@ -849,10 +873,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		goto err;
-
 	ret = -EFAULT;
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
@@ -875,7 +895,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 			ret = i915_gem_phys_pwrite(obj, args, file);
 	}
 
-	i915_gem_object_unpin_pages(obj);
 err:
 	i915_gem_object_put(obj);
 	return ret;
-- 
2.28.0