The end goal is to have userspace tell the kernel which buffers will require CPU access. However, if we ever reach the CPU fault handler and the current resource is not mappable, then we should attempt to migrate the buffer to the mappable portion of LMEM, or even to system memory, if the allowable placements permit it.
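
As an illustration of the expected flow (not part of this patch, and assuming the existing I915_GEM_CREATE_EXT_MEMORY_REGIONS and I915_MMAP_OFFSET_FIXED uapi), userspace might create an object with both lmem and smem placements and then touch it through a CPU mapping; a fault against a non-mappable lmem resource would then take the migration path added below:

    /*
     * Illustrative userspace sketch only; error handling trimmed, and
     * the single lmem instance (memory_instance = 0) is an assumption.
     */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    static void touch_lmem_bo(int fd, uint64_t size)
    {
        /* Allow both device-local and system memory placements. */
        struct drm_i915_gem_memory_class_instance regions[] = {
            { I915_MEMORY_CLASS_DEVICE, 0 },
            { I915_MEMORY_CLASS_SYSTEM, 0 },
        };
        struct drm_i915_gem_create_ext_memory_regions ext = {
            .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
            .num_regions = 2,
            .regions = (uintptr_t)regions,
        };
        struct drm_i915_gem_create_ext create = {
            .size = size,
            .extensions = (uintptr_t)&ext,
        };
        struct drm_i915_gem_mmap_offset mmo = {
            .flags = I915_MMAP_OFFSET_FIXED,
        };
        uint8_t *ptr;

        ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
        mmo.handle = create.handle;
        ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmo);

        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, mmo.offset);
        /*
         * The first CPU access faults; if the object currently sits in
         * the non-mappable portion of lmem, the fault handler migrates
         * it to a CPU-accessible placement first.
         */
        ptr[0] = 0xc5;
    }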
Signed-off-by: Matthew Auld <matthew.a...@intel.com>
Cc: Thomas Hellström <thomas.hellst...@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 54 ++++++++++++++++++++++---
 1 file changed, 48 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 3790e569d6a9..780513e98fdc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -636,11 +636,24 @@ static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
 		i915_ttm_purge(obj);
 }
 
+static bool i915_ttm_resource_mappable(struct ttm_resource *res)
+{
+	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
+
+	if (!i915_ttm_cpu_maps_iomem(res))
+		return true;
+
+	return bman_res->used_visible_size == bman_res->base.num_pages;
+}
+
 static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
 {
 	if (!i915_ttm_cpu_maps_iomem(mem))
 		return 0;
 
+	if (!i915_ttm_resource_mappable(mem))
+		return -EINVAL;
+
 	mem->bus.caching = ttm_write_combined;
 	mem->bus.is_iomem = true;
 
@@ -779,14 +792,15 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
  * Gem forced migration using the i915_ttm_migrate() op, is allowed even
  * to regions that are not in the object's list of allowable placements.
  */
-static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
-			    struct intel_memory_region *mr)
+static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
+			      struct intel_memory_region *mr,
+			      unsigned int flags)
 {
 	struct ttm_place requested;
 	struct ttm_placement placement;
 	int ret;
 
-	i915_ttm_place_from_region(mr, &requested, obj->flags);
+	i915_ttm_place_from_region(mr, &requested, flags);
 	placement.num_placement = 1;
 	placement.num_busy_placement = 1;
 	placement.placement = &requested;
@@ -809,6 +823,12 @@ static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
+			    struct intel_memory_region *mr)
+{
+	return __i915_ttm_migrate(obj, mr, obj->flags);
+}
+
 static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
 			       struct sg_table *st)
 {
@@ -953,9 +973,6 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 	if (!obj)
 		return VM_FAULT_SIGBUS;
 
-	if (obj->flags & I915_BO_ALLOC_GPU_ONLY)
-		return -EINVAL;
-
 	/* Sanity check that we allow writing into this object */
 	if (unlikely(i915_gem_object_is_readonly(obj) &&
 		     area->vm_flags & VM_WRITE))
@@ -970,6 +987,31 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
+	if (!i915_ttm_resource_mappable(bo->resource)) {
+		int err = -ENODEV;
+		int i;
+
+		for (i = 0; i < obj->mm.n_placements; i++) {
+			struct intel_memory_region *mr = obj->mm.placements[i];
+			unsigned int flags;
+
+			if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
+				continue;
+
+			flags = obj->flags;
+			flags &= ~I915_BO_ALLOC_TOPDOWN;
+			err = __i915_ttm_migrate(obj, mr, flags);
+			if (!err)
+				break;
+		}
+
+		if (err) {
+			drm_dbg(dev, "Unable to make resource CPU accessible\n");
+			dma_resv_unlock(bo->base.resv);
+			return VM_FAULT_SIGBUS;
+		}
+	}
+
 	if (drm_dev_enter(dev, &idx)) {
 		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 					       TTM_BO_VM_NUM_PREFAULT);
-- 
2.34.1