When backing up or restoring the contents of pinned objects at suspend /
resume time we need to allocate a new object as the backup. Add a function
to facilitate copies between the backup and the original. Some data needs
to be copied before the migration context is ready for operation, so make
sure accelerated copies can be disabled.
Signed-off-by: Thomas Hellström
---
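[Note, not part of the commit message: a rough sketch of the intended call
pattern. The backup object and the i915_ttm_backup_sketch() wrapper below are
hypothetical; this patch only adds i915_gem_obj_copy_ttm(). Passing
allow_accel = false covers copies that must happen before the migration
context is ready.]

/* Hypothetical caller sketch: back up a pinned object at suspend time. */
static int i915_ttm_backup_sketch(struct drm_i915_gem_object *obj,
				  struct drm_i915_gem_object *backup,
				  bool migrate_ready)
{
	/*
	 * Early in the suspend sequence the migration context is not yet
	 * set up, so force the TTM memcpy path with allow_accel = false.
	 */
	return i915_gem_obj_copy_ttm(backup, obj,
				     migrate_ready /* allow_accel */,
				     true /* intr */);
}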
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
 drivers/gpu/drm/i915/gem/i915_gem_ttm.h |  4 ++++
 2 files changed, 64 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 59ca53a3ef6a..df2dcbad1eb9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -432,6 +432,7 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
bool clear,
struct ttm_resource *dst_mem,
+ struct ttm_tt *dst_ttm,
struct sg_table *dst_st)
{
struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
@@ -441,14 +442,14 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct sg_table *src_st;
struct i915_request *rq;
- struct ttm_tt *ttm = bo->ttm;
+ struct ttm_tt *src_ttm = bo->ttm;
enum i915_cache_level src_level, dst_level;
int ret;
if (!i915->gt.migrate.context)
return -EINVAL;
- dst_level = i915_ttm_cache_level(i915, dst_mem, ttm);
+ dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
if (clear) {
if (bo->type == ttm_bo_type_kernel)
return -EINVAL;
@@ -465,10 +466,10 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
}
intel_engine_pm_put(i915->gt.migrate.context->engine);
} else {
- src_st = src_man->use_tt ? i915_ttm_tt_get_st(ttm) :
- obj->ttm.cached_io_st;
+ src_st = src_man->use_tt ? i915_ttm_tt_get_st(src_ttm) :
+ obj->ttm.cached_io_st;
- src_level = i915_ttm_cache_level(i915, bo->resource, ttm);
+ src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
intel_engine_pm_get(i915->gt.migrate.context->engine);
ret = intel_context_migrate_copy(i915->gt.migrate.context,
NULL, src_st->sgl, src_level,
@@ -488,11 +489,14 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
struct ttm_resource *dst_mem,
- struct sg_table *dst_st)
+ struct ttm_tt *dst_ttm,
+ struct sg_table *dst_st,
+ bool allow_accel)
{
- int ret;
+ int ret = -EINVAL;
- ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_st);
+ if (allow_accel)
+ ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, dst_st);
if (ret) {
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct intel_memory_region *dst_reg, *src_reg;
@@ -507,7 +511,7 @@ static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
GEM_BUG_ON(!dst_reg || !src_reg);
dst_iter = !cpu_maps_iomem(dst_mem) ?
- ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
+ ttm_kmap_iter_tt_init(&_dst_iter.tt, dst_ttm) :
ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
dst_st, dst_reg->region.start);
@@ -562,7 +566,7 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
if (!(clear && ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)))
- __i915_ttm_move(bo, clear, dst_mem, dst_st);
+ __i915_ttm_move(bo, clear, dst_mem, bo->ttm, dst_st, true);
ttm_bo_move_sync_cleanup(bo, dst_mem);
i915_ttm_adjust_domains_after_move(obj);
@@ -973,3 +977,50 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915,
intel_memory_region_set_name(mr, "system-ttm");
return mr;
}
+
+/**
+ * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
+ * another
+ * @dst: The destination object
+ * @src: The source object
+ * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
+ * @intr: Whether to perform waits interruptibly.
+ *
+ * Note: The caller is responsible for assuring that the underlying
+ * TTM objects are populated if needed and locked.
+ *
+ * Return: Zero on success. Negative error code on error. If @intr == true,
+ * then it may return -ERESTARTSYS or -EINTR.
+ */
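[The patch text is truncated here. For orientation, below is a minimal sketch
of how the documented helper can be assembled from __i915_ttm_move() above.
This is an assumption, not the patch's actual body: gpu_binds_iomem() and
ttm_bo_wait_ctx() are taken to exist in the surrounding file and the TTM core
rather than being shown in this excerpt.]

int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
			  struct drm_i915_gem_object *src,
			  bool allow_accel, bool intr)
{
	struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
	struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
	struct ttm_operation_ctx ctx = {
		.interruptible = intr,
	};
	struct sg_table *dst_st;
	int ret;

	assert_object_held(dst);
	assert_object_held(src);

	/* Sync waits for now; async moves would hand back a fence instead. */
	ret = ttm_bo_wait_ctx(dst_bo, &ctx);
	if (!ret)
		ret = ttm_bo_wait_ctx(src_bo, &ctx);
	if (ret)
		return ret;

	/* Pick the destination sg_table the same way i915_ttm_accel_move() does. */
	dst_st = gpu_binds_iomem(dst_bo->resource) ?
		dst->ttm.cached_io_st : i915_ttm_tt_get_st(dst_bo->ttm);

	/*
	 * Express the copy as a "move" of src's contents into dst's backing
	 * store; !allow_accel forces the TTM memcpy fallback.
	 */
	__i915_ttm_move(src_bo, false, dst_bo->resource, dst_bo->ttm,
			dst_st, allow_accel);
	return 0;
}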