From: Venkata Sandeep Dhanalakota
As PCIe is a non-coherent link, do not allow direct access to buffer
objects across the PCIe link for the SVM case. Upon CPU access (mmap, pread),
migrate the buffer object to host memory.
Cc: Joonas Lahtinen
Cc: Jon Bloomfield
Cc: Daniel Vetter
Cc: Sudeep Dutt
Cc: Niranjana Vishwanathapura
Signed-off-by: Venkata Sandeep Dhanalakota
---
drivers/gpu/drm/i915/gem/i915_gem_mman.c | 10
drivers/gpu/drm/i915/gem/i915_gem_object.c | 29 +-
drivers/gpu/drm/i915/gem/i915_gem_object.h | 3 +++
drivers/gpu/drm/i915/intel_memory_region.c | 4 ---
drivers/gpu/drm/i915/intel_memory_region.h | 4 +++
5 files changed, 40 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 879fff8adc48..fc1a11f0bec9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -14,6 +14,7 @@
#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
+#include "i915_gem_lmem.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_trace.h"
@@ -295,6 +296,15 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
if (i915_gem_object_is_readonly(obj) && write)
return VM_FAULT_SIGBUS;
+ /* Implicitly migrate BO to SMEM if it is SVM mapped */
+ if (i915_gem_object_svm_mapped(obj) && i915_gem_object_is_lmem(obj)) {
+ u32 regions[] = { REGION_MAP(INTEL_MEMORY_SYSTEM, 0) };
+
+ ret = i915_gem_object_migrate_region(obj, regions, 1);
+ if (ret)
+ goto err;
+ }
+
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 025c26266801..003d81c171d2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -517,12 +517,17 @@ __region_id(u32 region)
return INTEL_REGION_UNKNOWN;
}
+bool
+i915_gem_object_svm_mapped(struct drm_i915_gem_object *obj)
+{
+ return false;
+}
+
static int i915_gem_object_region_select(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object_param *args,
struct drm_file *file,
struct drm_i915_gem_object *obj)
{
- struct intel_context *ce = dev_priv->engine[BCS0]->kernel_context;
u32 __user *uregions = u64_to_user_ptr(args->data);
u32 uregions_copy[INTEL_REGION_UNKNOWN];
int i, ret;
@@ -542,16 +547,28 @@ static int i915_gem_object_region_select(struct drm_i915_private *dev_priv,
++uregions;
}
+ ret = i915_gem_object_migrate_region(obj, uregions_copy,
+args->size);
+
+ return ret;
+}
+
+int i915_gem_object_migrate_region(struct drm_i915_gem_object *obj,
+ u32 *regions, int size)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct intel_context *ce = dev_priv->engine[BCS0]->kernel_context;
+ int i, ret;
+
mutex_lock(&dev_priv->drm.struct_mutex);
ret = i915_gem_object_prepare_move(obj);
if (ret) {
DRM_ERROR("Cannot set memory region, object in use\n");
- goto err;
+ goto err;
}
- for (i = 0; i < args->size; i++) {
- u32 region = uregions_copy[i];
- enum intel_region_id id = __region_id(region);
+ for (i = 0; i < size; i++) {
+ enum intel_region_id id = __region_id(regions[i]);
if (id == INTEL_REGION_UNKNOWN) {
ret = -EINVAL;
@@ -561,7 +578,7 @@ static int i915_gem_object_region_select(struct drm_i915_private *dev_priv,
ret = i915_gem_object_migrate(obj, ce, id);
if (!ret) {
if (!i915_gem_object_has_pages(obj) &&
- MEMORY_TYPE_FROM_REGION(region) ==
+ MEMORY_TYPE_FROM_REGION(regions[i]) ==
INTEL_MEMORY_LOCAL) {
/*
* TODO: this should be part of get_pages(),
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 87e6b6f18d91..6d8ca3f0ccf7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -47,6 +47,9 @@ int i915_gem_object_prepare_move(struct drm_i915_gem_object *obj);
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
struct intel_context *ce,
enum intel_region_id id);
+bool i915_gem_object_