We cannot write requests to objects without struct pages, so escape
early if the requests are bound to objects that lack them.

Signed-off-by: Jonathan Cavitt <jonathan.cav...@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d3a771afb083e..e680f78ce34d1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -3313,6 +3313,13 @@ eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
        unsigned int i;
 
        for_each_batch_create_order(eb, i) {
+               /* Do not write requests to objects without struct pages. */
+               if (eb->batches[i]->vma &&
+                   !i915_gem_object_has_struct_page(eb->batches[i]->vma->obj)) {
+                       out_fence = ERR_PTR(-EINVAL);
+                       return out_fence;
+               }
+
                /* Allocate a request for this batch buffer nice and early. */
                eb->requests[i] = i915_request_create(eb_find_context(eb, i));
                if (IS_ERR(eb->requests[i])) {
-- 
2.25.1

Reply via email to