The obj->dirty bit is a companion to the obj->active bits that were
moved to the obj->flags bitmask. Since we also update this bit inside
the i915_vma_move_to_active() hotpath, we can aid gcc by also moving
the obj->dirty bit to obj->flags bitmask.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
 drivers/gpu/drm/i915/i915_drv.h            | 22 +++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem.c            | 20 ++++++++++----------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  3 +--
 drivers/gpu/drm/i915/i915_gem_userptr.c    |  6 +++---
 drivers/gpu/drm/i915/i915_gpu_error.c      |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           |  6 +++---
 7 files changed, 40 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 75ad7b9c243c..086053fa2820 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -156,7 +156,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                   i915_gem_active_get_seqno(&obj->last_write,
                                             &obj->base.dev->struct_mutex),
                   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
-                  obj->dirty ? " dirty" : "",
+                  i915_gem_object_is_dirty(obj) ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
                seq_printf(m, " (name: %d)", obj->base.name);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 30ae14870c47..5613f2886ad3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2185,7 +2185,8 @@ struct drm_i915_gem_object {
         * This is set if the object has been written to since last bound
         * to the GTT
         */
-       unsigned int dirty:1;
+#define I915_BO_DIRTY_SHIFT (I915_BO_ACTIVE_REF_SHIFT + 1)
+#define I915_BO_DIRTY_BIT BIT(I915_BO_DIRTY_SHIFT)
 
        /**
         * Advice: are the backing pages purgeable?
@@ -2371,6 +2372,25 @@ i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
 
 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
 
+static inline bool
+i915_gem_object_is_dirty(const struct drm_i915_gem_object *obj)
+{
+       return obj->flags & I915_BO_DIRTY_BIT;
+}
+
+static inline void
+i915_gem_object_set_dirty(struct drm_i915_gem_object *obj)
+{
+       GEM_BUG_ON(obj->pages_pin_count == 0);
+       obj->flags |= I915_BO_DIRTY_BIT;
+}
+
+static inline void
+i915_gem_object_clear_dirty(struct drm_i915_gem_object *obj)
+{
+       obj->flags &= ~I915_BO_DIRTY_BIT;
+}
+
 static inline unsigned int
 i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e601b74c19f9..7b8abda541e6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -234,9 +234,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
        }
 
        if (obj->madv == I915_MADV_DONTNEED)
-               obj->dirty = 0;
+               i915_gem_object_clear_dirty(obj);
 
-       if (obj->dirty) {
+       if (i915_gem_object_is_dirty(obj)) {
                struct address_space *mapping = obj->base.filp->f_mapping;
                char *vaddr = obj->phys_handle->vaddr;
                int i;
@@ -260,7 +260,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
                        put_page(page);
                        vaddr += PAGE_SIZE;
                }
-               obj->dirty = 0;
+               i915_gem_object_clear_dirty(obj);
        }
 
        sg_free_table(obj->pages);
@@ -703,7 +703,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
                obj->cache_dirty = true;
 
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-       obj->dirty = 1;
+       i915_gem_object_set_dirty(obj);
        /* return with the pages pinned */
        return 0;
 
@@ -1156,7 +1156,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                goto out_unpin;
 
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-       obj->dirty = true;
+       i915_gem_object_set_dirty(obj);
 
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
@@ -2099,10 +2099,10 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
                i915_gem_object_save_bit_17_swizzle(obj);
 
        if (obj->madv == I915_MADV_DONTNEED)
-               obj->dirty = 0;
+               i915_gem_object_clear_dirty(obj);
 
        for_each_sgt_page(page, sgt_iter, obj->pages) {
-               if (obj->dirty)
+               if (i915_gem_object_is_dirty(obj))
                        set_page_dirty(page);
 
                if (obj->madv == I915_MADV_WILLNEED)
@@ -2110,7 +2110,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 
                put_page(page);
        }
-       obj->dirty = 0;
+       i915_gem_object_clear_dirty(obj);
 
        sg_free_table(obj->pages);
        kfree(obj->pages);
@@ -3272,7 +3272,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        if (write) {
                obj->base.read_domains = I915_GEM_DOMAIN_GTT;
                obj->base.write_domain = I915_GEM_DOMAIN_GTT;
-               obj->dirty = 1;
+               i915_gem_object_set_dirty(obj);
        }
 
        trace_i915_gem_object_change_domain(obj,
@@ -4751,7 +4751,7 @@ i915_gem_object_create_from_data(struct drm_device *dev,
        i915_gem_object_pin_pages(obj);
        sg = obj->pages;
        bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
-       obj->dirty = 1;         /* Backing store is now out of date */
+       i915_gem_object_set_dirty(obj); /* Backing store is now out of date */
        i915_gem_object_unpin_pages(obj);
 
        if (WARN_ON(bytes != size)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3a5f43960cb6..125fb38eff40 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1275,14 +1275,13 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-       obj->dirty = 1; /* be paranoid  */
-
        /* The order in which we add operations to the retirement queue is
         * vital here: mark_active adds to the start of the callback list,
         * such that subsequent callbacks are called first. Therefore we
         * add the active reference first and queue for it to be dropped
         * *last*.
         */
+       i915_gem_object_set_dirty(obj); /* be paranoid */
        i915_gem_object_set_active(obj, idx);
        i915_gem_active_set(&obj->last_read[idx], req);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index be54825ef3e8..581df2316ca5 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -679,18 +679,18 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
        __i915_gem_userptr_set_active(obj, false);
 
        if (obj->madv != I915_MADV_WILLNEED)
-               obj->dirty = 0;
+               i915_gem_object_clear_dirty(obj);
 
        i915_gem_gtt_finish_object(obj);
 
        for_each_sgt_page(page, sgt_iter, obj->pages) {
-               if (obj->dirty)
+               if (i915_gem_object_is_dirty(obj))
                        set_page_dirty(page);
 
                mark_page_accessed(page);
                put_page(page);
        }
-       obj->dirty = 0;
+       i915_gem_object_clear_dirty(obj);
 
        sg_free_table(obj->pages);
        kfree(obj->pages);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index dbf1dcbd4692..75136ae98d9c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -843,7 +843,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
        err->write_domain = obj->base.write_domain;
        err->fence_reg = vma->fence ? vma->fence->id : -1;
        err->tiling = i915_gem_object_get_tiling(obj);
-       err->dirty = obj->dirty;
+       err->dirty = i915_gem_object_is_dirty(obj);
        err->purgeable = obj->madv != I915_MADV_WILLNEED;
        err->userptr = obj->userptr.mm != NULL;
        err->cache_level = obj->cache_level;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6b49df4316f4..e3ad05da2f77 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -795,7 +795,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
        lrc_reg_state[CTX_RING_BUFFER_START+1] =
                i915_ggtt_offset(ce->ring->vma);
        ce->lrc_reg_state = lrc_reg_state;
-       ce->state->obj->dirty = true;
+       i915_gem_object_set_dirty(ce->state->obj);
 
        /* Invalidate GuC TLB. */
        if (i915.enable_guc_submission) {
@@ -1969,7 +1969,7 @@ populate_lr_context(struct i915_gem_context *ctx,
                DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
                return ret;
        }
-       ctx_obj->dirty = true;
+       i915_gem_object_set_dirty(ctx_obj);
 
        /* The second page of the context object contains some fields which must
         * be set up prior to the first execution. */
@@ -2197,7 +2197,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
                reg_state[CTX_RING_HEAD+1] = 0;
                reg_state[CTX_RING_TAIL+1] = 0;
 
-               ce->state->obj->dirty = true;
+               i915_gem_object_set_dirty(ce->state->obj);
                i915_gem_object_unpin_map(ce->state->obj);
 
                ce->ring->head = 0;
-- 
2.9.3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to