This is primarily intended to simplify later patches that add various
backpointers to the structs, but in the meantime we can enjoy some
little syntactic conveniences.

Signed-off-by: Chris Wilson <[email protected]>
---
 drivers/gpu/drm/i915/i915_cmd_parser.c     |  12 +-
 drivers/gpu/drm/i915/i915_drv.h            |  12 +-
 drivers/gpu/drm/i915/i915_gem.c            |   7 +-
 drivers/gpu/drm/i915/i915_gem_context.c    |   6 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c      |   4 +-
 drivers/gpu/drm/i915/i915_irq.c            |  28 ++--
 drivers/gpu/drm/i915/intel_display.c       |   2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 132 ++++++++----------
 drivers/gpu/drm/i915/intel_mocs.c          |   2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 212 +++++++++++++----------------
 drivers/gpu/drm/i915/intel_uncore.c        |   4 +-
 12 files changed, 194 insertions(+), 229 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c 
b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a337f33bec5b..bd8b1aef4882 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -750,12 +750,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs 
*engine)
        int cmd_table_count;
        int ret;
 
-       if (!IS_GEN7(engine->dev))
+       if (!IS_GEN7(engine))
                return 0;
 
        switch (engine->id) {
        case RCS:
-               if (IS_HASWELL(engine->dev)) {
+               if (IS_HASWELL(engine)) {
                        cmd_tables = hsw_render_ring_cmds;
                        cmd_table_count =
                                ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +764,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs 
*engine)
                        cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
                }
 
-               if (IS_HASWELL(engine->dev)) {
+               if (IS_HASWELL(engine)) {
                        engine->reg_tables = hsw_render_reg_tables;
                        engine->reg_table_count = 
ARRAY_SIZE(hsw_render_reg_tables);
                } else {
@@ -780,7 +780,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs 
*engine)
                engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        case BCS:
-               if (IS_HASWELL(engine->dev)) {
+               if (IS_HASWELL(engine)) {
                        cmd_tables = hsw_blt_ring_cmds;
                        cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
                } else {
@@ -788,7 +788,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs 
*engine)
                        cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
                }
 
-               if (IS_HASWELL(engine->dev)) {
+               if (IS_HASWELL(engine)) {
                        engine->reg_tables = hsw_blt_reg_tables;
                        engine->reg_table_count = 
ARRAY_SIZE(hsw_blt_reg_tables);
                } else {
@@ -1035,7 +1035,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
        if (!engine->needs_cmd_parser)
                return false;
 
-       if (!USES_PPGTT(engine->dev))
+       if (!USES_PPGTT(engine))
                return false;
 
        return (i915.enable_cmd_parser == 1);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c6ead62bc411..6349f9bf7c78 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2035,7 +2035,7 @@ __guc_to_i915(const struct intel_guc *guc)
                for_each_if (((id__) = (engine__)->id, \
                              intel_engine_initialized(engine__)))
 
-/* Iterator over subset of engines selected by mask */
+/* Iterate over initialised engines selected by mask */
 #define for_each_engine_masked(engine__, ptr__, mask__) \
        for ((engine__) = &to_i915(ptr__)->engine[0]; \
             (engine__) < &to_i915(ptr__)->engine[I915_NUM_ENGINES]; \
@@ -2043,6 +2043,12 @@ __guc_to_i915(const struct intel_guc *guc)
                for_each_if (((mask__) & intel_engine_flag(engine__)) && \
                             intel_engine_initialized(engine__))
 
+static inline struct drm_i915_private *
+__engine_to_i915(const struct intel_engine_cs *engine)
+{
+       return __to_i915(engine->dev);
+}
+
 enum hdmi_force_audio {
        HDMI_AUDIO_OFF_DVI = -2,        /* no aux data for HDMI-DVI converter */
        HDMI_AUDIO_OFF,                 /* force turn off HDMI audio */
@@ -2517,6 +2523,10 @@ struct drm_i915_cmd_table {
                __p = __guc_to_i915((struct intel_guc *)p); \
        else if (__builtin_types_compatible_p(typeof(*p), struct 
drm_i915_gem_request)) \
                __p = __request_to_i915((struct drm_i915_gem_request *)(p)); \
+       else if (__builtin_types_compatible_p(typeof(*p), struct 
intel_engine_cs)) \
+               __p = __engine_to_i915((struct intel_engine_cs *)(p)); \
+       else if (__builtin_types_compatible_p(typeof(*p), struct 
intel_ringbuffer)) \
+               __p = __engine_to_i915(((struct intel_ringbuffer 
*)(p))->engine); \
        else \
                BUILD_BUG(); \
        __p; \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 237d5e884610..cf50e3386de4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1228,8 +1228,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                        struct intel_rps_client *rps)
 {
        struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(req);
        const bool irq_test_in_progress =
                ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & 
intel_engine_flag(engine);
        int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -2733,7 +2732,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
                         struct intel_context *ctx,
                         struct drm_i915_gem_request **req_out)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
        int ret;
@@ -2825,7 +2824,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        int err;
 
        if (ctx == NULL)
-               ctx = to_i915(engine->dev)->kernel_context;
+               ctx = to_i915(engine)->kernel_context;
        err = __i915_gem_request_alloc(engine, ctx, &req);
        return err ? ERR_PTR(err) : req;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c 
b/drivers/gpu/drm/i915/i915_gem_context.c
index e53889498696..ad20a0250218 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -107,11 +107,11 @@ static size_t get_context_alignment(struct drm_device 
*dev)
 
 static int get_context_size(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
        u32 reg;
 
-       switch (INTEL_INFO(dev)->gen) {
+       switch (INTEL_INFO(dev_priv)->gen) {
        case 6:
                reg = I915_READ(CXT_SIZE);
                ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
@@ -638,7 +638,7 @@ needs_pd_load_pre(struct intel_engine_cs *engine, struct 
intel_context *to)
        if (engine->id != RCS)
                return true;
 
-       if (INTEL_INFO(engine->dev)->gen < 8)
+       if (INTEL_INFO(engine)->gen < 8)
                return true;
 
        return false;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c 
b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9c582c815af2..fc5b19c71bfe 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -719,7 +719,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
        struct i915_address_space *vm;
        struct list_head ordered_vmas;
        struct list_head pinned_vmas;
-       bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+       bool has_fenced_gpu_access = INTEL_INFO(engine)->gen < 4;
        int retry;
 
        i915_gem_retire_requests_ring(engine);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c 
b/drivers/gpu/drm/i915/i915_gpu_error.c
index 89725c9efc25..9bff5e2cad43 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -958,7 +958,7 @@ static void i915_record_ring_state(struct drm_device *dev,
                                mmio = VEBOX_HWS_PGA_GEN7;
                                break;
                        }
-               } else if (IS_GEN6(engine->dev)) {
+               } else if (IS_GEN6(engine)) {
                        mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
                } else {
                        /* XXX: gen8 returns to sanity */
@@ -998,7 +998,7 @@ static void i915_gem_record_active_context(struct 
intel_engine_cs *engine,
                                           struct drm_i915_error_state *error,
                                           struct drm_i915_error_ring *ering)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        struct drm_i915_gem_object *obj;
 
        /* Currently render ring is the only HW context user */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 93da4feb3048..d4b1867b06d4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2884,11 +2884,10 @@ static struct intel_engine_cs *
 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
                                 u64 offset)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        struct intel_engine_cs *signaller;
 
-       if (INTEL_INFO(dev_priv)->gen >= 8) {
-               for_each_engine(signaller, dev_priv) {
+       if (INTEL_INFO(engine)->gen >= 8) {
+               for_each_engine(signaller, engine) {
                        if (engine == signaller)
                                continue;
 
@@ -2898,7 +2897,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs 
*engine, u32 ipehr,
        } else {
                u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
-               for_each_engine(signaller, dev_priv) {
+               for_each_engine(signaller, engine) {
                        if(engine == signaller)
                                continue;
 
@@ -2916,7 +2915,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs 
*engine, u32 ipehr,
 static struct intel_engine_cs *
 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        u32 cmd, ipehr, head;
        u64 offset = 0;
        int i, backwards;
@@ -2954,7 +2953,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 
*seqno)
         * ringbuffer itself.
         */
        head = I915_READ_HEAD(engine) & HEAD_ADDR;
-       backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
+       backwards = (INTEL_INFO(engine)->gen >= 8) ? 5 : 4;
 
        for (i = backwards; i; --i) {
                /*
@@ -2976,7 +2975,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 
*seqno)
                return NULL;
 
        *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
-       if (INTEL_INFO(engine->dev)->gen >= 8) {
+       if (INTEL_INFO(engine)->gen >= 8) {
                offset = ioread32(engine->buffer->virtual_start + head + 12);
                offset <<= 32;
                offset = ioread32(engine->buffer->virtual_start + head + 8);
@@ -2986,7 +2985,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 
*seqno)
 
 static int semaphore_passed(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        struct intel_engine_cs *signaller;
        u32 seqno;
 
@@ -3069,8 +3068,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
 static enum intel_ring_hangcheck_action
 ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        enum intel_ring_hangcheck_action ha;
        u32 tmp;
 
@@ -3078,7 +3076,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
        if (ha != HANGCHECK_HUNG)
                return ha;
 
-       if (IS_GEN2(dev))
+       if (IS_GEN2(engine))
                return HANGCHECK_HUNG;
 
        /* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3088,19 +3086,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
         */
        tmp = I915_READ_CTL(engine);
        if (tmp & RING_WAIT) {
-               i915_handle_error(dev, 0,
+               i915_handle_error(engine->dev, 0,
                                  "Kicking stuck wait on %s",
                                  engine->name);
                I915_WRITE_CTL(engine, tmp);
                return HANGCHECK_KICK;
        }
 
-       if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+       if (INTEL_INFO(engine)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
                switch (semaphore_passed(engine)) {
                default:
                        return HANGCHECK_HUNG;
                case 1:
-                       i915_handle_error(dev, 0,
+                       i915_handle_error(engine->dev, 0,
                                          "Kicking stuck semaphore on %s",
                                          engine->name);
                        I915_WRITE_CTL(engine, tmp);
@@ -3115,7 +3113,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 
 static unsigned kick_waiters(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *i915 = to_i915(engine->dev);
+       struct drm_i915_private *i915 = to_i915(engine);
        unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
 
        if (engine->hangcheck.user_interrupts == user_interrupts &&
diff --git a/drivers/gpu/drm/i915/intel_display.c 
b/drivers/gpu/drm/i915/intel_display.c
index 4cca155376be..55472ffd6be5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11265,7 +11265,7 @@ static bool use_mmio_flip(struct intel_engine_cs 
*engine,
        if (engine == NULL)
                return true;
 
-       if (INTEL_INFO(engine->dev)->gen < 5)
+       if (INTEL_INFO(engine)->gen < 5)
                return false;
 
        if (i915.use_mmio_flip < 0)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 1926bef2e612..ee195b22d68a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -266,19 +266,18 @@ int intel_sanitize_enable_execlists(struct drm_device 
*dev, int enable_execlists
 static void
 logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-
-       if (IS_GEN8(dev) || IS_GEN9(dev))
+       if (IS_GEN8(engine) || IS_GEN9(engine))
                engine->idle_lite_restore_wa = ~0;
 
-       engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-                                       IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
-                                       (engine->id == VCS || engine->id == 
VCS2);
+       engine->disable_lite_restore_wa =
+               (IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+                IS_BXT_REVID(engine, 0, BXT_REVID_A1)) &&
+               (engine->id == VCS || engine->id == VCS2);
 
        engine->ctx_desc_template = GEN8_CTX_VALID;
-       engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
-                                  GEN8_CTX_ADDRESSING_MODE_SHIFT;
-       if (IS_GEN8(dev))
+       engine->ctx_desc_template |=
+               GEN8_CTX_ADDRESSING_MODE(engine) << 
GEN8_CTX_ADDRESSING_MODE_SHIFT;
+       if (IS_GEN8(engine))
                engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
        engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
 
@@ -359,8 +358,7 @@ static void execlists_elsp_write(struct 
drm_i915_gem_request *rq0,
 {
 
        struct intel_engine_cs *engine = rq0->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        uint64_t desc[2];
 
        if (rq1) {
@@ -396,9 +394,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, 
u32 *reg_state)
 
 static void execlists_update_context(struct drm_i915_gem_request *rq)
 {
-       struct intel_engine_cs *engine = rq->engine;
        struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-       uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
+       uint32_t *reg_state = rq->ctx->engine[rq->engine->id].lrc_reg_state;
 
        reg_state[CTX_RING_TAIL+1] = rq->tail;
 
@@ -407,14 +404,14 @@ static void execlists_update_context(struct 
drm_i915_gem_request *rq)
         * PML4 is allocated during ppgtt init, so this is not needed
         * in 48-bit mode.
         */
-       if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+       if (ppgtt && !USES_FULL_48BIT_PPGTT(rq))
                execlists_update_context_pdps(ppgtt, reg_state);
 }
 
 static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
                                      struct drm_i915_gem_request *rq1)
 {
-       struct drm_i915_private *dev_priv = rq0->i915;
+       struct drm_i915_private *dev_priv = to_i915(rq0);
        unsigned int fw_domains = rq0->engine->fw_domains;
 
        execlists_update_context(rq0);
@@ -442,7 +439,7 @@ static void execlists_context_unqueue(struct 
intel_engine_cs *engine)
         * If irqs are not active generate a warning as batches that finish
         * without the irqs may get lost and a GPU Hang may occur.
         */
-       WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
+       WARN_ON(!intel_irqs_enabled(to_i915(engine)));
 
        /* Try to read in pairs */
        list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -517,7 +514,7 @@ static u32
 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
                   u32 *context_id)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        u32 status;
 
        read_pointer %= GEN8_CSB_ENTRIES;
@@ -543,7 +540,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned 
int read_pointer,
 static void intel_lrc_irq_handler(unsigned long data)
 {
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        u32 status_pointer;
        unsigned int read_pointer, write_pointer;
        u32 csb[GEN8_CSB_ENTRIES][2];
@@ -1040,7 +1037,7 @@ void intel_execlists_retire_requests(struct 
intel_engine_cs *engine)
 
 void intel_logical_ring_stop(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        int ret;
 
        if (!intel_engine_initialized(engine))
@@ -1079,8 +1076,7 @@ int logical_ring_flush_all_caches(struct 
drm_i915_gem_request *req)
 static int intel_lr_context_do_pin(struct intel_context *ctx,
                                   struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = ctx->i915;
        struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
        struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
        void *vaddr;
@@ -1240,7 +1236,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct 
intel_engine_cs *engine,
         * this batch updates GEN8_L3SQCREG4 with default value we need to
         * set this bit here to retain the WA during flush.
         */
-       if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+       if (IS_SKL_REVID(engine, 0, SKL_REVID_E0))
                l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1329,7 +1325,7 @@ static int gen8_init_indirectctx_bb(struct 
intel_engine_cs *engine,
        wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
-       if (IS_BROADWELL(engine->dev)) {
+       if (IS_BROADWELL(engine)) {
                int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
                if (rc < 0)
                        return rc;
@@ -1401,12 +1397,11 @@ static int gen9_init_indirectctx_bb(struct 
intel_engine_cs *engine,
                                    uint32_t *offset)
 {
        int ret;
-       struct drm_device *dev = engine->dev;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
        /* WaDisableCtxRestoreArbitration:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(engine, 0, SKL_REVID_D0) ||
+           IS_BXT_REVID(engine, 0, BXT_REVID_A1))
                wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1427,12 +1422,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs 
*engine,
                               uint32_t *const batch,
                               uint32_t *offset)
 {
-       struct drm_device *dev = engine->dev;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+           IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
                wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
                wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
                wa_ctx_emit(batch, index,
@@ -1441,7 +1435,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs 
*engine,
        }
 
        /* WaClearTdlStateAckDirtyBits:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+       if (IS_BXT_REVID(engine, 0, BXT_REVID_B0)) {
                wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
 
                wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
@@ -1460,8 +1454,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs 
*engine,
        }
 
        /* WaDisableCtxRestoreArbitration:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(engine, 0, SKL_REVID_D0) ||
+           IS_BXT_REVID(engine, 0, BXT_REVID_A1))
                wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 
        wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1511,9 +1505,9 @@ static int intel_init_workaround_bb(struct 
intel_engine_cs *engine)
        WARN_ON(engine->id != RCS);
 
        /* update this when WA for higher Gen are added */
-       if (INTEL_INFO(engine->dev)->gen > 9) {
+       if (INTEL_INFO(engine)->gen > 9) {
                DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
-                         INTEL_INFO(engine->dev)->gen);
+                         INTEL_INFO(engine)->gen);
                return 0;
        }
 
@@ -1533,7 +1527,7 @@ static int intel_init_workaround_bb(struct 
intel_engine_cs *engine)
        batch = kmap_atomic(page);
        offset = 0;
 
-       if (INTEL_INFO(engine->dev)->gen == 8) {
+       if (INTEL_INFO(engine)->gen == 8) {
                ret = gen8_init_indirectctx_bb(engine,
                                               &wa_ctx->indirect_ctx,
                                               batch,
@@ -1547,7 +1541,7 @@ static int intel_init_workaround_bb(struct 
intel_engine_cs *engine)
                                          &offset);
                if (ret)
                        goto out;
-       } else if (INTEL_INFO(engine->dev)->gen == 9) {
+       } else if (INTEL_INFO(engine)->gen == 9) {
                ret = gen9_init_indirectctx_bb(engine,
                                               &wa_ctx->indirect_ctx,
                                               batch,
@@ -1573,7 +1567,7 @@ out:
 
 static void lrc_init_hws(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
 
        I915_WRITE(RING_HWS_PGA(engine->mmio_base),
                   (u32)engine->status_page.gfx_addr);
@@ -1582,8 +1576,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned int next_context_status_buffer_hw;
 
        lrc_init_hws(engine);
@@ -1630,8 +1623,7 @@ static int gen8_init_common_ring(struct intel_engine_cs 
*engine)
 
 static int gen8_init_render_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        int ret;
 
        ret = gen8_init_common_ring(engine);
@@ -1736,8 +1728,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request 
*req,
 
 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1756,8 +1747,7 @@ static bool gen8_logical_ring_get_irq(struct 
intel_engine_cs *engine)
 
 static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1774,8 +1764,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request 
*request,
 {
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        struct intel_engine_cs *engine = ringbuf->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(request);
        uint32_t cmd;
        int ret;
 
@@ -1843,7 +1832,7 @@ static int gen8_emit_flush_render(struct 
drm_i915_gem_request *request,
                 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
                 * pipe control.
                 */
-               if (IS_GEN9(engine->dev))
+               if (IS_GEN9(request))
                        vf_flush_wa = true;
        }
 
@@ -2028,8 +2017,6 @@ static int gen8_init_rcs_context(struct 
drm_i915_gem_request *req)
  */
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv;
-
        if (!intel_engine_initialized(engine))
                return;
 
@@ -2040,9 +2027,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs 
*engine)
        if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
                tasklet_kill(&engine->irq_tasklet);
 
-       dev_priv = engine->dev->dev_private;
-
        if (engine->buffer) {
+               struct drm_i915_private *dev_priv = to_i915(engine);
                intel_logical_ring_stop(engine);
                WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
        }
@@ -2078,7 +2064,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs 
*engine)
        engine->emit_bb_start = gen8_emit_bb_start;
        engine->get_seqno = gen8_get_seqno;
        engine->set_seqno = gen8_set_seqno;
-       if (IS_BXT_REVID(engine->dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
                engine->irq_seqno_barrier = bxt_a_seqno_barrier;
                engine->set_seqno = bxt_a_set_seqno;
        }
@@ -2170,7 +2156,7 @@ logical_ring_setup(struct drm_device *dev, int id,
 static int
 logical_ring_init(struct intel_engine_cs *engine)
 {
-       struct intel_context *dctx = to_i915(engine->dev)->kernel_context;
+       struct intel_context *dctx = to_i915(engine)->kernel_context;
        int ret;
 
        ret = i915_cmd_parser_init_ring(engine);
@@ -2360,7 +2346,7 @@ cleanup_render_ring:
 }
 
 static u32
-make_rpcs(struct drm_device *dev)
+make_rpcs(struct intel_engine_cs *engine)
 {
        u32 rpcs = 0;
 
@@ -2368,7 +2354,7 @@ make_rpcs(struct drm_device *dev)
         * No explicit RPCS request is needed to ensure full
         * slice/subslice/EU enablement prior to Gen9.
        */
-       if (INTEL_INFO(dev)->gen < 9)
+       if (INTEL_INFO(engine)->gen < 9)
                return 0;
 
        /*
@@ -2377,24 +2363,24 @@ make_rpcs(struct drm_device *dev)
         * must make an explicit request through RPCS for full
         * enablement.
        */
-       if (INTEL_INFO(dev)->has_slice_pg) {
+       if (INTEL_INFO(engine)->has_slice_pg) {
                rpcs |= GEN8_RPCS_S_CNT_ENABLE;
-               rpcs |= INTEL_INFO(dev)->slice_total <<
+               rpcs |= INTEL_INFO(engine)->slice_total <<
                        GEN8_RPCS_S_CNT_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
 
-       if (INTEL_INFO(dev)->has_subslice_pg) {
+       if (INTEL_INFO(engine)->has_subslice_pg) {
                rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
-               rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+               rpcs |= INTEL_INFO(engine)->subslice_per_slice <<
                        GEN8_RPCS_SS_CNT_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
 
-       if (INTEL_INFO(dev)->has_eu_pg) {
-               rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+       if (INTEL_INFO(engine)->has_eu_pg) {
+               rpcs |= INTEL_INFO(engine)->eu_per_subslice <<
                        GEN8_RPCS_EU_MIN_SHIFT;
-               rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+               rpcs |= INTEL_INFO(engine)->eu_per_subslice <<
                        GEN8_RPCS_EU_MAX_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
@@ -2406,9 +2392,9 @@ static u32 intel_lr_indirect_ctx_offset(struct 
intel_engine_cs *engine)
 {
        u32 indirect_ctx_offset;
 
-       switch (INTEL_INFO(engine->dev)->gen) {
+       switch (INTEL_INFO(engine)->gen) {
        default:
-               MISSING_CASE(INTEL_INFO(engine->dev)->gen);
+               MISSING_CASE(INTEL_INFO(engine)->gen);
                /* fall through */
        case 9:
                indirect_ctx_offset =
@@ -2429,8 +2415,7 @@ populate_lr_context(struct intel_context *ctx,
                    struct intel_engine_cs *engine,
                    struct intel_ringbuffer *ringbuf)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = ctx->i915;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
        void *vaddr;
        u32 *reg_state;
@@ -2468,7 +2453,7 @@ populate_lr_context(struct intel_context *ctx,
                       RING_CONTEXT_CONTROL(engine),
                       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
                                          CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-                                         (HAS_RESOURCE_STREAMER(dev) ?
+                                         (HAS_RESOURCE_STREAMER(engine) ?
                                            CTX_CTRL_RS_CTX_ENABLE : 0)));
        ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
                       0);
@@ -2539,7 +2524,7 @@ populate_lr_context(struct intel_context *ctx,
        ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
                       0);
 
-       if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+       if (USES_FULL_48BIT_PPGTT(engine)) {
                /* 64b PPGTT (48bit canonical)
                 * PDP0_DESCRIPTOR contains the base address to PML4 and
                 * other PDP Descriptors are ignored.
@@ -2557,7 +2542,7 @@ populate_lr_context(struct intel_context *ctx,
        if (engine->id == RCS) {
                reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
                ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
-                              make_rpcs(dev));
+                              make_rpcs(engine));
        }
 
        i915_gem_object_unpin_map(ctx_obj);
@@ -2614,11 +2599,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 {
        int ret = 0;
 
-       WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
+       WARN_ON(INTEL_INFO(engine)->gen < 8);
 
        switch (engine->id) {
        case RCS:
-               if (INTEL_INFO(engine->dev)->gen >= 9)
+               if (INTEL_INFO(engine)->gen >= 9)
                        ret = GEN9_LR_CONTEXT_RENDER_SIZE;
                else
                        ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2651,7 +2636,6 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                                    struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
        struct drm_i915_gem_object *ctx_obj;
        uint32_t context_size;
        struct intel_ringbuffer *ringbuf;
@@ -2665,7 +2649,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
        /* One extra page as the sharing data between driver and GuC */
        context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 
-       ctx_obj = i915_gem_alloc_object(dev, context_size);
+       ctx_obj = i915_gem_alloc_object(engine->dev, context_size);
        if (!ctx_obj) {
                DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
                return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index b7b5fefa0cec..9958a745e0cd 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -189,7 +189,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
  */
 int intel_mocs_init_engine(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = to_i915(engine);
        struct drm_i915_mocs_table table;
        unsigned int index;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0c8c2d40987f..e43507244527 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -61,7 +61,7 @@ int intel_ring_space(struct intel_ringbuffer *ringbuf)
 
 bool intel_engine_stopped(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
 }
 
@@ -431,19 +431,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
 static void ring_write_tail(struct intel_engine_cs *engine,
                            u32 value)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        I915_WRITE_TAIL(engine, value);
 }
 
 u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        u64 acthd;
 
-       if (INTEL_INFO(engine->dev)->gen >= 8)
+       if (INTEL_INFO(engine)->gen >= 8)
                acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
                                         RING_ACTHD_UDW(engine->mmio_base));
-       else if (INTEL_INFO(engine->dev)->gen >= 4)
+       else if (INTEL_INFO(engine)->gen >= 4)
                acthd = I915_READ(RING_ACTHD(engine->mmio_base));
        else
                acthd = I915_READ(ACTHD);
@@ -453,25 +453,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        u32 addr;
 
        addr = dev_priv->status_page_dmah->busaddr;
-       if (INTEL_INFO(engine->dev)->gen >= 4)
+       if (INTEL_INFO(engine)->gen >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
 }
 
 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        i915_reg_t mmio;
 
        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
-       if (IS_GEN7(dev)) {
+       if (IS_GEN7(engine)) {
                switch (engine->id) {
                case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
@@ -491,7 +490,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
                        mmio = VEBOX_HWS_PGA_GEN7;
                        break;
                }
-       } else if (IS_GEN6(engine->dev)) {
+       } else if (IS_GEN6(engine)) {
                mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
        } else {
                /* XXX: gen8 returns to sanity */
@@ -508,7 +507,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
         * arises: do we still need this and if so how should we go about
         * invalidating the TLB?
         */
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+       if (INTEL_INFO(engine)->gen >= 6 && INTEL_INFO(engine)->gen < 8) {
                i915_reg_t reg = RING_INSTPM(engine->mmio_base);
 
                /* ring should be idle before issuing a sync flush*/
@@ -526,9 +525,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 
 static bool stop_ring(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = to_i915(engine);
 
-       if (!IS_GEN2(engine->dev)) {
+       if (!IS_GEN2(engine)) {
                I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
                if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
                        DRM_ERROR("%s : timed out trying to stop ring\n",
@@ -546,7 +545,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
        I915_WRITE_HEAD(engine, 0);
        engine->write_tail(engine, 0);
 
-       if (!IS_GEN2(engine->dev)) {
+       if (!IS_GEN2(engine)) {
                (void)I915_READ_CTL(engine);
                I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
        }
@@ -561,8 +560,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
 
 static int init_ring_common(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        struct intel_ringbuffer *ringbuf = engine->buffer;
        struct drm_i915_gem_object *obj = ringbuf->obj;
        int ret = 0;
@@ -592,7 +590,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
                }
        }
 
-       if (I915_NEED_GFX_HWS(dev))
+       if (I915_NEED_GFX_HWS(dev_priv))
                intel_ring_setup_status_page(engine);
        else
                ring_setup_phys_status_page(engine);
@@ -649,12 +647,10 @@ out:
 void
 intel_fini_pipe_control(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-
        if (engine->scratch.obj == NULL)
                return;
 
-       if (INTEL_INFO(dev)->gen >= 5) {
+       if (INTEL_INFO(engine)->gen >= 5) {
                kunmap(sg_page(engine->scratch.obj->pages->sgl));
                i915_gem_object_ggtt_unpin(engine->scratch.obj);
        }
@@ -799,7 +795,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
 static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
                                 i915_reg_t reg)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        struct i915_workarounds *wa = &dev_priv->workarounds;
        const uint32_t index = wa->hw_whitelist_count[engine->id];
 
@@ -815,8 +811,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
 
 static int gen8_init_workarounds(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
 
        WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
@@ -867,9 +862,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
 
 static int bdw_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = to_i915(engine);
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen8_init_workarounds(engine);
        if (ret)
@@ -889,16 +883,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
                          /* WaForceContextSaveRestoreNonCoherent:bdw */
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                          /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-                         (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+                         (IS_BDW_GT3(engine) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 
        return 0;
 }
 
 static int chv_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = to_i915(engine);
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen8_init_workarounds(engine);
        if (ret)
@@ -915,8 +908,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
 
 static int gen9_init_workarounds(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        uint32_t tmp;
        int ret;
 
@@ -939,14 +931,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
                          GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
        /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+           IS_BXT_REVID(engine, 0, BXT_REVID_A1))
                WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                                  GEN9_DG_MIRROR_FIX_ENABLE);
 
        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+           IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
                WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
                                  GEN9_RHWO_OPTIMIZATION_DISABLE);
                /*
@@ -957,7 +949,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
        }
 
        /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
+       if (IS_SKL_REVID(engine, SKL_REVID_C0, REVID_FOREVER) ||
+           IS_BROXTON(engine))
                WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
                                  GEN9_ENABLE_YV12_BUGFIX);
 
@@ -971,20 +964,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
                          GEN9_CCS_TLB_PREFETCH_ENABLE);
 
        /* WaDisableMaskBasedCammingInRCC:skl,bxt */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(engine, SKL_REVID_C0, SKL_REVID_C0) ||
+           IS_BXT_REVID(engine, 0, BXT_REVID_A1))
                WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
                                  PIXEL_MASK_CAMMING_DISABLE);
 
        /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
        tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-       if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
-           IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
+       if (IS_SKL_REVID(engine, SKL_REVID_F0, REVID_FOREVER) ||
+           IS_BXT_REVID(engine, BXT_REVID_B0, REVID_FOREVER))
                tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
        WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
 
        /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-       if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+       if (IS_SKYLAKE(engine) || IS_BXT_REVID(engine, 0, BXT_REVID_B0))
                WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                                  GEN8_SAMPLER_POWER_BYPASS_DIS);
 
@@ -1010,8 +1003,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 
 static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        u8 vals[3] = { 0, 0, 0 };
        unsigned int i;
 
@@ -1052,9 +1044,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 
 static int skl_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = to_i915(engine);
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen9_init_workarounds(engine);
        if (ret)
@@ -1065,12 +1056,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
         * until D0 which is the default case so this is equivalent to
         * !WaDisablePerCtxtPreemptionGranularityControl:skl
         */
-       if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+       if (IS_SKL_REVID(engine, SKL_REVID_E0, REVID_FOREVER)) {
                I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
                           _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
        }
 
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
+       if (IS_SKL_REVID(engine, 0, SKL_REVID_D0)) {
                /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
                I915_WRITE(FF_SLICE_CS_CHICKEN2,
                           _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1079,24 +1070,24 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
        /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
         * involving this register should also be added to WA batch as required.
         */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
+       if (IS_SKL_REVID(engine, 0, SKL_REVID_E0))
                /* WaDisableLSQCROPERFforOCL:skl */
                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
                           GEN8_LQSC_RO_PERF_DIS);
 
        /* WaEnableGapsTsvCreditFix:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
+       if (IS_SKL_REVID(engine, SKL_REVID_C0, REVID_FOREVER)) {
                I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
                                           GEN9_GAPS_TSV_CREDIT_DISABLE));
        }
 
        /* WaDisablePowerCompilerClockGating:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
+       if (IS_SKL_REVID(engine, SKL_REVID_B0, SKL_REVID_B0))
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
        /* This is tied to WaForceContextSaveRestoreNonCoherent */
-       if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
+       if (IS_SKL_REVID(engine, 0, REVID_FOREVER)) {
                /*
                 *Use Force Non-Coherent whenever executing a 3D context. This
                 * is a workaround for a possible hang in the unlikely event
@@ -1112,13 +1103,13 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
        }
 
        /* WaBarrierPerformanceFixDisable:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
+       if (IS_SKL_REVID(engine, SKL_REVID_C0, SKL_REVID_D0))
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE |
                                  HDC_BARRIER_PERFORMANCE_DISABLE);
 
        /* WaDisableSbeCacheDispatchPortSharing:skl */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
+       if (IS_SKL_REVID(engine, 0, SKL_REVID_F0))
                WA_SET_BIT_MASKED(
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1133,9 +1124,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
 static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = to_i915(engine);
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen9_init_workarounds(engine);
        if (ret)
@@ -1143,11 +1133,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
        /* WaStoreMultiplePTEenable:bxt */
        /* This is a requirement according to Hardware specification */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_BXT_REVID(engine, 0, BXT_REVID_A1))
                I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
        /* WaSetClckGatingDisableMedia:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
                I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
                                            ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
        }
@@ -1157,7 +1147,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
                          STALL_DOP_GATING_DISABLE);
 
        /* WaDisableSbeCacheDispatchPortSharing:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+       if (IS_BXT_REVID(engine, 0, BXT_REVID_B0)) {
                WA_SET_BIT_MASKED(
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1167,7 +1157,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
        /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
        /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
        /* WaDisableLSQCROPERFforOCL:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
                ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
                if (ret)
                        return ret;
@@ -1182,24 +1172,23 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
 int init_workarounds_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
 
        WARN_ON(engine->id != RCS);
 
        dev_priv->workarounds.count = 0;
        dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
-       if (IS_BROADWELL(dev))
+       if (IS_BROADWELL(engine))
                return bdw_init_workarounds(engine);
 
-       if (IS_CHERRYVIEW(dev))
+       if (IS_CHERRYVIEW(engine))
                return chv_init_workarounds(engine);
 
-       if (IS_SKYLAKE(dev))
+       if (IS_SKYLAKE(engine))
                return skl_init_workarounds(engine);
 
-       if (IS_BROXTON(dev))
+       if (IS_BROXTON(engine))
                return bxt_init_workarounds(engine);
 
        return 0;
@@ -1207,14 +1196,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
 
 static int init_render_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        int ret = init_ring_common(engine);
        if (ret)
                return ret;
 
        /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-       if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+       if (INTEL_INFO(engine)->gen >= 4 && INTEL_INFO(engine)->gen < 7)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
        /* We need to disable the AsyncFlip performance optimisations in order
@@ -1223,22 +1211,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
         */
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+       if (INTEL_INFO(engine)->gen >= 6 && INTEL_INFO(engine)->gen < 8)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
        /* Required for the hardware to program scanline values for waiting */
        /* WaEnableFlushTlbInvalidationMode:snb */
-       if (INTEL_INFO(dev)->gen == 6)
+       if (INTEL_INFO(engine)->gen == 6)
                I915_WRITE(GFX_MODE,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
        /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-       if (IS_GEN7(dev))
+       if (IS_GEN7(engine))
                I915_WRITE(GFX_MODE_GEN7,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
-       if (IS_GEN6(dev)) {
+       if (IS_GEN6(engine)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
                 *  policy. [...] This bit must be reset.  LRA replacement
@@ -1248,19 +1236,18 @@ static int init_render_ring(struct intel_engine_cs *engine)
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
        }
 
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+       if (INTEL_INFO(engine)->gen >= 6 && INTEL_INFO(engine)->gen < 8)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-       if (HAS_L3_DPF(dev))
-               I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+       if (HAS_L3_DPF(engine))
+               I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(engine));
 
        return init_workarounds_ring(engine);
 }
 
 static void render_ring_cleanup(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
 
        if (dev_priv->semaphore_obj) {
                i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1423,8 +1410,7 @@ gen6_add_request(struct drm_i915_gem_request *req)
 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
                                              u32 seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       return dev_priv->last_seqno < seqno;
+       return to_i915(dev)->last_seqno < seqno;
 }
 
 /**
@@ -1579,7 +1565,7 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
         * batch i.e. much more frequent than a delay when waiting for the
         * interrupt (with the same net latency).
         */
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
 }
 
@@ -1610,8 +1596,7 @@ pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 static bool
 gen5_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1628,8 +1613,7 @@ gen5_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen5_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1641,8 +1625,7 @@ gen5_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i9xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        if (!intel_irqs_enabled(dev_priv))
@@ -1662,8 +1645,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i9xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1678,8 +1660,7 @@ i9xx_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i8xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        if (!intel_irqs_enabled(dev_priv))
@@ -1699,8 +1680,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i8xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1753,8 +1733,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 static bool
 gen6_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1762,10 +1741,10 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (engine->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS)
+               if (HAS_L3_DPF(engine) && engine->id == RCS)
                        I915_WRITE_IMR(engine,
                                       ~(engine->irq_enable_mask |
-                                        GT_PARITY_ERROR(dev)));
+                                        GT_PARITY_ERROR(engine)));
                else
                        I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
                gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1778,14 +1757,13 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen6_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--engine->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS)
-                       I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+               if (HAS_L3_DPF(engine) && engine->id == RCS)
+                       I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(engine));
                else
                        I915_WRITE_IMR(engine, ~0);
                gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1796,8 +1774,7 @@ gen6_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 hsw_vebox_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1816,8 +1793,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *engine)
 static void
 hsw_vebox_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1831,8 +1807,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *engine)
 static bool
 gen8_ring_get_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1840,7 +1815,7 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (engine->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS) {
+               if (HAS_L3_DPF(engine) && engine->id == RCS) {
                        I915_WRITE_IMR(engine,
                                       ~(engine->irq_enable_mask |
                                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
@@ -1857,13 +1832,12 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen8_ring_put_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--engine->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS) {
+               if (HAS_L3_DPF(engine) && engine->id == RCS) {
                        I915_WRITE_IMR(engine,
                                       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
                } else {
@@ -1985,7 +1959,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 
 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = to_i915(engine);
 
        if (!dev_priv->status_page_dmah)
                return;
@@ -2027,7 +2001,7 @@ static int init_status_page(struct intel_engine_cs *engine)
                        goto err_unref;
 
                flags = 0;
-               if (!HAS_LLC(engine->dev))
+               if (!HAS_LLC(engine))
                        /* On g33, we cannot place HWS above 256MiB, so
                         * restrict its pinning to the low mappable arena.
                         * Though this restriction is not documented for
@@ -2061,7 +2035,7 @@ err_unref:
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
 
        if (!dev_priv->status_page_dmah) {
                dev_priv->status_page_dmah =
@@ -2188,7 +2162,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
         * of the buffer.
         */
        ring->effective_size = size;
-       if (IS_I830(engine->dev) || IS_845G(engine->dev))
+       if (IS_I830(engine) || IS_845G(engine))
                ring->effective_size -= 2 * CACHELINE_BYTES;
 
        ring->last_retired_head = -1;
@@ -2277,11 +2251,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
        if (!intel_engine_initialized(engine))
                return;
 
-       dev_priv = to_i915(engine->dev);
+       dev_priv = to_i915(engine);
 
        if (engine->buffer) {
                intel_stop_engine(engine);
-               WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+               WARN_ON(!IS_GEN2(engine) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
                intel_unpin_ringbuffer_obj(engine->buffer);
                intel_ringbuffer_free(engine->buffer);
@@ -2291,7 +2265,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
        if (engine->cleanup)
                engine->cleanup(engine);
 
-       if (I915_NEED_GFX_HWS(engine->dev)) {
+       if (I915_NEED_GFX_HWS(engine)) {
                cleanup_status_page(engine);
        } else {
                WARN_ON(engine->id != RCS);
@@ -2525,7 +2499,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = to_i915(engine);
 
        /* Our semaphore implementation is strictly monotonic (i.e. we proceed
         * so long as the semaphore value in the register/page is greater
@@ -2561,7 +2535,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
                                     u32 value)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
 
        /* Every tail move must follow the sequence below */
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f1dfe616856..655dd5e0536c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1663,8 +1663,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv,
 
 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = to_i915(engine);
        int ret;
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
        I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
                      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
@@ -1682,7 +1682,7 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 
 static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(engine);
 
        I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
                      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
-- 
2.8.0.rc3

_______________________________________________
Intel-gfx mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to