By using the same address for storing the HWS on every platform, we can
remove the platform-specific vfuncs and reduce the get-seqno routine to
a single read of a cached memory location.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
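For reference, a minimal sketch of the consolidated seqno read path this
patch arrives at: every engine reports its seqno from the same status-page
slot, and callers that need coherency invoke the optional seqno_barrier
hook first (e.g. bxt_seqno_barrier on early BXT). The helper mirrors the
new inline added to intel_ringbuffer.h below; the calling pattern mirrors
intel_breadcrumbs.c.

	/* Single read of the seqno the GPU wrote into the cached HWS page. */
	static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
	{
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
	}

	/* Typical caller: flush any stale HWS cacheline, then read. */
	if (engine->seqno_barrier)
		engine->seqno_barrier(engine);
	seqno = intel_ring_get_seqno(engine);
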
 drivers/gpu/drm/i915/i915_debugfs.c      |  6 +--
 drivers/gpu/drm/i915/i915_drv.h          |  4 +-
 drivers/gpu/drm/i915/i915_gpu_error.c    |  2 +-
 drivers/gpu/drm/i915/i915_irq.c          |  4 +-
 drivers/gpu/drm/i915/i915_trace.h        |  2 +-
 drivers/gpu/drm/i915/intel_breadcrumbs.c |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c         | 46 ++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.c  | 86 ++++++++------------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h  |  7 +--
 9 files changed, 41 insertions(+), 118 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 99a79a10ce75..1ced3738281c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -600,7 +600,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                           ring->name,
                                           i915_gem_request_get_seqno(work->flip_queued_req),
                                           dev_priv->next_seqno,
-                                          ring->get_seqno(ring),
+                                          intel_ring_get_seqno(ring),
                                           i915_gem_request_completed(work->flip_queued_req));
                        } else
                                seq_printf(m, "Flip not associated with any ring\n");
@@ -731,7 +731,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_engine_cs *ring)
 {
        seq_printf(m, "Current sequence (%s): %x\n",
-                  ring->name, ring->get_seqno(ring));
+                  ring->name, intel_ring_get_seqno(ring));
 }
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
@@ -1340,7 +1340,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
        intel_runtime_pm_get(dev_priv);
 
        for_each_ring(ring, dev_priv, i) {
-               seqno[i] = ring->get_seqno(ring);
+               seqno[i] = intel_ring_get_seqno(ring);
                acthd[i] = intel_ring_get_active_head(ring);
        }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8eb0871cd7af..410939c3f05b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2987,13 +2987,13 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 
 static inline bool i915_gem_request_started(struct drm_i915_gem_request *req)
 {
-       return i915_seqno_passed(req->ring->get_seqno(req->ring),
+       return i915_seqno_passed(intel_ring_get_seqno(req->ring),
                                 req->previous_seqno);
 }
 
 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req)
 {
-       return i915_seqno_passed(req->ring->get_seqno(req->ring),
+       return i915_seqno_passed(intel_ring_get_seqno(req->ring),
                                 req->seqno);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 7a427240c813..b4e4796fac1a 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -904,7 +904,7 @@ static void i915_record_ring_state(struct drm_device *dev,
        ering->waiting = READ_ONCE(dev_priv->breadcrumbs.engine[ring->id].first);
        ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
        ering->acthd = intel_ring_get_active_head(ring);
-       ering->seqno = ring->get_seqno(ring);
+       ering->seqno = intel_ring_get_seqno(ring);
        ering->start = I915_READ_START(ring);
        ering->head = I915_READ_HEAD(ring);
        ering->tail = I915_READ_TAIL(ring);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 43078e09e1f0..86f98d8eacc8 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2871,7 +2871,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
        if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
                return -1;
 
-       if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
+       if (i915_seqno_passed(intel_ring_get_seqno(signaller), seqno))
                return 1;
 
        /* cursory check for an unkickable deadlock */
@@ -2975,7 +2975,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                semaphore_clear_deadlocks(dev_priv);
 
                acthd = intel_ring_get_active_head(ring);
-               seqno = ring->get_seqno(ring);
+               seqno = intel_ring_get_seqno(ring);
 
                if (ring->hangcheck.seqno == seqno) {
                        if (ring_idle(ring, seqno)) {
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index cfb5f78a6e84..efca75bcace3 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -573,7 +573,7 @@ TRACE_EVENT(i915_gem_request_notify,
            TP_fast_assign(
                           __entry->dev = ring->dev->primary->index;
                           __entry->ring = ring->id;
-                          __entry->seqno = ring->get_seqno(ring);
+                          __entry->seqno = intel_ring_get_seqno(ring);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u",
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index b1b99068442c..4a22c6db39b4 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -110,7 +110,7 @@ static int intel_breadcrumbs_irq(void *data)
                        if (engine->seqno_barrier)
                                engine->seqno_barrier(engine);
 
-                       seqno = engine->get_seqno(engine);
+                       seqno = intel_ring_get_seqno(engine);
                        do {
                                struct rb_node *next;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index fef97dc8e02d..c457dd035900 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1755,16 +1755,6 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        return 0;
 }
 
-static u32 gen8_get_seqno(struct intel_engine_cs *ring)
-{
-       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
-}
-
-static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
-{
-       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
-}
-
 static void bxt_seqno_barrier(struct intel_engine_cs *ring)
 {
        /*
@@ -1780,14 +1770,6 @@ static void bxt_seqno_barrier(struct intel_engine_cs *ring)
        intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
-{
-       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
-
-       /* See bxt_a_get_seqno() explaining the reason for the clflush. */
-       intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
-}
-
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
        struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1812,7 +1794,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
                                (ring->status_page.gfx_addr +
                                (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
        intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       intel_logical_ring_emit(ringbuf, request->seqno);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_advance_and_submit(request);
@@ -1972,12 +1954,8 @@ static int logical_render_ring_init(struct drm_device *dev)
                ring->init_hw = gen8_init_render_ring;
        ring->init_context = gen8_init_rcs_context;
        ring->cleanup = intel_fini_pipe_control;
-       ring->get_seqno = gen8_get_seqno;
-       ring->set_seqno = gen8_set_seqno;
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
                ring->seqno_barrier = bxt_seqno_barrier;
-               ring->set_seqno = bxt_a_set_seqno;
-       }
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush_render;
        ring->irq_get = gen8_logical_ring_get_irq;
@@ -2023,12 +2001,8 @@ static int logical_bsd_ring_init(struct drm_device *dev)
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
        ring->init_hw = gen8_init_common_ring;
-       ring->get_seqno = gen8_get_seqno;
-       ring->set_seqno = gen8_set_seqno;
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
                ring->seqno_barrier = bxt_seqno_barrier;
-               ring->set_seqno = bxt_a_set_seqno;
-       }
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush;
        ring->irq_get = gen8_logical_ring_get_irq;
@@ -2052,8 +2026,6 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
 
        ring->init_hw = gen8_init_common_ring;
-       ring->get_seqno = gen8_get_seqno;
-       ring->set_seqno = gen8_set_seqno;
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush;
        ring->irq_get = gen8_logical_ring_get_irq;
@@ -2077,12 +2049,8 @@ static int logical_blt_ring_init(struct drm_device *dev)
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
        ring->init_hw = gen8_init_common_ring;
-       ring->get_seqno = gen8_get_seqno;
-       ring->set_seqno = gen8_set_seqno;
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
                ring->seqno_barrier = bxt_seqno_barrier;
-               ring->set_seqno = bxt_a_set_seqno;
-       }
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush;
        ring->irq_get = gen8_logical_ring_get_irq;
@@ -2106,12 +2074,8 @@ static int logical_vebox_ring_init(struct drm_device *dev)
                GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
        ring->init_hw = gen8_init_common_ring;
-       ring->get_seqno = gen8_get_seqno;
-       ring->set_seqno = gen8_set_seqno;
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
                ring->seqno_barrier = bxt_seqno_barrier;
-               ring->set_seqno = bxt_a_set_seqno;
-       }
        ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush;
        ring->irq_get = gen8_logical_ring_get_irq;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 98de72177d12..3d59dd555e64 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1232,19 +1232,17 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_ring(waiter, dev_priv, i) {
-               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
                intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
                                           PIPE_CONTROL_QW_WRITE |
                                           PIPE_CONTROL_FLUSH_ENABLE);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset));
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, 0);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->id));
@@ -1273,18 +1271,16 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_ring(waiter, dev_priv, i) {
-               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
                                           MI_FLUSH_DW_OP_STOREDW);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
                                           MI_FLUSH_DW_USE_GTT);
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->id));
                intel_ring_emit(signaller, 0);
@@ -1315,11 +1311,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
 
                if (i915_mmio_reg_valid(mbox_reg)) {
-                       u32 seqno = i915_gem_request_get_seqno(signaller_req);
-
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit_reg(signaller, mbox_reg);
-                       intel_ring_emit(signaller, seqno);
+                       intel_ring_emit(signaller, signaller_req->seqno);
                }
        }
 
@@ -1354,7 +1348,7 @@ gen6_add_request(struct drm_i915_gem_request *req)
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+       intel_ring_emit(ring, req->seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);
 
@@ -1456,7 +1450,9 @@ static int
 pc_render_add_request(struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *ring = req->ring;
-       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 addr = req->ring->status_page.gfx_addr +
+               (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       u32 scratch_addr = addr;
        int ret;
 
        /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -1471,11 +1467,12 @@ pc_render_add_request(struct drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
-                       PIPE_CONTROL_WRITE_FLUSH |
-                       PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-       intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+       intel_ring_emit(ring,
+                       GFX_OP_PIPE_CONTROL(4) |
+                       PIPE_CONTROL_QW_WRITE |
+                       PIPE_CONTROL_WRITE_FLUSH);
+       intel_ring_emit(ring, addr | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(ring, req->seqno);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1489,12 +1486,12 @@ pc_render_add_request(struct drm_i915_gem_request *req)
        scratch_addr += 2 * CACHELINE_BYTES;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
 
-       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+       intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) |
+                       PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
-                       PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
-       intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+       intel_ring_emit(ring, addr | PIPE_CONTROL_GLOBAL_GTT);
+       intel_ring_emit(ring, req->seqno);
        intel_ring_emit(ring, 0);
        __intel_ring_advance(ring);
 
@@ -1511,30 +1508,6 @@ gen6_seqno_barrier(struct intel_engine_cs *ring)
        POSTING_READ(RING_ACTHD(ring->mmio_base));
 }
 
-static u32
-ring_get_seqno(struct intel_engine_cs *ring)
-{
-       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
-}
-
-static void
-ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
-{
-       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
-}
-
-static u32
-pc_render_get_seqno(struct intel_engine_cs *ring)
-{
-       return ring->scratch.cpu_page[0];
-}
-
-static void
-pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
-{
-       ring->scratch.cpu_page[0] = seqno;
-}
-
 static bool
 gen5_ring_get_irq(struct intel_engine_cs *ring)
 {
@@ -1670,7 +1643,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+       intel_ring_emit(ring, req->seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);
 
@@ -2462,7 +2435,10 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
                        I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
        }
 
-       ring->set_seqno(ring, seqno);
+       intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+       if (ring->seqno_barrier)
+               ring->seqno_barrier(ring);
+
        ring->hangcheck.seqno = seqno;
 }
 
@@ -2700,8 +2676,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                ring->irq_put = gen8_ring_put_irq;
                ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
                ring->seqno_barrier = gen6_seqno_barrier;
-               ring->get_seqno = ring_get_seqno;
-               ring->set_seqno = ring_set_seqno;
                if (i915_semaphore_is_enabled(dev)) {
                        WARN_ON(!dev_priv->semaphore_obj);
                        ring->semaphore.sync_to = gen8_ring_sync;
@@ -2718,8 +2692,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                ring->irq_put = gen6_ring_put_irq;
                ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
                ring->seqno_barrier = gen6_seqno_barrier;
-               ring->get_seqno = ring_get_seqno;
-               ring->set_seqno = ring_set_seqno;
                if (i915_semaphore_is_enabled(dev)) {
                        ring->semaphore.sync_to = gen6_ring_sync;
                        ring->semaphore.signal = gen6_signal;
@@ -2744,8 +2716,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->flush = gen4_render_ring_flush;
-               ring->get_seqno = pc_render_get_seqno;
-               ring->set_seqno = pc_render_set_seqno;
                ring->irq_get = gen5_ring_get_irq;
                ring->irq_put = gen5_ring_put_irq;
                ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
@@ -2756,8 +2726,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                        ring->flush = gen2_render_ring_flush;
                else
                        ring->flush = gen4_render_ring_flush;
-               ring->get_seqno = ring_get_seqno;
-               ring->set_seqno = ring_set_seqno;
                if (IS_GEN2(dev)) {
                        ring->irq_get = i8xx_ring_get_irq;
                        ring->irq_put = i8xx_ring_put_irq;
@@ -2833,8 +2801,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
                ring->flush = gen6_bsd_ring_flush;
                ring->add_request = gen6_add_request;
                ring->seqno_barrier = gen6_seqno_barrier;
-               ring->get_seqno = ring_get_seqno;
-               ring->set_seqno = ring_set_seqno;
                if (INTEL_INFO(dev)->gen >= 8) {
                        ring->irq_enable_mask =
                                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
@@ -2872,8 +2838,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
                ring->mmio_base = BSD_RING_BASE;
                ring->flush = bsd_ring_flush;
                ring->add_request = i9xx_add_request;
-               ring->get_seqno = ring_get_seqno;
-               ring->set_seqno = ring_set_seqno;
                if (IS_GEN5(dev)) {
                        ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
                        ring->irq_get = gen5_ring_get_irq;
@@ -2906,8 +2870,6 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
        ring->flush = gen6_bsd_ring_flush;
        ring->add_request = gen6_add_request;
        ring->seqno_barrier = gen6_seqno_barrier;
-       ring->get_seqno = ring_get_seqno;
-       ring->set_seqno = ring_set_seqno;
        ring->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
        ring->irq_get = gen8_ring_get_irq;
@@ -2937,8 +2899,6 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
        ring->flush = gen6_ring_flush;
        ring->add_request = gen6_add_request;
        ring->seqno_barrier = gen6_seqno_barrier;
-       ring->get_seqno = ring_get_seqno;
-       ring->set_seqno = ring_set_seqno;
        if (INTEL_INFO(dev)->gen >= 8) {
                ring->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
@@ -2995,8 +2955,6 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
        ring->flush = gen6_ring_flush;
        ring->add_request = gen6_add_request;
        ring->seqno_barrier = gen6_seqno_barrier;
-       ring->get_seqno = ring_get_seqno;
-       ring->set_seqno = ring_set_seqno;
 
        if (INTEL_INFO(dev)->gen >= 8) {
                ring->irq_enable_mask =
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index af66119ecca9..74b9df21f062 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -194,9 +194,6 @@ struct  intel_engine_cs {
         * monotonic, even if not coherent.
         */
        void            (*seqno_barrier)(struct intel_engine_cs *ring);
-       u32             (*get_seqno)(struct intel_engine_cs *ring);
-       void            (*set_seqno)(struct intel_engine_cs *ring,
-                                    u32 seqno);
        int             (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
                                               u64 offset, u32 length,
                                               unsigned dispatch_flags);
@@ -472,6 +469,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
 u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
+static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
+{
+       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
 
 int init_workarounds_ring(struct intel_engine_cs *ring);
 
-- 
2.6.2
