---
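Notes (below the fold, so git-am drops them): this moves the per-device
seqno allocation and the per-engine request/batch-pool bookkeeping onto
a new struct i915_timeline, with one i915_timeline_engine per engine.
As a rough sketch of the resulting request flow, condensed from
i915_gem_request_alloc() and __i915_add_request() in the hunks below
(only the global kernel_timeline is wired up at this stage, and the
reset and error-capture paths are still stubbed out):

        struct i915_timeline *tl = &dev_priv->kernel_timeline;
        struct i915_timeline_engine *te = &tl->engine[engine->id];
        u32 seqno;
        int ret;

        /* seqnos are allocated per-timeline now: 0 stays reserved and
         * each request consumes a start/stop pair, hence += 2
         */
        ret = i915_timeline_get_seqno(tl, &seqno);

        /* requests live on a per-timeline, per-engine list */
        req->timeline = te;
        list_add_tail(&req->link, &te->requests);

        /* the first request on an idle timeline queues that timeline's
         * retire_work; the first timeline to wake bumps gt.active and
         * takes the runtime-pm wakeref
         */
        i915_gem_mark_busy(req);

Retiring walks each timeline's engine[] lists, and the last timeline to
idle drops gt.active and releases the wakeref, replacing gt.awake,
gt.idle_work and the i915_next_seqno debugfs interface.
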
 drivers/gpu/drm/i915/i915_debugfs.c        | 162 +---------------
 drivers/gpu/drm/i915/i915_drv.h            |  84 ++++++---
 drivers/gpu/drm/i915/i915_gem.c            | 144 +++------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   3 +-
 drivers/gpu/drm/i915/i915_gem_request.c    | 287 +++++++++++++++--------------
 drivers/gpu/drm/i915/i915_gem_request.h    |   5 +
 drivers/gpu/drm/i915/i915_gpu_error.c      |  60 +-----
 drivers/gpu/drm/i915/intel_lrc.c           |   4 +-
 drivers/gpu/drm/i915/intel_pm.c            |   3 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    |   4 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  14 +-
 11 files changed, 251 insertions(+), 519 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6454e61f8ac3..3fe4f73916b5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -366,28 +366,6 @@ static int per_file_stats(int id, void *ptr, void *data)
                           stats.unbound); \
 } while (0)

-static void print_batch_pool_stats(struct seq_file *m,
-                                  struct drm_i915_private *dev_priv)
-{
-       struct drm_i915_gem_object *obj;
-       struct file_stats stats;
-       struct intel_engine_cs *engine;
-       int j;
-
-       memset(&stats, 0, sizeof(stats));
-
-       for_each_engine(engine, dev_priv) {
-               for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-                       list_for_each_entry(obj,
-                                           &engine->batch_pool.cache_list[j],
-                                           batch_pool_link)
-                               per_file_stats(0, obj, &stats);
-               }
-       }
-
-       print_file_stats(m, "[k]batch pool", stats);
-}
-
 static int per_file_ctx_stats(int id, void *ptr, void *data)
 {
        struct i915_gem_context *ctx = ptr;
@@ -545,7 +523,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);

        seq_putc(m, '\n');
-       print_batch_pool_stats(m, dev_priv);
        mutex_unlock(&dev->struct_mutex);

        mutex_lock(&dev->filelist_mutex);
@@ -655,10 +632,9 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                        if (work->flip_queued_req) {
                                struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);

-                               seq_printf(m, "Flip queued on %s at seqno %x, 
next seqno %x [current breadcrumb %x], completed? %d\n",
+                               seq_printf(m, "Flip queued on %s at seqno %x, 
current breadcrumb %x, completed? %d\n",
                                           engine->name,
                                           
i915_gem_request_get_seqno(work->flip_queued_req),
-                                          dev_priv->next_seqno,
                                           intel_engine_get_seqno(engine),
                                           
i915_gem_request_completed(work->flip_queued_req));
                        } else
@@ -688,99 +664,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
        return 0;
 }

-static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
-{
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
-       struct intel_engine_cs *engine;
-       int total = 0;
-       int ret, j;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       for_each_engine(engine, dev_priv) {
-               for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-                       int count;
-
-                       count = 0;
-                       list_for_each_entry(obj,
-                                           &engine->batch_pool.cache_list[j],
-                                           batch_pool_link)
-                               count++;
-                       seq_printf(m, "%s cache[%d]: %d objects\n",
-                                  engine->name, j, count);
-
-                       list_for_each_entry(obj,
-                                           &engine->batch_pool.cache_list[j],
-                                           batch_pool_link) {
-                               seq_puts(m, "   ");
-                               describe_obj(m, obj);
-                               seq_putc(m, '\n');
-                       }
-
-                       total += count;
-               }
-       }
-
-       seq_printf(m, "total: %d\n", total);
-
-       mutex_unlock(&dev->struct_mutex);
-
-       return 0;
-}
-
-static int i915_gem_request_info(struct seq_file *m, void *data)
-{
-       struct drm_info_node *node = m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *engine;
-       struct drm_i915_gem_request *req;
-       int ret, any;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       any = 0;
-       for_each_engine(engine, dev_priv) {
-               int count;
-
-               count = 0;
-               list_for_each_entry(req, &engine->request_list, link)
-                       count++;
-               if (count == 0)
-                       continue;
-
-               seq_printf(m, "%s requests: %d\n", engine->name, count);
-               list_for_each_entry(req, &engine->request_list, link) {
-                       struct pid *pid = req->ctx->pid;
-                       struct task_struct *task;
-
-                       rcu_read_lock();
-                       task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
-                       seq_printf(m, "    %x @ %d: %s [%d]\n",
-                                  req->fence.seqno,
-                                  (int) (jiffies - req->emitted_jiffies),
-                                  task ? task->comm : "<unknown>",
-                                  task ? task->pid : -1);
-                       rcu_read_unlock();
-               }
-
-               any++;
-       }
-       mutex_unlock(&dev->struct_mutex);
-
-       if (any == 0)
-               seq_puts(m, "No requests\n");
-
-       return 0;
-}
-
 static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_engine_cs *engine)
 {
@@ -1150,43 +1033,6 @@ static const struct file_operations i915_error_state_fops = {
        .release = i915_error_state_release,
 };

-static int
-i915_next_seqno_get(void *data, u64 *val)
-{
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       *val = dev_priv->next_seqno;
-       mutex_unlock(&dev->struct_mutex);
-
-       return 0;
-}
-
-static int
-i915_next_seqno_set(void *data, u64 val)
-{
-       struct drm_device *dev = data;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       ret = i915_gem_set_seqno(dev, val);
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
-                       i915_next_seqno_get, i915_next_seqno_set,
-                       "0x%llx\n");
-
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
@@ -2349,8 +2195,6 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
        struct drm_file *file;

        seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
-       seq_printf(m, "GPU busy? %s [%x]\n",
-                  yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
        seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
        seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max 
soft:%d, hard:%d\n",
                   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
@@ -2688,7 +2532,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
        if (!HAS_RUNTIME_PM(dev_priv))
                seq_puts(m, "Runtime power management not supported\n");

-       seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
        seq_printf(m, "IRQs disabled: %s\n",
                   yesno(!intel_irqs_enabled(dev_priv)));
 #ifdef CONFIG_PM
@@ -5269,7 +5112,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) 
INACTIVE_LIST},
        {"i915_gem_stolen", i915_gem_stolen_list_info },
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
-       {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
@@ -5277,7 +5119,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
        {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
        {"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
-       {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
        {"i915_guc_info", i915_guc_info, 0},
        {"i915_guc_load_status", i915_guc_load_status_info, 0},
        {"i915_guc_log_dump", i915_guc_log_dump, 0},
@@ -5329,7 +5170,6 @@ static const struct i915_debugfs_files {
        {"i915_ring_test_irq", &i915_ring_test_irq_fops},
        {"i915_gem_drop_caches", &i915_drop_caches_fops},
        {"i915_error_state", &i915_error_state_fops},
-       {"i915_next_seqno", &i915_next_seqno_fops},
        {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
        {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
        {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f88133e5d725..ba5007653f00 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -856,6 +856,58 @@ struct i915_ctx_hang_stats {
        bool banned;
 };

+struct i915_vma;
+
+struct i915_timeline {
+       u32 id;
+       u32 next_seqno;
+
+       struct mutex mutex;
+       struct list_head link;
+       struct drm_i915_private *i915;
+
+       struct i915_timeline_engine {
+               u32 mask;
+
+               struct i915_timeline *timeline;
+               struct list_head requests;
+
+               u32 last_submitted_seqno;
+
+               /*
+                * A pool of objects to use as shadow copies of client batch
+                * buffers when the command parser is enabled. Prevents the
+                * client from modifying the batch contents after software
+                * parsing.
+                */
+               struct i915_gem_batch_pool batch_pool;
+       } engine[I915_NUM_ENGINES];
+
+       struct {
+               struct i915_vma *vma;
+               uint32_t *map;
+       } hws;
+
+       /**
+        * Is the GPU currently considered idle, or busy executing
+        * userspace requests? Whilst idle, we allow runtime power
+        * management to power down the hardware and display clocks.
+        * In order to reduce the effect on performance, there
+        * is a slight delay before we do so.
+        */
+       unsigned active_engines;
+
+       /**
+        * We leave the user IRQ off as much as possible,
+        * but this means that requests will finish and never
+        * be retired once the system goes idle. Set a timer to
+        * fire periodically while the ring is running. When it
+        * fires, go retire requests.
+        */
+       struct delayed_work retire_work;
+};
+
+
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_HANDLE 0

@@ -1778,8 +1830,8 @@ struct drm_i915_private {
        struct pci_dev *bridge_dev;
        struct i915_gem_context *kernel_context;
        struct intel_engine_cs engine[I915_NUM_ENGINES];
+       struct i915_timeline kernel_timeline;
        struct i915_vma *semaphore_vma;
-       uint32_t next_seqno;

        struct drm_dma_handle *status_page_dmah;
        struct resource mch_res;
@@ -2030,33 +2082,9 @@ struct drm_i915_private {
                int (*init_engines)(struct drm_device *dev);
                void (*cleanup_engine)(struct intel_engine_cs *engine);

-               /**
-                * Is the GPU currently considered idle, or busy executing
-                * userspace requests? Whilst idle, we allow runtime power
-                * management to power down the hardware and display clocks.
-                * In order to reduce the effect on performance, there
-                * is a slight delay before we do so.
-                */
-               unsigned active_engines;
-               bool awake;
+               struct list_head timelines;

-               /**
-                * We leave the user IRQ off as much as possible,
-                * but this means that requests will finish and never
-                * be retired once the system goes idle. Set a timer to
-                * fire periodically while the ring is running. When it
-                * fires, go retire requests.
-                */
-               struct delayed_work retire_work;
-
-               /**
-                * When we detect an idle GPU, we want to turn on
-                * powersaving features. So once we see that there
-                * are no more requests outstanding and no more
-                * arrive within a small period of time, we fire
-                * off the idle_work.
-                */
-               struct delayed_work idle_work;
+               atomic_t active;
        } gt;

        /* perform PHY state sanity checks? */
@@ -3207,7 +3235,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);

 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine);
+i915_gem_find_active_request(struct i915_timeline_engine *te);

 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fc46ee10e5cd..4b4a8e5833a8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2410,7 +2410,7 @@ static void i915_set_reset_status(struct i915_gem_context *ctx,
 }

 struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine)
+i915_gem_find_active_request(struct i915_timeline_engine *te)
 {
        struct drm_i915_gem_request *request;

@@ -2422,7 +2422,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
         * extra delay for a recent interrupt is pointless. Hence, we do
         * not need an engine->irq_seqno_barrier() before the seqno reads.
         */
-       list_for_each_entry(request, &engine->request_list, link) {
+       list_for_each_entry(request, &te->requests, link) {
                if (i915_gem_request_completed(request))
                        continue;

@@ -2432,35 +2432,37 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
        return NULL;
 }

-static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
+static void i915_gem_reset_timeline_status(struct i915_timeline *timeline)
 {
-       struct drm_i915_gem_request *request;
+       int i;

-       request = i915_gem_find_active_request(engine);
-       if (request == NULL)
-               return;
+       for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+               struct i915_timeline_engine *te = &timeline->engine[i];
+               struct drm_i915_gem_request *request;
+
+               if (list_empty(&te->requests))
+                       continue;
+
+               request = i915_gem_find_active_request(te);
+               if (request) {
+                       i915_set_reset_status(request->ctx,
+                                             i915_gem_request_started(request));
+                       list_for_each_entry_continue(request, &te->requests, link)
+                               i915_set_reset_status(request->ctx, false);
+               }

-       i915_set_reset_status(request->ctx,
-                             i915_gem_request_started(request));
-       list_for_each_entry_continue(request, &engine->request_list, link)
-               i915_set_reset_status(request->ctx, false);
+               request = list_last_entry(&te->requests,
+                                         struct drm_i915_gem_request,
+                                         link);
+               i915_gem_request_retire_upto(request);
+               //intel_engine_init_seqno(te, te->last_submitted_seqno);
+       }
 }

 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 {
-       struct drm_i915_gem_request *request;
        struct intel_ring *ring;

-       request = i915_gem_active_peek(&engine->last_request,
-                                      &engine->i915->dev->struct_mutex);
-
-       /* Mark all pending requests as complete so that any concurrent
-        * (lockless) lookup doesn't try and wait upon the request as we
-        * reset it.
-        */
-       if (request)
-               intel_engine_init_seqno(engine, request->fence.seqno);
-
        /*
         * Clear the execlists queue up before freeing the requests, as those
         * are the ones that keep the context and ringbuffer backing objects
@@ -2478,17 +2480,6 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
                                        NULL);
        }

-       /*
-        * We must free the requests after all the corresponding objects have
-        * been moved off active lists. Which is the same order as the normal
-        * retire_requests function does. This is important if object hold
-        * implicit references on things like e.g. ppgtt address spaces through
-        * the request.
-        */
-       if (request)
-               i915_gem_request_retire_upto(request);
-       GEM_BUG_ON(intel_engine_is_active(engine));
-
        /* Having flushed all requests from all queues, we know that all
         * ringbuffers must now be empty. However, since we do not reclaim
         * all space when retiring the request (to prevent HEADs colliding
@@ -2506,14 +2497,15 @@ void i915_gem_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
+       struct i915_timeline *timeline;

        /*
         * Before we free the objects from the requests, we need to inspect
         * them for finding the guilty party. As the requests only borrow
         * their reference to the objects, the inspection must be done first.
         */
-       for_each_engine(engine, dev_priv)
-               i915_gem_reset_engine_status(engine);
+       list_for_each_entry(timeline, &dev_priv->gt.timelines, link)
+               i915_gem_reset_timeline_status(timeline);

        for_each_engine(engine, dev_priv)
                i915_gem_reset_engine_cleanup(engine);
@@ -2523,57 +2515,6 @@ void i915_gem_reset(struct drm_device *dev)
        i915_gem_restore_fences(dev);
 }

-static void
-i915_gem_retire_work_handler(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), gt.retire_work.work);
-       struct drm_device *dev = dev_priv->dev;
-
-       /* Come back later if the device is busy... */
-       if (mutex_trylock(&dev->struct_mutex)) {
-               i915_gem_retire_requests(dev_priv);
-               mutex_unlock(&dev->struct_mutex);
-       }
-
-       /* Keep the retire handler running until we are finally idle.
-        * We do not need to do this test under locking as in the worst-case
-        * we queue the retire worker once too often.
-        */
-       if (READ_ONCE(dev_priv->gt.awake))
-               queue_delayed_work(dev_priv->wq,
-                                  &dev_priv->gt.retire_work,
-                                  round_jiffies_up_relative(HZ));
-}
-
-static void
-i915_gem_idle_work_handler(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), gt.idle_work.work);
-       struct drm_device *dev = dev_priv->dev;
-       struct intel_engine_cs *engine;
-
-       if (!READ_ONCE(dev_priv->gt.awake))
-               return;
-
-       mutex_lock(&dev->struct_mutex);
-       if (dev_priv->gt.active_engines)
-               goto out;
-
-       for_each_engine(engine, dev_priv)
-               i915_gem_batch_pool_fini(&engine->batch_pool);
-
-       GEM_BUG_ON(!dev_priv->gt.awake);
-       dev_priv->gt.awake = false;
-
-       if (INTEL_INFO(dev_priv)->gen >= 6)
-               gen6_rps_idle(dev_priv);
-       intel_runtime_pm_put(dev_priv);
-out:
-       mutex_unlock(&dev->struct_mutex);
-}
-
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem);
@@ -4128,22 +4069,10 @@ i915_gem_suspend(struct drm_device *dev)
        if (ret)
                return ret;

-       cancel_delayed_work_sync(&dev_priv->gt.retire_work);
-       cancel_delayed_work_sync(&dev_priv->gt.idle_work);
+       //cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+       //flush_delayed_work(&dev_priv->gt.idle_work);
        flush_work(&dev_priv->mm.free_work);

-       mutex_lock(&dev_priv->dev->struct_mutex);
-
-       /* Assert that we sucessfully flushed all the work and
-        * reset the GPU back to its idle, low power state.
-        */
-       if (dev_priv->gt.awake) {
-               if (INTEL_INFO(dev_priv)->gen >= 6)
-                       gen6_rps_idle(dev_priv);
-               intel_runtime_pm_put(dev_priv);
-               dev_priv->gt.awake = false;
-       }
-
        i915_gem_context_lost(dev_priv);
        mutex_unlock(&dev_priv->dev->struct_mutex);

@@ -4383,13 +4312,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
                dev_priv->gt.cleanup_engine(engine);
 }

-static void
-init_engine_lists(struct intel_engine_cs *engine)
-{
-       /* Early initialisation so that core GEM works during engine setup */
-       INIT_LIST_HEAD(&engine->request_list);
-}
-
 void
 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 {
@@ -4425,7 +4347,6 @@ void
 i915_gem_load_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;

        dev_priv->objects =
                kmem_cache_create("i915_gem_object",
@@ -4451,12 +4372,7 @@ i915_gem_load_init(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-       for (i = 0; i < I915_NUM_ENGINES; i++)
-               init_engine_lists(&dev_priv->engine[i]);
-       INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
-                         i915_gem_retire_work_handler);
-       INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
-                         i915_gem_idle_work_handler);
+       INIT_LIST_HEAD(&dev_priv->gt.timelines);
        init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3eff9c81c8f3..4fdf995b9808 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -60,6 +60,7 @@ struct i915_execbuffer {
        struct drm_i915_gem_exec_object2 *exec;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
+       struct i915_timeline_engine *timeline;
        struct i915_address_space *vm;
        struct i915_vma *batch_vma;
        struct drm_i915_gem_request *request;
@@ -1560,7 +1561,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
        struct i915_vma *vma;
        int ret;

-       shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
+       shadow_batch_obj = i915_gem_batch_pool_get(&eb->timeline->batch_pool,
                                                   PAGE_ALIGN(eb->args->batch_len));
        if (IS_ERR(shadow_batch_obj))
                return ERR_CAST(shadow_batch_obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 43b5d4fa480a..5e6b9116e098 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -26,6 +26,125 @@

 #include "i915_drv.h"

+static void i915_gem_request_retire(struct drm_i915_gem_request *request);
+
+static bool
+i915_timeline_engine_retire(struct i915_timeline_engine *te)
+{
+       struct drm_i915_gem_request *request, *next;
+
+       list_for_each_entry_safe(request, next, &te->requests, link) {
+               if (!i915_gem_request_completed(request))
+                       return false;
+
+               i915_gem_request_retire(request);
+       }
+       return true;
+}
+
+static bool i915_timeline_retire_requests(struct i915_timeline *tl)
+{
+       int i;
+
+       if (READ_ONCE(tl->active_engines) == 0)
+               return false;
+
+       for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+               if (i915_timeline_engine_retire(&tl->engine[i]))
+                       tl->active_engines &= ~(1 << i);
+       }
+
+       if (tl->active_engines == 0 &&
+           atomic_dec_and_test(&tl->i915->gt.active)) {
+               if (INTEL_GEN(tl->i915) >= 6)
+                       gen6_rps_idle(tl->i915);
+               intel_runtime_pm_put(tl->i915);
+       }
+
+       return true;
+}
+
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+{
+       struct i915_timeline *timeline;
+
+       lockdep_assert_held(&dev_priv->dev->struct_mutex);
+       list_for_each_entry(timeline, &dev_priv->gt.timelines, link)
+               i915_timeline_retire_requests(timeline);
+}
+
+static void
+i915_timeline_retire_work(struct work_struct *work)
+{
+       struct i915_timeline *tl =
+               container_of(work, typeof(*tl), retire_work.work);
+
+       /* Keep the retire handler running until we are finally idle.
+        * We do not need to do this test under locking as in the worst-case
+        * we queue the retire worker once too often.
+        */
+       if (i915_timeline_retire_requests(tl))
+               queue_delayed_work(tl->i915->wq, &tl->retire_work,
+                                  round_jiffies_up_relative(HZ));
+}
+
+static int i915_timeline_init_seqno(struct i915_timeline *tl, u32 seqno)
+{
+       struct intel_engine_cs *engine;
+       int ret;
+
+       if (tl->id)
+               return 0;
+
+       /* Carefully retire all requests without writing to the rings */
+       for_each_engine(engine, tl->i915) {
+               ret = intel_engine_idle(engine);
+               if (ret)
+                       return ret;
+       }
+       i915_timeline_retire_requests(tl);
+
+       /* Finally reset hw state */
+       for_each_engine(engine, tl->i915)
+               intel_engine_init_seqno(engine, seqno);
+
+       return 0;
+}
+
+static int
+i915_timeline_get_seqno(struct i915_timeline *tl, u32 *seqno)
+{
+       /* reserve 0 for non-seqno */
+       if (unlikely(tl->next_seqno == 0)) {
+               int ret = i915_timeline_init_seqno(tl, 0);
+               if (ret)
+                       return ret;
+
+               tl->next_seqno = 2;
+       }
+
+       /* Each request uses a start / stop sequence */
+       GEM_BUG_ON(tl->next_seqno & 1);
+       *seqno = tl->next_seqno;
+       tl->next_seqno += 2;
+       return 0;
+}
+
+void i915_timeline_init(struct drm_i915_private *i915,
+                       struct i915_timeline *tl)
+{
+       int i;
+
+       tl->i915 = i915;
+       list_add(&tl->link, &i915->gt.timelines);
+       tl->next_seqno = 2;
+
+       for (i = 0; i < ARRAY_SIZE(tl->engine); i++)
+               INIT_LIST_HEAD(&tl->engine[i].requests);
+
+       INIT_DELAYED_WORK(&tl->retire_work, i915_timeline_retire_work);
+}
+
 static inline struct drm_i915_gem_request *
 to_i915_request(struct fence *fence)
 {
@@ -183,16 +302,14 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)

 void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = req->engine;
+       struct i915_timeline_engine *te = req->timeline;
        struct drm_i915_gem_request *tmp;

        lockdep_assert_held(&req->i915->dev->struct_mutex);
        GEM_BUG_ON(list_empty(&req->link));

        do {
-               tmp = list_first_entry(&engine->request_list,
-                                      typeof(*tmp), link);
-
+               tmp = list_first_entry(&te->requests, typeof(*tmp), link);
                i915_gem_request_retire(tmp);
        } while (tmp != req);
 }
@@ -214,71 +331,6 @@ static int i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
        return 0;
 }

-static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
-{
-       struct intel_engine_cs *engine;
-       int ret;
-
-       /* Carefully retire all requests without writing to the rings */
-       for_each_engine(engine, dev_priv) {
-               ret = intel_engine_idle(engine);
-               if (ret)
-                       return ret;
-       }
-       i915_gem_retire_requests(dev_priv);
-
-       /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-       if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
-               while (intel_kick_waiters(dev_priv) ||
-                      intel_kick_signalers(dev_priv))
-                       yield();
-       }
-
-       /* Finally reset hw state */
-       for_each_engine(engine, dev_priv)
-               intel_engine_init_seqno(engine, seqno);
-
-       return 0;
-}
-
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       seqno = (seqno + 1) & ~1;
-       if (seqno == 0)
-               return -EINVAL;
-
-       /* HWS page needs to be set less than what we
-        * will inject to ring
-        */
-       ret = i915_gem_init_seqno(dev_priv, seqno - 2);
-       if (ret)
-               return ret;
-
-       dev_priv->next_seqno = seqno;
-       return 0;
-}
-
-static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
-{
-       /* reserve 0 for non-seqno */
-       if (unlikely(dev_priv->next_seqno == 0)) {
-               int ret = i915_gem_init_seqno(dev_priv, 0);
-               if (ret)
-                       return ret;
-
-               dev_priv->next_seqno = 2;
-       }
-
-       /* Each request uses a start / stop sequence */
-       GEM_BUG_ON(dev_priv->next_seqno & 1);
-       *seqno = dev_priv->next_seqno;
-       dev_priv->next_seqno += 2;
-       return 0;
-}
-
 static void __kfence_call submit_notify(struct kfence *fence)
 {
        struct drm_i915_gem_request *request =
@@ -311,6 +363,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        struct drm_i915_private *dev_priv = engine->i915;
        unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req, *prev;
+       struct i915_timeline *tl = &dev_priv->kernel_timeline;
+       struct i915_timeline_engine *te = &tl->engine[engine->id];
        u32 seqno;
        int ret;

@@ -322,9 +376,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        if (ret)
                return ERR_PTR(ret);

-       if (!list_empty(&engine->request_list)) {
-               req = list_first_entry(&engine->request_list,
-                                      typeof(*req), link);
+       if (!list_empty(&te->requests)) {
+               req = list_first_entry(&te->requests, typeof(*req), link);
                if (i915_gem_request_completed(req))
                        i915_gem_request_retire(req);
        }
@@ -333,7 +386,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

-       ret = i915_gem_get_seqno(dev_priv, &seqno);
+       ret = i915_timeline_get_seqno(tl, &seqno);
        if (ret)
                goto err;

@@ -349,6 +402,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        req->i915 = dev_priv;
        req->file_priv = NULL;
        req->engine = engine;
+       req->timeline = te;
        req->signaling.wait.tsk = NULL;
        req->reset_counter = reset_counter;
        req->ctx = ctx;
@@ -398,24 +452,28 @@ err:
        return ERR_PTR(ret);
 }

-static void i915_gem_mark_busy(struct drm_i915_private *dev_priv,
-                              const struct intel_engine_cs *engine)
+static void i915_gem_mark_busy(struct drm_i915_gem_request *req)
 {
-       dev_priv->gt.active_engines |= intel_engine_flag(engine);
-       if (dev_priv->gt.awake)
-               return;
+       struct i915_timeline *timeline = req->timeline->timeline;

-       intel_runtime_pm_get_noresume(dev_priv);
-       dev_priv->gt.awake = true;
+       if (timeline->active_engines & req->timeline->mask)
+               return;

-       intel_enable_gt_powersave(dev_priv);
-       i915_update_gfx_val(dev_priv);
-       if (INTEL_INFO(dev_priv)->gen >= 6)
-               gen6_rps_busy(dev_priv);
+       if (timeline->active_engines == 0) {
+               queue_delayed_work(req->i915->wq,
+                                  &timeline->retire_work,
+                                  round_jiffies_up_relative(HZ));
+
+               if (atomic_inc_return(&req->i915->gt.active) == 1) {
+                       intel_runtime_pm_get_noresume(req->i915);
+                       intel_enable_gt_powersave(req->i915);
+                       i915_update_gfx_val(req->i915);
+                       if (INTEL_GEN(req->i915) >= 6)
+                               gen6_rps_busy(req->i915);
+               }
+       }

-       queue_delayed_work(dev_priv->wq,
-                          &dev_priv->gt.retire_work,
-                          round_jiffies_up_relative(HZ));
+       timeline->active_engines |= req->timeline->mask;
 }

 static void i915_gem_request_cancel(struct drm_i915_gem_request *request)
@@ -485,7 +543,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
         */
        request->emitted_jiffies = jiffies;
        i915_gem_active_set(&engine->last_request, request);
-       list_add_tail(&request->link, &engine->request_list);
+       list_add_tail(&request->link, &request->timeline->requests);

        /* Record the position of the start of the breadcrumb so that
         * should we detect the updated seqno part-way through the
@@ -505,7 +563,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
                  "for adding the request (%d bytes)\n",
                  reserved_tail, ret);

-       i915_gem_mark_busy(request->i915, engine);
+       i915_gem_mark_busy(request);

        kfence_signal(&request->submit);
        kfence_put(&request->submit);
@@ -638,7 +696,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
         * forcing the clocks too high for the whole system, we only allow
         * each client to waitboost once in a busy period.
         */
-       if (!IS_ERR(rps) && INTEL_INFO(req->i915)->gen >= 6)
+       if (!IS_ERR(rps) && INTEL_GEN(req->i915) >= 6)
                gen6_rps_boost(req->i915, rps, req->emitted_jiffies);

        /* Optimistic spin for the next ~jiffie before touching IRQs */
@@ -731,7 +789,6 @@ complete:

 static int wait_for_space(struct intel_ring *ring, int bytes)
 {
-       struct intel_engine_cs *engine = ring->engine;
        struct drm_i915_gem_request *target;
        int ret;

@@ -739,17 +796,9 @@ static int wait_for_space(struct intel_ring *ring, int bytes)
        if (ring->space >= bytes)
                return 0;

-       list_for_each_entry(target, &engine->request_list, link) {
+       list_for_each_entry(target, &ring->timeline->requests, link) {
                unsigned space;

-               /*
-                * The request queue is per-engine, so can contain requests
-                * from multiple ring. Here, we must ignore any that
-                * aren't from the ring we're considering.
-                */
-               if (target->ring != ring)
-                       continue;
-
                /* Would completion of this request free enough space? */
                space = __intel_ring_space(target->postfix,
                                           ring->tail, ring->size);
@@ -757,7 +806,7 @@ static int wait_for_space(struct intel_ring *ring, int bytes)
                        break;
        }

-       if (WARN_ON(&target->link == &engine->request_list))
+       if (WARN_ON(&target->link == &ring->timeline->requests))
                return -ENOSPC;

        ret = __i915_wait_request(target, true, NULL, NULL);
@@ -862,39 +911,3 @@ int i915_gem_request_align(struct drm_i915_gem_request *req)
        memset(out, 0, bytes);
        return 0;
 }
-
-static bool i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
-{
-       struct drm_i915_gem_request *request, *next;
-
-       list_for_each_entry_safe(request, next, &engine->request_list, link) {
-               if (!i915_gem_request_completed(request))
-                       return false;
-
-               i915_gem_request_retire(request);
-       }
-
-       return true;
-}
-
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
-{
-       struct intel_engine_cs *engine;
-
-       lockdep_assert_held(&dev_priv->dev->struct_mutex);
-
-       if (dev_priv->gt.active_engines == 0)
-               return;
-
-       GEM_BUG_ON(!dev_priv->gt.awake);
-
-       for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines) {
-               if (i915_gem_retire_requests_ring(engine))
-                       dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
-       }
-
-       if (dev_priv->gt.active_engines == 0)
-               queue_delayed_work(dev_priv->wq,
-                                  &dev_priv->gt.idle_work,
-                                  msecs_to_jiffies(100));
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 5bf350183883..4933a9c7886b 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -40,6 +40,9 @@ struct intel_signal_node {
        struct intel_wait wait;
 };

+struct i915_timeline;
+struct i915_timeline_engine;
+
 /**
  * Request queue structure.
  *
@@ -75,6 +78,8 @@ struct drm_i915_gem_request {
        struct intel_ring *ring;
        struct intel_signal_node signaling;

+       struct i915_timeline_engine *timeline;
+
        unsigned reset_counter;
        struct kfence submit;

diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 05a5fb328c13..b7dfcc74223e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -978,61 +978,6 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
        }
 }

-static void engine_record_requests(struct intel_engine_cs *engine,
-                                  struct drm_i915_gem_request *first,
-                                  struct drm_i915_error_ring *ering)
-{
-       struct drm_i915_gem_request *request;
-       int count;
-
-       count = 0;
-       request = first;
-       list_for_each_entry_from(request, &engine->request_list, link)
-               count++;
-
-       ering->requests = kcalloc(count,
-                                 sizeof(*ering->requests),
-                                 GFP_ATOMIC);
-       if (ering->requests == NULL)
-               return;
-       ering->num_requests = count;
-
-       count = 0;
-       request = first;
-       list_for_each_entry_from(request, &engine->request_list, link) {
-               struct drm_i915_error_request *erq;
-
-               if (count >= ering->num_requests) {
-                       /*
-                        * If the ring request list was changed in
-                        * between the point where the error request
-                        * list was created and dimensioned and this
-                        * point then just exit early to avoid crashes.
-                        *
-                        * We don't need to communicate that the
-                        * request list changed state during error
-                        * state capture and that the error state is
-                        * slightly incorrect as a consequence since we
-                        * are typically only interested in the request
-                        * list state at the point of error state
-                        * capture, not in any changes happening during
-                        * the capture.
-                        */
-                       break;
-               }
-
-               erq = &ering->requests[count++];
-               erq->seqno = request->fence.seqno;
-               erq->jiffies = request->emitted_jiffies;
-               erq->head = request->head;
-               erq->tail = request->tail;
-
-               rcu_read_lock();
-               erq->pid = request->ctx ? pid_nr(request->ctx->pid) : 0;
-               rcu_read_unlock();
-       }
-}
-
 static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                                  struct drm_i915_error_state *error)
 {
@@ -1054,7 +999,8 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                i915_record_ring_state(dev_priv, error, engine, ering);
                engine_record_waiters(engine, ering);

-               request = i915_gem_find_active_request(engine);
+               //request = i915_gem_find_active_request(engine);
+               request = NULL;
                if (request) {
                        struct i915_address_space *vm;
                        struct intel_ring *ring;
@@ -1102,8 +1048,6 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                        ering->cpu_ring_tail = ring->tail;
                        ering->ringbuffer =
                                i915_error_object_create(dev_priv, ring->vma);
-
-                       engine_record_requests(engine, request, ering);
                }

                ering->hws_page =
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 54895b16c764..0bfa0654b0a4 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1563,7 +1563,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)

        i915_cmd_parser_fini_ring(engine);
        i915_gem_render_state_fini(engine);
-       i915_gem_batch_pool_fini(&engine->batch_pool);
+       //i915_gem_batch_pool_fini(&engine->batch_pool);

        intel_engine_fini_breadcrumbs(engine);

@@ -1708,7 +1708,7 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
        logical_ring_default_irqs(engine, info->irq_shift);

        intel_engine_init_requests(engine);
-       i915_gem_batch_pool_init(engine, &engine->batch_pool);
+       //i915_gem_batch_pool_init(engine, &engine->batch_pool);
        i915_cmd_parser_init_ring(engine);

        return engine;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6f4e298a1aa1..17ff67ba8e45 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4883,8 +4883,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
        /* This is intentionally racy! We peek at the state here, then
         * validate inside the RPS worker.
         */
-       if (!(dev_priv->gt.awake &&
-             dev_priv->rps.enabled &&
+       if (!(dev_priv->rps.enabled &&
              dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
                return;

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8fcbd59d4f36..329c7c9b971d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2039,7 +2039,7 @@ static int intel_init_engine(struct drm_device *dev,
        engine->fence_context = fence_context_alloc(1);
        INIT_LIST_HEAD(&engine->execlist_queue);
        INIT_LIST_HEAD(&engine->buffers);
-       i915_gem_batch_pool_init(engine, &engine->batch_pool);
+       //i915_gem_batch_pool_init(engine, &engine->batch_pool);
        memset(engine->semaphore.sync_seqno, 0,
               sizeof(engine->semaphore.sync_seqno));

@@ -2138,7 +2138,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)

        i915_gem_render_state_fini(engine);
        i915_cmd_parser_fini_ring(engine);
-       i915_gem_batch_pool_fini(&engine->batch_pool);
        intel_engine_fini_breadcrumbs(engine);

        intel_ring_context_unpin(dev_priv->kernel_context, engine);
@@ -2155,7 +2154,6 @@ intel_engine_retire(struct i915_gem_active *active,
 void intel_engine_init_requests(struct intel_engine_cs *engine)
 {
        init_request_active(&engine->last_request, intel_engine_retire);
-       INIT_LIST_HEAD(&engine->request_list);
 }

 int intel_engine_idle(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bdb4f15024d1..29e156064e21 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -77,6 +77,7 @@ struct intel_ring {
        void *vaddr;

        struct intel_engine_cs *engine;
+       struct i915_timeline_engine *timeline;
        struct list_head link;

        u32 head;
@@ -175,13 +176,6 @@ struct intel_engine_cs {
                bool rpm_wakelock;
        } breadcrumbs;

-       /*
-        * A pool of objects to use as shadow copies of client batch buffers
-        * when the command parser is enabled. Prevents the client from
-        * modifying the batch contents after software parsing.
-        */
-       struct i915_gem_batch_pool batch_pool;
-
        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;

@@ -287,12 +281,6 @@ struct intel_engine_cs {
        bool preempt_wa;
        u32 ctx_desc_template;

-       /**
-        * List of breadcrumbs associated with GPU requests currently
-        * outstanding.
-        */
-       struct list_head request_list;
-
        /* An RCU guarded pointer to the last request. No reference is
         * held to the request, users must carefully acquire a reference to
         * the request using i915_gem_active_get_request_rcu(), or hold the
-- 
2.8.1
