Switch to tracking activity via i915_active on individual nodes, only
keeping a list of retired objects in the cache, and reaping the cache
when the engine itself idles.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.a...@intel.com>
---
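For reviewers, a rough sketch of the calling convention the new pool API expects
(this mirrors what __reloc_gpu_alloc() and eb_parse() do below; error unwinding
and request construction are elided, and 'engine', 'size' and 'rq' stand in for
the caller's context):

    struct intel_engine_pool_node *pool;
    int err;

    /* Grab a suitably sized buffer from the engine's pool (or allocate one). */
    pool = intel_engine_pool_get(&engine->pool, size);
    if (IS_ERR(pool))
            return PTR_ERR(pool);

    /* ... build a request (rq) that reads from pool->obj ... */

    /* Keep the node busy until rq retires; it then returns to the cache. */
    err = intel_engine_pool_mark_active(pool, rq);

    /* Drop our i915_active acquire once the request is on its way. */
    intel_engine_pool_put(pool);

The cache itself is only reaped from intel_engine_pool_park() when the engine
idles, so nodes never outlive the engine wakeref.
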
 drivers/gpu/drm/i915/Makefile                 |   2 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  58 +++---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |   1 -
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   1 -
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |   4 +-
 drivers/gpu/drm/i915/gt/intel_engine.h        |   1 -
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  11 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |   2 +
 drivers/gpu/drm/i915/gt/intel_engine_pool.c   | 166 ++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_engine_pool.h   |  34 ++++
 .../gpu/drm/i915/gt/intel_engine_pool_types.h |  29 +++
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |   6 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |   3 +
 drivers/gpu/drm/i915/i915_debugfs.c           |  68 -------
 drivers/gpu/drm/i915/i915_gem_batch_pool.c    | 132 --------------
 drivers/gpu/drm/i915/i915_gem_batch_pool.h    |  26 ---
 16 files changed, 279 insertions(+), 265 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool.h
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
 delete mode 100644 drivers/gpu/drm/i915/i915_gem_batch_pool.c
 delete mode 100644 drivers/gpu/drm/i915/i915_gem_batch_pool.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3bd8f0349a8a..0bcb2f5766c9 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -72,6 +72,7 @@ obj-y += gt/
 gt-y += \
        gt/intel_breadcrumbs.o \
        gt/intel_context.o \
+       gt/intel_engine_pool.o \
        gt/intel_engine_cs.o \
        gt/intel_engine_pm.o \
        gt/intel_gt.o \
@@ -118,7 +119,6 @@ i915-y += \
          $(gem-y) \
          i915_active.o \
          i915_cmd_parser.o \
-         i915_gem_batch_pool.o \
          i915_gem_evict.o \
          i915_gem_fence_reg.o \
          i915_gem_gtt.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 80c9c57a302f..0ea2d49bc8b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -16,6 +16,7 @@
 
 #include "gem/i915_gem_ioctls.h"
 #include "gt/intel_context.h"
+#include "gt/intel_engine_pool.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 
@@ -1145,25 +1146,26 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
                             unsigned int len)
 {
        struct reloc_cache *cache = &eb->reloc_cache;
-       struct drm_i915_gem_object *obj;
+       struct intel_engine_pool_node *pool;
        struct i915_request *rq;
        struct i915_vma *batch;
        u32 *cmd;
        int err;
 
-       obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
-       if (IS_ERR(obj))
-               return PTR_ERR(obj);
+       pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
+       if (IS_ERR(pool))
+               return PTR_ERR(pool);
 
-       cmd = i915_gem_object_pin_map(obj,
+       cmd = i915_gem_object_pin_map(pool->obj,
                                      cache->has_llc ?
                                      I915_MAP_FORCE_WB :
                                      I915_MAP_FORCE_WC);
-       i915_gem_object_unpin_pages(obj);
-       if (IS_ERR(cmd))
-               return PTR_ERR(cmd);
+       if (IS_ERR(cmd)) {
+               err = PTR_ERR(cmd);
+               goto out_pool;
+       }
 
-       batch = i915_vma_instance(obj, vma->vm, NULL);
+       batch = i915_vma_instance(pool->obj, vma->vm, NULL);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto err_unmap;
@@ -1179,6 +1181,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
                goto err_unpin;
        }
 
+       err = intel_engine_pool_mark_active(pool, rq);
+       if (err)
+               goto err_request;
+
        err = reloc_move_to_gpu(rq, vma);
        if (err)
                goto err_request;
@@ -1204,7 +1210,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
        cache->rq_size = 0;
 
        /* Return with batch mapping (cmd) still pinned */
-       return 0;
+       goto out_pool;
 
 skip_request:
        i915_request_skip(rq, err);
@@ -1213,7 +1219,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 err_unpin:
        i915_vma_unpin(batch);
 err_unmap:
-       i915_gem_object_unpin_map(obj);
+       i915_gem_object_unpin_map(pool->obj);
+out_pool:
+       intel_engine_pool_put(pool);
        return err;
 }
 
@@ -1957,18 +1965,17 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 
 static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 {
-       struct drm_i915_gem_object *shadow_batch_obj;
+       struct intel_engine_pool_node *pool;
        struct i915_vma *vma;
        int err;
 
-       shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
-                                                  PAGE_ALIGN(eb->batch_len));
-       if (IS_ERR(shadow_batch_obj))
-               return ERR_CAST(shadow_batch_obj);
+       pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
+       if (IS_ERR(pool))
+               return ERR_CAST(pool);
 
        err = intel_engine_cmd_parser(eb->engine,
                                      eb->batch->obj,
-                                     shadow_batch_obj,
+                                     pool->obj,
                                      eb->batch_start_offset,
                                      eb->batch_len,
                                      is_master);
@@ -1977,12 +1984,12 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
                        vma = NULL;
                else
                        vma = ERR_PTR(err);
-               goto out;
+               goto err;
        }
 
-       vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+       vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
        if (IS_ERR(vma))
-               goto out;
+               goto err;
 
        eb->vma[eb->buffer_count] = i915_vma_get(vma);
        eb->flags[eb->buffer_count] =
@@ -1990,8 +1997,11 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
        vma->exec_flags = &eb->flags[eb->buffer_count];
        eb->buffer_count++;
 
-out:
-       i915_gem_object_unpin_pages(shadow_batch_obj);
+       vma->private = pool;
+       return vma;
+
+err:
+       intel_engine_pool_put(pool);
        return vma;
 }
 
@@ -2615,6 +2625,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         * to explicitly hold another reference here.
         */
        eb.request->batch = eb.batch;
+       if (eb.batch->private)
+               intel_engine_pool_mark_active(eb.batch->private, eb.request);
 
        trace_i915_request_queue(eb.request, eb.batch_flags);
        err = eb_submit(&eb);
@@ -2639,6 +2651,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_batch_unpin:
        if (eb.batch_flags & I915_DISPATCH_SECURE)
                i915_vma_unpin(eb.batch);
+       if (eb.batch->private)
+               intel_engine_pool_put(eb.batch->private);
 err_vma:
        if (eb.exec)
                eb_release_vmas(&eb);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 43194fbcbc2e..3260377ac021 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -64,7 +64,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        INIT_LIST_HEAD(&obj->vma.list);
 
        INIT_LIST_HEAD(&obj->lut_list);
-       INIT_LIST_HEAD(&obj->batch_pool_link);
 
        init_rcu_head(&obj->rcu);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 34b51fad02de..d474c6ac4100 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -114,7 +114,6 @@ struct drm_i915_gem_object {
        unsigned int userfault_count;
        struct list_head userfault_link;
 
-       struct list_head batch_pool_link;
        I915_SELFTEST_DECLARE(struct list_head st_link);
 
        /*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 93d188526457..bf085b0cb7c6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -33,10 +33,8 @@ static void i915_gem_park(struct drm_i915_private *i915)
 
        lockdep_assert_held(&i915->drm.struct_mutex);
 
-       for_each_engine(engine, i915, id) {
+       for_each_engine(engine, i915, id)
                call_idle_barriers(engine); /* cleanup after wedging */
-               i915_gem_batch_pool_fini(&engine->batch_pool);
-       }
 
        i915_vma_parked(i915);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 557b08b13feb..6375d6111b15 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -9,7 +9,6 @@
 #include <linux/random.h>
 #include <linux/seqlock.h>
 
-#include "i915_gem_batch_pool.h"
 #include "i915_pmu.h"
 #include "i915_reg.h"
 #include "i915_request.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index b27fc555fe09..49439cf2fd1f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -32,6 +32,7 @@
 
 #include "intel_engine.h"
 #include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
 #include "intel_context.h"
 #include "intel_lrc.h"
 #include "intel_reset.h"
@@ -498,11 +499,6 @@ int intel_engines_init(struct drm_i915_private *i915)
        return err;
 }
 
-static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
-{
-       i915_gem_batch_pool_init(&engine->batch_pool, engine);
-}
-
 void intel_engine_init_execlists(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -628,10 +624,11 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
        intel_engine_init_breadcrumbs(engine);
        intel_engine_init_execlists(engine);
        intel_engine_init_hangcheck(engine);
-       intel_engine_init_batch_pool(engine);
        intel_engine_init_cmd_parser(engine);
        intel_engine_init__pm(engine);
 
+       intel_engine_pool_init(&engine->pool);
+
        /* Use the whole device by default */
        engine->sseu =
                intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
@@ -880,9 +877,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 
        cleanup_status_page(engine);
 
+       intel_engine_pool_fini(&engine->pool);
        intel_engine_fini_breadcrumbs(engine);
        intel_engine_cleanup_cmd_parser(engine);
-       i915_gem_batch_pool_fini(&engine->batch_pool);
 
        if (engine->default_state)
                i915_gem_object_put(engine->default_state);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 9751a02d86bc..fe9f9eaffe88 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 
 #include "intel_engine.h"
+#include "intel_engine_pool.h"
 #include "intel_engine_pm.h"
 #include "intel_gt_pm.h"
 
@@ -116,6 +117,7 @@ static int __engine_park(struct intel_wakeref *wf)
        GEM_TRACE("%s\n", engine->name);
 
        intel_engine_disarm_breadcrumbs(engine);
+       intel_engine_pool_park(&engine->pool);
 
        /* Must be reset upon idling, or we may miss the busy wakeup. */
        GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
new file mode 100644
index 000000000000..32688ca379ef
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -0,0 +1,166 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
+
+static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
+{
+       return container_of(pool, struct intel_engine_cs, pool);
+}
+
+static struct list_head *
+bucket_for_size(struct intel_engine_pool *pool, size_t sz)
+{
+       int n;
+
+       /*
+        * Compute a power-of-two bucket, but throw everything greater than
+        * 16KiB into the same bucket: i.e. the buckets hold objects of
+        * (1 page, 2 pages, 4 pages, 8+ pages).
+        */
+       n = fls(sz >> PAGE_SHIFT) - 1;
+       if (n >= ARRAY_SIZE(pool->cache_list))
+               n = ARRAY_SIZE(pool->cache_list) - 1;
+
+       return &pool->cache_list[n];
+}
+
+static void node_free(struct intel_engine_pool_node *node)
+{
+       i915_gem_object_put(node->obj);
+       i915_active_fini(&node->active);
+       kfree(node);
+}
+
+static int pool_active(struct i915_active *ref)
+{
+       struct intel_engine_pool_node *node =
+               container_of(ref, typeof(*node), active);
+       struct reservation_object *resv = node->obj->base.resv;
+
+       if (reservation_object_trylock(resv)) {
+               reservation_object_add_excl_fence(resv, NULL);
+               reservation_object_unlock(resv);
+       }
+
+       return i915_gem_object_pin_pages(node->obj);
+}
+
+static void pool_retire(struct i915_active *ref)
+{
+       struct intel_engine_pool_node *node =
+               container_of(ref, typeof(*node), active);
+       struct intel_engine_pool *pool = node->pool;
+       struct list_head *list = bucket_for_size(pool, node->obj->base.size);
+       unsigned long flags;
+
+       GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+       i915_gem_object_unpin_pages(node->obj);
+
+       spin_lock_irqsave(&pool->lock, flags);
+       list_add(&node->link, list);
+       spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static struct intel_engine_pool_node *
+node_create(struct intel_engine_pool *pool, size_t sz)
+{
+       struct intel_engine_cs *engine = to_engine(pool);
+       struct intel_engine_pool_node *node;
+       struct drm_i915_gem_object *obj;
+
+       node = kmalloc(sizeof(*node),
+                      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+       if (!node)
+               return ERR_PTR(-ENOMEM);
+
+       node->pool = pool;
+       i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
+
+       obj = i915_gem_object_create_internal(engine->i915, sz);
+       if (IS_ERR(obj)) {
+               i915_active_fini(&node->active);
+               kfree(node);
+               return ERR_CAST(obj);
+       }
+
+       node->obj = obj;
+       return node;
+}
+
+struct intel_engine_pool_node *
+intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
+{
+       struct intel_engine_pool_node *node;
+       struct list_head *list;
+       unsigned long flags;
+       int ret;
+
+       GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+       size = PAGE_ALIGN(size);
+       list = bucket_for_size(pool, size);
+
+       spin_lock_irqsave(&pool->lock, flags);
+       list_for_each_entry(node, list, link) {
+               if (node->obj->base.size < size)
+                       continue;
+               list_del(&node->link);
+               break;
+       }
+       spin_unlock_irqrestore(&pool->lock, flags);
+
+       if (&node->link == list) {
+               node = node_create(pool, size);
+               if (IS_ERR(node))
+                       return node;
+       }
+
+       ret = i915_active_acquire(&node->active);
+       if (ret) {
+               node_free(node);
+               return ERR_PTR(ret);
+       }
+
+       return node;
+}
+
+void intel_engine_pool_init(struct intel_engine_pool *pool)
+{
+       int n;
+
+       spin_lock_init(&pool->lock);
+       for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+               INIT_LIST_HEAD(&pool->cache_list[n]);
+}
+
+void intel_engine_pool_park(struct intel_engine_pool *pool)
+{
+       int n;
+
+       for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+               struct list_head *list = &pool->cache_list[n];
+               struct intel_engine_pool_node *node, *nn;
+
+               list_for_each_entry_safe(node, nn, list, link)
+                       node_free(node);
+
+               INIT_LIST_HEAD(list);
+       }
+}
+
+void intel_engine_pool_fini(struct intel_engine_pool *pool)
+{
+       int n;
+
+       for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+               GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
new file mode 100644
index 000000000000..f7a0a660c1c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_H
+#define INTEL_ENGINE_POOL_H
+
+#include "intel_engine_pool_types.h"
+#include "i915_active.h"
+#include "i915_request.h"
+
+struct intel_engine_pool_node *
+intel_engine_pool_get(struct intel_engine_pool *pool, size_t size);
+
+static inline int
+intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
+                             struct i915_request *rq)
+{
+       return i915_active_ref(&node->active, rq->fence.context, rq);
+}
+
+static inline void
+intel_engine_pool_put(struct intel_engine_pool_node *node)
+{
+       i915_active_release(&node->active);
+}
+
+void intel_engine_pool_init(struct intel_engine_pool *pool);
+void intel_engine_pool_park(struct intel_engine_pool *pool);
+void intel_engine_pool_fini(struct intel_engine_pool *pool);
+
+#endif /* INTEL_ENGINE_POOL_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
new file mode 100644
index 000000000000..e31ee361b76f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_TYPES_H
+#define INTEL_ENGINE_POOL_TYPES_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "i915_active_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_engine_pool {
+       spinlock_t lock;
+       struct list_head cache_list[4];
+};
+
+struct intel_engine_pool_node {
+       struct i915_active active;
+       struct drm_i915_gem_object *obj;
+       struct list_head link;
+       struct intel_engine_pool *pool;
+};
+
+#endif /* INTEL_ENGINE_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 0dde7e04b102..6d2f3e11da1c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -16,12 +16,12 @@
 #include <linux/types.h>
 
 #include "i915_gem.h"
-#include "i915_gem_batch_pool.h"
 #include "i915_pmu.h"
 #include "i915_priolist_types.h"
 #include "i915_selftest.h"
-#include "gt/intel_timeline_types.h"
+#include "intel_engine_pool_types.h"
 #include "intel_sseu.h"
+#include "intel_timeline_types.h"
 #include "intel_wakeref.h"
 #include "intel_workarounds_types.h"
 
@@ -353,7 +353,7 @@ struct intel_engine_cs {
         * when the command parser is enabled. Prevents the client from
         * modifying the batch contents after software parsing.
         */
-       struct i915_gem_batch_pool batch_pool;
+       struct intel_engine_pool pool;
 
        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 5bcb461b8372..b94d57bf2c48 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -27,6 +27,7 @@
 #include "i915_drv.h"
 #include "intel_context.h"
 #include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
 
 #include "mock_engine.h"
 #include "selftests/mock_request.h"
@@ -291,6 +292,8 @@ int mock_engine_init(struct intel_engine_cs *engine)
        intel_engine_init_execlists(engine);
        intel_engine_init__pm(engine);
 
+       intel_engine_pool_init(&engine->pool);
+
        engine->kernel_context =
                i915_gem_context_get_engine(i915->kernel_context, engine->id);
        if (IS_ERR(engine->kernel_context))
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index eeecdad0e3ca..253e86868061 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -295,27 +295,6 @@ static int per_file_stats(int id, void *ptr, void *data)
                           stats.closed); \
 } while (0)
 
-static void print_batch_pool_stats(struct seq_file *m,
-                                  struct drm_i915_private *dev_priv)
-{
-       struct drm_i915_gem_object *obj;
-       struct intel_engine_cs *engine;
-       struct file_stats stats = {};
-       enum intel_engine_id id;
-       int j;
-
-       for_each_engine(engine, dev_priv, id) {
-       for_each_engine(engine, dev_priv, id) {
-               for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-                       list_for_each_entry(obj,
-                                           &engine->batch_pool.cache_list[j],
-                                           batch_pool_link)
-                               per_file_stats(0, obj, &stats);
-               }
-       }
-
-       print_file_stats(m, "[k]batch pool", stats);
-}
-
 static void print_context_stats(struct seq_file *m,
                                struct drm_i915_private *i915)
 {
@@ -373,58 +352,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
        if (ret)
                return ret;
 
-       print_batch_pool_stats(m, i915);
        print_context_stats(m, i915);
        mutex_unlock(&i915->drm.struct_mutex);
 
        return 0;
 }
 
-static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
-{
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct drm_device *dev = &dev_priv->drm;
-       struct drm_i915_gem_object *obj;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       int total = 0;
-       int ret, j;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       for_each_engine(engine, dev_priv, id) {
-               for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-                       int count;
-
-                       count = 0;
-                       list_for_each_entry(obj,
-                                           &engine->batch_pool.cache_list[j],
-                                           batch_pool_link)
-                               count++;
-                       seq_printf(m, "%s cache[%d]: %d objects\n",
-                                  engine->name, j, count);
-
-                       list_for_each_entry(obj,
-                                           &engine->batch_pool.cache_list[j],
-                                           batch_pool_link) {
-                               seq_puts(m, "   ");
-                               describe_obj(m, obj);
-                               seq_putc(m, '\n');
-                       }
-
-                       total += count;
-               }
-       }
-
-       seq_printf(m, "total: %d\n", total);
-
-       mutex_unlock(&dev->struct_mutex);
-
-       return 0;
-}
-
 static void gen8_display_interrupt_info(struct seq_file *m)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4364,7 +4297,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_objects", i915_gem_object_info, 0},
        {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
        {"i915_gem_interrupt", i915_interrupt_info, 0},
-       {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
        {"i915_guc_info", i915_guc_info, 0},
        {"i915_guc_load_status", i915_guc_load_status_info, 0},
        {"i915_guc_log_dump", i915_guc_log_dump, 0},
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
deleted file mode 100644
index b17f23991253..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#include "i915_gem_batch_pool.h"
-#include "i915_drv.h"
-
-/**
- * DOC: batch pool
- *
- * In order to submit batch buffers as 'secure', the software command parser
- * must ensure that a batch buffer cannot be modified after parsing. It does
- * this by copying the user provided batch buffer contents to a kernel owned
- * buffer from which the hardware will actually execute, and by carefully
- * managing the address space bindings for such buffers.
- *
- * The batch pool framework provides a mechanism for the driver to manage a
- * set of scratch buffers to use for this purpose. The framework can be
- * extended to support other uses cases should they arise.
- */
-
-/**
- * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @pool: the batch buffer pool
- * @engine: the associated request submission engine
- */
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
-                             struct intel_engine_cs *engine)
-{
-       int n;
-
-       pool->engine = engine;
-
-       for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
-               INIT_LIST_HEAD(&pool->cache_list[n]);
-}
-
-/**
- * i915_gem_batch_pool_fini() - clean up a batch buffer pool
- * @pool: the pool to clean up
- *
- * Note: Callers must hold the struct_mutex.
- */
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
-{
-       int n;
-
-       lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
-       for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
-               struct drm_i915_gem_object *obj, *next;
-
-               list_for_each_entry_safe(obj, next,
-                                        &pool->cache_list[n],
-                                        batch_pool_link)
-                       i915_gem_object_put(obj);
-
-               INIT_LIST_HEAD(&pool->cache_list[n]);
-       }
-}
-
-/**
- * i915_gem_batch_pool_get() - allocate a buffer from the pool
- * @pool: the batch buffer pool
- * @size: the minimum desired size of the returned buffer
- *
- * Returns an inactive buffer from @pool with at least @size bytes,
- * with the pages pinned. The caller must i915_gem_object_unpin_pages()
- * on the returned object.
- *
- * Note: Callers must hold the struct_mutex
- *
- * Return: the buffer object or an error pointer
- */
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
-                       size_t size)
-{
-       struct drm_i915_gem_object *obj;
-       struct list_head *list;
-       int n, ret;
-
-       lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
-       /* Compute a power-of-two bucket, but throw everything greater than
-        * 16KiB into the same bucket: i.e. the the buckets hold objects of
-        * (1 page, 2 pages, 4 pages, 8+ pages).
-        */
-       n = fls(size >> PAGE_SHIFT) - 1;
-       if (n >= ARRAY_SIZE(pool->cache_list))
-               n = ARRAY_SIZE(pool->cache_list) - 1;
-       list = &pool->cache_list[n];
-
-       list_for_each_entry(obj, list, batch_pool_link) {
-               struct reservation_object *resv = obj->base.resv;
-
-               /* The batches are strictly LRU ordered */
-               if (!reservation_object_test_signaled_rcu(resv, true))
-                       break;
-
-               /*
-                * The object is now idle, clear the array of shared
-                * fences before we add a new request. Although, we
-                * remain on the same engine, we may be on a different
-                * timeline and so may continually grow the array,
-                * trapping a reference to all the old fences, rather
-                * than replace the existing fence.
-                */
-               if (rcu_access_pointer(resv->fence)) {
-                       reservation_object_lock(resv, NULL);
-                       reservation_object_add_excl_fence(resv, NULL);
-                       reservation_object_unlock(resv);
-               }
-
-               if (obj->base.size >= size)
-                       goto found;
-       }
-
-       obj = i915_gem_object_create_internal(pool->engine->i915, size);
-       if (IS_ERR(obj))
-               return obj;
-
-found:
-       ret = i915_gem_object_pin_pages(obj);
-       if (ret)
-               return ERR_PTR(ret);
-
-       list_move_tail(&obj->batch_pool_link, list);
-       return obj;
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
deleted file mode 100644
index feeeeeaa54d8..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef I915_GEM_BATCH_POOL_H
-#define I915_GEM_BATCH_POOL_H
-
-#include <linux/types.h>
-
-struct drm_i915_gem_object;
-struct intel_engine_cs;
-
-struct i915_gem_batch_pool {
-       struct intel_engine_cs *engine;
-       struct list_head cache_list[4];
-};
-
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
-                             struct intel_engine_cs *engine);
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
-
-#endif /* I915_GEM_BATCH_POOL_H */
-- 
2.20.1
