Re: [Intel-gfx] [PATCH] drm/i915/selftests: Spin on all engines simultaneously

2019-10-31 Thread Vanshidhar Konda

On Thu, Oct 31, 2019 at 09:23:36PM +, Chris Wilson wrote:

Vanshidhar Konda asked for the simplest test "to verify that the kernel
can submit and hardware can execute batch buffers on all the command
streamers in parallel." We have a number of tests in userspace that
submit load to each engine and verify that it is present, but strictly
we have no selftest to prove that the kernel can _simultaneously_
execute on all known engines. (We have tests to demonstrate that we can
submit to HW in parallel, but we don't insist that they execute in
parallel.)
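
To make the "execute simultaneously" requirement concrete: every per-engine
thread in the patch parks a non-preemptible spinner on its engine and then
joins a countdown barrier (wait_for_all() below), so the test can only pass
if all engines are busy at the same instant. What follows is a minimal
userspace sketch of that rendezvous pattern, using pthreads instead of the
kernel's atomic_dec_and_test()/wake_up_var()/wait_var_event_timeout(); it is
illustrative only and not part of the patch.

/*
 * Countdown barrier: each worker decrements a shared counter once it is
 * "busy"; the last arrival wakes everybody, and anyone still waiting at
 * the deadline reports a timeout - the analogue of -ETIME below.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define NWORKERS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int counter = NWORKERS;

static int wait_for_all_threads(void)
{
	struct timespec deadline;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5; /* test timeout */

	pthread_mutex_lock(&lock);
	if (--counter == 0)
		pthread_cond_broadcast(&cond); /* last one in wakes the rest */
	while (counter && !err)
		err = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	return err; /* 0 on success, ETIMEDOUT otherwise */
}

static void *worker(void *arg)
{
	long id = (long)arg;

	/* In the selftest, this is where the spinner occupies an engine. */
	printf("worker %ld busy, waiting for the others\n", id);
	return (void *)(long)wait_for_all_threads();
}

int main(void)
{
	pthread_t tsk[NWORKERS];
	void *ret;
	long i;

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tsk[i], NULL, worker, (void *)i);
	for (i = 0; i < NWORKERS; i++) {
		pthread_join(tsk[i], &ret);
		printf("worker %ld: %s\n", i, ret ? "timed out" : "ok");
	}
	return 0;
}

Build with cc -pthread. A single worker that never reaches the barrier makes
every other worker time out, which is exactly the failure mode the selftest
reports as -ETIME when a spinner fails to occupy its engine.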

Suggested-by: Vanshidhar Konda 
Signed-off-by: Chris Wilson 
Cc: Vanshidhar Konda 
Cc: Matthew Auld 
---
drivers/gpu/drm/i915/i915_drv.h   |  6 ++
drivers/gpu/drm/i915/selftests/i915_request.c | 63 +++
2 files changed, 69 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a22d969cb352..0c3ab6020bc6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -891,6 +891,10 @@ struct intel_cdclk_state {
u8 voltage_level;
};

+struct i915_selftest_stash {
+   atomic_t counter;
+};
+
struct drm_i915_private {
struct drm_device drm;

@@ -1286,6 +1290,8 @@ struct drm_i915_private {
/* Mutex to protect the above hdcp component related values. */
struct mutex hdcp_comp_mutex;

+   I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
+
/*
 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
 * will be rejected. Instead look for a better place.
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 30ae34f62176..6181b327b4ac 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -32,6 +32,7 @@
#include "i915_random.h"
#include "i915_selftest.h"
#include "igt_live_test.h"
+#include "igt_spinner.h"
#include "lib_sw_fence.h"

#include "mock_drm.h"
@@ -1115,12 +1116,72 @@ static int __live_parallel_engineN(void *arg)
return 0;
}

+static int wait_for_all(struct drm_i915_private *i915)
+{
+   if (atomic_dec_and_test(&i915->selftest.counter)) {
+   wake_up_var(&i915->selftest.counter);
+   return 0;
+   }
+
+   if (wait_var_event_timeout(&i915->selftest.counter,
+  !atomic_read(&i915->selftest.counter),
+  i915_selftest.timeout_jiffies))
+   return 0;
+
+   return -ETIME;
+}
+
+static int __live_parallel_spin(void *arg)
+{
+   struct intel_engine_cs *engine = arg;
+   struct igt_spinner spin;
+   struct i915_request *rq;
+   int err = 0;
+
+   /*
+* Create a spinner running for eternity on each engine. If a second
+* spinner is incorrectly placed on the same engine, it will not be
+* able to start in time.
+*/
+
+   if (igt_spinner_init(&spin, engine->gt))
+   return -ENOMEM;
+
+   rq = igt_spinner_create_request(&spin,
+   engine->kernel_context,
+   MI_NOOP); /* no preemption */
+   if (IS_ERR(rq)) {
+   err = PTR_ERR(rq);
+   goto out_spin;
+   }
+
+   i915_request_get(rq);
+   i915_request_add(rq);
+   if (igt_wait_for_spinner(&spin, rq)) {
+   /* Occupy this engine for the whole test */
+   err = wait_for_all(engine->i915);
+   } else {
+   pr_err("Failed to start spinner on %s\n", engine->name);
+   err = -EINVAL;
+   }
+   igt_spinner_end(&spin);
+
+   if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
+   err = -EIO;
+   i915_request_put(rq);
+
+out_spin:
+   igt_spinner_fini(&spin);
+   return err;
+}
+
static int live_parallel_engines(void *arg)
{
struct drm_i915_private *i915 = arg;
static int (* const func[])(void *arg) = {
__live_parallel_engine1,
__live_parallel_engineN,
+   __live_parallel_spin,
NULL,
};
const unsigned int nengines = num_uabi_engines(i915);
@@ -1146,6 +1207,8 @@ static int live_parallel_engines(void *arg)
if (err)
break;

+   atomic_set(&i915->selftest.counter, nengines);
+
idx = 0;
for_each_uabi_engine(engine, i915) {
tsk[idx] = kthread_run(*fn, engine,
--
2.24.0.rc2



Reviewed-by: Vanshidhar Konda 

[Intel-gfx] [PATCH] drm/i915/selftests: Spin on all engines simultaneously

2019-10-31 Thread Chris Wilson
Vanshidhar Konda asked for the simplest test "to verify that the kernel
can submit and hardware can execute batch buffers on all the command
streamers in parallel." We have a number of tests in userspace that
submit load to each engine and verify that it is present, but strictly
we have no selftest to prove that the kernel can _simultaneously_
execute on all known engines. (We have tests to demonstrate that we can
submit to HW in parallel, but we don't insist that they execute in
parallel.)

Suggested-by: Vanshidhar Konda 
Signed-off-by: Chris Wilson 
Cc: Vanshidhar Konda 
Cc: Matthew Auld 
---
 drivers/gpu/drm/i915/i915_drv.h   |  6 ++
 drivers/gpu/drm/i915/selftests/i915_request.c | 63 +++
 2 files changed, 69 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a22d969cb352..0c3ab6020bc6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -891,6 +891,10 @@ struct intel_cdclk_state {
u8 voltage_level;
 };
 
+struct i915_selftest_stash {
+   atomic_t counter;
+};
+
 struct drm_i915_private {
struct drm_device drm;
 
@@ -1286,6 +1290,8 @@ struct drm_i915_private {
/* Mutex to protect the above hdcp component related values. */
struct mutex hdcp_comp_mutex;
 
+   I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
+
/*
 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
 * will be rejected. Instead look for a better place.
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 30ae34f62176..191c4f8c35c9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -32,6 +32,7 @@
 #include "i915_random.h"
 #include "i915_selftest.h"
 #include "igt_live_test.h"
+#include "igt_spinner.h"
 #include "lib_sw_fence.h"
 
 #include "mock_drm.h"
@@ -1115,12 +1116,72 @@ static int __live_parallel_engineN(void *arg)
return 0;
 }
 
+static int wait_for_all(struct drm_i915_private *i915)
+{
+   if (atomic_dec_and_test(&i915->selftest.counter)) {
+   wake_up_var(&i915->selftest.counter);
+   return 0;
+   }
+
+   if (wait_var_event_timeout(&i915->selftest.counter,
+  !atomic_read(&i915->selftest.counter),
+  i915_selftest.timeout_jiffies))
+   return 0;
+
+   return -ETIME;
+}
+
+static int __live_parallel_spin(void *arg)
+{
+   struct intel_engine_cs *engine = arg;
+   struct igt_spinner spin;
+   struct i915_request *rq;
+   int err = 0;
+
+   /*
+* Create a spinner running for eternity on each engine. If a second
+* spinner is incorrectly placed on the same engine, it will not be
+* able to start in time.
+*/
+
+   if (igt_spinner_init(&spin, engine->gt))
+   return -ENOMEM;
+
+   rq = igt_spinner_create_request(&spin,
+   engine->kernel_context,
+   MI_NOOP); /* no preemption */
+   if (IS_ERR(rq)) {
+   err = PTR_ERR(rq);
+   goto out_spin;
+   }
+
+   i915_request_get(rq);
+   i915_request_add(rq);
+   if (igt_wait_for_spinner(&spin, rq)) {
+   /* Occupy this engine for the whole test */
+   err = wait_for_all(engine->i915);
+   } else {
+   pr_err("Failed to start spinner on %s\n", engine->name);
+   err = -EINVAL;
+   }
+   igt_spinner_end(&spin);
+
+   if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
+   err = -EIO;
+   i915_request_put(rq);
+
+out_spin:
+   igt_spinner_fini(&spin);
+   return err;
+}
+
 static int live_parallel_engines(void *arg)
 {
struct drm_i915_private *i915 = arg;
static int (* const func[])(void *arg) = {
__live_parallel_engine1,
__live_parallel_engineN,
+   __live_parallel_spin,
NULL,
};
const unsigned int nengines = num_uabi_engines(i915);
@@ -1146,6 +1207,8 @@ static int live_parallel_engines(void *arg)
if (err)
break;
 
+   atomic_set(&i915->selftest.counter, nengines);
+
idx = 0;
for_each_uabi_engine(engine, i915) {
tsk[idx] = kthread_run(*fn, engine,
-- 
2.24.0.rc2

___
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx