This patch adds a parameter that allows the spinning batch to be made
preemptible by inserting an arbitration point into the spinning loop.

Cc: Chris Wilson <ch...@chris-wilson.co.uk>
Signed-off-by: Antonio Argenziano <antonio.argenzi...@intel.com>
---
 lib/igt_dummyload.c       |  4 ++--
 lib/igt_dummyload.h       |  1 +
 lib/igt_gt.c              |  3 ++-
 tests/drv_missed_irq.c    |  2 +-
 tests/gem_busy.c          | 10 +++++-----
 tests/gem_exec_fence.c    | 14 +++++++-------
 tests/gem_exec_latency.c  |  2 +-
 tests/gem_exec_nop.c      |  2 +-
 tests/gem_exec_reloc.c    |  6 +++---
 tests/gem_exec_schedule.c |  8 ++++----
 tests/gem_exec_suspend.c  |  2 +-
 tests/gem_shrink.c        |  4 ++--
 tests/gem_spin_batch.c    |  4 ++--
 tests/gem_wait.c          |  2 +-
 tests/kms_busy.c          |  6 +++---
 tests/kms_cursor_legacy.c |  2 +-
 tests/perf_pmu.c          | 18 +++++++++---------
 tests/pm_rps.c            |  2 +-
 18 files changed, 47 insertions(+), 45 deletions(-)

diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index 0bb02e5b..cf5b29d5 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -121,8 +121,8 @@ void emit_recursive_batch(igt_spin_t *spin,
        spin->batch = batch;
        spin->handle = obj[BATCH].handle;
 
-       /* Allow ourselves to be preempted */
-       *batch++ = MI_ARB_CHK;
+       if (opts.preemptible)
+               *batch++ = MI_ARB_CHK; /* Allow ourselves to be preempted */
 
        /* Pad with a few nops so that we do not completely hog the system.
         *
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
index 2f3f2ebf..c285ece8 100644
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -42,6 +42,7 @@ typedef struct igt_spin_opt {
        uint32_t ctx;
        unsigned engine;
        uint32_t dep;
+       bool preemptible;
 } igt_spin_opt_t;
 
 void emit_recursive_batch(igt_spin_t *spin, int fd, igt_spin_opt_t opts);
diff --git a/lib/igt_gt.c b/lib/igt_gt.c
index 9ec3b0f6..48d40e61 100644
--- a/lib/igt_gt.c
+++ b/lib/igt_gt.c
@@ -293,7 +293,8 @@ igt_hang_t igt_hang_ctx(int fd, igt_hang_opt_t opts)
        if ((opts.flags & HANG_ALLOW_BAN) == 0)
                context_set_ban(fd, opts.ctx, 0);
 
-       emit_recursive_batch(&spin, fd, (igt_spin_opt_t){opts.ctx, opts.ring, 
0});
+       emit_recursive_batch(&spin, fd,
+                       (igt_spin_opt_t){opts.ctx, opts.ring, 0, false});
 
        if (opts.offset)
                *opts.offset = spin.spinning_offset;
diff --git a/tests/drv_missed_irq.c b/tests/drv_missed_irq.c
index db67367b..308b9b60 100644
--- a/tests/drv_missed_irq.c
+++ b/tests/drv_missed_irq.c
@@ -33,7 +33,7 @@ IGT_TEST_DESCRIPTION("Inject missed interrupts and make sure 
they are caught");
 
 static void trigger_missed_interrupt(int fd, unsigned ring)
 {
-       igt_spin_t *spin = __igt_spin_batch_new(fd, (igt_spin_opt_t){0, ring, 
0});
+       igt_spin_t *spin = __igt_spin_batch_new(fd, (igt_spin_opt_t){0, ring, 
0, true});
 
        igt_fork(child, 1) {
                /* We are now a low priority child on the *same* CPU as the
diff --git a/tests/gem_busy.c b/tests/gem_busy.c
index 119db4e2..b581a490 100644
--- a/tests/gem_busy.c
+++ b/tests/gem_busy.c
@@ -113,7 +113,7 @@ static void semaphore(int fd, unsigned ring, uint32_t flags)
 
        /* Create a long running batch which we can use to hog the GPU */
        handle[BUSY] = gem_create(fd, 4096);
-       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, ring, handle[BUSY]});
+       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, ring, handle[BUSY], 
true});
 
        /* Queue a batch after the busy, it should block and remain "busy" */
        igt_assert(exec_noop(fd, handle, ring | flags, false));
@@ -460,7 +460,7 @@ static void close_race(int fd)
 
                for (i = 0; i < nhandles; i++) {
                        spin[i] = igt_spin_batch_new(fd, (igt_spin_opt_t){0,
-                                                    engines[rand() % nengine], 
0});
+                                                    engines[rand() % nengine], 
0, true});
                        handles[i] = spin[i]->handle;
                }
 
@@ -469,7 +469,7 @@ static void close_race(int fd)
                                igt_spin_batch_free(fd, spin[i]);
                                spin[i] = igt_spin_batch_new(fd, 
(igt_spin_opt_t){0,
                                                             engines[rand() % 
nengine],
-                                                            0});
+                                                            0, true});
                                handles[i] = spin[i]->handle;
                                __sync_synchronize();
                        }
@@ -511,7 +511,7 @@ static bool has_semaphores(int fd)
 
 static bool has_extended_busy_ioctl(int fd)
 {
-       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 
I915_EXEC_RENDER, 0});
+       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 
I915_EXEC_RENDER, 0, true});
        uint32_t read, write;
 
        __gem_busy(fd, spin->handle, &read, &write);
@@ -522,7 +522,7 @@ static bool has_extended_busy_ioctl(int fd)
 
 static void basic(int fd, unsigned ring, unsigned flags)
 {
-       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, ring, 0});
+       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, ring, 0, 
true});
        struct timespec tv;
        int timeout;
        bool busy;
diff --git a/tests/gem_exec_fence.c b/tests/gem_exec_fence.c
index ddf61722..01ef6cb5 100644
--- a/tests/gem_exec_fence.c
+++ b/tests/gem_exec_fence.c
@@ -438,7 +438,7 @@ static void test_parallel(int fd, unsigned int master)
        /* Fill the queue with many requests so that the next one has to
         * wait before it can be executed by the hardware.
         */
-       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, master, c.handle});
+       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, master, c.handle, 
true});
        resubmit(fd, spin->handle, master, 16);
 
        /* Now queue the master request and its secondaries */
@@ -961,7 +961,7 @@ static void test_syncobj_unused_fence(int fd)
        struct local_gem_exec_fence fence = {
                .handle = syncobj_create(fd),
        };
-       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
+       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0, 
true});
 
        /* sanity check our syncobj_to_sync_file interface */
        igt_assert_eq(__syncobj_to_sync_file(fd, 0), -ENOENT);
@@ -1053,7 +1053,7 @@ static void test_syncobj_signal(int fd)
        struct local_gem_exec_fence fence = {
                .handle = syncobj_create(fd),
        };
-       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
+       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0, 
true});
 
        /* Check that the syncobj is signaled only when our request/fence is */
 
@@ -1103,7 +1103,7 @@ static void test_syncobj_wait(int fd)
 
        gem_quiescent_gpu(fd);
 
-       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
+       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0, true});
 
        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = to_user_pointer(&obj);
@@ -1173,7 +1173,7 @@ static void test_syncobj_export(int fd)
                .handle = syncobj_create(fd),
        };
        int export[2];
-       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
+       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0, 
true});
 
        /* Check that if we export the syncobj prior to use it picks up
         * the later fence. This allows a syncobj to establish a channel
@@ -1231,7 +1231,7 @@ static void test_syncobj_repeat(int fd)
        struct drm_i915_gem_execbuffer2 execbuf;
        struct local_gem_exec_fence *fence;
        int export;
-       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
+       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0, 
true});
 
        /* Check that we can wait on the same fence multiple times */
        fence = calloc(nfences, sizeof(*fence));
@@ -1286,7 +1286,7 @@ static void test_syncobj_import(int fd)
        const uint32_t bbe = MI_BATCH_BUFFER_END;
        struct drm_i915_gem_exec_object2 obj;
        struct drm_i915_gem_execbuffer2 execbuf;
-       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0});
+       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 0, 
true});
        uint32_t sync = syncobj_create(fd);
        int fence;
 
diff --git a/tests/gem_exec_latency.c b/tests/gem_exec_latency.c
index 32c1295d..c7248ae9 100644
--- a/tests/gem_exec_latency.c
+++ b/tests/gem_exec_latency.c
@@ -344,7 +344,7 @@ static void latency_from_ring(int fd,
                               I915_GEM_DOMAIN_GTT);
 
                if (flags & PREEMPT)
-                       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[0], 
ring, 0});
+                       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[0], 
ring, 0, true});
 
                if (flags & CORK) {
                        plug(fd, &c);
diff --git a/tests/gem_exec_nop.c b/tests/gem_exec_nop.c
index 57f4b551..6b119027 100644
--- a/tests/gem_exec_nop.c
+++ b/tests/gem_exec_nop.c
@@ -620,7 +620,7 @@ static void preempt(int fd, uint32_t handle,
        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                igt_spin_t *spin =
-                       __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[0], 
ring_id, 0});
+                       __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[0], 
ring_id, 0, true});
 
                for (int loop = 0; loop < 1024; loop++)
                        gem_execbuf(fd, &execbuf);
diff --git a/tests/gem_exec_reloc.c b/tests/gem_exec_reloc.c
index cad9cc1f..18f051a2 100644
--- a/tests/gem_exec_reloc.c
+++ b/tests/gem_exec_reloc.c
@@ -388,7 +388,7 @@ static void basic_reloc(int fd, unsigned before, unsigned 
after, unsigned flags)
                }
 
                if (flags & ACTIVE) {
-                       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 
I915_EXEC_DEFAULT, obj.handle});
+                       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 
I915_EXEC_DEFAULT, obj.handle, true});
                        if (!(flags & HANG))
                                igt_spin_batch_set_timeout(spin, 
NSEC_PER_SEC/100);
                        igt_assert(gem_bo_busy(fd, obj.handle));
@@ -454,7 +454,7 @@ static void basic_reloc(int fd, unsigned before, unsigned 
after, unsigned flags)
                }
 
                if (flags & ACTIVE) {
-                       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 
I915_EXEC_DEFAULT, obj.handle});
+                       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 
I915_EXEC_DEFAULT, obj.handle, true});
                        if (!(flags & HANG))
                                igt_spin_batch_set_timeout(spin, 
NSEC_PER_SEC/100);
                        igt_assert(gem_bo_busy(fd, obj.handle));
@@ -581,7 +581,7 @@ static void basic_range(int fd, unsigned flags)
        execbuf.buffer_count = n + 1;
 
        if (flags & ACTIVE) {
-               spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 
obj[n].handle});
+               spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, 0, 
obj[n].handle, true});
                if (!(flags & HANG))
                        igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
                igt_assert(gem_bo_busy(fd, obj[n].handle));
diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index 51f494a7..0b88acd1 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -147,7 +147,7 @@ static void unplug_show_queue(int fd, struct cork *c, 
unsigned int engine)
 
        for (int n = 0; n < ARRAY_SIZE(spin); n++) {
                uint32_t ctx = create_highest_priority(fd);
-               spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx, 
engine, 0});
+               spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx, 
engine, 0, true});
                gem_context_destroy(fd, ctx);
        }
 
@@ -376,7 +376,7 @@ static void preempt(int fd, unsigned ring, unsigned flags)
                        ctx[LO] = gem_context_create(fd);
                        gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
                }
-               spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[LO], 
ring, 0});
+               spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[LO], 
ring, 0, true});
                igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
 
                store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, 
I915_GEM_DOMAIN_RENDER);
@@ -425,7 +425,7 @@ static void preempt_other(int fd, unsigned ring)
 
        n = 0;
        for_each_engine(fd, other) {
-               spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[NOISE], 
other, 0});
+               spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[NOISE], 
other, 0, true});
                store_dword(fd, ctx[LO], other,
                            result, (n + 1)*sizeof(uint32_t), n + 1,
                            0, I915_GEM_DOMAIN_RENDER);
@@ -478,7 +478,7 @@ static void preempt_self(int fd, unsigned ring)
        n = 0;
        gem_context_set_priority(fd, ctx[HI], MIN_PRIO);
        for_each_engine(fd, other) {
-               spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[NOISE], 
other, 0});
+               spin[n] = __igt_spin_batch_new(fd, (igt_spin_opt_t){ctx[NOISE], 
other, 0, true});
                store_dword(fd, ctx[HI], other,
                            result, (n + 1)*sizeof(uint32_t), n + 1,
                            0, I915_GEM_DOMAIN_RENDER);
diff --git a/tests/gem_exec_suspend.c b/tests/gem_exec_suspend.c
index 8a83b11e..1751ae6f 100644
--- a/tests/gem_exec_suspend.c
+++ b/tests/gem_exec_suspend.c
@@ -201,7 +201,7 @@ static void run_test(int fd, unsigned engine, unsigned 
flags)
        }
 
        if (flags & HANG)
-               spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0});
+               spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0, 
true});
 
        switch (mode(flags)) {
        case NOSLEEP:
diff --git a/tests/gem_shrink.c b/tests/gem_shrink.c
index 13110d06..dbc3b047 100644
--- a/tests/gem_shrink.c
+++ b/tests/gem_shrink.c
@@ -311,9 +311,9 @@ static void reclaim(unsigned engine, int timeout)
                } while (!*shared);
        }
 
-       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0});
+       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0, true});
        igt_until_timeout(timeout) {
-               igt_spin_t *next = __igt_spin_batch_new(fd, (igt_spin_opt_t){0, 
engine, 0});
+               igt_spin_t *next = __igt_spin_batch_new(fd, (igt_spin_opt_t){0, 
engine, 0, true});
 
                igt_spin_batch_set_timeout(spin, timeout_100ms);
                gem_sync(fd, spin->handle);
diff --git a/tests/gem_spin_batch.c b/tests/gem_spin_batch.c
index 37d1d6e9..785b6cf4 100644
--- a/tests/gem_spin_batch.c
+++ b/tests/gem_spin_batch.c
@@ -41,9 +41,9 @@ static void spin(int fd, unsigned int engine, unsigned int 
timeout_sec)
        struct timespec itv = { };
        uint64_t elapsed;
 
-       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0});
+       spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0, true});
        while ((elapsed = igt_nsec_elapsed(&tv)) >> 30 < timeout_sec) {
-               igt_spin_t *next = __igt_spin_batch_new(fd, (igt_spin_opt_t){0, 
engine, 0});
+               igt_spin_t *next = __igt_spin_batch_new(fd, (igt_spin_opt_t){0, 
engine, 0, true});
 
                igt_spin_batch_set_timeout(spin,
                                           timeout_100ms - 
igt_nsec_elapsed(&itv));
diff --git a/tests/gem_wait.c b/tests/gem_wait.c
index 845d8b21..cae3587b 100644
--- a/tests/gem_wait.c
+++ b/tests/gem_wait.c
@@ -110,7 +110,7 @@ static void unplug(struct cork *c)
 static void basic(int fd, unsigned engine, unsigned flags)
 {
        struct cork cork = plug(fd, flags);
-       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 
cork.handle});
+       igt_spin_t *spin = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 
cork.handle, true});
        struct drm_i915_gem_wait wait = {
                flags & WRITE ? cork.handle : spin->handle
                };
diff --git a/tests/kms_busy.c b/tests/kms_busy.c
index df949b97..9aa53e7c 100644
--- a/tests/kms_busy.c
+++ b/tests/kms_busy.c
@@ -92,7 +92,7 @@ static void flip_to_fb(igt_display_t *dpy, int pipe,
        struct drm_event_vblank ev;
 
        igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
-                                          (igt_spin_opt_t){0, ring, 
fb->gem_handle});
+                                          (igt_spin_opt_t){0, ring, 
fb->gem_handle, true});
 
        if (modeset) {
                /*
@@ -208,7 +208,7 @@ static void test_atomic_commit_hang(igt_display_t *dpy, 
igt_plane_t *primary,
                                    struct igt_fb *busy_fb, unsigned ring)
 {
        igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
-                                          (igt_spin_opt_t){0, ring, 
busy_fb->gem_handle});
+                                          (igt_spin_opt_t){0, ring, 
busy_fb->gem_handle, true});
        struct pollfd pfd = { .fd = dpy->drm_fd, .events = POLLIN };
        unsigned flags = 0;
        struct drm_event_vblank ev;
@@ -295,7 +295,7 @@ static void test_pageflip_modeset_hang(igt_display_t *dpy,
 
        igt_display_commit2(dpy, dpy->is_atomic ? COMMIT_ATOMIC : 
COMMIT_LEGACY);
 
-       t = igt_spin_batch_new(dpy->drm_fd, (igt_spin_opt_t){0, ring, 
fb.gem_handle});
+       t = igt_spin_batch_new(dpy->drm_fd, (igt_spin_opt_t){0, ring, 
fb.gem_handle, true});
 
        do_or_die(drmModePageFlip(dpy->drm_fd, dpy->pipes[pipe].crtc_id, 
fb.fb_id, DRM_MODE_PAGE_FLIP_EVENT, &fb));
 
diff --git a/tests/kms_cursor_legacy.c b/tests/kms_cursor_legacy.c
index 21ada773..29ec00db 100644
--- a/tests/kms_cursor_legacy.c
+++ b/tests/kms_cursor_legacy.c
@@ -532,7 +532,7 @@ static void basic_flip_cursor(igt_display_t *display,
 
                spin = NULL;
                if (flags & BASIC_BUSY)
-                       spin = igt_spin_batch_new(display->drm_fd, 
(igt_spin_opt_t){0, 0, fb_info.gem_handle});
+                       spin = igt_spin_batch_new(display->drm_fd, 
(igt_spin_opt_t){0, 0, fb_info.gem_handle, true});
 
                /* Start with a synchronous query to align with the vblank */
                vblank_start = get_vblank(display->drm_fd, pipe, 
DRM_VBLANK_NEXTONMISS);
diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index e5aa563b..d06b67a8 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -141,7 +141,7 @@ single(int gem_fd, const struct intel_execution_engine2 *e, 
bool busy)
        fd = open_pmu(I915_PMU_ENGINE_BUSY(e->class, e->instance));
 
        if (busy) {
-               spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, 
e2ring(gem_fd, e), 0});
+               spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, 
e2ring(gem_fd, e), 0, true});
                igt_spin_batch_set_timeout(spin, batch_duration_ns);
        } else {
                usleep(batch_duration_ns / 1000);
@@ -203,7 +203,7 @@ busy_check_all(int gem_fd, const struct 
intel_execution_engine2 *e,
 
        igt_assert_eq(i, num_engines);
 
-       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, e2ring(gem_fd, 
e), 0});
+       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, e2ring(gem_fd, 
e), 0, true});
        igt_spin_batch_set_timeout(spin, batch_duration_ns);
 
        gem_sync(gem_fd, spin->handle);
@@ -297,7 +297,7 @@ all_busy_check_all(int gem_fd, const unsigned int 
num_engines)
                fd[i] = open_group(I915_PMU_ENGINE_BUSY(e->class, e->instance),
                                   fd[0]);
 
-               spin[i] = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, 
e2ring(gem_fd, e), 0});
+               spin[i] = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, 
e2ring(gem_fd, e), 0, true});
                igt_spin_batch_set_timeout(spin[i], batch_duration_ns);
 
                i++;
@@ -328,7 +328,7 @@ no_sema(int gem_fd, const struct intel_execution_engine2 
*e, bool busy)
        open_group(I915_PMU_ENGINE_WAIT(e->class, e->instance), fd);
 
        if (busy) {
-               spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, 
e2ring(gem_fd, e), 0});
+               spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, 
e2ring(gem_fd, e), 0, true});
                igt_spin_batch_set_timeout(spin, batch_duration_ns);
        } else {
                usleep(batch_duration_ns / 1000);
@@ -647,7 +647,7 @@ multi_client(int gem_fd, const struct 
intel_execution_engine2 *e)
         */
        fd[1] = open_pmu(config);
 
-       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, e2ring(gem_fd, 
e), 0});
+       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, e2ring(gem_fd, 
e), 0, true});
        igt_spin_batch_set_timeout(spin, 2 * batch_duration_ns);
 
        slept = measured_usleep(batch_duration_ns / 1000);
@@ -752,7 +752,7 @@ static void cpu_hotplug(int gem_fd)
        fd = perf_i915_open(I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0));
        igt_assert(fd >= 0);
 
-       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, I915_EXEC_RENDER, 
0});
+       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, I915_EXEC_RENDER, 
0, true});
 
        igt_nsec_elapsed(&start);
 
@@ -865,7 +865,7 @@ test_interrupts(int gem_fd)
        gem_quiescent_gpu(gem_fd);
 
        fd = open_pmu(I915_PMU_INTERRUPTS);
-       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, 0, 0});
+       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, 0, 0, true});
 
        obj.handle = gem_create(gem_fd, sz);
        gem_write(gem_fd, obj.handle, sz - sizeof(bbe), &bbe, sizeof(bbe));
@@ -947,7 +947,7 @@ test_frequency(int gem_fd)
 
        pmu_read_multi(fd, 2, start);
 
-       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, I915_EXEC_RENDER, 
0});
+       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, I915_EXEC_RENDER, 
0, true});
        igt_spin_batch_set_timeout(spin, duration_ns);
        gem_sync(gem_fd, spin->handle);
 
@@ -972,7 +972,7 @@ test_frequency(int gem_fd)
 
        pmu_read_multi(fd, 2, start);
 
-       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, I915_EXEC_RENDER, 
0});
+       spin = igt_spin_batch_new(gem_fd, (igt_spin_opt_t){0, I915_EXEC_RENDER, 
0, true});
        igt_spin_batch_set_timeout(spin, duration_ns);
        gem_sync(gem_fd, spin->handle);
 
diff --git a/tests/pm_rps.c b/tests/pm_rps.c
index 234348da..4e01a1e0 100644
--- a/tests/pm_rps.c
+++ b/tests/pm_rps.c
@@ -588,7 +588,7 @@ static void boost_freq(int fd, int *boost_freqs)
        engine = I915_EXEC_RENDER;
        if (intel_gen(lh.devid) >= 6)
                engine = I915_EXEC_BLT;
-       load = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0});
+       load = igt_spin_batch_new(fd, (igt_spin_opt_t){0, engine, 0, true});
        /* Waiting will grant us a boost to maximum */
        gem_wait(fd, load->handle, &timeout);
 
-- 
2.14.2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to