For the common path, where we want to execute the batch, pushing the no_hw check down into the execbuf submission eliminates one loop over all the exec objects. For the less common path, where we do not want to execute the batch, no_hw was leaving out_fence uninitialised.
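To illustrate the second point, here is a minimal, self-contained sketch of the hazard (submit() and use of the value below are hypothetical stand-ins, not the driver's real entry points): an out-fence that is only written on the hardware path leaves the caller reading an uninitialised value whenever no_hw short-circuits the submission.

   /* Hypothetical stand-in for the driver path, for illustration only. */
   #include <stdbool.h>
   #include <stdio.h>

   static int submit(bool no_hw, int *out_fence_fd)
   {
      if (no_hw)
         return 0;            /* old behaviour: *out_fence_fd never written */
      *out_fence_fd = 42;     /* hw path: pretend the kernel returned a fence fd */
      return 0;
   }

   int main(void)
   {
      int fence_fd;           /* uninitialised stack value */
      if (submit(true, &fence_fd) == 0)
         printf("fence fd = %d\n", fence_fd);   /* undefined: reads garbage on the no_hw path */
      return 0;
   }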
To simplify later changes, the execbuf routine was then inlined into its
only caller.

Cc: Kenneth Graunke <kenn...@whitecape.org>
Cc: Matt Turner <matts...@gmail.com>
---
 src/mesa/drivers/dri/i965/intel_batchbuffer.c | 134 +++++++++++---------------
 1 file changed, 56 insertions(+), 78 deletions(-)

diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
index 4461a59b80..59d95c4e66 100644
--- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c
@@ -383,13 +383,6 @@ static void do_batch_dump(struct brw_context *brw) { }
 static void
 brw_new_batch(struct brw_context *brw)
 {
-   /* Unreference any BOs held by the previous batch, and reset counts. */
-   for (int i = 0; i < brw->batch.exec_count; i++) {
-      if (brw->batch.exec_bos[i] != brw->batch.bo) {
-         brw_bo_unreference(brw->batch.exec_bos[i]);
-      }
-      brw->batch.exec_bos[i] = NULL;
-   }
    brw->batch.reloc_count = 0;
    brw->batch.exec_count = 0;
    brw->batch.aperture_space = BATCH_SZ;
@@ -559,63 +552,8 @@ add_exec_bo(struct intel_batchbuffer *batch, struct brw_bo *bo)
 }
 
 static int
-execbuffer(int fd,
-           struct intel_batchbuffer *batch,
-           uint32_t ctx_id,
-           int used,
-           int in_fence,
-           int *out_fence,
-           int flags)
-{
-   struct drm_i915_gem_execbuffer2 execbuf = {
-      .buffers_ptr = (uintptr_t) batch->validation_list,
-      .buffer_count = batch->exec_count,
-      .batch_start_offset = 0,
-      .batch_len = used,
-      .flags = flags,
-      .rsvd1 = ctx_id, /* rsvd1 is actually the context ID */
-   };
-
-   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;
-
-   if (in_fence != -1) {
-      execbuf.rsvd2 = in_fence;
-      execbuf.flags |= I915_EXEC_FENCE_IN;
-   }
-
-   if (out_fence != NULL) {
-      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
-      *out_fence = -1;
-      execbuf.flags |= I915_EXEC_FENCE_OUT;
-   }
-
-   int ret = drmIoctl(fd, cmd, &execbuf);
-   if (ret != 0)
-      ret = -errno;
-
-   for (int i = 0; i < batch->exec_count; i++) {
-      struct brw_bo *bo = batch->exec_bos[i];
-
-      bo->idle = false;
-
-      /* Update brw_bo::offset64 */
-      if (batch->validation_list[i].offset != bo->offset64) {
-         DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
-             bo->gem_handle, bo->offset64, batch->validation_list[i].offset);
-         bo->offset64 = batch->validation_list[i].offset;
-      }
-   }
-
-   if (ret == 0 && out_fence != NULL)
-      *out_fence = execbuf.rsvd2 >> 32;
-
-   return ret;
-}
-
-static int
 do_flush_locked(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
 {
-   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
    struct intel_batchbuffer *batch = &brw->batch;
    int ret = 0;
 
@@ -624,35 +562,75 @@ do_flush_locked(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
    } else {
       ret = brw_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
       if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
-	 ret = brw_bo_subdata(batch->bo,
-			      batch->state_batch_offset,
-			      batch->bo->size - batch->state_batch_offset,
-			      (char *)batch->map + batch->state_batch_offset);
+         ret = brw_bo_subdata(batch->bo,
+                              batch->state_batch_offset,
+                              batch->bo->size - batch->state_batch_offset,
+                              (char *)batch->map + batch->state_batch_offset);
       }
    }
 
-   if (!brw->screen->no_hw) {
-      int flags;
+   if (ret == 0) {
+      /* Add the batch itself to the end of the validation list */
+      add_exec_bo(batch, batch->bo);
+
+      struct drm_i915_gem_execbuffer2 execbuf = {
+         .buffers_ptr = (uintptr_t) batch->validation_list,
+         .buffer_count = batch->exec_count,
+         .batch_len = 4 * USED_BATCH(*batch),
+         /* rsvd1 is actually the context ID */
+         .rsvd1 = batch->ring == RENDER_RING ? brw->hw_ctx : 0,
+      };
 
       if (brw->gen >= 6 && batch->ring == BLT_RING) {
-         flags = I915_EXEC_BLT;
+         execbuf.flags = I915_EXEC_BLT;
       } else {
-         flags = I915_EXEC_RENDER;
+         execbuf.flags = I915_EXEC_RENDER;
       }
       if (batch->needs_sol_reset)
-         flags |= I915_EXEC_GEN7_SOL_RESET;
+         execbuf.flags |= I915_EXEC_GEN7_SOL_RESET;
 
-      if (ret == 0) {
-         uint32_t hw_ctx = batch->ring == RENDER_RING ? brw->hw_ctx : 0;
+      unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;
 
-         /* Add the batch itself to the end of the validation list */
-         add_exec_bo(batch, batch->bo);
+      if (in_fence_fd != -1) {
+         execbuf.rsvd2 = in_fence_fd;
+         execbuf.flags |= I915_EXEC_FENCE_IN;
+      }
 
-         ret = execbuffer(dri_screen->fd, batch, hw_ctx,
-                          4 * USED_BATCH(*batch),
-                          in_fence_fd, out_fence_fd, flags);
+      if (out_fence_fd != NULL) {
+         cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
+         *out_fence_fd = -1;
+         execbuf.flags |= I915_EXEC_FENCE_OUT;
       }
 
+      if (likely(!brw->screen->no_hw)) {
+         __DRIscreen *dri_screen = brw->screen->driScrnPriv;
+         if (unlikely(drmIoctl(dri_screen->fd, cmd, &execbuf)))
+            ret = -errno;
+      } else {
+         out_fence_fd = NULL;
+      }
+
+      for (int i = 0; i < batch->exec_count; i++) {
+         struct brw_bo *bo = batch->exec_bos[i];
+
+         bo->idle = false;
+
+         /* Update brw_bo::offset64 */
+         if (batch->validation_list[i].offset != bo->offset64) {
+            DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
+                bo->gem_handle, bo->offset64, batch->validation_list[i].offset);
+            bo->offset64 = batch->validation_list[i].offset;
+         }
+
+         if (batch->exec_bos[i] != batch->bo) {
+            brw_bo_unreference(batch->exec_bos[i]);
+         }
+         batch->exec_bos[i] = NULL;
+      }
+
+      if (ret == 0 && out_fence_fd != NULL)
+         *out_fence_fd = execbuf.rsvd2 >> 32;
+
       throttle(brw);
    }
-- 
2.13.3
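As a review aid, here is a condensed, self-contained sketch of the fence handling after this patch (submit_sketch and struct fake_execbuf are hypothetical stand-ins, not the driver code): *out_fence_fd is initialised to -1 up front, the ioctl is skipped for no_hw, and out_fence_fd is then NULLed locally so we never report a fence we never received.

   #include <stdbool.h>
   #include <stddef.h>
   #include <stdint.h>

   struct fake_execbuf { uint64_t rsvd2; };   /* stand-in for drm_i915_gem_execbuffer2 */

   static int submit_sketch(bool no_hw, int in_fence_fd, int *out_fence_fd)
   {
      struct fake_execbuf execbuf = { 0 };
      int ret = 0;

      if (in_fence_fd != -1)
         execbuf.rsvd2 = (uint32_t)in_fence_fd;   /* FENCE_IN lives in the low 32 bits */

      if (out_fence_fd != NULL)
         *out_fence_fd = -1;                      /* always initialised, even for no_hw */

      if (!no_hw) {
         /* drmIoctl(fd, cmd, &execbuf) would run here; on success the kernel
          * writes the out-fence fd into the high 32 bits of rsvd2. */
      } else {
         out_fence_fd = NULL;                     /* never report a fence we never got */
      }

      if (ret == 0 && out_fence_fd != NULL)
         *out_fence_fd = (int)(execbuf.rsvd2 >> 32);

      return ret;
   }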