Re: [Mesa-dev] [PATCH v3 00/17] panfrost: Support batch pipelining

2019-09-20 Thread Alyssa Rosenzweig
Series looks quite good, overall. Just a few minor issues, but probably
not even enough to justify a respin. Congratulations! :p

On Wed, Sep 18, 2019 at 03:24:22PM +0200, Boris Brezillon wrote:
> Hello,
> 
> This is the third attempt at supporting batch pipelining. This time I
> implemented it using a dependency graph (as suggested by Alyssa and
> Steven) so that batch submission can be delayed even more: the only
> time we flush batches now is when we have an explicit flush or when
> the CPU needs to access a BO (we might want to tweak that a bit to
> avoid the extra latency incurred by this solution). With that in place
> we hope to increase GPU utilization.
> 
> Patches 15 and 16 are optional, but I remember reading (I think it was
> Steven who mentioned that) that draw order matters when queueing render
> operations for different frames (frame N should ideally be ready before
> frame N+1). Not sure if enforcing draw call order is enough to guarantee
> that rendering of frame N always finishes before frame N+1 though.
> If that's something you don't want to merge, I can drop it.
> 
> Regards,
> 
> Boris
> 
> Boris Brezillon (17):
>   panfrost: Extend the panfrost_batch_add_bo() API to pass access flags
>   panfrost: Make panfrost_batch->bos a hash table
>   panfrost: Add a batch fence
>   panfrost: Use the per-batch fences to wait on the last submitted batch
>   panfrost: Add a panfrost_freeze_batch() helper
>   panfrost: Start tracking inter-batch dependencies
>   panfrost: Prepare panfrost_fence for batch pipelining
>   panfrost: Add a panfrost_flush_all_batches() helper
>   panfrost: Add a panfrost_flush_batches_accessing_bo() helper
>   panfrost: Kill the explicit serialization in panfrost_batch_submit()
>   panfrost: Get rid of the flush in panfrost_set_framebuffer_state()
>   panfrost: Add flags to reflect the BO imported/exported state
>   panfrost: Make sure the BO is 'ready' when picked from the cache
>   panfrost: Do fine-grained flushing when preparing BO for CPU accesses
>   panfrost: Rename ctx->batches into ctx->fbo_to_batch
>   panfrost: Take draw call order into account
>   panfrost/ci: New tests are passing
> 
>  .../drivers/panfrost/ci/expected-failures.txt |   4 -
>  src/gallium/drivers/panfrost/pan_allocate.c   |  14 +-
>  src/gallium/drivers/panfrost/pan_blend_cso.c  |   6 +-
>  src/gallium/drivers/panfrost/pan_bo.c | 116 ++-
>  src/gallium/drivers/panfrost/pan_bo.h |  44 ++
>  src/gallium/drivers/panfrost/pan_compute.c|   2 +-
>  src/gallium/drivers/panfrost/pan_context.c| 121 ++--
>  src/gallium/drivers/panfrost/pan_context.h|  15 +-
>  src/gallium/drivers/panfrost/pan_instancing.c |   5 +-
>  src/gallium/drivers/panfrost/pan_job.c| 668 --
>  src/gallium/drivers/panfrost/pan_job.h|  58 +-
>  src/gallium/drivers/panfrost/pan_resource.c   |  27 +-
>  src/gallium/drivers/panfrost/pan_screen.c |  65 +-
>  src/gallium/drivers/panfrost/pan_screen.h |   3 +-
>  src/gallium/drivers/panfrost/pan_varyings.c   |  10 +-
>  15 files changed, 956 insertions(+), 202 deletions(-)
> 
> -- 
> 2.21.0
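
For illustration, the dependency-graph flush described in the cover letter
boils down to something like the sketch below. The helper name is invented
for the example; the ->dependencies array and the fence->batch pointer
(NULL once a batch has been submitted) are taken from the series.

  /* Submit a batch's not-yet-submitted dependencies, then the batch itself. */
  static void
  panfrost_batch_submit_with_deps(struct panfrost_batch *batch)
  {
          util_dynarray_foreach(&batch->dependencies,
                                struct panfrost_batch_fence *, dep) {
                  /* A NULL ->batch means this dependency was already
                   * submitted, so there is nothing left to flush for it.
                   */
                  if ((*dep)->batch)
                          panfrost_batch_submit_with_deps((*dep)->batch);
          }

          panfrost_batch_submit(batch);
  }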

Re: [Mesa-dev] [PATCH v3 15/17] panfrost: Rename ctx->batches into ctx->fbo_to_batch

2019-09-20 Thread Alyssa Rosenzweig
Erm, un r-b, sorry, didn't realize this was the optional one. Let's hold off
on this patch and the succeeding one for now.

On Wed, Sep 18, 2019 at 03:24:37PM +0200, Boris Brezillon wrote:
> We are about to add a batch queue to keep track of submission order.
> Let's rename the existing batches hash table (which is used to get the
> batch attached to an FBO) into fbo_to_batch to avoid confusion.
> 
> Signed-off-by: Boris Brezillon 
> ---
>  src/gallium/drivers/panfrost/pan_context.c  |  2 +-
>  src/gallium/drivers/panfrost/pan_context.h  |  2 +-
>  src/gallium/drivers/panfrost/pan_job.c  | 21 +++--
>  src/gallium/drivers/panfrost/pan_resource.c | 16 
>  4 files changed, 21 insertions(+), 20 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_context.c 
> b/src/gallium/drivers/panfrost/pan_context.c
> index 07bafad58a00..0330b5852676 100644
> --- a/src/gallium/drivers/panfrost/pan_context.c
> +++ b/src/gallium/drivers/panfrost/pan_context.c
> @@ -1355,7 +1355,7 @@ panfrost_flush(
>   */
>  if (fence) {
>  util_dynarray_init(&fences, NULL);
> -hash_table_foreach(ctx->batches, hentry) {
> +hash_table_foreach(ctx->fbo_to_batch, hentry) {
>  struct panfrost_batch *batch = hentry->data;
>  
>  panfrost_batch_fence_reference(batch->out_sync);
> diff --git a/src/gallium/drivers/panfrost/pan_context.h 
> b/src/gallium/drivers/panfrost/pan_context.h
> index d50ed57d5d8a..f13967f51b46 100644
> --- a/src/gallium/drivers/panfrost/pan_context.h
> +++ b/src/gallium/drivers/panfrost/pan_context.h
> @@ -112,7 +112,7 @@ struct panfrost_context {
>  
>  /* Bound job batch and map of panfrost_batch_key to job batches */
>  struct panfrost_batch *batch;
> -struct hash_table *batches;
> +struct hash_table *fbo_to_batch;
>  
>  /* panfrost_bo -> panfrost_bo_access */
>  struct hash_table *accessed_bos;
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index a56f4044fda0..45f9d9d24b41 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -132,9 +132,9 @@ panfrost_freeze_batch(struct panfrost_batch *batch)
>   * matches. This way, next draws/clears targeting this FBO will 
> trigger
>   * the creation of a new batch.
>   */
> -entry = _mesa_hash_table_search(ctx->batches, &batch->key);
> +entry = _mesa_hash_table_search(ctx->fbo_to_batch, &batch->key);
>  if (entry && entry->data == batch)
> -_mesa_hash_table_remove(ctx->batches, entry);
> +_mesa_hash_table_remove(ctx->fbo_to_batch, entry);
>  
>  /* If this is the bound batch, the panfrost_context parameters are
>   * relevant so submitting it invalidates those parameters, but if 
> it's
> @@ -153,7 +153,7 @@ static bool panfrost_batch_is_frozen(struct 
> panfrost_batch *batch)
>  struct panfrost_context *ctx = batch->ctx;
>  struct hash_entry *entry;
>  
> -entry = _mesa_hash_table_search(ctx->batches, &batch->key);
> +entry = _mesa_hash_table_search(ctx->fbo_to_batch, &batch->key);
>  if (entry && entry->data == batch)
>  return false;
>  
> @@ -248,7 +248,8 @@ panfrost_get_batch(struct panfrost_context *ctx,
> const struct pipe_framebuffer_state *key)
>  {
>  /* Lookup the job first */
> -struct hash_entry *entry = _mesa_hash_table_search(ctx->batches, 
> key);
> +struct hash_entry *entry = _mesa_hash_table_search(ctx->fbo_to_batch,
> +   key);
>  
>  if (entry)
>  return entry->data;
> @@ -258,7 +259,7 @@ panfrost_get_batch(struct panfrost_context *ctx,
>  struct panfrost_batch *batch = panfrost_create_batch(ctx, key);
>  
>  /* Save the created job */
> -_mesa_hash_table_insert(ctx->batches, &batch->key, batch);
> +_mesa_hash_table_insert(ctx->fbo_to_batch, &batch->key, batch);
>  
>  return batch;
>  }
> @@ -915,7 +916,7 @@ panfrost_flush_all_batches(struct panfrost_context *ctx, 
> bool wait)
>  util_dynarray_init(, NULL);
>  }
>  
> -hash_table_foreach(ctx->batches, hentry) {
> +hash_table_foreach(ctx->fbo_to_batch, hentry) {
>  struct panfrost_batch *batch = hentry->data;
>  
>  assert(batch);
> @@ -931,7 +932,7 @@ panfrost_flush_all_batches(struct panfrost_context *ctx, 
> bool wait)
>  panfrost_batch_submit(batch);
>  }
>  
> -assert(!ctx->batches->entries);
> +assert(!ctx->fbo_to_batch->entries);
>  
>  /* Collect batch fences before returning */
>  panfrost_gc_fences(ctx);
> @@ -1183,9 +1184,9 @@ panfrost_batch_is_scanout(struct panfrost_batch *batch)
>  void
>  

Re: [Mesa-dev] [PATCH v3 15/17] panfrost: Rename ctx->batches into ctx->fbo_to_batch

2019-09-20 Thread Alyssa Rosenzweig
R-b
On Wed, Sep 18, 2019 at 03:24:37PM +0200, Boris Brezillon wrote:
> We are about to add a batch queue to keep track of submission order.
> Let's rename the existing batches hash table (which is used to get the
> batch attached to an FBO) into fbo_to_batch to avoid confusion.
> 
> Signed-off-by: Boris Brezillon 
> ---
>  src/gallium/drivers/panfrost/pan_context.c  |  2 +-
>  src/gallium/drivers/panfrost/pan_context.h  |  2 +-
>  src/gallium/drivers/panfrost/pan_job.c  | 21 +++--
>  src/gallium/drivers/panfrost/pan_resource.c | 16 
>  4 files changed, 21 insertions(+), 20 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_context.c 
> b/src/gallium/drivers/panfrost/pan_context.c
> index 07bafad58a00..0330b5852676 100644
> --- a/src/gallium/drivers/panfrost/pan_context.c
> +++ b/src/gallium/drivers/panfrost/pan_context.c
> @@ -1355,7 +1355,7 @@ panfrost_flush(
>   */
>  if (fence) {
>  util_dynarray_init(&fences, NULL);
> -hash_table_foreach(ctx->batches, hentry) {
> +hash_table_foreach(ctx->fbo_to_batch, hentry) {
>  struct panfrost_batch *batch = hentry->data;
>  
>  panfrost_batch_fence_reference(batch->out_sync);
> diff --git a/src/gallium/drivers/panfrost/pan_context.h 
> b/src/gallium/drivers/panfrost/pan_context.h
> index d50ed57d5d8a..f13967f51b46 100644
> --- a/src/gallium/drivers/panfrost/pan_context.h
> +++ b/src/gallium/drivers/panfrost/pan_context.h
> @@ -112,7 +112,7 @@ struct panfrost_context {
>  
>  /* Bound job batch and map of panfrost_batch_key to job batches */
>  struct panfrost_batch *batch;
> -struct hash_table *batches;
> +struct hash_table *fbo_to_batch;
>  
>  /* panfrost_bo -> panfrost_bo_access */
>  struct hash_table *accessed_bos;
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index a56f4044fda0..45f9d9d24b41 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -132,9 +132,9 @@ panfrost_freeze_batch(struct panfrost_batch *batch)
>   * matches. This way, next draws/clears targeting this FBO will 
> trigger
>   * the creation of a new batch.
>   */
> -entry = _mesa_hash_table_search(ctx->batches, &batch->key);
> +entry = _mesa_hash_table_search(ctx->fbo_to_batch, &batch->key);
>  if (entry && entry->data == batch)
> -_mesa_hash_table_remove(ctx->batches, entry);
> +_mesa_hash_table_remove(ctx->fbo_to_batch, entry);
>  
>  /* If this is the bound batch, the panfrost_context parameters are
>   * relevant so submitting it invalidates those parameters, but if 
> it's
> @@ -153,7 +153,7 @@ static bool panfrost_batch_is_frozen(struct 
> panfrost_batch *batch)
>  struct panfrost_context *ctx = batch->ctx;
>  struct hash_entry *entry;
>  
> -entry = _mesa_hash_table_search(ctx->batches, &batch->key);
> +entry = _mesa_hash_table_search(ctx->fbo_to_batch, &batch->key);
>  if (entry && entry->data == batch)
>  return false;
>  
> @@ -248,7 +248,8 @@ panfrost_get_batch(struct panfrost_context *ctx,
> const struct pipe_framebuffer_state *key)
>  {
>  /* Lookup the job first */
> -struct hash_entry *entry = _mesa_hash_table_search(ctx->batches, 
> key);
> +struct hash_entry *entry = _mesa_hash_table_search(ctx->fbo_to_batch,
> +   key);
>  
>  if (entry)
>  return entry->data;
> @@ -258,7 +259,7 @@ panfrost_get_batch(struct panfrost_context *ctx,
>  struct panfrost_batch *batch = panfrost_create_batch(ctx, key);
>  
>  /* Save the created job */
> -_mesa_hash_table_insert(ctx->batches, &batch->key, batch);
> +_mesa_hash_table_insert(ctx->fbo_to_batch, &batch->key, batch);
>  
>  return batch;
>  }
> @@ -915,7 +916,7 @@ panfrost_flush_all_batches(struct panfrost_context *ctx, 
> bool wait)
>  util_dynarray_init(, NULL);
>  }
>  
> -hash_table_foreach(ctx->batches, hentry) {
> +hash_table_foreach(ctx->fbo_to_batch, hentry) {
>  struct panfrost_batch *batch = hentry->data;
>  
>  assert(batch);
> @@ -931,7 +932,7 @@ panfrost_flush_all_batches(struct panfrost_context *ctx, 
> bool wait)
>  panfrost_batch_submit(batch);
>  }
>  
> -assert(!ctx->batches->entries);
> +assert(!ctx->fbo_to_batch->entries);
>  
>  /* Collect batch fences before returning */
>  panfrost_gc_fences(ctx);
> @@ -1183,9 +1184,9 @@ panfrost_batch_is_scanout(struct panfrost_batch *batch)
>  void
>  panfrost_batch_init(struct panfrost_context *ctx)
>  {
> -ctx->batches = _mesa_hash_table_create(ctx,
> -

Re: [Mesa-dev] [PATCH v3 14/17] panfrost: Do fine-grained flushing when preparing BO for CPU accesses

2019-09-20 Thread Alyssa Rosenzweig
Looks good, still r-b. But while we're here, let's get this right:

> @@ -578,10 +578,8 @@ panfrost_transfer_map(struct pipe_context *pctx,
>  is_bound |= fb->cbufs[c]->texture == resource;
>  }
>  
> -if (is_bound && (usage & PIPE_TRANSFER_READ)) {
> -assert(level == 0);
> -panfrost_flush_all_batches(ctx, true);
> -}
> +if (is_bound && (usage & PIPE_TRANSFER_READ))
> + assert(level == 0);

Everything from "Check if we're bound..." to "flush_all_batches()}" can
be removed along with this commit :)

I.e. just delete all the lines from L566 - L585 on master as of
46b7512b0a73 ... of course your line numbers are a bit different.
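
To make the intent concrete, the fine-grained replacement for that
flush-everything path looks roughly like the sketch below (the exact call
sites in pan_resource.c differ; the two helpers and the PAN_BO_ACCESS_*
flags are the ones added earlier in the series):

  /* CPU mapping: only sync against batches that actually touch this BO. */
  if (usage & PIPE_TRANSFER_WRITE) {
          /* A CPU write must wait for pending GPU readers and writers. */
          panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_ACCESS_RW);
          panfrost_bo_wait(bo, INT64_MAX, PAN_BO_ACCESS_RW);
  } else if (usage & PIPE_TRANSFER_READ) {
          /* A CPU read only needs pending GPU writes to have landed. */
          panfrost_flush_batches_accessing_bo(ctx, bo, PAN_BO_ACCESS_WRITE);
          panfrost_bo_wait(bo, INT64_MAX, PAN_BO_ACCESS_WRITE);
  }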

Re: [Mesa-dev] [PATCH v3 13/17] panfrost: Make sure the BO is 'ready' when picked from the cache

2019-09-20 Thread Alyssa Rosenzweig
Very nice! I'm quite happy with this version, all considered, so R-b!

On Wed, Sep 18, 2019 at 03:24:35PM +0200, Boris Brezillon wrote:
> This is needed if we want to free the panfrost_batch object at submit
> time in order to not have to GC the batch on the next job submission.
> 
> Signed-off-by: Boris Brezillon 
> ---
> Changes in v3:
> * Move the patch later in the series and squash "panfrost: Cache GPU
>   accesses to BOs" in it
> * Add extra comments to explain what we're doing
> ---
>  src/gallium/drivers/panfrost/pan_bo.c  | 112 -
>  src/gallium/drivers/panfrost/pan_bo.h  |   9 ++
>  src/gallium/drivers/panfrost/pan_job.c |  11 +++
>  3 files changed, 109 insertions(+), 23 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_bo.c 
> b/src/gallium/drivers/panfrost/pan_bo.c
> index 9daddf9d0cc2..37602688d630 100644
> --- a/src/gallium/drivers/panfrost/pan_bo.c
> +++ b/src/gallium/drivers/panfrost/pan_bo.c
> @@ -23,6 +23,7 @@
>   * Authors (Collabora):
>   *   Alyssa Rosenzweig 
>   */
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -101,6 +102,63 @@ panfrost_bo_free(struct panfrost_bo *bo)
>  ralloc_free(bo);
>  }
>  
> +/* Returns true if the BO is ready, false otherwise.
> + * access_type is encoding the type of access one wants to ensure is done.
> + * Say you want to make sure all writers are done writing, you should pass
> + * PAN_BO_ACCESS_WRITE.
> + * If you want to wait for all users, you should pass PAN_BO_ACCESS_RW.
> + * PAN_BO_ACCESS_READ would work too as waiting for readers implies
> + * waiting for writers as well, but we want to make things explicit and 
> waiting
> + * only for readers is impossible.
> + */
> +bool
> +panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns,
> + uint32_t access_type)
> +{
> +struct drm_panfrost_wait_bo req = {
> +.handle = bo->gem_handle,
> + .timeout_ns = timeout_ns,
> +};
> +int ret;
> +
> +assert(access_type == PAN_BO_ACCESS_WRITE ||
> +   access_type == PAN_BO_ACCESS_RW);
> +
> +/* If the BO has been exported or imported we can't rely on the 
> cached
> + * state, we need to call the WAIT_BO ioctl.
> + */
> +if (!(bo->flags & (PAN_BO_IMPORTED | PAN_BO_EXPORTED))) {
> +/* If ->gpu_access is 0, the BO is idle, no need to wait. */
> +if (!bo->gpu_access)
> +return true;
> +
> +/* If the caller only wants to wait for writers and no
> + * writes are pending, we don't have to wait.
> + */
> +if (access_type == PAN_BO_ACCESS_WRITE &&
> +!(bo->gpu_access & PAN_BO_ACCESS_WRITE))
> +return true;
> +}
> +
> +/* The ioctl returns >= 0 value when the BO we are waiting for is 
> ready
> + * -1 otherwise.
> + */
> +ret = drmIoctl(bo->screen->fd, DRM_IOCTL_PANFROST_WAIT_BO, &req);
> +if (ret != -1) {
> +/* Set gpu_access to 0 so that the next call to bo_wait()
> + * doesn't have to call the WAIT_BO ioctl.
> + */
> +bo->gpu_access = 0;
> +return true;
> +}
> +
> +/* If errno is not ETIMEDOUT or EBUSY that means the handle we passed
> + * is invalid, which shouldn't happen here.
> + */
> +assert(errno == ETIMEDOUT || errno == EBUSY);
> +return false;
> +}
> +
>  /* Helper to calculate the bucket index of a BO */
>  
>  static unsigned
> @@ -137,9 +195,8 @@ pan_bucket(struct panfrost_screen *screen, unsigned size)
>   * BO. */
>  
>  static struct panfrost_bo *
> -panfrost_bo_cache_fetch(
> -struct panfrost_screen *screen,
> -size_t size, uint32_t flags)
> +panfrost_bo_cache_fetch(struct panfrost_screen *screen,
> +size_t size, uint32_t flags, bool dontwait)
>  {
>  pthread_mutex_lock(&screen->bo_cache_lock);
>  struct list_head *bucket = pan_bucket(screen, size);
> @@ -147,27 +204,30 @@ panfrost_bo_cache_fetch(
>  
>  /* Iterate the bucket looking for something suitable */
>  list_for_each_entry_safe(struct panfrost_bo, entry, bucket, link) {
> -if (entry->size >= size &&
> -entry->flags == flags) {
> -int ret;
> -struct drm_panfrost_madvise madv;
> +if (entry->size < size || entry->flags != flags)
> +continue;
>  
> -/* This one works, splice it out of the cache */
> -list_del(&entry->link);
> +if (!panfrost_bo_wait(entry, dontwait ? 0 : INT64_MAX,
> +  PAN_BO_ACCESS_RW))
> +continue;
>  
> -madv.handle = 
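
The dontwait flag is what lets the allocator prefer an idle cached BO
without stalling. A simplified sketch of the resulting allocation order
(panfrost_bo_alloc is a stand-in name for the plain kernel allocation
path, not necessarily what the patch calls it):

  /* 1. Try to reuse a cached BO that is already idle. */
  bo = panfrost_bo_cache_fetch(screen, size, flags, true);

  /* 2. Otherwise allocate a fresh BO from the kernel. */
  if (!bo)
          bo = panfrost_bo_alloc(screen, size, flags);

  /* 3. As a last resort, block until a cached BO becomes ready. */
  if (!bo)
          bo = panfrost_bo_cache_fetch(screen, size, flags, false);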

Re: [Mesa-dev] [PATCH v3 08/17] panfrost: Add a panfrost_flush_all_batches() helper

2019-09-20 Thread Alyssa Rosenzweig
(Still r-b)

On Wed, Sep 18, 2019 at 03:24:30PM +0200, Boris Brezillon wrote:
> And use it in panfrost_flush() to flush all batches, and not only the
> one currently bound to the context.
> 
> We also replace all internal calls to panfrost_flush() by
> panfrost_flush_all_batches() ones.
> 
> Signed-off-by: Boris Brezillon 
> Reviewed-by: Alyssa Rosenzweig 
> ---
> Changes in v3:
> * Add missing blank line
> * Collect R-b
> ---
>  src/gallium/drivers/panfrost/pan_compute.c  |  2 +-
>  src/gallium/drivers/panfrost/pan_context.c  | 23 +++
>  src/gallium/drivers/panfrost/pan_job.c  | 46 -
>  src/gallium/drivers/panfrost/pan_job.h  |  2 +-
>  src/gallium/drivers/panfrost/pan_resource.c |  6 +--
>  5 files changed, 64 insertions(+), 15 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_compute.c 
> b/src/gallium/drivers/panfrost/pan_compute.c
> index 4639c1b03c38..036dffbb17be 100644
> --- a/src/gallium/drivers/panfrost/pan_compute.c
> +++ b/src/gallium/drivers/panfrost/pan_compute.c
> @@ -133,7 +133,7 @@ panfrost_launch_grid(struct pipe_context *pipe,
>  /* Queue the job */
>  panfrost_scoreboard_queue_compute_job(batch, transfer);
>  
> -panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
> +panfrost_flush_all_batches(ctx, true);
>  }
>  
>  void
> diff --git a/src/gallium/drivers/panfrost/pan_context.c 
> b/src/gallium/drivers/panfrost/pan_context.c
> index aad69e3f9991..861b4b621602 100644
> --- a/src/gallium/drivers/panfrost/pan_context.c
> +++ b/src/gallium/drivers/panfrost/pan_context.c
> @@ -1348,7 +1348,6 @@ panfrost_flush(
>  unsigned flags)
>  {
>  struct panfrost_context *ctx = pan_context(pipe);
> -struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
>  struct util_dynarray fences;
>  
>  /* We must collect the fences before the flush is done, otherwise 
> we'll
> @@ -1356,13 +1355,18 @@ panfrost_flush(
>   */
>  if (fence) {
>  util_dynarray_init(&fences, NULL);
> -panfrost_batch_fence_reference(batch->out_sync);
> -util_dynarray_append(&fences, struct panfrost_batch_fence *,
> - batch->out_sync);
> +hash_table_foreach(ctx->batches, hentry) {
> +struct panfrost_batch *batch = hentry->data;
> +
> +panfrost_batch_fence_reference(batch->out_sync);
> +util_dynarray_append(&fences,
> + struct panfrost_batch_fence *,
> + batch->out_sync);
> +}
>  }
>  
> -/* Submit the frame itself */
> -panfrost_batch_submit(batch);
> +/* Submit all pending jobs */
> +panfrost_flush_all_batches(ctx, false);
>  
>  if (fence) {
>  struct panfrost_fence *f = panfrost_fence_create(ctx, 
> &fences);
> @@ -2321,7 +2325,7 @@ panfrost_set_framebuffer_state(struct pipe_context 
> *pctx,
>  }
>  
>  if (!is_scanout || has_draws)
> -panfrost_flush(pctx, NULL, PIPE_FLUSH_END_OF_FRAME);
> +panfrost_flush_all_batches(ctx, true);
>  else
>  
> assert(!ctx->payloads[PIPE_SHADER_VERTEX].postfix.framebuffer &&
> 
> !ctx->payloads[PIPE_SHADER_FRAGMENT].postfix.framebuffer);
> @@ -2553,6 +2557,7 @@ panfrost_get_query_result(struct pipe_context *pipe,
>union pipe_query_result *vresult)
>  {
>  struct panfrost_query *query = (struct panfrost_query *) q;
> +struct panfrost_context *ctx = pan_context(pipe);
>  
>  
>  switch (query->type) {
> @@ -2560,7 +2565,7 @@ panfrost_get_query_result(struct pipe_context *pipe,
>  case PIPE_QUERY_OCCLUSION_PREDICATE:
>  case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
>  /* Flush first */
> -panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
> +panfrost_flush_all_batches(ctx, true);
>  
>  /* Read back the query results */
>  unsigned *result = (unsigned *) query->transfer.cpu;
> @@ -2576,7 +2581,7 @@ panfrost_get_query_result(struct pipe_context *pipe,
>  
>  case PIPE_QUERY_PRIMITIVES_GENERATED:
>  case PIPE_QUERY_PRIMITIVES_EMITTED:
> -panfrost_flush(pipe, NULL, PIPE_FLUSH_END_OF_FRAME);
> +panfrost_flush_all_batches(ctx, true);
>  vresult->u64 = query->end - query->start;
>  break;
>  
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index 211e48bafd4e..3ccf4bb6b3e9 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -856,7 +856,7 @@ panfrost_batch_submit_jobs(struct panfrost_batch *batch)
>  return ret;
>  }
>  
> 

Re: [Mesa-dev] [PATCH v3 09/17] panfrost: Add a panfrost_flush_batches_accessing_bo() helper

2019-09-20 Thread Alyssa Rosenzweig
"
On Wed, Sep 18, 2019 at 03:24:31PM +0200, Boris Brezillon wrote:
> This will allow us to only flush batches touching a specific resource,
> which is particularly useful when the CPU needs to access a BO.
> 
> Signed-off-by: Boris Brezillon 
> Reviewed-by: Alyssa Rosenzweig 
> ---
> Changes in v3:
> * Collect R-b
> ---
>  src/gallium/drivers/panfrost/pan_job.c | 31 ++
>  src/gallium/drivers/panfrost/pan_job.h |  4 
>  2 files changed, 35 insertions(+)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index 3ccf4bb6b3e9..e7eae399830f 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -952,6 +952,37 @@ panfrost_flush_all_batches(struct panfrost_context *ctx, 
> bool wait)
>  util_dynarray_fini();
>  }
>  
> +void
> +panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
> +struct panfrost_bo *bo,
> +uint32_t access_type)
> +{
> +struct panfrost_bo_access *access;
> +struct hash_entry *hentry;
> +
> +/* It doesn't make any sense to flush only the readers. */
> +assert(access_type == PAN_BO_ACCESS_WRITE ||
> +   access_type == PAN_BO_ACCESS_RW);
> +
> +hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
> +access = hentry ? hentry->data : NULL;
> +if (!access)
> +return;
> +
> +if (access_type & PAN_BO_ACCESS_WRITE && access->writer &&
> +access->writer->batch)
> +panfrost_batch_submit(access->writer->batch);
> +
> +if (!(access_type & PAN_BO_ACCESS_READ))
> +return;
> +
> +util_dynarray_foreach(&access->readers, struct panfrost_batch_fence 
> *,
> +  reader) {
> +if (*reader && (*reader)->batch)
> +panfrost_batch_submit((*reader)->batch);
> +}
> +}
> +
>  void
>  panfrost_batch_set_requirements(struct panfrost_batch *batch)
>  {
> diff --git a/src/gallium/drivers/panfrost/pan_job.h 
> b/src/gallium/drivers/panfrost/pan_job.h
> index e95e156a40f8..25905b516739 100644
> --- a/src/gallium/drivers/panfrost/pan_job.h
> +++ b/src/gallium/drivers/panfrost/pan_job.h
> @@ -185,6 +185,10 @@ panfrost_batch_create_bo(struct panfrost_batch *batch, 
> size_t size,
>  void
>  panfrost_flush_all_batches(struct panfrost_context *ctx, bool wait);
>  
> +void
> +panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
> +struct panfrost_bo *bo, uint32_t flags);
> +
>  void
>  panfrost_batch_set_requirements(struct panfrost_batch *batch);
>  
> -- 
> 2.21.0

Re: [Mesa-dev] [PATCH v3 07/17] panfrost: Prepare panfrost_fence for batch pipelining

2019-09-20 Thread Alyssa Rosenzweig
(Still r-b)

On Wed, Sep 18, 2019 at 03:24:29PM +0200, Boris Brezillon wrote:
> The panfrost_fence logic currently waits on the last submitted batch,
> but the batch serialization that was enforced in
> panfrost_batch_submit() is about to go away, allowing for several
> batches to be pipelined, and the last submitted one is not necessarily
> the one that will finish last.
> 
> We need to make sure the fence logic waits on all flushed batches, not
> only the last one.
> 
> Signed-off-by: Boris Brezillon 
> Reviewed-by: Alyssa Rosenzweig 
> ---
> Changes in v3:
> * Fix a comment
> * Adjust things to match the changes done in "panfrost: Add a batch fence"
> ---
>  src/gallium/drivers/panfrost/pan_context.c | 18 +-
>  src/gallium/drivers/panfrost/pan_context.h |  5 +-
>  src/gallium/drivers/panfrost/pan_job.c | 16 -
>  src/gallium/drivers/panfrost/pan_screen.c  | 71 +++---
>  src/gallium/drivers/panfrost/pan_screen.h  |  3 +-
>  5 files changed, 57 insertions(+), 56 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_context.c 
> b/src/gallium/drivers/panfrost/pan_context.c
> index 312a9e93e455..aad69e3f9991 100644
> --- a/src/gallium/drivers/panfrost/pan_context.c
> +++ b/src/gallium/drivers/panfrost/pan_context.c
> @@ -1349,14 +1349,30 @@ panfrost_flush(
>  {
>  struct panfrost_context *ctx = pan_context(pipe);
>  struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
> +struct util_dynarray fences;
> +
> +/* We must collect the fences before the flush is done, otherwise 
> we'll
> + * lose track of them.
> + */
> +if (fence) {
> +util_dynarray_init(&fences, NULL);
> +panfrost_batch_fence_reference(batch->out_sync);
> +util_dynarray_append(&fences, struct panfrost_batch_fence *,
> + batch->out_sync);
> +}
>  
>  /* Submit the frame itself */
>  panfrost_batch_submit(batch);
>  
>  if (fence) {
> -struct panfrost_fence *f = panfrost_fence_create(ctx);
> +struct panfrost_fence *f = panfrost_fence_create(ctx, 
> &fences);
>  pipe->screen->fence_reference(pipe->screen, fence, NULL);
>  *fence = (struct pipe_fence_handle *)f;
> +
> +util_dynarray_foreach(&fences, struct panfrost_batch_fence 
> *, fence)
> +panfrost_batch_fence_unreference(*fence);
> +
> +util_dynarray_fini(&fences);
>  }
>  }
>  
> diff --git a/src/gallium/drivers/panfrost/pan_context.h 
> b/src/gallium/drivers/panfrost/pan_context.h
> index 3b09952345cf..d50ed57d5d8a 100644
> --- a/src/gallium/drivers/panfrost/pan_context.h
> +++ b/src/gallium/drivers/panfrost/pan_context.h
> @@ -94,7 +94,7 @@ struct panfrost_query {
>  
>  struct panfrost_fence {
>  struct pipe_reference reference;
> -int fd;
> +struct util_dynarray syncfds;
>  };
>  
>  struct panfrost_streamout {
> @@ -193,9 +193,6 @@ struct panfrost_context {
>  
>  /* True for t6XX, false for t8xx. */
>  bool is_t6xx;
> -
> -/* The out sync fence of the last submitted batch. */
> -struct panfrost_batch_fence *last_out_sync;
>  };
>  
>  /* Corresponds to the CSO */
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index b0494af3482f..211e48bafd4e 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -819,13 +819,6 @@ panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
>  free(bo_handles);
>  free(in_syncs);
>  
> -/* Release the last batch fence if any, and retain the new one */
> -if (ctx->last_out_sync)
> -panfrost_batch_fence_unreference(ctx->last_out_sync);
> -
> -panfrost_batch_fence_reference(batch->out_sync);
> -ctx->last_out_sync = batch->out_sync;
> -
>  if (ret) {
>  fprintf(stderr, "Error submitting: %m\n");
>  return errno;
> @@ -884,15 +877,6 @@ panfrost_batch_submit(struct panfrost_batch *batch)
>   * to wait on it.
>   */
>  batch->out_sync->signaled = true;
> -
> -/* Release the last batch fence if any, and set 
> ->last_out_sync
> - * to NULL
> - */
> -if (ctx->last_out_sync) {
> -panfrost_batch_fence_unreference(ctx->last_out_sync);
> -ctx->last_out_sync = NULL;
> -}
> -
>  goto out;
>  }
>  
> diff --git a/src/gallium/drivers/panfrost/pan_screen.c 
> b/src/gallium/drivers/panfrost/pan_screen.c
> index e2c31f7f8213..55c66e0c9a79 100644
> --- a/src/gallium/drivers/panfrost/pan_screen.c
> +++ b/src/gallium/drivers/panfrost/pan_screen.c
> @@ -575,7 +575,9 @@ panfrost_fence_reference(struct pipe_screen 
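
With several batches in flight, the pipe fence has to wait on every
exported sync FD rather than on a single one. A sketch of what that wait
can look like; the helper name and the use of sync_wait() are
illustrative, only the syncfds dynarray comes from the patch:

  static bool
  panfrost_fence_wait_all(struct panfrost_fence *f, int timeout_ms)
  {
          /* The fence is only considered signaled once every flushed batch
           * that contributed a sync FD has completed.
           */
          util_dynarray_foreach(&f->syncfds, int, fd) {
                  if (sync_wait(*fd, timeout_ms) < 0)
                          return false;
          }

          return true;
  }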

Re: [Mesa-dev] [PATCH v3 06/17] panfrost: Start tracking inter-batch dependencies

2019-09-20 Thread Alyssa Rosenzweig
R-b. nice work!

On Wed, Sep 18, 2019 at 03:24:28PM +0200, Boris Brezillon wrote:
> The idea is to track which BOs are being accessed and the type of access
> to determine when a dependency exists. Thanks to that we can build a
> dependency graph that will allow us to flush batches in the correct
> order.
> 
> Signed-off-by: Boris Brezillon 
> ---
> Changes in v3:
> * Fix coding style issues
> * Do not check for batch presence in the reader array when updating
>   a BO access (we already have this information)
> * Add more comments to explain what we're doing and why we're doing
>   it like that
> ---
>  src/gallium/drivers/panfrost/pan_context.h |   3 +
>  src/gallium/drivers/panfrost/pan_job.c | 355 -
>  src/gallium/drivers/panfrost/pan_job.h |   3 +
>  3 files changed, 356 insertions(+), 5 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_context.h 
> b/src/gallium/drivers/panfrost/pan_context.h
> index ce3e0c899a4f..3b09952345cf 100644
> --- a/src/gallium/drivers/panfrost/pan_context.h
> +++ b/src/gallium/drivers/panfrost/pan_context.h
> @@ -114,6 +114,9 @@ struct panfrost_context {
>  struct panfrost_batch *batch;
>  struct hash_table *batches;
>  
> +/* panfrost_bo -> panfrost_bo_access */
> +struct hash_table *accessed_bos;
> +
>  /* Within a launch_grid call.. */
>  const struct pipe_grid_info *compute_grid;
>  
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index 872c846207bf..b0494af3482f 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -36,6 +36,29 @@
>  #include "pan_util.h"
>  #include "pandecode/decode.h"
>  
> +/* panfrost_bo_access is here to help us keep track of batch accesses to BOs
> + * and build a proper dependency graph such that batches can be pipelined for
> + * better GPU utilization.
> + *
> + * Each accessed BO has a corresponding entry in the ->accessed_bos hash 
> table.
> + * A BO is either being written or read at any time, that's what the type 
> field
> + * encodes.
> + * When the last access is a write, the batch writing the BO might have read
> + * dependencies (readers that have not been executed yet and want to read the
> + * previous BO content), and when the last access is a read, all readers 
> might
> + * depend on another batch to push its results to memory. That's what the
> + * readers/writers keep track of.
> + * There can only be one writer at any given time, if a new batch wants to
> + * write to the same BO, a dependency will be added between the new writer 
> and
> + * the old writer (at the batch level), and panfrost_bo_access->writer will 
> be
> + * updated to point to the new writer.
> + */
> +struct panfrost_bo_access {
> +uint32_t type;
> +struct util_dynarray readers;
> +struct panfrost_batch_fence *writer;
> +};
> +
>  static struct panfrost_batch_fence *
>  panfrost_create_batch_fence(struct panfrost_batch *batch)
>  {
> @@ -92,6 +115,7 @@ panfrost_create_batch(struct panfrost_context *ctx,
>  
>  util_dynarray_init(&batch->headers, batch);
>  util_dynarray_init(&batch->gpu_headers, batch);
> +util_dynarray_init(&batch->dependencies, batch);
>  batch->out_sync = panfrost_create_batch_fence(batch);
>  util_copy_framebuffer_state(&batch->key, key);
>  
> @@ -151,6 +175,11 @@ panfrost_free_batch(struct panfrost_batch *batch)
>  hash_table_foreach(batch->bos, entry)
>  panfrost_bo_unreference((struct panfrost_bo *)entry->key);
>  
> +util_dynarray_foreach(&batch->dependencies,
> +  struct panfrost_batch_fence *, dep) {
> +panfrost_batch_fence_unreference(*dep);
> +}
> +
>  /* The out_sync fence lifetime is different from the batch one
>   * since other batches might want to wait on a fence of already
>   * submitted/signaled batch. All we need to do here is make sure the
> @@ -164,6 +193,56 @@ panfrost_free_batch(struct panfrost_batch *batch)
>  ralloc_free(batch);
>  }
>  
> +#ifndef NDEBUG
> +static bool
> +panfrost_dep_graph_contains_batch(struct panfrost_batch *root,
> +  struct panfrost_batch *batch)
> +{
> +if (!root)
> +return false;
> +
> +util_dynarray_foreach(&root->dependencies,
> +  struct panfrost_batch_fence *, dep) {
> +if ((*dep)->batch == batch ||
> +panfrost_dep_graph_contains_batch((*dep)->batch, batch))
> +return true;
> +}
> +
> +return false;
> +}
> +#endif
> +
> +static void
> +panfrost_batch_add_dep(struct panfrost_batch *batch,
> +   struct panfrost_batch_fence *newdep)
> +{
> +if (batch == newdep->batch)
> +return;
> +
> +/* We might want to turn ->dependencies into a set 
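
The reader/writer bookkeeping described in the comment above turns into
dependency edges roughly as follows. This is a sketch: the helper name is
made up, and the real update code in the patch also handles fence
refcounting and already-flushed batches.

  static void
  add_bo_access_deps(struct panfrost_batch *batch,
                     struct panfrost_bo_access *access, bool writes)
  {
          if (writes) {
                  /* A new writer must wait for the previous writer and for
                   * every reader of the old BO content.
                   */
                  if (access->writer)
                          panfrost_batch_add_dep(batch, access->writer);

                  util_dynarray_foreach(&access->readers,
                                        struct panfrost_batch_fence *,
                                        reader) {
                          if (*reader)
                                  panfrost_batch_add_dep(batch, *reader);
                  }
          } else if (access->writer) {
                  /* A new reader only has to wait for the last writer. */
                  panfrost_batch_add_dep(batch, access->writer);
          }
  }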

Re: [Mesa-dev] [PATCH v3 05/17] panfrost: Add a panfrost_freeze_batch() helper

2019-09-20 Thread Alyssa Rosenzweig
Still r-b, but please use the collabora address:)
On Wed, Sep 18, 2019 at 03:24:27PM +0200, Boris Brezillon wrote:
> We'll soon need to freeze a batch not only when it's flushed, but also
> when another batch depends on us, so let's add a helper to avoid
> duplicating the logic.
> 
> Signed-off-by: Boris Brezillon 
> Reviewed-by: Alyssa Rosenzweig 
> ---
> Changes in v3:
> * Collect R-b
> ---
>  src/gallium/drivers/panfrost/pan_job.c | 62 ++
>  1 file changed, 44 insertions(+), 18 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index 55780dd3d9d6..872c846207bf 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -98,22 +98,59 @@ panfrost_create_batch(struct panfrost_context *ctx,
>  return batch;
>  }
>  
> +static void
> +panfrost_freeze_batch(struct panfrost_batch *batch)
> +{
> +struct panfrost_context *ctx = batch->ctx;
> +struct hash_entry *entry;
> +
> +/* Remove the entry in the FBO -> batch hash table if the batch
> + * matches. This way, next draws/clears targeting this FBO will 
> trigger
> + * the creation of a new batch.
> + */
> +entry = _mesa_hash_table_search(ctx->batches, &batch->key);
> +if (entry && entry->data == batch)
> +_mesa_hash_table_remove(ctx->batches, entry);
> +
> +/* If this is the bound batch, the panfrost_context parameters are
> + * relevant so submitting it invalidates those parameters, but if 
> it's
> + * not bound, the context parameters are for some other batch so we
> + * can't invalidate them.
> + */
> +if (ctx->batch == batch) {
> +panfrost_invalidate_frame(ctx);
> +ctx->batch = NULL;
> +}
> +}
> +
> +#ifndef NDEBUG
> +static bool panfrost_batch_is_frozen(struct panfrost_batch *batch)
> +{
> +struct panfrost_context *ctx = batch->ctx;
> +struct hash_entry *entry;
> +
> +entry = _mesa_hash_table_search(ctx->batches, &batch->key);
> +if (entry && entry->data == batch)
> +return false;
> +
> +if (ctx->batch == batch)
> +return false;
> +
> +return true;
> +}
> +#endif
> +
>  static void
>  panfrost_free_batch(struct panfrost_batch *batch)
>  {
>  if (!batch)
>  return;
>  
> -struct panfrost_context *ctx = batch->ctx;
> +assert(panfrost_batch_is_frozen(batch));
>  
>  hash_table_foreach(batch->bos, entry)
>  panfrost_bo_unreference((struct panfrost_bo *)entry->key);
>  
> -_mesa_hash_table_remove_key(ctx->batches, &batch->key);
> -
> -if (ctx->batch == batch)
> -ctx->batch = NULL;
> -
>  /* The out_sync fence lifetime is different from the batch one
>   * since other batches might want to wait on a fence of already
>   * submitted/signaled batch. All we need to do here is make sure the
> @@ -529,19 +566,8 @@ panfrost_batch_submit(struct panfrost_batch *batch)
>  fprintf(stderr, "panfrost_batch_submit failed: %d\n", ret);
>  
>  out:
> -/* If this is the bound batch, the panfrost_context parameters are
> - * relevant so submitting it invalidates those paramaters, but if 
> it's
> - * not bound, the context parameters are for some other batch so we
> - * can't invalidate them.
> - */
> -if (ctx->batch == batch)
> -panfrost_invalidate_frame(ctx);
> -
> -/* The job has been submitted, let's invalidate the current FBO job
> - * cache.
> -  */
> +panfrost_freeze_batch(batch);
>  assert(!ctx->batch || batch == ctx->batch);
> -ctx->batch = NULL;
>  
>  /* We always stall the pipeline for correct results since pipelined
>   * rendering is quite broken right now (to be fixed by the 
> panfrost_job
> -- 
> 2.21.0

Re: [Mesa-dev] [PATCH v3 04/17] panfrost: Use the per-batch fences to wait on the last submitted batch

2019-09-20 Thread Alyssa Rosenzweig
R-b
On Wed, Sep 18, 2019 at 03:24:26PM +0200, Boris Brezillon wrote:
> We just replace the per-context out_sync object by a pointer to the
> the fence of the last last submitted batch. Pipelining of batches will
> come later.
> 
> Signed-off-by: Boris Brezillon 
> ---
> Alyssa, I dropped your R-b since the other changes you asked me to do
> in "panfrost: Add a batch fence" had some impact on this patch.
> 
> Changes in v3:
> * Make sure we don't try to wait on dummy batches (those with no
>   vertex/tiler/fragment jobs)
> ---
>  src/gallium/drivers/panfrost/pan_context.c |  6 
>  src/gallium/drivers/panfrost/pan_context.h |  3 +-
>  src/gallium/drivers/panfrost/pan_job.c | 35 ++
>  src/gallium/drivers/panfrost/pan_screen.c  | 18 +--
>  4 files changed, 47 insertions(+), 15 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_context.c 
> b/src/gallium/drivers/panfrost/pan_context.c
> index 65a6c7f8c5ae..312a9e93e455 100644
> --- a/src/gallium/drivers/panfrost/pan_context.c
> +++ b/src/gallium/drivers/panfrost/pan_context.c
> @@ -2702,12 +2702,6 @@ panfrost_create_context(struct pipe_screen *screen, 
> void *priv, unsigned flags)
>  panfrost_blend_context_init(gallium);
>  panfrost_compute_context_init(gallium);
>  
> -ASSERTED int ret;
> -
> -ret = drmSyncobjCreate(pscreen->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
> -   &ctx->out_sync);
> -assert(!ret);
> -
>  /* XXX: leaks */
>  gallium->stream_uploader = u_upload_create_default(gallium);
>  gallium->const_uploader = gallium->stream_uploader;
> diff --git a/src/gallium/drivers/panfrost/pan_context.h 
> b/src/gallium/drivers/panfrost/pan_context.h
> index c145d589757e..ce3e0c899a4f 100644
> --- a/src/gallium/drivers/panfrost/pan_context.h
> +++ b/src/gallium/drivers/panfrost/pan_context.h
> @@ -191,7 +191,8 @@ struct panfrost_context {
>  /* True for t6XX, false for t8xx. */
>  bool is_t6xx;
>  
> -uint32_t out_sync;
> +/* The out sync fence of the last submitted batch. */
> +struct panfrost_batch_fence *last_out_sync;
>  };
>  
>  /* Corresponds to the CSO */
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index b6763da66a97..55780dd3d9d6 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -425,11 +425,13 @@ panfrost_batch_submit_ioctl(struct panfrost_batch 
> *batch,
>  uint32_t *bo_handles;
>  int ret;
>  
> -submit.in_syncs = (u64) (uintptr_t) &ctx->out_sync;
> -submit.in_sync_count = 1;
>  
> -submit.out_sync = ctx->out_sync;
> +if (ctx->last_out_sync) {
> +submit.in_sync_count = 1;
> +submit.in_syncs = (uintptr_t)&ctx->last_out_sync->syncobj;
> +}
>  
> +submit.out_sync = batch->out_sync->syncobj;
>  submit.jc = first_job_desc;
>  submit.requirements = reqs;
>  
> @@ -445,6 +447,14 @@ panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
>  submit.bo_handles = (u64) (uintptr_t) bo_handles;
>  ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_SUBMIT, );
>  free(bo_handles);
> +
> +/* Release the last batch fence if any, and retain the new one */
> +if (ctx->last_out_sync)
> +panfrost_batch_fence_unreference(ctx->last_out_sync);
> +
> +panfrost_batch_fence_reference(batch->out_sync);
> +ctx->last_out_sync = batch->out_sync;
> +
>  if (ret) {
>  fprintf(stderr, "Error submitting: %m\n");
>  return errno;
> @@ -453,7 +463,8 @@ panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
>  /* Trace the job if we're doing that */
>  if (pan_debug & PAN_DBG_TRACE) {
>  /* Wait so we can get errors reported back */
> -drmSyncobjWait(screen->fd, &ctx->out_sync, 1, INT64_MAX, 0, 
> NULL);
> +drmSyncobjWait(screen->fd, &batch->out_sync->syncobj, 1,
> +   INT64_MAX, 0, NULL);
>  pandecode_jc(submit.jc, FALSE);
>  }
>  
> @@ -495,6 +506,15 @@ panfrost_batch_submit(struct panfrost_batch *batch)
>   * to wait on it.
>   */
>  batch->out_sync->signaled = true;
> +
> +/* Release the last batch fence if any, and set 
> ->last_out_sync
> + * to NULL
> + */
> +if (ctx->last_out_sync) {
> +panfrost_batch_fence_unreference(ctx->last_out_sync);
> +ctx->last_out_sync = NULL;
> +}
> +
>  goto out;
>  }
>  
> @@ -527,8 +547,11 @@ out:
>   * rendering is quite broken right now (to be fixed by the 
> panfrost_job
>   * refactor, just take the perf hit for correctness)
>   */
> -   

Re: [Mesa-dev] [PATCH v3 03/17] panfrost: Add a batch fence

2019-09-20 Thread Alyssa Rosenzweig
R-b

On Wed, Sep 18, 2019 at 03:24:25PM +0200, Boris Brezillon wrote:
> So we can implement fine-grained dependency tracking between batches.
> 
> Signed-off-by: Boris Brezillon 
> ---
> Changes in v3:
> * Fix typos
> * Do not initialize the syncobj in a signaled state, and set
>   fence->signaled to true when submitting a dummy batch (one with no
>   draw/clear queued)
> ---
>  src/gallium/drivers/panfrost/pan_job.c | 56 +-
>  src/gallium/drivers/panfrost/pan_job.h | 39 ++
>  2 files changed, 94 insertions(+), 1 deletion(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index 785317dbd0b0..b6763da66a97 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -36,6 +36,45 @@
>  #include "pan_util.h"
>  #include "pandecode/decode.h"
>  
> +static struct panfrost_batch_fence *
> +panfrost_create_batch_fence(struct panfrost_batch *batch)
> +{
> +struct panfrost_batch_fence *fence;
> +ASSERTED int ret;
> +
> +fence = rzalloc(NULL, struct panfrost_batch_fence);
> +assert(fence);
> +pipe_reference_init(&fence->reference, 1);
> +fence->ctx = batch->ctx;
> +fence->batch = batch;
> +ret = drmSyncobjCreate(pan_screen(batch->ctx->base.screen)->fd, 0,
> +   &fence->syncobj);
> +assert(!ret);
> +
> +return fence;
> +}
> +
> +static void
> +panfrost_free_batch_fence(struct panfrost_batch_fence *fence)
> +{
> +drmSyncobjDestroy(pan_screen(fence->ctx->base.screen)->fd,
> +  fence->syncobj);
> +ralloc_free(fence);
> +}
> +
> +void
> +panfrost_batch_fence_unreference(struct panfrost_batch_fence *fence)
> +{
> +if (pipe_reference(&fence->reference, NULL))
> + panfrost_free_batch_fence(fence);
> +}
> +
> +void
> +panfrost_batch_fence_reference(struct panfrost_batch_fence *fence)
> +{
> +pipe_reference(NULL, &fence->reference);
> +}
> +
>  static struct panfrost_batch *
>  panfrost_create_batch(struct panfrost_context *ctx,
>const struct pipe_framebuffer_state *key)
> @@ -53,6 +92,7 @@ panfrost_create_batch(struct panfrost_context *ctx,
>  
>  util_dynarray_init(&batch->headers, batch);
>  util_dynarray_init(&batch->gpu_headers, batch);
> +batch->out_sync = panfrost_create_batch_fence(batch);
>  util_copy_framebuffer_state(&batch->key, key);
>  
>  return batch;
> @@ -74,6 +114,15 @@ panfrost_free_batch(struct panfrost_batch *batch)
>  if (ctx->batch == batch)
>  ctx->batch = NULL;
>  
> +/* The out_sync fence lifetime is different from the batch one
> + * since other batches might want to wait on a fence of already
> + * submitted/signaled batch. All we need to do here is make sure the
> + * fence does not point to an invalid batch, which the core will
> + * interpret as 'batch is already submitted'.
> + */
> +batch->out_sync->batch = NULL;
> +panfrost_batch_fence_unreference(batch->out_sync);
> +
>  util_unreference_framebuffer_state(&batch->key);
>  ralloc_free(batch);
>  }
> @@ -441,8 +490,13 @@ panfrost_batch_submit(struct panfrost_batch *batch)
>  int ret;
>  
>  /* Nothing to do! */
> -if (!batch->last_job.gpu && !batch->clear)
> +if (!batch->last_job.gpu && !batch->clear) {
> +/* Mark the fence as signaled so the fence logic does not try
> + * to wait on it.
> + */
> +batch->out_sync->signaled = true;
>  goto out;
> +}
>  
>  if (!batch->clear && batch->last_tiler.gpu)
>  panfrost_batch_draw_wallpaper(batch);
> diff --git a/src/gallium/drivers/panfrost/pan_job.h 
> b/src/gallium/drivers/panfrost/pan_job.h
> index 3f2cf1a999f3..88f1e4620fd0 100644
> --- a/src/gallium/drivers/panfrost/pan_job.h
> +++ b/src/gallium/drivers/panfrost/pan_job.h
> @@ -31,6 +31,36 @@
>  #include "pan_allocate.h"
>  #include "pan_resource.h"
>  
> +/* panfrost_batch_fence is the out fence of a batch that users or other 
> batches
> + * might want to wait on. The batch fence lifetime is different from the 
> batch
> + * one as we will certainly want to wait upon the fence after the batch has
> + * been submitted (which is when panfrost_batch objects are freed).
> + */
> +struct panfrost_batch_fence {
> +/* Refcounting object for the fence. */
> +struct pipe_reference reference;
> +
> +/* Batch that created this fence object. Will become NULL at batch
> + * submission time. This field is mainly here to know whether the
> + * batch has been flushed or not.
> + */
> +struct panfrost_batch *batch;
> +
> +/* Context this fence is attached to. We need both ctx and batch, as
> + * the batch will go away 
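
The ->batch pointer is what tells a consumer whether it has to flush
before waiting. A sketch of the expected usage (the helper name is
invented; the signaled flag and the syncobj come from the patch):

  static void
  wait_on_batch_fence(struct panfrost_screen *screen,
                      struct panfrost_batch_fence *fence)
  {
          /* Still attached to a live batch: the work has not been
           * submitted yet, so flush it first.
           */
          if (fence->batch)
                  panfrost_batch_submit(fence->batch);

          /* Empty batches mark their fence as signaled instead of
           * submitting anything, so only wait when needed.
           */
          if (!fence->signaled)
                  drmSyncobjWait(screen->fd, &fence->syncobj, 1,
                                 INT64_MAX, 0, NULL);
  }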

Re: [Mesa-dev] [PATCH v3 02/17] panfrost: Make panfrost_batch->bos a hash table

2019-09-20 Thread Alyssa Rosenzweig
R-b
On Wed, Sep 18, 2019 at 03:24:24PM +0200, Boris Brezillon wrote:
> So we can store the flags as data and keep the BO as a key. This way
> we keep track of the type of access done on BOs.
> 
> Signed-off-by: Boris Brezillon 
> ---
> Changes in v3:
> * None
> ---
>  src/gallium/drivers/panfrost/pan_job.c | 33 +-
>  src/gallium/drivers/panfrost/pan_job.h |  2 +-
>  2 files changed, 23 insertions(+), 12 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index 8e2703ae168c..785317dbd0b0 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -44,9 +44,8 @@ panfrost_create_batch(struct panfrost_context *ctx,
>  
>  batch->ctx = ctx;
>  
> -batch->bos = _mesa_set_create(batch,
> -  _mesa_hash_pointer,
> -  _mesa_key_pointer_equal);
> +batch->bos = _mesa_hash_table_create(batch, _mesa_hash_pointer,
> + _mesa_key_pointer_equal);
>  
>  batch->minx = batch->miny = ~0;
>  batch->maxx = batch->maxy = 0;
> @@ -67,10 +66,8 @@ panfrost_free_batch(struct panfrost_batch *batch)
>  
>  struct panfrost_context *ctx = batch->ctx;
>  
> -set_foreach(batch->bos, entry) {
> -struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
> -panfrost_bo_unreference(bo);
> -}
> +hash_table_foreach(batch->bos, entry)
> +panfrost_bo_unreference((struct panfrost_bo *)entry->key);
>  
>  _mesa_hash_table_remove_key(ctx->batches, &batch->key);
>  
> @@ -138,11 +135,25 @@ panfrost_batch_add_bo(struct panfrost_batch *batch, 
> struct panfrost_bo *bo,
>  if (!bo)
>  return;
>  
> -if (_mesa_set_search(batch->bos, bo))
> +struct hash_entry *entry;
> +uint32_t old_flags = 0;
> +
> +entry = _mesa_hash_table_search(batch->bos, bo);
> +if (!entry) {
> +entry = _mesa_hash_table_insert(batch->bos, bo,
> +(void *)(uintptr_t)flags);
> +panfrost_bo_reference(bo);
> + } else {
> +old_flags = (uintptr_t)entry->data;
> +}
> +
> +assert(entry);
> +
> +if (old_flags == flags)
>  return;
>  
> -panfrost_bo_reference(bo);
> -_mesa_set_add(batch->bos, bo);
> +flags |= old_flags;
> +entry->data = (void *)(uintptr_t)flags;
>  }
>  
>  void panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
> @@ -376,7 +387,7 @@ panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
>  bo_handles = calloc(batch->bos->entries, sizeof(*bo_handles));
>  assert(bo_handles);
>  
> -set_foreach(batch->bos, entry) {
> +hash_table_foreach(batch->bos, entry) {
>  struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
>  assert(bo->gem_handle > 0);
>  bo_handles[submit.bo_handle_count++] = bo->gem_handle;
> diff --git a/src/gallium/drivers/panfrost/pan_job.h 
> b/src/gallium/drivers/panfrost/pan_job.h
> index 0b37a3131e86..3f2cf1a999f3 100644
> --- a/src/gallium/drivers/panfrost/pan_job.h
> +++ b/src/gallium/drivers/panfrost/pan_job.h
> @@ -98,7 +98,7 @@ struct panfrost_batch {
>  unsigned job_index;
>  
>  /* BOs referenced -- will be used for flushing logic */
> -struct set *bos;
> +struct hash_table *bos;
>  
>  /* Current transient BO */
>   struct panfrost_bo *transient_bo;
> -- 
> 2.21.0
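
The net effect of the hash table is that repeated additions of the same BO
accumulate access flags instead of being ignored. For example (the flag
names are the ones introduced by the first patch of the series):

  /* First use: the BO is referenced and inserted with the READ flag. */
  panfrost_batch_add_bo(batch, bo, PAN_BO_ACCESS_READ);

  /* Second use: no extra reference is taken, the existing entry is simply
   * updated to PAN_BO_ACCESS_READ | PAN_BO_ACCESS_WRITE.
   */
  panfrost_batch_add_bo(batch, bo, PAN_BO_ACCESS_WRITE);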

Re: [Mesa-dev] [PATCH v3 01/17] panfrost: Extend the panfrost_batch_add_bo() API to pass access flags

2019-09-20 Thread Alyssa Rosenzweig
> @@ -1121,7 +1134,11 @@ panfrost_emit_for_draw(struct panfrost_context *ctx, 
> bool with_vertex_data)
>  
>  struct panfrost_shader_state *ss = 
> &all->variants[all->active_variant];
>  
> -panfrost_batch_add_bo(batch, ss->bo);
> +panfrost_batch_add_bo(batch, ss->bo,
> +  PAN_BO_ACCESS_PRIVATE |
> +  PAN_BO_ACCESS_READ |

> +  PAN_BO_ACCESS_VERTEX_TILER |
> +  PAN_BO_ACCESS_FRAGMENT);

I believe this should be just the access for the stage `i`

Although actually I am not at all sure what this batch_add_bo is doing
at all?

I think this batch_add_bo should probably be dropped altogether? This loop
is dealing with constant buffers; the shaders themselves were added

>  void panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
>  {
> +uint32_t flags = PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_WRITE |
> + PAN_BO_ACCESS_VERTEX_TILER |
> + PAN_BO_ACCESS_FRAGMENT;

I think we can drop VERTEX_TILER here...? The buffers are written right
at the end of the FRAGMENT job, not touched before that.

If nothing else is broken, this should allow a nice perf boost with
pipelining, so the vertex/tiler from frame n+1 can run in parallel with
the fragment of frame n (rather than blocking on frame n finishing with
the FBOs).
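
Concretely, "just the access for the stage i" could look like the snippet
below; the variable names are assumed from the surrounding loop rather
than taken verbatim from the patch:

  /* Only tag the BO for the pipeline stage that actually consumes it. */
  uint32_t stage_flags = (i == PIPE_SHADER_FRAGMENT) ?
                         PAN_BO_ACCESS_FRAGMENT :
                         PAN_BO_ACCESS_VERTEX_TILER;

  panfrost_batch_add_bo(batch, ss->bo,
                        PAN_BO_ACCESS_PRIVATE |
                        PAN_BO_ACCESS_READ |
                        stage_flags);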

Re: [Mesa-dev] [PATCH 3/3] panfrost: More tests are passing

2019-09-20 Thread Alyssa Rosenzweig
R-b with pleasure! Glad to see those tests fixed, those have stumped me
for a *long* time :D

Congratulations! Thank you!

On Fri, Sep 20, 2019 at 04:53:39PM +0200, Boris Brezillon wrote:
> Remove the tests that are now passing.
> 
> Signed-off-by: Boris Brezillon 
> ---
>  .../drivers/panfrost/ci/expected-failures.txt | 153 --
>  1 file changed, 153 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/ci/expected-failures.txt 
> b/src/gallium/drivers/panfrost/ci/expected-failures.txt
> index 7e7dbd62307b..91c1f14ce1a2 100644
> --- a/src/gallium/drivers/panfrost/ci/expected-failures.txt
> +++ b/src/gallium/drivers/panfrost/ci/expected-failures.txt
> @@ -1,10 +1,3 @@
> -dEQP-GLES2.functional.color_clear.masked_rgba Fail
> -dEQP-GLES2.functional.color_clear.masked_rgb Fail
> -dEQP-GLES2.functional.color_clear.masked_scissored_rgba Fail
> -dEQP-GLES2.functional.color_clear.masked_scissored_rgb Fail
> -dEQP-GLES2.functional.color_clear.scissored_rgba Fail
> -dEQP-GLES2.functional.color_clear.scissored_rgb Fail
> -dEQP-GLES2.functional.color_clear.short_scissored_rgb Fail
>  dEQP-GLES2.functional.depth_range.write.0_8_to_third Fail
>  dEQP-GLES2.functional.depth_range.write.clamp_both Fail
>  dEQP-GLES2.functional.depth_range.write.clamp_far Fail
> @@ -672,201 +665,55 @@ 
> dEQP-GLES2.functional.fragment_ops.depth_stencil.stencil_ops.zero_zero_zero 
> Fail
>  dEQP-GLES2.functional.fragment_ops.depth_stencil.write_mask.both Fail
>  dEQP-GLES2.functional.fragment_ops.depth_stencil.write_mask.depth Fail
>  dEQP-GLES2.functional.fragment_ops.depth_stencil.write_mask.stencil Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.0 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.10 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.11 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.12 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.13 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.15 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.16 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.17 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.18 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.19 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.1 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.20 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.21 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.22 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.23 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.24 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.25 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.26 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.29 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.30 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.31 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.32 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.33 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.34 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.35 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.36 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.37 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.38 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.39 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.3 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.40 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.41 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.42 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.43 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.44 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.46 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.47 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.48 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.49 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.50 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.51 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.52 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.53 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.54 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.55 Fail
> -dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.56 Fail
>  dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.57 Fail
> 

Re: [Mesa-dev] [PATCH 2/3] panfrost: Draw the wallpaper when only depth/stencil bufs are cleared

2019-09-20 Thread Alyssa Rosenzweig
R-b, nice fix :)

On Fri, Sep 20, 2019 at 04:53:38PM +0200, Boris Brezillon wrote:
> When only the depth/stencil bufs are cleared, we should make sure the
> color content is reloaded into the tile buffers if we want to preserve
> their content.
> 
> Signed-off-by: Boris Brezillon 
> ---
> There might be a more optimal solution to do that (like not passing the
> color bufs to the fragment job?), but this solution seems to fix a few
> deqp tests.
> ---
>  src/gallium/drivers/panfrost/pan_context.c |  2 +-
>  src/gallium/drivers/panfrost/pan_job.c | 16 ++--
>  2 files changed, 15 insertions(+), 3 deletions(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_context.c 
> b/src/gallium/drivers/panfrost/pan_context.c
> index b2f2a9da7a51..c99bf1b26ce7 100644
> --- a/src/gallium/drivers/panfrost/pan_context.c
> +++ b/src/gallium/drivers/panfrost/pan_context.c
> @@ -1333,7 +1333,7 @@ panfrost_queue_draw(struct panfrost_context *ctx)
>  
>  if (rasterizer_discard)
>  panfrost_scoreboard_queue_vertex_job(batch, vertex, FALSE);
> -else if (ctx->wallpaper_batch)
> +else if (ctx->wallpaper_batch && batch->first_tiler.gpu)
> panfrost_scoreboard_queue_fused_job_prepend(batch, vertex, tiler);
>  else
>  panfrost_scoreboard_queue_fused_job(batch, vertex, tiler);
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index 4ec2aa0565d7..a2df31f96f00 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -698,10 +698,23 @@ panfrost_batch_get_tiler_dummy(struct panfrost_batch 
> *batch)
>  static void
>  panfrost_batch_draw_wallpaper(struct panfrost_batch *batch)
>  {
> +/* Color 0 is cleared, no need to draw the wallpaper.
> + * TODO: MRT wallpapers.
> + */
> +if (batch->clear & PIPE_CLEAR_COLOR0)
> +return;
> +
>  /* Nothing to reload? TODO: MRT wallpapers */
>  if (batch->key.cbufs[0] == NULL)
>  return;
>  
> +/* No draw calls, and no clear on the depth/stencil bufs.
> + * Drawing the wallpaper would be useless.
> + */
> +if (!batch->last_tiler.gpu &&
> +!(batch->clear & PIPE_CLEAR_DEPTHSTENCIL))
> +return;
> +
>  /* Check if the buffer has any content on it worth preserving */
>  
>  struct pipe_surface *surf = batch->key.cbufs[0];
> @@ -923,8 +936,7 @@ panfrost_batch_submit(struct panfrost_batch *batch)
>  goto out;
>  }
>  
> -if (!batch->clear && batch->last_tiler.gpu)
> -panfrost_batch_draw_wallpaper(batch);
> +panfrost_batch_draw_wallpaper(batch);
>  
>  panfrost_scoreboard_link_batch(batch);
>  
> -- 
> 2.21.0

Re: [Mesa-dev] [PATCH 1/3] panfrost: Make sure a clear does not re-use a pre-existing batch

2019-09-20 Thread Alyssa Rosenzweig
To be clear, if we have a batch and do the following operations:

clear red
draw 1
clear green
draw 2
flush

All we should see is #2 on a green background, which this patch handles
by the second clear invalidating all the clears/draws that came before
it (provided there is no flush in between). 
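
In (hypothetical) GLES terms, that sequence looks roughly like the
snippet below -- draw_triangle() is just a stand-in for whatever draw
call the app issues, not a function from this series:

    #include <GLES2/gl2.h>

    /* Sketch of the clear/draw/clear/draw/flush case above, assuming a
     * context and framebuffer are already set up. */
    static void clear_draw_clear_draw(void (*draw_triangle)(int))
    {
            glClearColor(1.0f, 0.0f, 0.0f, 1.0f);   /* red */
            glClear(GL_COLOR_BUFFER_BIT);
            draw_triangle(1);
            glClearColor(0.0f, 1.0f, 0.0f, 1.0f);   /* green */
            glClear(GL_COLOR_BUFFER_BIT);           /* must not be absorbed by the batch above */
            draw_triangle(2);
            glFlush();                              /* expected result: only #2, on green */
    }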

I might just be tripped up by the "freeze" name. That really means throw
away / free here, I guess?

Provided that's the idea (and we're not somehow saving the original draw
1), it's Reviewed-by A R 

On Fri, Sep 20, 2019 at 04:53:37PM +0200, Boris Brezillon wrote:
> glClear()s are expected to be the first thing GL apps do before drawing
> new things. If there's already an existing batch targeting the same
> FBO that has draws attached to it, we should make sure the new clear
> gets a new batch assigned to guarantee that the FB content is actually
> cleared with the requested color/depth/stencil values.
> 
> We create a panfrost_get_fresh_batch_for_fbo() helper for that and
> call it from panfrost_clear().
> 
> Signed-off-by: Boris Brezillon 
> ---
>  src/gallium/drivers/panfrost/pan_context.c |  2 +-
>  src/gallium/drivers/panfrost/pan_job.c | 21 +
>  src/gallium/drivers/panfrost/pan_job.h |  3 +++
>  3 files changed, 25 insertions(+), 1 deletion(-)
> 
> diff --git a/src/gallium/drivers/panfrost/pan_context.c 
> b/src/gallium/drivers/panfrost/pan_context.c
> index ac01461a07fe..b2f2a9da7a51 100644
> --- a/src/gallium/drivers/panfrost/pan_context.c
> +++ b/src/gallium/drivers/panfrost/pan_context.c
> @@ -162,7 +162,7 @@ panfrost_clear(
>  double depth, unsigned stencil)
>  {
>  struct panfrost_context *ctx = pan_context(pipe);
> -struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
> +struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx);
>  
>  panfrost_batch_add_fbo_bos(batch);
>  panfrost_batch_clear(batch, buffers, color, depth, stencil);
> diff --git a/src/gallium/drivers/panfrost/pan_job.c 
> b/src/gallium/drivers/panfrost/pan_job.c
> index d8330bc133a6..4ec2aa0565d7 100644
> --- a/src/gallium/drivers/panfrost/pan_job.c
> +++ b/src/gallium/drivers/panfrost/pan_job.c
> @@ -298,6 +298,27 @@ panfrost_get_batch_for_fbo(struct panfrost_context *ctx)
>  return batch;
>  }
>  
> +struct panfrost_batch *
> +panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx)
> +{
> +struct panfrost_batch *batch;
> +
> +batch = panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
> +
> +/* The batch has no draw/clear queued, let's return it directly.
> + * Note that it's perfectly fine to re-use a batch with an
> + * existing clear, we'll just update it with the new clear request.
> + */
> +if (!batch->last_job.gpu)
> +return batch;
> +
> +/* Otherwise, we need to freeze the existing one and instantiate a new
> + * one.
> + */
> +panfrost_freeze_batch(batch);
> +return panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
> +}
> +
>  static bool
>  panfrost_batch_fence_is_signaled(struct panfrost_batch_fence *fence)
>  {
> diff --git a/src/gallium/drivers/panfrost/pan_job.h 
> b/src/gallium/drivers/panfrost/pan_job.h
> index e1b1f56a2e64..0bd78bba267a 100644
> --- a/src/gallium/drivers/panfrost/pan_job.h
> +++ b/src/gallium/drivers/panfrost/pan_job.h
> @@ -172,6 +172,9 @@ panfrost_batch_fence_reference(struct 
> panfrost_batch_fence *batch);
>  struct panfrost_batch *
>  panfrost_get_batch_for_fbo(struct panfrost_context *ctx);
>  
> +struct panfrost_batch *
> +panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx);
> +
>  void
>  panfrost_batch_init(struct panfrost_context *ctx);
>  
> -- 
> 2.21.0

[Mesa-dev] [PATCH 4/4] nv50, nvc0: fix must_check warning of util_dynarray_resize_bytes

2019-09-20 Thread Karol Herbst
Signed-off-by: Karol Herbst 
---
 src/gallium/drivers/nouveau/nv50/nv50_state.c | 10 +++---
 src/gallium/drivers/nouveau/nvc0/nvc0_state.c | 10 +++---
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/src/gallium/drivers/nouveau/nv50/nv50_state.c 
b/src/gallium/drivers/nouveau/nv50/nv50_state.c
index a4163aa1713..9390b61b748 100644
--- a/src/gallium/drivers/nouveau/nv50/nv50_state.c
+++ b/src/gallium/drivers/nouveau/nv50/nv50_state.c
@@ -1267,9 +1267,13 @@ nv50_set_global_bindings(struct pipe_context *pipe,
 
if (nv50->global_residents.size <= (end * sizeof(struct pipe_resource *))) {
   const unsigned old_size = nv50->global_residents.size;
-  util_dynarray_resize(&nv50->global_residents, struct pipe_resource *, end);
-  memset((uint8_t *)nv50->global_residents.data + old_size, 0,
- nv50->global_residents.size - old_size);
+  if (util_dynarray_resize(&nv50->global_residents, struct pipe_resource *, end)) {
+ memset((uint8_t *)nv50->global_residents.data + old_size, 0,
+nv50->global_residents.size - old_size);
+  } else {
+ NOUVEAU_ERR("Could not resize global residents array\n");
+ return;
+  }
}
 
if (resources) {
diff --git a/src/gallium/drivers/nouveau/nvc0/nvc0_state.c 
b/src/gallium/drivers/nouveau/nvc0/nvc0_state.c
index 60dcbe3ec39..956bd78defa 100644
--- a/src/gallium/drivers/nouveau/nvc0/nvc0_state.c
+++ b/src/gallium/drivers/nouveau/nvc0/nvc0_state.c
@@ -1374,9 +1374,13 @@ nvc0_set_global_bindings(struct pipe_context *pipe,
 
if (nvc0->global_residents.size <= (end * sizeof(struct pipe_resource *))) {
   const unsigned old_size = nvc0->global_residents.size;
-  util_dynarray_resize(&nvc0->global_residents, struct pipe_resource *, end);
-  memset((uint8_t *)nvc0->global_residents.data + old_size, 0,
- nvc0->global_residents.size - old_size);
+  if (util_dynarray_resize(&nvc0->global_residents, struct pipe_resource *, end)) {
+ memset((uint8_t *)nvc0->global_residents.data + old_size, 0,
+nvc0->global_residents.size - old_size);
+  } else {
+ NOUVEAU_ERR("Could not resize global residents array\n");
+ return;
+  }
}
 
if (resources) {
-- 
2.21.0


[Mesa-dev] [PATCH 2/4] nv50ir: fix unnecessary parentheses warning

2019-09-20 Thread Karol Herbst
Signed-off-by: Karol Herbst 
---
 src/gallium/drivers/nouveau/codegen/nv50_ir_util.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/gallium/drivers/nouveau/codegen/nv50_ir_util.h 
b/src/gallium/drivers/nouveau/codegen/nv50_ir_util.h
index 307c23d5e03..b1766f48205 100644
--- a/src/gallium/drivers/nouveau/codegen/nv50_ir_util.h
+++ b/src/gallium/drivers/nouveau/codegen/nv50_ir_util.h
@@ -145,7 +145,7 @@ public:
 #define DLLIST_EMPTY(__list) ((__list)->next == (__list))
 
 #define DLLIST_FOR_EACH(list, it) \
-   for (DLList::Iterator (it) = (list)->iterator(); !(it).end(); (it).next())
+   for (DLList::Iterator it = (list)->iterator(); !(it).end(); (it).next())
 
 class DLList
 {
-- 
2.21.0


[Mesa-dev] [PATCH 3/4] nv50ir/nir: comparison of integer expressions of different signedness warning

2019-09-20 Thread Karol Herbst
Signed-off-by: Karol Herbst 
---
 src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp 
b/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
index 4e86ab8f8cc..95b60d2c7d0 100644
--- a/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
+++ b/src/gallium/drivers/nouveau/codegen/nv50_ir_from_nir.cpp
@@ -1957,7 +1957,7 @@ Converter::visit(nir_intrinsic_instr *insn)
  }
  case Program::TYPE_GEOMETRY:
  case Program::TYPE_VERTEX: {
-if (info->io.genUserClip > 0 && idx == clipVertexOutput) {
+if (info->io.genUserClip > 0 && idx == (uint32_t)clipVertexOutput) {
mkMov(clipVtx[i], src);
src = clipVtx[i];
 }
-- 
2.21.0


[Mesa-dev] [PATCH 1/4] nv50ir: fix memset on non trivial types warning

2019-09-20 Thread Karol Herbst
Signed-off-by: Karol Herbst 
---
 src/gallium/drivers/nouveau/codegen/nv50_ir.cpp| 4 +---
 src/gallium/drivers/nouveau/codegen/nv50_ir.h  | 2 +-
 src/gallium/drivers/nouveau/codegen/nv50_ir_target.cpp | 2 +-
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/src/gallium/drivers/nouveau/codegen/nv50_ir.cpp 
b/src/gallium/drivers/nouveau/codegen/nv50_ir.cpp
index a181a13a3b1..45ee95bb103 100644
--- a/src/gallium/drivers/nouveau/codegen/nv50_ir.cpp
+++ b/src/gallium/drivers/nouveau/codegen/nv50_ir.cpp
@@ -903,10 +903,8 @@ Instruction::isCommutationLegal(const Instruction *i) const
 }
 
 TexInstruction::TexInstruction(Function *fn, operation op)
-   : Instruction(fn, op, TYPE_F32)
+   : Instruction(fn, op, TYPE_F32), tex()
 {
-   memset(&tex, 0, sizeof(tex));
-
tex.rIndirectSrc = -1;
tex.sIndirectSrc = -1;
 
diff --git a/src/gallium/drivers/nouveau/codegen/nv50_ir.h 
b/src/gallium/drivers/nouveau/codegen/nv50_ir.h
index b19751ab372..5163e1a7ec2 100644
--- a/src/gallium/drivers/nouveau/codegen/nv50_ir.h
+++ b/src/gallium/drivers/nouveau/codegen/nv50_ir.h
@@ -957,7 +957,7 @@ public:
class Target
{
public:
-  Target(TexTarget targ = TEX_TARGET_2D) : target(targ) { }
+  Target(TexTarget targ = TEX_TARGET_1D) : target(targ) { }
 
   const char *getName() const { return descTable[target].name; }
   unsigned int getArgCount() const { return descTable[target].argc; }
diff --git a/src/gallium/drivers/nouveau/codegen/nv50_ir_target.cpp 
b/src/gallium/drivers/nouveau/codegen/nv50_ir_target.cpp
index 5c6d0570ae2..609e7b89290 100644
--- a/src/gallium/drivers/nouveau/codegen/nv50_ir_target.cpp
+++ b/src/gallium/drivers/nouveau/codegen/nv50_ir_target.cpp
@@ -455,7 +455,7 @@ CodeEmitter::addInterp(int ipa, int reg, FixupApply apply)
   if (!fixupInfo)
  return false;
   if (n == 0)
- memset(fixupInfo, 0, sizeof(FixupInfo));
+ fixupInfo->count = 0;
}
++fixupInfo->count;
 
-- 
2.21.0


Re: [Mesa-dev] Release workflow with gitlab issues

2019-09-20 Thread Jason Ekstrand
On Wed, Sep 18, 2019 at 2:08 PM Mark Janes  wrote:

> Right now, anyone can create milestones.  Is there a way to limit the
> capability to release managers?  Would that be desirable?
>

I don't think so.  We may want to use milestones for other task tracking
beyond just releases, such as "Switch radeonsi to NIR by default".

--Jason

[Mesa-dev] [PATCH 2/3] panfrost: Draw the wallpaper when only depth/stencil bufs are cleared

2019-09-20 Thread Boris Brezillon
When only the depth/stencil bufs are cleared, we should make sure the
color content is reloaded into the tile buffers if we want to preserve
their content.

Signed-off-by: Boris Brezillon 
---
There might be a better way to do this (like not passing the color
bufs to the fragment job?), but this solution seems to fix a few
dEQP tests.
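
For illustration, the kind of sequence this is about, as a hypothetical
GLES snippet (draw_scene() is a placeholder for the app's draw calls):

    #include <GLES2/gl2.h>

    /* The color buffer already holds rendered content; the app clears
     * only depth/stencil and then draws on top. Without the wallpaper
     * draw, nothing would reload the old color content into the tile
     * buffers and it would be lost. */
    static void partial_clear_then_draw(void (*draw_scene)(void))
    {
            glClear(GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT); /* no color clear */
            draw_scene();   /* expects the previous color content to survive */
            glFlush();
    }
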
---
 src/gallium/drivers/panfrost/pan_context.c |  2 +-
 src/gallium/drivers/panfrost/pan_job.c | 16 ++--
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/src/gallium/drivers/panfrost/pan_context.c 
b/src/gallium/drivers/panfrost/pan_context.c
index b2f2a9da7a51..c99bf1b26ce7 100644
--- a/src/gallium/drivers/panfrost/pan_context.c
+++ b/src/gallium/drivers/panfrost/pan_context.c
@@ -1333,7 +1333,7 @@ panfrost_queue_draw(struct panfrost_context *ctx)
 
 if (rasterizer_discard)
 panfrost_scoreboard_queue_vertex_job(batch, vertex, FALSE);
-else if (ctx->wallpaper_batch)
+else if (ctx->wallpaper_batch && batch->first_tiler.gpu)
panfrost_scoreboard_queue_fused_job_prepend(batch, vertex, tiler);
 else
 panfrost_scoreboard_queue_fused_job(batch, vertex, tiler);
diff --git a/src/gallium/drivers/panfrost/pan_job.c 
b/src/gallium/drivers/panfrost/pan_job.c
index 4ec2aa0565d7..a2df31f96f00 100644
--- a/src/gallium/drivers/panfrost/pan_job.c
+++ b/src/gallium/drivers/panfrost/pan_job.c
@@ -698,10 +698,23 @@ panfrost_batch_get_tiler_dummy(struct panfrost_batch 
*batch)
 static void
 panfrost_batch_draw_wallpaper(struct panfrost_batch *batch)
 {
+/* Color 0 is cleared, no need to draw the wallpaper.
+ * TODO: MRT wallpapers.
+ */
+if (batch->clear & PIPE_CLEAR_COLOR0)
+return;
+
 /* Nothing to reload? TODO: MRT wallpapers */
 if (batch->key.cbufs[0] == NULL)
 return;
 
+/* No draw calls, and no clear on the depth/stencil bufs.
+ * Drawing the wallpaper would be useless.
+ */
+if (!batch->last_tiler.gpu &&
+!(batch->clear & PIPE_CLEAR_DEPTHSTENCIL))
+return;
+
 /* Check if the buffer has any content on it worth preserving */
 
 struct pipe_surface *surf = batch->key.cbufs[0];
@@ -923,8 +936,7 @@ panfrost_batch_submit(struct panfrost_batch *batch)
 goto out;
 }
 
-if (!batch->clear && batch->last_tiler.gpu)
-panfrost_batch_draw_wallpaper(batch);
+panfrost_batch_draw_wallpaper(batch);
 
 panfrost_scoreboard_link_batch(batch);
 
-- 
2.21.0


[Mesa-dev] [PATCH 3/3] panfrost: More tests are passing

2019-09-20 Thread Boris Brezillon
Remove the tests that are now passing.

Signed-off-by: Boris Brezillon 
---
 .../drivers/panfrost/ci/expected-failures.txt | 153 --
 1 file changed, 153 deletions(-)

diff --git a/src/gallium/drivers/panfrost/ci/expected-failures.txt 
b/src/gallium/drivers/panfrost/ci/expected-failures.txt
index 7e7dbd62307b..91c1f14ce1a2 100644
--- a/src/gallium/drivers/panfrost/ci/expected-failures.txt
+++ b/src/gallium/drivers/panfrost/ci/expected-failures.txt
@@ -1,10 +1,3 @@
-dEQP-GLES2.functional.color_clear.masked_rgba Fail
-dEQP-GLES2.functional.color_clear.masked_rgb Fail
-dEQP-GLES2.functional.color_clear.masked_scissored_rgba Fail
-dEQP-GLES2.functional.color_clear.masked_scissored_rgb Fail
-dEQP-GLES2.functional.color_clear.scissored_rgba Fail
-dEQP-GLES2.functional.color_clear.scissored_rgb Fail
-dEQP-GLES2.functional.color_clear.short_scissored_rgb Fail
 dEQP-GLES2.functional.depth_range.write.0_8_to_third Fail
 dEQP-GLES2.functional.depth_range.write.clamp_both Fail
 dEQP-GLES2.functional.depth_range.write.clamp_far Fail
@@ -672,201 +665,55 @@ 
dEQP-GLES2.functional.fragment_ops.depth_stencil.stencil_ops.zero_zero_zero Fail
 dEQP-GLES2.functional.fragment_ops.depth_stencil.write_mask.both Fail
 dEQP-GLES2.functional.fragment_ops.depth_stencil.write_mask.depth Fail
 dEQP-GLES2.functional.fragment_ops.depth_stencil.write_mask.stencil Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.0 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.10 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.11 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.12 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.13 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.15 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.16 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.17 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.18 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.19 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.1 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.20 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.21 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.22 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.23 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.24 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.25 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.26 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.29 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.30 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.31 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.32 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.33 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.34 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.35 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.36 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.37 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.38 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.39 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.3 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.40 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.41 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.42 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.43 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.44 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.46 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.47 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.48 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.49 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.50 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.51 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.52 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.53 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.54 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.55 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.56 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.57 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.58 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.59 Fail
-dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.5 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.60 Fail
 dEQP-GLES2.functional.fragment_ops.interaction.basic_shader.61 Fail

[Mesa-dev] [PATCH 1/3] panfrost: Make sure a clear does not re-use a pre-existing batch

2019-09-20 Thread Boris Brezillon
glClear()s are expected to be the first thing GL apps do before drawing
new things. If there's already an existing batch targeting the same
FBO that has draws attached to it, we should make sure the new clear
gets a new batch assigned to guarantee that the FB content is actually
cleared with the requested color/depth/stencil values.

We create a panfrost_get_fresh_batch_for_fbo() helper for that and
call it from panfrost_clear().
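
To illustrate the problem from the application side (a hypothetical
check, not taken from any particular test; draw_something() is a
placeholder):

    #include <GLES2/gl2.h>

    /* Hypothetical check: after some draws, a full clear to green must
     * win. Before this change the clear could be queued on the batch
     * that already had draws, so the requested clear color was not
     * guaranteed to take effect. */
    static int clear_after_draws_is_green(void (*draw_something)(void))
    {
            unsigned char px[4];

            draw_something();                      /* batch now has draws queued */
            glClearColor(0.0f, 1.0f, 0.0f, 1.0f);
            glClear(GL_COLOR_BUFFER_BIT);          /* should get a fresh batch */
            glReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, px);
            return px[0] == 0 && px[1] == 255;     /* expect pure green */
    }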

Signed-off-by: Boris Brezillon 
---
 src/gallium/drivers/panfrost/pan_context.c |  2 +-
 src/gallium/drivers/panfrost/pan_job.c | 21 +
 src/gallium/drivers/panfrost/pan_job.h |  3 +++
 3 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/src/gallium/drivers/panfrost/pan_context.c 
b/src/gallium/drivers/panfrost/pan_context.c
index ac01461a07fe..b2f2a9da7a51 100644
--- a/src/gallium/drivers/panfrost/pan_context.c
+++ b/src/gallium/drivers/panfrost/pan_context.c
@@ -162,7 +162,7 @@ panfrost_clear(
 double depth, unsigned stencil)
 {
 struct panfrost_context *ctx = pan_context(pipe);
-struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
+struct panfrost_batch *batch = panfrost_get_fresh_batch_for_fbo(ctx);
 
 panfrost_batch_add_fbo_bos(batch);
 panfrost_batch_clear(batch, buffers, color, depth, stencil);
diff --git a/src/gallium/drivers/panfrost/pan_job.c 
b/src/gallium/drivers/panfrost/pan_job.c
index d8330bc133a6..4ec2aa0565d7 100644
--- a/src/gallium/drivers/panfrost/pan_job.c
+++ b/src/gallium/drivers/panfrost/pan_job.c
@@ -298,6 +298,27 @@ panfrost_get_batch_for_fbo(struct panfrost_context *ctx)
 return batch;
 }
 
+struct panfrost_batch *
+panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx)
+{
+struct panfrost_batch *batch;
+
+batch = panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
+
+/* The batch has no draw/clear queued, let's return it directly.
+ * Note that it's perfectly fine to re-use a batch with an
+ * existing clear, we'll just update it with the new clear request.
+ */
+if (!batch->last_job.gpu)
+return batch;
+
+/* Otherwise, we need to freeze the existing one and instantiate a new
+ * one.
+ */
+panfrost_freeze_batch(batch);
+return panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
+}
+
 static bool
 panfrost_batch_fence_is_signaled(struct panfrost_batch_fence *fence)
 {
diff --git a/src/gallium/drivers/panfrost/pan_job.h 
b/src/gallium/drivers/panfrost/pan_job.h
index e1b1f56a2e64..0bd78bba267a 100644
--- a/src/gallium/drivers/panfrost/pan_job.h
+++ b/src/gallium/drivers/panfrost/pan_job.h
@@ -172,6 +172,9 @@ panfrost_batch_fence_reference(struct panfrost_batch_fence 
*batch);
 struct panfrost_batch *
 panfrost_get_batch_for_fbo(struct panfrost_context *ctx);
 
+struct panfrost_batch *
+panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx);
+
 void
 panfrost_batch_init(struct panfrost_context *ctx);
 
-- 
2.21.0
