Module: Mesa
Branch: master
Commit: 39d6343a3eb123f2c1b3005c0e8fee30b3d9e405
URL:    
http://cgit.freedesktop.org/mesa/mesa/commit/?id=39d6343a3eb123f2c1b3005c0e8fee30b3d9e405

Author: Rob Clark <[email protected]>
Date:   Tue Mar  2 15:08:38 2021 -0800

freedreno: Split out batch/resource tracking

For threaded_context, to properly handle replace_buffer_storage, we'll
need to handle multiple "iterations" of a resource using the same
tracking in order to implement transfer_map() correctly.

Signed-off-by: Rob Clark <[email protected]>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9323>

---

 src/gallium/drivers/freedreno/freedreno_batch.c    | 24 +++---
 .../drivers/freedreno/freedreno_batch_cache.c      | 14 ++--
 .../drivers/freedreno/freedreno_query_acc.c        |  6 +-
 src/gallium/drivers/freedreno/freedreno_query_hw.c |  6 +-
 src/gallium/drivers/freedreno/freedreno_resource.c | 39 ++++++----
 src/gallium/drivers/freedreno/freedreno_resource.h | 85 ++++++++++++++++------
 6 files changed, 113 insertions(+), 61 deletions(-)

diff --git a/src/gallium/drivers/freedreno/freedreno_batch.c 
b/src/gallium/drivers/freedreno/freedreno_batch.c
index 42869eda460..7c065541b94 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch.c
@@ -260,10 +260,10 @@ batch_reset_resources_locked(struct fd_batch *batch)
        set_foreach(batch->resources, entry) {
                struct fd_resource *rsc = (struct fd_resource *)entry->key;
                _mesa_set_remove(batch->resources, entry);
-               debug_assert(rsc->batch_mask & (1 << batch->idx));
-               rsc->batch_mask &= ~(1 << batch->idx);
-               if (rsc->write_batch == batch)
-                       fd_batch_reference_locked(&rsc->write_batch, NULL);
+               debug_assert(rsc->track->batch_mask & (1 << batch->idx));
+               rsc->track->batch_mask &= ~(1 << batch->idx);
+               if (rsc->track->write_batch == batch)
+                       fd_batch_reference_locked(&rsc->track->write_batch, 
NULL);
        }
 }
 
@@ -437,7 +437,7 @@ flush_write_batch(struct fd_resource *rsc)
        assert_dt
 {
        struct fd_batch *b = NULL;
-       fd_batch_reference_locked(&b, rsc->write_batch);
+       fd_batch_reference_locked(&b, rsc->track->write_batch);
 
        fd_screen_unlock(b->ctx->screen);
        fd_batch_flush(b);
@@ -458,7 +458,7 @@ fd_batch_add_resource(struct fd_batch *batch, struct 
fd_resource *rsc)
        debug_assert(!_mesa_set_search(batch->resources, rsc));
 
        _mesa_set_add(batch->resources, rsc);
-       rsc->batch_mask |= (1 << batch->idx);
+       rsc->track->batch_mask |= (1 << batch->idx);
 }
 
 void
@@ -473,7 +473,7 @@ fd_batch_resource_write(struct fd_batch *batch, struct 
fd_resource *rsc)
         */
        rsc->valid = true;
 
-       if (rsc->write_batch == batch)
+       if (rsc->track->write_batch == batch)
                return;
 
        fd_batch_write_prep(batch, rsc);
@@ -485,14 +485,14 @@ fd_batch_resource_write(struct fd_batch *batch, struct 
fd_resource *rsc)
         * resulting in a write-after-read hazard.
         */
        /* if we are pending read or write by any other batch: */
-       if (unlikely(rsc->batch_mask & ~(1 << batch->idx))) {
+       if (unlikely(rsc->track->batch_mask & ~(1 << batch->idx))) {
                struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
                struct fd_batch *dep;
 
-               if (rsc->write_batch)
+               if (rsc->track->write_batch)
                        flush_write_batch(rsc);
 
-               foreach_batch(dep, cache, rsc->batch_mask) {
+               foreach_batch (dep, cache, rsc->track->batch_mask) {
                        struct fd_batch *b = NULL;
                        if (dep == batch)
                                continue;
@@ -506,7 +506,7 @@ fd_batch_resource_write(struct fd_batch *batch, struct 
fd_resource *rsc)
                        fd_batch_reference_locked(&b, NULL);
                }
        }
-       fd_batch_reference_locked(&rsc->write_batch, batch);
+       fd_batch_reference_locked(&rsc->track->write_batch, batch);
 
        fd_batch_add_resource(batch, rsc);
 }
@@ -525,7 +525,7 @@ fd_batch_resource_read_slowpath(struct fd_batch *batch, 
struct fd_resource *rsc)
         * writer.  This avoids situations where we end up having to
         * flush the current batch in _resource_used()
         */
-       if (unlikely(rsc->write_batch && rsc->write_batch != batch))
+       if (unlikely(rsc->track->write_batch && rsc->track->write_batch != 
batch))
                flush_write_batch(rsc);
 
        fd_batch_add_resource(batch, rsc);
diff --git a/src/gallium/drivers/freedreno/freedreno_batch_cache.c 
b/src/gallium/drivers/freedreno/freedreno_batch_cache.c
index 0bb75ff241a..d445b9362c2 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch_cache.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch_cache.c
@@ -280,7 +280,7 @@ fd_bc_invalidate_batch(struct fd_batch *batch, bool remove)
        DBG("%p: key=%p", batch, batch->key);
        for (unsigned idx = 0; idx < key->num_surfs; idx++) {
                struct fd_resource *rsc = fd_resource(key->surf[idx].texture);
-               rsc->bc_batch_mask &= ~(1 << batch->idx);
+               rsc->track->bc_batch_mask &= ~(1 << batch->idx);
        }
 
        struct hash_entry *entry =
@@ -300,19 +300,19 @@ fd_bc_invalidate_resource(struct fd_resource *rsc, bool 
destroy)
        fd_screen_lock(screen);
 
        if (destroy) {
-               foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
+               foreach_batch (batch, &screen->batch_cache, 
rsc->track->batch_mask) {
                        struct set_entry *entry = 
_mesa_set_search(batch->resources, rsc);
                        _mesa_set_remove(batch->resources, entry);
                }
-               rsc->batch_mask = 0;
+               rsc->track->batch_mask = 0;
 
-               fd_batch_reference_locked(&rsc->write_batch, NULL);
+               fd_batch_reference_locked(&rsc->track->write_batch, NULL);
        }
 
-       foreach_batch(batch, &screen->batch_cache, rsc->bc_batch_mask)
+       foreach_batch (batch, &screen->batch_cache, rsc->track->bc_batch_mask)
                fd_bc_invalidate_batch(batch, false);
 
-       rsc->bc_batch_mask = 0;
+       rsc->track->bc_batch_mask = 0;
 
        fd_screen_unlock(screen);
 }
@@ -459,7 +459,7 @@ batch_from_key(struct fd_batch_cache *cache, struct 
fd_batch_key *key,
 
        for (unsigned idx = 0; idx < key->num_surfs; idx++) {
                struct fd_resource *rsc = fd_resource(key->surf[idx].texture);
-               rsc->bc_batch_mask = (1 << batch->idx);
+               rsc->track->bc_batch_mask = (1 << batch->idx);
        }
 
        return batch;
diff --git a/src/gallium/drivers/freedreno/freedreno_query_acc.c 
b/src/gallium/drivers/freedreno/freedreno_query_acc.c
index e7c5db9aa67..5603a6cf83a 100644
--- a/src/gallium/drivers/freedreno/freedreno_query_acc.c
+++ b/src/gallium/drivers/freedreno/freedreno_query_acc.c
@@ -165,7 +165,7 @@ fd_acc_get_query_result(struct fd_context *ctx, struct 
fd_query *q,
                         */
                        if (aq->no_wait_cnt++ > 5) {
                                fd_context_access_begin(ctx);
-                               fd_batch_flush(rsc->write_batch);
+                               fd_batch_flush(rsc->track->write_batch);
                                fd_context_access_end(ctx);
                        }
                        return false;
@@ -179,9 +179,9 @@ fd_acc_get_query_result(struct fd_context *ctx, struct 
fd_query *q,
                fd_bo_cpu_fini(rsc->bo);
        }
 
-       if (rsc->write_batch) {
+       if (rsc->track->write_batch) {
                fd_context_access_begin(ctx);
-               fd_batch_flush(rsc->write_batch);
+               fd_batch_flush(rsc->track->write_batch);
                fd_context_access_end(ctx);
        }
 
diff --git a/src/gallium/drivers/freedreno/freedreno_query_hw.c 
b/src/gallium/drivers/freedreno/freedreno_query_hw.c
index 445c86d4552..6fbe365a93b 100644
--- a/src/gallium/drivers/freedreno/freedreno_query_hw.c
+++ b/src/gallium/drivers/freedreno/freedreno_query_hw.c
@@ -221,7 +221,7 @@ fd_hw_get_query_result(struct fd_context *ctx, struct 
fd_query *q,
                         */
                        if (hq->no_wait_cnt++ > 5) {
                                fd_context_access_begin(ctx);
-                               fd_batch_flush(rsc->write_batch);
+                               fd_batch_flush(rsc->track->write_batch);
                                fd_context_access_end(ctx);
                        }
                        return false;
@@ -250,9 +250,9 @@ fd_hw_get_query_result(struct fd_context *ctx, struct 
fd_query *q,
 
                struct fd_resource *rsc = fd_resource(start->prsc);
 
-               if (rsc->write_batch) {
+               if (rsc->track->write_batch) {
                        fd_context_access_begin(ctx);
-                       fd_batch_flush(rsc->write_batch);
+                       fd_batch_flush(rsc->track->write_batch);
                        fd_context_access_end(ctx);
                }
 
diff --git a/src/gallium/drivers/freedreno/freedreno_resource.c 
b/src/gallium/drivers/freedreno/freedreno_resource.c
index b7b097f5aaf..0ee47089638 100644
--- a/src/gallium/drivers/freedreno/freedreno_resource.c
+++ b/src/gallium/drivers/freedreno/freedreno_resource.c
@@ -279,7 +279,7 @@ fd_try_shadow_resource(struct fd_context *ctx, struct 
fd_resource *rsc,
         * since that defeats the purpose of shadowing, but this is a
         * case where we'd have to flush anyways.
         */
-       if (rsc->write_batch == ctx->batch)
+       if (rsc->track->write_batch == ctx->batch)
                flush_resource(ctx, rsc, 0);
 
        /* TODO: somehow munge dimensions and format to copy unsupported
@@ -330,12 +330,11 @@ fd_try_shadow_resource(struct fd_context *ctx, struct 
fd_resource *rsc,
         */
        struct fd_resource *shadow = fd_resource(pshadow);
 
-       DBG("shadow: %p (%d) -> %p (%d)\n", rsc, rsc->base.reference.count,
-                       shadow, shadow->base.reference.count);
+       DBG("shadow: %p (%d, %p) -> %p (%d, %p)", rsc, 
rsc->base.reference.count, rsc->track,
+                       shadow, shadow->base.reference.count, shadow->track);
 
        /* TODO valid_buffer_range?? */
-       swap(rsc->bo,        shadow->bo);
-       swap(rsc->write_batch,   shadow->write_batch);
+       swap(rsc->bo,     shadow->bo);
        swap(rsc->layout, shadow->layout);
        rsc->seqno = p_atomic_inc_return(&ctx->screen->rsc_seqno);
 
@@ -343,14 +342,14 @@ fd_try_shadow_resource(struct fd_context *ctx, struct 
fd_resource *rsc,
         * by any batches, but the existing rsc (probably) is.  We need to
         * transfer those references over:
         */
-       debug_assert(shadow->batch_mask == 0);
+       debug_assert(shadow->track->batch_mask == 0);
        struct fd_batch *batch;
-       foreach_batch(batch, &ctx->screen->batch_cache, rsc->batch_mask) {
+       foreach_batch (batch, &ctx->screen->batch_cache, 
rsc->track->batch_mask) {
                struct set_entry *entry = _mesa_set_search(batch->resources, 
rsc);
                _mesa_set_remove(batch->resources, entry);
                _mesa_set_add(batch->resources, shadow);
        }
-       swap(rsc->batch_mask, shadow->batch_mask);
+       swap(rsc->track, shadow->track);
 
        fd_screen_unlock(ctx->screen);
 
@@ -552,7 +551,7 @@ flush_resource(struct fd_context *ctx, struct fd_resource 
*rsc, unsigned usage)
        struct fd_batch *write_batch = NULL;
 
        fd_screen_lock(ctx->screen);
-       fd_batch_reference_locked(&write_batch, rsc->write_batch);
+       fd_batch_reference_locked(&write_batch, rsc->track->write_batch);
        fd_screen_unlock(ctx->screen);
 
        if (usage & PIPE_MAP_WRITE) {
@@ -565,7 +564,7 @@ flush_resource(struct fd_context *ctx, struct fd_resource 
*rsc, unsigned usage)
                 * we must first grab references under a lock, then flush.
                 */
                fd_screen_lock(ctx->screen);
-               batch_mask = rsc->batch_mask;
+               batch_mask = rsc->track->batch_mask;
                foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
                        fd_batch_reference_locked(&batches[batch->idx], batch);
                fd_screen_unlock(ctx->screen);
@@ -576,14 +575,14 @@ flush_resource(struct fd_context *ctx, struct fd_resource 
*rsc, unsigned usage)
                foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
                        fd_batch_reference(&batches[batch->idx], NULL);
                }
-               assert(rsc->batch_mask == 0);
+               assert(rsc->track->batch_mask == 0);
        } else if (write_batch) {
                fd_batch_flush(write_batch);
        }
 
        fd_batch_reference(&write_batch, NULL);
 
-       assert(!rsc->write_batch);
+       assert(!rsc->track->write_batch);
 }
 
 static void
@@ -744,7 +743,7 @@ fd_resource_transfer_map(struct pipe_context *pctx,
 
                /* hold a reference, so it doesn't disappear under us: */
                fd_screen_lock(ctx->screen);
-               fd_batch_reference_locked(&write_batch, rsc->write_batch);
+               fd_batch_reference_locked(&write_batch, 
rsc->track->write_batch);
                fd_screen_unlock(ctx->screen);
 
                if ((usage & PIPE_MAP_WRITE) && write_batch &&
@@ -865,6 +864,8 @@ fd_resource_destroy(struct pipe_screen *pscreen,
 
        util_range_destroy(&rsc->valid_buffer_range);
        simple_mtx_destroy(&rsc->lock);
+       fd_resource_tracking_reference(&rsc->track, NULL);
+
        FREE(rsc);
 }
 
@@ -946,6 +947,14 @@ alloc_resource_struct(struct pipe_screen *pscreen, const 
struct pipe_resource *t
        util_range_init(&rsc->valid_buffer_range);
        simple_mtx_init(&rsc->lock, mtx_plain);
 
+       rsc->track = CALLOC_STRUCT(fd_resource_tracking);
+       if (!rsc->track) {
+               free(rsc);
+               return NULL;
+       }
+
+       pipe_reference_init(&rsc->track->reference, 1);
+
        return rsc;
 }
 
@@ -1236,12 +1245,12 @@ fd_invalidate_resource(struct pipe_context *pctx, 
struct pipe_resource *prsc)
                /* Handle the glInvalidateBufferData() case:
                 */
                invalidate_resource(rsc, PIPE_MAP_READ | PIPE_MAP_WRITE);
-       } else if (rsc->write_batch) {
+       } else if (rsc->track->write_batch) {
                /* Handle the glInvalidateFramebuffer() case, telling us that
                 * we can skip resolve.
                 */
 
-               struct fd_batch *batch = rsc->write_batch;
+               struct fd_batch *batch = rsc->track->write_batch;
                struct pipe_framebuffer_state *pfb = &batch->framebuffer;
 
                if (pfb->zsbuf && pfb->zsbuf->texture == prsc) {
diff --git a/src/gallium/drivers/freedreno/freedreno_resource.h 
b/src/gallium/drivers/freedreno/freedreno_resource.h
index 71dc24da13d..774fe8d3967 100644
--- a/src/gallium/drivers/freedreno/freedreno_resource.h
+++ b/src/gallium/drivers/freedreno/freedreno_resource.h
@@ -56,22 +56,26 @@ enum fd_lrz_direction {
        FD_LRZ_GREATER,
 };
 
-struct fd_resource {
-       struct pipe_resource base;
-       struct fd_bo *bo;  /* use fd_resource_set_bo() to write */
-       enum pipe_format internal_format;
-       struct fdl_layout layout;
-
-       /* buffer range that has been initialized */
-       struct util_range valid_buffer_range;
-       bool valid;
-       struct renderonly_scanout *scanout;
-
-       /* reference to the resource holding stencil data for a z32_s8 texture 
*/
-       /* TODO rename to secondary or auxiliary? */
-       struct fd_resource *stencil;
-
-       simple_mtx_t lock;
+/**
+ * State related to batch/resource tracking.
+ *
+ * With threaded_context we need to support replace_buffer_storage, in
+ * which case we can end up in transfer_map with tres->latest, but other
+ * pipe_context APIs using the original prsc pointer.  This allows TC to
+ * not have to synchronize the front-end thread with the buffer storage
+ * replacement called on driver thread.  But it complicates the batch/
+ * resource tracking.
+ *
+ * To handle this, we need to split the tracking out into its own ref-
+ * counted structure, so as needed both "versions" of the resource can
+ * point to the same tracking.
+ *
+ * We could *almost* just push this down to fd_bo, except for a3xx/a4xx
+ * hw queries, where we don't know up-front the size to allocate for
+ * per-tile query results.
+ */
+struct fd_resource_tracking {
+       struct pipe_reference reference;
 
        /* bitmask of in-flight batches which reference this resource.  Note
         * that the batch doesn't hold reference to resources (but instead
@@ -90,15 +94,54 @@ struct fd_resource {
         * shadowed.
         */
        uint32_t bc_batch_mask;
+};
 
-       /* Sequence # incremented each time bo changes: */
-       uint16_t seqno;
+void __fd_resource_tracking_destroy(struct fd_resource_tracking *track);
+
+static inline void
+fd_resource_tracking_reference(struct fd_resource_tracking **ptr,
+               struct fd_resource_tracking *track)
+{
+       struct fd_resource_tracking *old_track = *ptr;
+
+       if (pipe_reference(&(*ptr)->reference, &track->reference)) {
+               assert(!old_track->write_batch);
+               free(old_track);
+       }
+
+       *ptr = track;
+}
+
+/**
+ * A resource (any buffer/texture/image/etc)
+ */
+struct fd_resource {
+       struct pipe_resource base;
+       struct fd_bo *bo;  /* use fd_resource_set_bo() to write */
+       enum pipe_format internal_format;
+       struct fdl_layout layout;
+
+       /* buffer range that has been initialized */
+       struct util_range valid_buffer_range;
+       bool valid;
+       struct renderonly_scanout *scanout;
+
+       /* reference to the resource holding stencil data for a z32_s8 texture 
*/
+       /* TODO rename to secondary or auxiliary? */
+       struct fd_resource *stencil;
+
+       struct fd_resource_tracking *track;
+
+       simple_mtx_t lock;
 
        /* bitmask of state this resource could potentially dirty when rebound,
         * see rebind_resource()
         */
        enum fd_dirty_3d_state dirty;
 
+       /* Sequence # incremented each time bo changes: */
+       uint16_t seqno;
+
        /* Uninitialized resources with UBWC format need their UBWC flag data
         * cleared before writes, as the UBWC state is read and used during
         * writes, so undefined UBWC flag data results in undefined results.
@@ -146,11 +189,11 @@ static inline bool
 pending(struct fd_resource *rsc, bool write)
 {
        /* if we have a pending GPU write, we are busy in any case: */
-       if (rsc->write_batch)
+       if (rsc->track->write_batch)
                return true;
 
        /* if CPU wants to write, but we are pending a GPU read, we are busy: */
-       if (write && rsc->batch_mask)
+       if (write && rsc->track->batch_mask)
                return true;
 
        if (rsc->stencil && pending(rsc->stencil, write))
@@ -300,7 +343,7 @@ bool fd_render_condition_check(struct pipe_context *pctx) 
assert_dt;
 static inline bool
 fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
 {
-       return rsc->batch_mask & (1 << batch->idx);
+       return rsc->track->batch_mask & (1 << batch->idx);
 }
 
 static inline void

_______________________________________________
mesa-commit mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/mesa-commit

Reply via email to