On Monday 08 September 2014, 15:19:15, Bruno Jimenez wrote:
> Hi,
> 
> I'm not sure if this will work. Imagine this case:
> 
> We  have an item in the pool, and we want to use
> r600_resource_copy_region with it, for example because we want to demote
> it. This will call r600_copy_global_buffer, and with your patch it will
> call r600_compute_global_demote_or_alloc, which will again call
> compute_memory_demote_item causing an infinite cycle.

I think this will not be a problem because neither the pool bo nor the 
"real_buffer" will have the PIPE_BIND_GLOBAL flag. Therefore, 
r600_compute_global_demote_or_alloc will not be called again.

> Also, why are you reassigning src and dst in r600_copy_global_buffer?

For r600, resources with PIPE_BIND_GLOBAL are not real resources but only 
correspond to items in the compute pool. There they can either have the 
"real_buffer" bo when they should be mapped or be part of the pool bo. 
Therefore the pipe_resources have to be reassigned accordingly.

I am however not sure if it is really necessary to demote the item from the 
pool before copying data to it. Otherwise it would be possible to directly 
access the pool bo if the item is already in it.

> - Bruno
> 
> On Sun, 2014-09-07 at 18:32 +0200, Niels Ole Salscheider wrote:
> > Signed-off-by: Niels Ole Salscheider <niels_...@salscheider-online.de>
> > ---
> > 
> >  src/gallium/drivers/r600/evergreen_compute.c | 27 ++++++++++++-------
> >  src/gallium/drivers/r600/evergreen_compute.h |  1 +
> >  src/gallium/drivers/r600/r600_blit.c         | 40 ++++++++++++++++------------
> >  3 files changed, 41 insertions(+), 27 deletions(-)
> > 
> > diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c
> > index 38b78c7..b495868 100644
> > --- a/src/gallium/drivers/r600/evergreen_compute.c
> > +++ b/src/gallium/drivers/r600/evergreen_compute.c
> > @@ -953,6 +953,22 @@ void r600_compute_global_buffer_destroy(
> > 
> >     free(res);
> >  
> >  }
> > 
> > +void r600_compute_global_demote_or_alloc(
> > +   struct compute_memory_pool *pool,
> > +   struct compute_memory_item *item,
> > +   struct pipe_context *ctx)
> > +{
> > +   if (is_item_in_pool(item)) {
> > +           compute_memory_demote_item(pool, item, ctx);
> > +   } else {
> > +           if (item->real_buffer == NULL) {
> > +           item->real_buffer = (struct r600_resource*)
> > +                           r600_compute_buffer_alloc_vram(pool->screen, item->size_in_dw * 4);
> > +           }
> > +   }
> > +
> > +}
> > +
> > 
> >  void *r600_compute_global_transfer_map(
> >  
> >     struct pipe_context *ctx_,
> >     struct pipe_resource *resource,
> > 
> > @@ -970,16 +986,7 @@ void *r600_compute_global_transfer_map(
> > 
> >     struct pipe_resource *dst = NULL;
> >     unsigned offset = box->x;
> > 
> > -   if (is_item_in_pool(item)) {
> > -           compute_memory_demote_item(pool, item, ctx_);
> > -   }
> > -   else {
> > -           if (item->real_buffer == NULL) {
> > -           item->real_buffer = (struct r600_resource*)
> > -                           r600_compute_buffer_alloc_vram(pool->screen, item->size_in_dw * 4);
> > -           }
> > -   }
> > -
> > +   r600_compute_global_demote_or_alloc(pool, item, ctx_);
> > 
> >     dst = (struct pipe_resource*)item->real_buffer;
> >     
> >     if (usage & PIPE_TRANSFER_READ)
> > 
> > diff --git a/src/gallium/drivers/r600/evergreen_compute.h b/src/gallium/drivers/r600/evergreen_compute.h
> > index 4fb53a1..39bb854 100644
> > --- a/src/gallium/drivers/r600/evergreen_compute.h
> > +++ b/src/gallium/drivers/r600/evergreen_compute.h
> > @@ -47,6 +47,7 @@ void evergreen_emit_cs_shader(struct r600_context *rctx, struct r600_atom *atom)
> >  struct pipe_resource *r600_compute_global_buffer_create(struct pipe_screen *screen, const struct pipe_resource *templ);
> >  void r600_compute_global_buffer_destroy(struct pipe_screen *screen, struct pipe_resource *res);
> > +void r600_compute_global_demote_or_alloc(struct compute_memory_pool *pool, struct compute_memory_item *item, struct pipe_context *ctx);
> >  void *r600_compute_global_transfer_map(
> >  
> >     struct pipe_context *ctx_,
> >     struct pipe_resource *resource,
> > 
> > diff --git a/src/gallium/drivers/r600/r600_blit.c b/src/gallium/drivers/r600/r600_blit.c
> > index f766e37..f6471cb 100644
> > --- a/src/gallium/drivers/r600/r600_blit.c
> > +++ b/src/gallium/drivers/r600/r600_blit.c
> > @@ -21,6 +21,8 @@
> > 
> >   * USE OR OTHER DEALINGS IN THE SOFTWARE.
> >   */
> >  
> >  #include "r600_pipe.h"
> > 
> > +#include "compute_memory_pool.h"
> > +#include "evergreen_compute.h"
> > 
> >  #include "util/u_surface.h"
> >  #include "util/u_format.h"
> >  #include "evergreend.h"
> > 
> > @@ -514,29 +516,33 @@ static void r600_copy_buffer(struct pipe_context *ctx, struct pipe_resource *dst
> >   * into a single global resource (r600_screen::global_pool).  The means
> >   * they don't have their own cs_buf handle, so they cannot be passed
> >   * to r600_copy_buffer() and must be handled separately.
> > 
> > - *
> > - * XXX: It should be possible to implement this function using
> > - * r600_copy_buffer() by passing the memory_pool resource as both src
> > - * and dst and updating dstx and src_box to point to the correct offsets.
> > - * This would likely perform better than the current implementation.
> > 
> >   */
> >  
> >  static void r600_copy_global_buffer(struct pipe_context *ctx,
> >                                 struct pipe_resource *dst, unsigned dstx,
> >                                 struct pipe_resource *src,
> >                                 const struct pipe_box *src_box)
> >  {
> > 
> > -   struct pipe_box dst_box;
> > -   struct pipe_transfer *src_pxfer, *dst_pxfer;
> > -
> > -   u_box_1d(dstx, src_box->width, &dst_box);
> > -   void *src_ptr = ctx->transfer_map(ctx, src, 0, PIPE_TRANSFER_READ,
> > -                                     src_box, &src_pxfer);
> > -   void *dst_ptr = ctx->transfer_map(ctx, dst, 0, PIPE_TRANSFER_WRITE,
> > -                                     &dst_box, &dst_pxfer);
> > -   memcpy(dst_ptr, src_ptr, src_box->width);
> > -
> > -   ctx->transfer_unmap(ctx, src_pxfer);
> > -   ctx->transfer_unmap(ctx, dst_pxfer);
> > +   struct r600_context *rctx = (struct r600_context*)ctx;
> > +   struct compute_memory_pool *pool = rctx->screen->global_pool;
> > +
> > +   if (src->bind & PIPE_BIND_GLOBAL) {
> > +           struct r600_resource_global *rsrc =
> > +                   (struct r600_resource_global *)src;
> > +           struct compute_memory_item *item = rsrc->chunk;
> > +
> > +           r600_compute_global_demote_or_alloc(pool, item, ctx);
> > +           src = (struct pipe_resource*)item->real_buffer;
> > +   }
> > +   if (dst->bind & PIPE_BIND_GLOBAL) {
> > +           struct r600_resource_global *rdst =
> > +                   (struct r600_resource_global *)dst;
> > +           struct compute_memory_item *item = rdst->chunk;
> > +
> > +           r600_compute_global_demote_or_alloc(pool, item, ctx);
> > +           dst = (struct pipe_resource*)item->real_buffer;
> > +   }
> > +
> > +   r600_copy_buffer(ctx, dst, dstx, src, src_box);
> > 
> >  }
> >  
> >  static void r600_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,

_______________________________________________
mesa-dev mailing list
mesa-dev@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/mesa-dev

Reply via email to