From: Marek Olšák <marek.ol...@amd.com>

---
 src/gallium/winsys/amdgpu/drm/amdgpu_bo.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)
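(Reviewer note, not part of the patch.) The change folds the existing pb_slabs_reclaim() + pb_cache_release_all_buffers() pair into one helper, amdgpu_clean_up_buffer_managers(), and calls it on every allocation or mapping failure path before retrying once. Below is a minimal, self-contained sketch of that try / clean-up / retry pattern; fake_winsys, try_allocate() and the local clean_up_buffer_managers() are made-up stand-ins for illustration, not the real winsys types or API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct amdgpu_winsys: only the state the sketch needs. */
struct fake_winsys {
   int buffers_parked_in_pools; /* memory held by the slab/cache pools */
};

/* Stand-in for amdgpu_clean_up_buffer_managers(): return pooled memory. */
static void clean_up_buffer_managers(struct fake_winsys *ws)
{
   ws->buffers_parked_in_pools = 0;
}

/* Stand-in for amdgpu_create_bo()/amdgpu_bo_cpu_map(): pretend the attempt
 * fails while too much memory is still parked in the pools. */
static bool try_allocate(struct fake_winsys *ws)
{
   return ws->buffers_parked_in_pools < 8;
}

/* The pattern the patch applies at each failure site: try once, clean up
 * both buffer managers, then try exactly one more time. */
static bool allocate_with_retry(struct fake_winsys *ws)
{
   if (try_allocate(ws))
      return true;

   clean_up_buffer_managers(ws);
   return try_allocate(ws);
}

int main(void)
{
   struct fake_winsys ws = { .buffers_parked_in_pools = 32 };

   printf("allocation %s\n", allocate_with_retry(&ws) ? "succeeded" : "failed");
   return 0;
}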
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index 3ee38b8a79f..49de30bb57c 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -211,20 +211,26 @@ static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
 
    assert(bo->bo); /* slab buffers have a separate vtbl */
 
    if (bo->u.real.use_reusable_pool)
       pb_cache_add_buffer(&bo->u.real.cache_entry);
    else
       amdgpu_bo_destroy(_buf);
 }
 
+static void amdgpu_clean_up_buffer_managers(struct amdgpu_winsys *ws)
+{
+   pb_slabs_reclaim(&ws->bo_slabs);
+   pb_cache_release_all_buffers(&ws->bo_cache);
+}
+
 static void *amdgpu_bo_map(struct pb_buffer *buf,
                            struct radeon_cmdbuf *rcs,
                            enum pipe_transfer_usage usage)
 {
    struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
    struct amdgpu_winsys_bo *real;
    struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
    int r;
    void *cpu = NULL;
    uint64_t offset = 0;
@@ -318,22 +324,22 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
 
    if (bo->bo) {
       real = bo;
    } else {
       real = bo->u.slab.real;
       offset = bo->va - real->va;
    }
 
    r = amdgpu_bo_cpu_map(real->bo, &cpu);
    if (r) {
-      /* Clear the cache and try again. */
-      pb_cache_release_all_buffers(&real->ws->bo_cache);
+      /* Clean up buffer managers and try again. */
+      amdgpu_clean_up_buffer_managers(real->ws);
       r = amdgpu_bo_cpu_map(real->bo, &cpu);
       if (r)
          return NULL;
    }
 
    if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
       if (real->initial_domain & RADEON_DOMAIN_VRAM)
          real->ws->mapped_vram += real->base.size;
       else if (real->initial_domain & RADEON_DOMAIN_GTT)
          real->ws->mapped_gtt += real->base.size;
@@ -1198,22 +1204,22 @@ amdgpu_bo_create(struct radeon_winsys *rws,
        size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
        alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
       struct pb_slab_entry *entry;
       int heap = radeon_get_heap_index(domain, flags);
 
       if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
          goto no_slab;
 
       entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
       if (!entry) {
-         /* Clear the cache and try again. */
-         pb_cache_release_all_buffers(&ws->bo_cache);
+         /* Clean up buffer managers and try again. */
+         amdgpu_clean_up_buffer_managers(ws);
 
          entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
       }
       if (!entry)
          return NULL;
 
       bo = NULL;
       bo = container_of(entry, bo, u.slab.entry);
 
       pipe_reference_init(&bo->base.reference, 1);
@@ -1247,23 +1253,23 @@ no_slab:
       /* Get a buffer from the cache. */
       bo = (struct amdgpu_winsys_bo*)
            pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, 0, heap);
       if (bo)
          return &bo->base;
    }
 
    /* Create a new one. */
    bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
    if (!bo) {
-      /* Clear the cache and try again. */
-      pb_slabs_reclaim(&ws->bo_slabs);
-      pb_cache_release_all_buffers(&ws->bo_cache);
+      /* Clean up buffer managers and try again. */
+      amdgpu_clean_up_buffer_managers(ws);
+
       bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
       if (!bo)
          return NULL;
    }
 
    bo->u.real.use_reusable_pool = use_reusable_pool;
    return &bo->base;
 }
 
 static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
-- 
2.17.1
_______________________________________________
mesa-dev mailing list
mesa-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/mesa-dev