From: Marek Olšák <marek.ol...@amd.com>

---
 src/gallium/winsys/radeon/drm/radeon_drm_bo.c     | 63 ++++++++++++-----------
 src/gallium/winsys/radeon/drm/radeon_drm_winsys.c |  9 ++--
 src/gallium/winsys/radeon/drm/radeon_drm_winsys.h | 11 ++--
 3 files changed, 47 insertions(+), 36 deletions(-)

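This moves the per-winsys VA bookkeeping (va_offset, va_holes,
bo_va_mutex) into a reusable struct radeon_vm_heap, so the hole
allocator can be pointed at more than one address range. For anyone
new to this code, below is a minimal standalone sketch of the
first-fit scheme that radeon_bomgr_find_va implements; the demo_*
names are illustrative only, and the locking and GART page-size
alignment done by the real code are left out:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_hole {
    struct demo_hole *next;
    uint64_t offset;
    uint64_t size;
};

struct demo_heap {
    uint64_t start;           /* top of the allocated region */
    struct demo_hole *holes;  /* freed ranges below the top */
};

static uint64_t demo_align(uint64_t v, uint64_t a)
{
    return (v + a - 1) & ~(a - 1);   /* 'a' must be a power of two */
}

static uint64_t demo_find_va(struct demo_heap *heap, uint64_t size,
                             uint64_t alignment)
{
    struct demo_hole **p;

    /* First fit: reuse a hole when one is big enough. */
    for (p = &heap->holes; *p; p = &(*p)->next) {
        struct demo_hole *hole = *p;
        uint64_t offset = demo_align(hole->offset, alignment);
        uint64_t waste = offset - hole->offset;

        if (hole->size < waste + size)
            continue;

        if (waste) {
            /* Keep the alignment padding as a smaller hole, like the
             * waste handling in radeon_bomgr_find_va. */
            struct demo_hole *pad = malloc(sizeof(*pad));
            if (pad) {               /* on OOM the padding is just lost */
                pad->offset = hole->offset;
                pad->size = waste;
                pad->next = hole;
                *p = pad;
                p = &pad->next;
            }
        }

        hole->offset += waste + size;
        hole->size -= waste + size;
        if (!hole->size) {           /* exact fit: drop the empty hole */
            *p = hole->next;
            free(hole);
        }
        return offset;
    }

    /* No hole fits: carve from the top. The driver records the skipped
     * padding as a hole here as well; this sketch simply advances. */
    uint64_t offset = demo_align(heap->start, alignment);
    heap->start = offset + size;
    return offset;
}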
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index 7aef238..bbfe5cc 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
@@ -191,146 +191,148 @@ static enum radeon_bo_domain radeon_bo_get_initial_domain(
         fprintf(stderr, "radeon: failed to get initial domain: %p 0x%08X\n",
                 bo, bo->handle);
         /* Default domain as returned by get_valid_domain. */
         return RADEON_DOMAIN_VRAM_GTT;
     }
 
     /* GEM domains and winsys domains are defined the same. */
     return get_valid_domain(args.value);
 }
 
-static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
+static uint64_t radeon_bomgr_find_va(const struct radeon_info *info,
+                                     struct radeon_vm_heap *heap,
                                      uint64_t size, uint64_t alignment)
 {
     struct radeon_bo_va_hole *hole, *n;
     uint64_t offset = 0, waste = 0;
 
     /* All VM address space holes will implicitly start aligned to the
      * size alignment, so we don't need to sanitize the alignment here
      */
-    size = align(size, rws->info.gart_page_size);
+    size = align(size, info->gart_page_size);
 
-    mtx_lock(&rws->bo_va_mutex);
+    mtx_lock(&heap->mutex);
     /* first look for a hole */
-    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
+    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &heap->holes, list) {
         offset = hole->offset;
         waste = offset % alignment;
         waste = waste ? alignment - waste : 0;
         offset += waste;
         if (offset >= (hole->offset + hole->size)) {
             continue;
         }
         if (!waste && hole->size == size) {
             offset = hole->offset;
             list_del(&hole->list);
             FREE(hole);
-            mtx_unlock(&rws->bo_va_mutex);
+            mtx_unlock(&heap->mutex);
             return offset;
         }
         if ((hole->size - waste) > size) {
             if (waste) {
                 n = CALLOC_STRUCT(radeon_bo_va_hole);
                 n->size = waste;
                 n->offset = hole->offset;
                 list_add(&n->list, &hole->list);
             }
             hole->size -= (size + waste);
             hole->offset += size + waste;
-            mtx_unlock(&rws->bo_va_mutex);
+            mtx_unlock(&heap->mutex);
             return offset;
         }
         if ((hole->size - waste) == size) {
             hole->size = waste;
-            mtx_unlock(&rws->bo_va_mutex);
+            mtx_unlock(&heap->mutex);
             return offset;
         }
     }
 
-    offset = rws->va_offset;
+    offset = heap->start;
     waste = offset % alignment;
     waste = waste ? alignment - waste : 0;
     if (waste) {
         n = CALLOC_STRUCT(radeon_bo_va_hole);
         n->size = waste;
         n->offset = offset;
-        list_add(&n->list, &rws->va_holes);
+        list_add(&n->list, &heap->holes);
     }
     offset += waste;
-    rws->va_offset += size + waste;
-    mtx_unlock(&rws->bo_va_mutex);
+    heap->start += size + waste;
+    mtx_unlock(&heap->mutex);
     return offset;
 }
 
-static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
+static void radeon_bomgr_free_va(const struct radeon_info *info,
+                                 struct radeon_vm_heap *heap,
                                  uint64_t va, uint64_t size)
 {
     struct radeon_bo_va_hole *hole = NULL;
 
-    size = align(size, rws->info.gart_page_size);
+    size = align(size, info->gart_page_size);
 
-    mtx_lock(&rws->bo_va_mutex);
-    if ((va + size) == rws->va_offset) {
-        rws->va_offset = va;
+    mtx_lock(&heap->mutex);
+    if ((va + size) == heap->start) {
+        heap->start = va;
         /* Delete uppermost hole if it reaches the new top */
-        if (!LIST_IS_EMPTY(&rws->va_holes)) {
-            hole = container_of(rws->va_holes.next, hole, list);
+        if (!LIST_IS_EMPTY(&heap->holes)) {
+            hole = container_of(heap->holes.next, hole, list);
             if ((hole->offset + hole->size) == va) {
-                rws->va_offset = hole->offset;
+                heap->start = hole->offset;
                 list_del(&hole->list);
                 FREE(hole);
             }
         }
     } else {
         struct radeon_bo_va_hole *next;
 
-        hole = container_of(&rws->va_holes, hole, list);
-        LIST_FOR_EACH_ENTRY(next, &rws->va_holes, list) {
+        hole = container_of(&heap->holes, hole, list);
+        LIST_FOR_EACH_ENTRY(next, &heap->holes, list) {
            if (next->offset < va)
                break;
             hole = next;
         }
 
-        if (&hole->list != &rws->va_holes) {
+        if (&hole->list != &heap->holes) {
             /* Grow upper hole if it's adjacent */
             if (hole->offset == (va + size)) {
                 hole->offset = va;
                 hole->size += size;
                 /* Merge lower hole if it's adjacent */
-                if (next != hole && &next->list != &rws->va_holes &&
+                if (next != hole && &next->list != &heap->holes &&
                     (next->offset + next->size) == va) {
                     next->size += hole->size;
                     list_del(&hole->list);
                     FREE(hole);
                 }
                 goto out;
             }
         }
 
         /* Grow lower hole if it's adjacent */
-        if (next != hole && &next->list != &rws->va_holes &&
+        if (next != hole && &next->list != &heap->holes &&
             (next->offset + next->size) == va) {
             next->size += size;
             goto out;
         }
 
         /* FIXME on allocation failure we just lose virtual address space
          * maybe print a warning
          */
         next = CALLOC_STRUCT(radeon_bo_va_hole);
         if (next) {
             next->size = size;
             next->offset = va;
             list_add(&next->list, &hole->list);
         }
     }
 out:
-    mtx_unlock(&rws->bo_va_mutex);
+    mtx_unlock(&heap->mutex);
 }
 
 void radeon_bo_destroy(struct pb_buffer *_buf)
 {
     struct radeon_bo *bo = radeon_bo(_buf);
     struct radeon_drm_winsys *rws = bo->rws;
     struct drm_gem_close args;
 
     assert(bo->handle && "must not be called for slab entries");
 
@@ -361,21 +363,21 @@ void radeon_bo_destroy(struct pb_buffer *_buf)
 
             if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va,
                                    sizeof(va)) != 0 &&
                va.operation == RADEON_VA_RESULT_ERROR) {
                 fprintf(stderr, "radeon: Failed to deallocate virtual address 
for buffer:\n");
                 fprintf(stderr, "radeon:    size      : %"PRIu64" bytes\n", 
bo->base.size);
                 fprintf(stderr, "radeon:    va        : 0x%"PRIx64"\n", 
bo->va);
             }
        }
 
-       radeon_bomgr_free_va(rws, bo->va, bo->base.size);
+       radeon_bomgr_free_va(&rws->info, &rws->vm64, bo->va, bo->base.size);
     }
 
     /* Close object. */
     args.handle = bo->handle;
     drmIoctl(rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
 
     mtx_destroy(&bo->u.real.map_mutex);
 
     if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         rws->allocated_vram -= align(bo->base.size, rws->info.gart_page_size);
@@ -651,21 +653,22 @@ static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
     if (heap >= 0) {
         pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base,
                             heap);
     }
 
     if (rws->info.has_virtual_memory) {
         struct drm_radeon_gem_va va;
         unsigned va_gap_size;
 
         va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
-        bo->va = radeon_bomgr_find_va(rws, size + va_gap_size, alignment);
+        bo->va = radeon_bomgr_find_va(&rws->info, &rws->vm64,
+                                      size + va_gap_size, alignment);
 
         va.handle = bo->handle;
         va.vm_id = 0;
         va.operation = RADEON_VA_MAP;
         va.flags = RADEON_VM_PAGE_READABLE |
                    RADEON_VM_PAGE_WRITEABLE |
                    RADEON_VM_PAGE_SNOOPED;
         va.offset = bo->va;
         r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
         if (r && va.operation == RADEON_VA_RESULT_ERROR) {
@@ -1052,21 +1055,22 @@ static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
     bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
     (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);
 
     util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
 
     mtx_unlock(&ws->bo_handles_mutex);
 
     if (ws->info.has_virtual_memory) {
         struct drm_radeon_gem_va va;
 
-        bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
+        bo->va = radeon_bomgr_find_va(&ws->info, &ws->vm64,
+                                      bo->base.size, 1 << 20);
 
         va.handle = bo->handle;
         va.operation = RADEON_VA_MAP;
         va.vm_id = 0;
         va.offset = bo->va;
         va.flags = RADEON_VM_PAGE_READABLE |
                    RADEON_VM_PAGE_WRITEABLE |
                    RADEON_VM_PAGE_SNOOPED;
         va.offset = bo->va;
         r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
@@ -1195,21 +1199,22 @@ done:
     mtx_unlock(&ws->bo_handles_mutex);
 
     if (stride)
         *stride = whandle->stride;
     if (offset)
         *offset = whandle->offset;
 
     if (ws->info.has_virtual_memory && !bo->va) {
         struct drm_radeon_gem_va va;
 
-        bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
+        bo->va = radeon_bomgr_find_va(&ws->info, &ws->vm64,
+                                      bo->base.size, 1 << 20);
 
         va.handle = bo->handle;
         va.operation = RADEON_VA_MAP;
         va.vm_id = 0;
         va.offset = bo->va;
         va.flags = RADEON_VM_PAGE_READABLE |
                    RADEON_VM_PAGE_WRITEABLE |
                    RADEON_VM_PAGE_SNOOPED;
         va.offset = bo->va;
         r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
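
The free path (radeon_bomgr_free_va above) hands a range back by
either lowering the top of the heap or inserting a hole and merging it
with an adjacent one. A matching sketch, continuing the demo heap from
the note above; it is simplified in that the driver also merges with
the preceding hole and keeps its hole list sorted the other way
around:

static void demo_free_va(struct demo_heap *heap, uint64_t va,
                         uint64_t size)
{
    struct demo_hole **p, *next, *hole;

    if (va + size == heap->start) {
        /* The range sits at the top: just lower the top. The driver
         * additionally collapses an uppermost hole that now touches
         * the new top; omitted here. */
        heap->start = va;
        return;
    }

    /* Keep the list sorted by ascending offset. */
    for (p = &heap->holes; *p && (*p)->offset < va; p = &(*p)->next);

    next = *p;
    if (next && va + size == next->offset) {
        /* Merge with the following hole. */
        next->offset = va;
        next->size += size;
        return;
    }

    hole = malloc(sizeof(*hole));
    if (!hole)
        return;  /* as the FIXME in the patch notes, the range is lost */
    hole->offset = va;
    hole->size = size;
    hole->next = next;
    *p = hole;
}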
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
index 25faa40..0c1085b 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
@@ -542,21 +542,21 @@ static void radeon_winsys_destroy(struct radeon_winsys *rws)
     pb_cache_deinit(&ws->bo_cache);
 
     if (ws->gen >= DRV_R600) {
         radeon_surface_manager_free(ws->surf_man);
     }
 
     util_hash_table_destroy(ws->bo_names);
     util_hash_table_destroy(ws->bo_handles);
     util_hash_table_destroy(ws->bo_vas);
     mtx_destroy(&ws->bo_handles_mutex);
-    mtx_destroy(&ws->bo_va_mutex);
+    mtx_destroy(&ws->vm64.mutex);
     mtx_destroy(&ws->bo_fence_lock);
 
     if (ws->fd >= 0)
         close(ws->fd);
 
     FREE(rws);
 }
 
 static void radeon_query_info(struct radeon_winsys *rws,
                               struct radeon_info *info)
@@ -805,24 +805,25 @@ radeon_drm_winsys_create(int fd, const struct pipe_screen_config *config,
     radeon_drm_cs_init_functions(ws);
     radeon_surface_init_functions(ws);
 
     (void) mtx_init(&ws->hyperz_owner_mutex, mtx_plain);
     (void) mtx_init(&ws->cmask_owner_mutex, mtx_plain);
 
     ws->bo_names = util_hash_table_create(handle_hash, handle_compare);
     ws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
     ws->bo_vas = util_hash_table_create(handle_hash, handle_compare);
     (void) mtx_init(&ws->bo_handles_mutex, mtx_plain);
-    (void) mtx_init(&ws->bo_va_mutex, mtx_plain);
+    (void) mtx_init(&ws->vm64.mutex, mtx_plain);
     (void) mtx_init(&ws->bo_fence_lock, mtx_plain);
-    ws->va_offset = ws->va_start;
-    list_inithead(&ws->va_holes);
+    list_inithead(&ws->vm64.holes);
+
+    ws->vm64.start = ws->va_start;
 
     /* TTM aligns the BO size to the CPU page size */
     ws->info.gart_page_size = sysconf(_SC_PAGESIZE);
 
     if (ws->num_cpus > 1 && debug_get_option_thread())
         util_queue_init(&ws->cs_queue, "radeon_cs", 8, 1, 0);
 
     /* Create the screen at the end. The winsys must be initialized
      * completely.
      *
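
The init side in radeon_drm_winsys_create above reduces to seeding the
heap: vm64.start takes over the old va_start and the hole list begins
empty. A quick round trip with the demo heap sketched earlier:

int main(void)
{
    struct demo_heap heap = { .start = 0x10000, .holes = NULL };

    uint64_t a = demo_find_va(&heap, 4096, 4096);  /* 0x10000 */
    uint64_t b = demo_find_va(&heap, 8192, 4096);  /* 0x11000 */
    demo_free_va(&heap, a, 4096);                  /* leaves a hole */
    uint64_t c = demo_find_va(&heap, 4096, 4096);  /* reuses 0x10000 */

    printf("a=0x%llx b=0x%llx c=0x%llx\n",
           (unsigned long long)a, (unsigned long long)b,
           (unsigned long long)c);
    return 0;
}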
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
index dd6e19a..c65f5cb 100644
--- a/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
+++ b/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
@@ -38,20 +38,26 @@ struct radeon_drm_cs;
 
 enum radeon_generation {
     DRV_R300,
     DRV_R600,
     DRV_SI
 };
 
 #define RADEON_SLAB_MIN_SIZE_LOG2 9
 #define RADEON_SLAB_MAX_SIZE_LOG2 14
 
+struct radeon_vm_heap {
+    mtx_t mutex;
+    uint64_t start;
+    struct list_head holes;
+};
+
 struct radeon_drm_winsys {
     struct radeon_winsys base;
     struct pipe_reference reference;
     struct pb_cache bo_cache;
     struct pb_slabs bo_slabs;
 
     int fd; /* DRM file descriptor */
     int num_cs; /* The number of command streams created. */
     uint64_t allocated_vram;
     uint64_t allocated_gtt;
@@ -69,25 +75,24 @@ struct radeon_drm_winsys {
     uint32_t va_unmap_working;
     uint32_t accel_working2;
 
     /* List of buffer GEM names. Protected by bo_handles_mutex. */
     struct util_hash_table *bo_names;
     /* List of buffer handles. Protected by bo_handles_mutex. */
     struct util_hash_table *bo_handles;
     /* List of buffer virtual memory ranges. Protected by bo_handles_mutex. */
     struct util_hash_table *bo_vas;
     mtx_t bo_handles_mutex;
-    mtx_t bo_va_mutex;
     mtx_t bo_fence_lock;
 
-    uint64_t va_offset;
-    struct list_head va_holes;
+    struct radeon_vm_heap vm64;
+
     bool check_vm;
 
     struct radeon_surface_manager *surf_man;
 
     uint32_t num_cpus;      /* Number of CPUs. */
 
     struct radeon_drm_cs *hyperz_owner;
     mtx_t hyperz_owner_mutex;
     struct radeon_drm_cs *cmask_owner;
     mtx_t cmask_owner_mutex;
-- 
2.7.4
