The static IB pool isn't necessary anymore and the suballocator seems to perform even better. IBs are now allocated dynamically out of the suballocator and destroyed as soon as their fence has signaled.
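
Roughly, the resulting IB lifecycle from a caller's point of view looks like this (illustrative sketch only, not part of the patch; error handling is shortened and the ring index, packet and size are just example values):

	struct radeon_ib *ib;
	int r;

	/* kmallocs the IB and suballocates its backing memory from
	 * rdev->sa_manager, 256 byte aligned */
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 64 * 1024);
	if (r)
		return r;

	ib->ptr[0] = PACKET2(0);	/* fill in the command stream */
	ib->length_dw = 1;

	/* writes the IB to the ring and emits ib->fence */
	r = radeon_ib_schedule(rdev, ib);

	/* no pool slot to recycle: if the fence was emitted the IB stays
	 * alive (radeon_fence_set_associated_ib) until the fence signals
	 * and radeon_fence_process_signaled() frees it, otherwise it is
	 * destroyed immediately */
	radeon_ib_free(rdev, &ib);
	return r;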
Signed-off-by: Christian König <deathsim...@vodafone.de>
---
 drivers/gpu/drm/radeon/radeon.h           |   22 +--
 drivers/gpu/drm/radeon/radeon_device.c    |    1 -
 drivers/gpu/drm/radeon/radeon_fence.c     |   44 +++++-
 drivers/gpu/drm/radeon/radeon_gart.c      |   12 +-
 drivers/gpu/drm/radeon/radeon_ring.c      |  240 ++++++++-------------------
 drivers/gpu/drm/radeon/radeon_semaphore.c |    6 +-
 6 files changed, 123 insertions(+), 202 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 222939f..d46d5ac 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -249,6 +249,8 @@ extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
 /*
  * Fences.
  */
+struct radeon_ib;
+
 struct radeon_fence_driver {
 	uint32_t			scratch_reg;
 	uint64_t			gpu_addr;
@@ -259,7 +261,6 @@ struct radeon_fence_driver {
 	wait_queue_head_t		queue;
 	struct list_head		created;
 	struct list_head		emitted;
-	struct list_head		signaled;
 	bool				initialized;
 };
 
@@ -274,6 +275,7 @@ struct radeon_fence {
 	/* RB, DMA, etc. */
 	int				ring;
 	struct radeon_semaphore		*semaphore;
+	struct radeon_ib		*ib;
 };
 
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -289,6 +291,7 @@ int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
 void radeon_fence_unref(struct radeon_fence **fence);
 int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+bool radeon_fence_set_associated_ib(struct radeon_fence *fence, struct radeon_ib *ib);
 
 /*
  * Tiling registers
@@ -603,7 +606,6 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
 
 struct radeon_ib {
 	struct radeon_sa_bo	sa_bo;
-	unsigned		idx;
 	uint32_t		length_dw;
 	uint64_t		gpu_addr;
 	uint32_t		*ptr;
@@ -612,18 +614,6 @@ struct radeon_ib {
 	bool			is_const_ib;
 };
 
-/*
- * locking -
- * mutex protects scheduled_ibs, ready, alloc_bm
- */
-struct radeon_ib_pool {
-	struct radeon_mutex		mutex;
-	struct radeon_sa_manager	sa_manager;
-	struct radeon_ib		ibs[RADEON_IB_POOL_SIZE];
-	bool				ready;
-	unsigned			head_id;
-};
-
 struct radeon_ring {
 	struct radeon_bo	*ring_obj;
 	volatile uint32_t	*ring;
@@ -764,7 +754,6 @@ struct si_rlc {
 int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  struct radeon_ib **ib, unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
@@ -1497,7 +1486,8 @@ struct radeon_device {
 	rwlock_t			fence_lock;
 	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
 	struct radeon_ring		ring[RADEON_NUM_RINGS];
-	struct radeon_ib_pool		ib_pool;
+	bool				ib_pool_ready;
+	struct radeon_sa_manager	sa_manager;
 	struct radeon_irq		irq;
 	struct radeon_asic		*asic;
 	struct radeon_gem		gem;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 8c49990..9189f8d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -723,7 +723,6 @@ int radeon_device_init(struct radeon_device *rdev,
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
 	radeon_mutex_init(&rdev->cs_mutex);
-	radeon_mutex_init(&rdev->ib_pool.mutex);
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		mutex_init(&rdev->ring[i].mutex);
 	mutex_init(&rdev->dc_hw_i2c_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 764ab7e..0e8ac35 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -83,7 +83,8 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 	return 0;
 }
 
-static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
+static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring,
+				     struct list_head *signaled)
 {
 	struct radeon_fence *fence;
 	struct list_head *i, *n;
@@ -110,7 +111,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
 	i = n;
 	do {
 		n = i->prev;
-		list_move_tail(i, &rdev->fence_drv[ring].signaled);
+		list_move_tail(i, signaled);
 		fence = list_entry(i, struct radeon_fence, list);
 		fence->signaled = true;
 		i = n;
@@ -120,6 +121,18 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
 	return wake;
 }
 
+static void radeon_fence_process_signaled(struct radeon_device *rdev, struct list_head *signaled)
+{
+	struct radeon_fence *fence;
+	struct list_head *i, *n;
+
+	list_for_each_safe(i, n, signaled) {
+		fence = list_entry(i, struct radeon_fence, list);
+		list_del_init(&fence->list);
+		radeon_ib_free(rdev, &fence->ib);
+	}
+}
+
 static void radeon_fence_destroy(struct kref *kref)
 {
 	unsigned long irq_flags;
@@ -152,6 +165,7 @@ int radeon_fence_create(struct radeon_device *rdev,
 	(*fence)->seq = 0;
 	(*fence)->ring = ring;
 	(*fence)->semaphore = NULL;
+	(*fence)->ib = NULL;
 	INIT_LIST_HEAD(&(*fence)->list);
 
 	write_lock_irqsave(&rdev->fence_lock, irq_flags);
@@ -164,6 +178,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 {
 	unsigned long irq_flags;
 	bool signaled = false;
+	LIST_HEAD(siglist);
 
 	if (!fence)
 		return true;
@@ -179,10 +194,12 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 		signaled = true;
 	}
 	if (!signaled) {
-		radeon_fence_poll_locked(fence->rdev, fence->ring);
+		radeon_fence_poll_locked(fence->rdev, fence->ring, &siglist);
 		signaled = fence->signaled;
 	}
 	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
+	radeon_fence_process_signaled(fence->rdev, &siglist);
+
 	return signaled;
 }
 
@@ -341,10 +358,12 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
 	bool wake;
+	LIST_HEAD(signaled);
 
 	write_lock_irqsave(&rdev->fence_lock, irq_flags);
-	wake = radeon_fence_poll_locked(rdev, ring);
+	wake = radeon_fence_poll_locked(rdev, ring, &signaled);
 	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+	radeon_fence_process_signaled(rdev, &signaled);
 	if (wake) {
 		wake_up_all(&rdev->fence_drv[ring].queue);
 	}
@@ -373,6 +392,22 @@ int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
 	return not_processed;
 }
 
+bool radeon_fence_set_associated_ib(struct radeon_fence *fence, struct radeon_ib *ib)
+{
+	struct radeon_device *rdev = fence->rdev;
+	unsigned long irq_flags;
+	bool isset = false;
+
+	/* a read lock is sufficient, since this should be called only once */
+	read_lock_irqsave(&rdev->fence_lock, irq_flags);
+	if (fence->emitted && !fence->signaled) {
+		fence->ib = ib;
+		isset = true;
+	}
+	read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+	return isset;
+}
+
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
@@ -413,7 +448,6 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
 	atomic_set(&rdev->fence_drv[ring].seq, 0);
 	INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
 	INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
-	INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
 	init_waitqueue_head(&rdev->fence_drv[ring].queue);
 	rdev->fence_drv[ring].initialized = false;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index c58a036..0ab3277 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -434,8 +434,8 @@ retry_id:
 	rdev->vm_manager.use_bitmap |= 1 << id;
 	vm->id = id;
 	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-	return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
-				       &rdev->ib_pool.sa_manager.bo->tbo.mem);
+	return radeon_vm_bo_update_pte(rdev, vm, rdev->sa_manager.bo,
+				       &rdev->sa_manager.bo->tbo.mem);
 }
 
 /* object have to be reserved */
@@ -633,7 +633,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	/* map the ib pool buffer at 0 in virtual address space, set
 	 * read only
 	 */
-	r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+	r = radeon_vm_bo_add(rdev, vm, rdev->sa_manager.bo, 0,
 			     RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
 	return r;
 }
@@ -650,12 +650,12 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	radeon_mutex_unlock(&rdev->cs_mutex);
 
 	/* remove all bo */
-	r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+	r = radeon_bo_reserve(rdev->sa_manager.bo, false);
 	if (!r) {
-		bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+		bo_va = radeon_bo_va(rdev->sa_manager.bo, vm);
 		list_del_init(&bo_va->bo_list);
 		list_del_init(&bo_va->vm_list);
-		radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+		radeon_bo_unreserve(rdev->sa_manager.bo);
 		kfree(bo_va);
 	}
 	if (!list_empty(&vm->va)) {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 5942769..77bed81 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -24,6 +24,7 @@
  * Authors: Dave Airlie
  *          Alex Deucher
  *          Jerome Glisse
+ *          Christian König
  */
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -33,8 +34,10 @@
 #include "radeon.h"
 #include "atom.h"
 
-int radeon_debugfs_ib_init(struct radeon_device *rdev);
-int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+/*
+ * IB.
+ */
+int radeon_debugfs_sa_init(struct radeon_device *rdev);
 
 u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 {
@@ -61,123 +64,62 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 	return idx_value;
 }
 
-void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
-{
-#if DRM_DEBUG_CODE
-	if (ring->count_dw <= 0) {
-		DRM_ERROR("radeon: writting more dword to ring than expected !\n");
-	}
-#endif
-	ring->ring[ring->wptr++] = v;
-	ring->wptr &= ring->ptr_mask;
-	ring->count_dw--;
-	ring->ring_free_dw--;
-}
-
-/*
- * IB.
- */
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
-	bool done = false;
-
-	/* only free ib which have been emited */
-	if (ib->fence && ib->fence->emitted) {
-		if (radeon_fence_signaled(ib->fence)) {
-			radeon_fence_unref(&ib->fence);
-			radeon_sa_bo_free(rdev, &ib->sa_bo);
-			done = true;
-		}
-	}
-	return done;
-}
-
 int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  struct radeon_ib **ib, unsigned size)
 {
-	struct radeon_fence *fence;
-	unsigned cretry = 0;
-	int r = 0, i, idx;
+	int r;
 
-	*ib = NULL;
-	/* align size on 256 bytes */
-	size = ALIGN(size, 256);
+	*ib = kmalloc(sizeof(struct radeon_ib), GFP_KERNEL);
+	if (*ib == NULL) {
+		return -ENOMEM;
+	}
 
-	r = radeon_fence_create(rdev, &fence, ring);
+	r = radeon_sa_bo_new(rdev, &rdev->sa_manager, &(*ib)->sa_bo, size, 256);
 	if (r) {
-		dev_err(rdev->dev, "failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+		kfree(*ib);
+		*ib = NULL;
 		return r;
 	}
 
-	radeon_mutex_lock(&rdev->ib_pool.mutex);
-	idx = rdev->ib_pool.head_id;
-retry:
-	if (cretry > 5) {
-		dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
-		radeon_mutex_unlock(&rdev->ib_pool.mutex);
-		radeon_fence_unref(&fence);
-		return -ENOMEM;
-	}
-	cretry++;
-	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
-		if (rdev->ib_pool.ibs[idx].fence == NULL) {
-			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
-					     &rdev->ib_pool.ibs[idx].sa_bo,
-					     size, 256);
-			if (!r) {
-				*ib = &rdev->ib_pool.ibs[idx];
-				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
-				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
-				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
-				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
-				(*ib)->fence = fence;
-				(*ib)->vm_id = 0;
-				(*ib)->is_const_ib = false;
-				/* ib are most likely to be allocated in a ring fashion
-				 * thus rdev->ib_pool.head_id should be the id of the
-				 * oldest ib
-				 */
-				rdev->ib_pool.head_id = (1 + idx);
-				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
-				radeon_mutex_unlock(&rdev->ib_pool.mutex);
-				return 0;
-			}
-		}
-		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
-	}
-	/* this should be rare event, ie all ib scheduled none signaled yet.
-	 */
-	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-		if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
-			r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
-			if (!r) {
-				goto retry;
-			}
-			/* an error happened */
-			break;
-		}
-		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	r = radeon_fence_create(rdev, &(*ib)->fence, ring);
+	if (r) {
+		dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
+		radeon_sa_bo_free(rdev, &(*ib)->sa_bo);
+		kfree(*ib);
+		*ib = NULL;
+		return r;
 	}
-	radeon_mutex_unlock(&rdev->ib_pool.mutex);
-	radeon_fence_unref(&fence);
-	return r;
+
+	(*ib)->ptr = rdev->sa_manager.cpu_ptr;
+	(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
+	(*ib)->gpu_addr = rdev->sa_manager.gpu_addr;
+	(*ib)->gpu_addr += (*ib)->sa_bo.offset;
+	(*ib)->vm_id = 0;
+	(*ib)->is_const_ib = false;
+
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 {
 	struct radeon_ib *tmp = *ib;
+	bool destroy = true;
 
 	*ib = NULL;
 	if (tmp == NULL) {
 		return;
 	}
-	radeon_mutex_lock(&rdev->ib_pool.mutex);
-	if (tmp->fence && !tmp->fence->emitted) {
+
+	if (tmp->fence) {
+		destroy = !radeon_fence_set_associated_ib(tmp->fence, tmp);
+	}
+
+	if (destroy) {
 		radeon_sa_bo_free(rdev, &tmp->sa_bo);
 		radeon_fence_unref(&tmp->fence);
+		kfree(tmp);
 	}
-	radeon_mutex_unlock(&rdev->ib_pool.mutex);
 }
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
@@ -187,7 +129,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	if (!ib->length_dw || !ring->ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB.\n");
 		return -EINVAL;
 	}
 
@@ -205,64 +147,45 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 int radeon_ib_pool_init(struct radeon_device *rdev)
 {
-	int i, r;
+	int r;
 
-	radeon_mutex_lock(&rdev->ib_pool.mutex);
-	if (rdev->ib_pool.ready) {
+	if (rdev->ib_pool_ready) {
 		return 0;
 	}
-	rdev->ib_pool.ready = true;
-	radeon_mutex_unlock(&rdev->ib_pool.mutex);
 
-	r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
+	r = radeon_sa_bo_manager_init(rdev, &rdev->sa_manager,
 				      RADEON_IB_POOL_SIZE*64*1024,
 				      RADEON_GEM_DOMAIN_GTT);
 	if (r) {
 		return r;
 	}
 
-	radeon_mutex_lock(&rdev->ib_pool.mutex);
-	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-		rdev->ib_pool.ibs[i].fence = NULL;
-		rdev->ib_pool.ibs[i].idx = i;
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
+	if (radeon_debugfs_sa_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for SA !\n");
 	}
-	rdev->ib_pool.head_id = 0;
-	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
+	rdev->ib_pool_ready = true;
 
-	if (radeon_debugfs_ib_init(rdev)) {
-		DRM_ERROR("Failed to register debugfs file for IB !\n");
-	}
-	radeon_mutex_unlock(&rdev->ib_pool.mutex);
 	return 0;
 }
 
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
-	unsigned i;
-
-	radeon_mutex_lock(&rdev->ib_pool.mutex);
-	if (rdev->ib_pool.ready) {
-		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
-			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
-		}
-		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
-		rdev->ib_pool.ready = false;
+	if (rdev->ib_pool_ready) {
+		radeon_sa_bo_manager_fini(rdev, &rdev->sa_manager);
+		rdev->ib_pool_ready = false;
 	}
-	radeon_mutex_unlock(&rdev->ib_pool.mutex);
 }
 
 int radeon_ib_pool_start(struct radeon_device *rdev)
 {
-	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+	return radeon_sa_bo_manager_start(rdev, &rdev->sa_manager);
 }
 
 int radeon_ib_pool_suspend(struct radeon_device *rdev)
 {
-	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+	return radeon_sa_bo_manager_suspend(rdev, &rdev->sa_manager);
 }
 
 int radeon_ib_ring_tests(struct radeon_device *rdev)
@@ -298,6 +221,21 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
 /*
  * Ring.
  */
+int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+	if (ring->count_dw <= 0) {
+		DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+	}
+#endif
+	ring->ring[ring->wptr++] = v;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw--;
+	ring->ring_free_dw--;
+}
+
 int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	/* r1xx-r5xx only has CP ring */
@@ -504,37 +442,13 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
 	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
 };
 
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
-	unsigned i;
-
-	if (ib == NULL) {
-		return 0;
-	}
-	seq_printf(m, "IB %04u\n", ib->idx);
-	seq_printf(m, "IB fence %p\n", ib->fence);
-	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
-	for (i = 0; i < ib->length_dw; i++) {
-		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
-	}
-	return 0;
-}
-
-static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
-static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
-static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
-
 static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	radeon_sa_bo_dump_debug_info(&rdev->ib_pool.sa_manager, m);
+	radeon_sa_bo_dump_debug_info(&rdev->sa_manager, m);
 
 	return 0;
 
@@ -566,26 +480,10 @@ int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *rin
 	return 0;
 }
 
-int radeon_debugfs_ib_init(struct radeon_device *rdev)
+int radeon_debugfs_sa_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	unsigned i;
-	int r;
-
-	r = radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
-	if (r)
-		return r;
-
-	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
-		radeon_debugfs_ib_idx[i] = i;
-		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
-		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
-		radeon_debugfs_ib_list[i].driver_features = 0;
-		radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
-	}
-	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
-					RADEON_IB_POOL_SIZE);
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
 #else
 	return 0;
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 4603fab..a09cc05 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -42,7 +42,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
 		return -ENOMEM;
 	}
 
-	r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
+	r = radeon_sa_bo_new(rdev, &rdev->sa_manager,
 			     &(*semaphore)->sa_bo, 8, 8);
 	if (r) {
 		kfree(*semaphore);
@@ -51,10 +51,10 @@ int radeon_semaphore_create(struct radeon_device *rdev,
 	}
 
 	(*semaphore)->waiters = 0;
-	(*semaphore)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+	(*semaphore)->gpu_addr = rdev->sa_manager.gpu_addr;
 	(*semaphore)->gpu_addr += (*semaphore)->sa_bo.offset;
 
-	cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+	cpu_ptr = rdev->sa_manager.cpu_ptr;
 	cpu_ptr += (*semaphore)->sa_bo.offset;
 	*((uint64_t*)cpu_ptr) = 0;
 
-- 
1.7.5.4