This change allows a driver to pass its memory space preferences
for buffer object placement. Up to 15 different placements can be
listed, which should be enough. The placement order is given by
memory types encoded in 4-bit fields of a 64-bit word: the first
4 bits hold the number of placements, the next 4 bits the most
preferred placement, the following 4 bits the second preferred
placement, and so on.
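
As an illustration, here is a minimal sketch of how such a placement
word could be packed and decoded. The helper names are hypothetical;
only the bit layout follows the description above:

#include <stdint.h>

/* Pack an ordered list of memory types into a placement word:
 * bits 0-3 hold the count, bits 4-7 the most preferred type,
 * bits 8-11 the second choice, and so on (at most 15 entries). */
static uint64_t pack_placements(const uint32_t *types, unsigned count)
{
	uint64_t word = count & 0xF;
	unsigned i;

	for (i = 0; i < count && i < 15; i++)
		word |= (uint64_t)(types[i] & 0xF) << ((i + 1) * 4);
	return word;
}

/* Decode the i-th preferred memory type (i starts at 0). */
static uint32_t placement_type(uint64_t word, unsigned i)
{
	return (uint32_t)((word >> ((i + 1) * 4)) & 0xF);
}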

In order to avoid long function prototypes I used a structure to
pass the driver's wishes along with the allocation. Besides the
64-bit word describing the preferred placements, it carries flags
indicating other preferences (caching attributes, pinning, ...) as
well as fpfn & lpfn, the first and last page frame numbers between
which the allocation may happen. This allows placing a buffer in a
certain range. If those fields are set to 0, ttm assumes the buffer
can be put anywhere in the address space (thus it avoids putting
a burden on the driver to always properly set those fields).
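
As a usage sketch, a driver that wants VRAM first and GTT as a
fallback might fill the structure as below (the exact flag
combination is only an example):

	struct ttm_placement placement;
	int ret;

	/* fpfn == lpfn == 0: anywhere in the address space */
	placement.fpfn = 0;
	placement.lpfn = 0;
	/* two placements: count in bits 0-3, then one type per 4 bits */
	placement.placements = 2 | (TTM_PL_VRAM << 4) | (TTM_PL_TT << 8);
	placement.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT |
			  TTM_PL_FLAG_CACHED;
	ret = ttm_buffer_object_validate(bo, &placement, true, false);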

This patch also factors out a few helpers, such as evicting the
first entry of an lru list or getting a memory space, to avoid
code duplication.

Signed-off-by: Jerome Glisse <jgli...@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c    |  413 +++++++++++++++++----------------------
 include/drm/ttm/ttm_bo_api.h    |   36 +++-
 include/drm/ttm/ttm_bo_driver.h |   20 +--
 3 files changed, 216 insertions(+), 253 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c0625..de07ac1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
+/* Notes:
+ *
+ * We store a bo pointer in the drm_mm_node struct so we know which bo
+ * owns a specific node. There is no protection on this pointer, so to
+ * keep things from going berserk you must only access it while holding
+ * the global lru lock, and make sure to reset it to NULL whenever you
+ * free a node.
+ */
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
@@ -247,7 +255,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
 /*
  * Call bo->mutex locked.
  */
-
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -328,14 +335,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                }
 
                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-
-                       struct ttm_mem_reg *old_mem = &bo->mem;
-                       uint32_t save_flags = old_mem->placement;
-
-                       *old_mem = *mem;
+                       bo->mem = *mem;
                        mem->mm_node = NULL;
-                       ttm_flag_masked(&save_flags, mem->placement,
-                                       TTM_PL_MASK_MEMTYPE);
                        goto moved;
                }
 
@@ -418,6 +419,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
                }
                if (bo->mem.mm_node) {
+                       bo->mem.mm_node->private = NULL;
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
@@ -554,17 +556,14 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
-                       bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+                       bool no_wait)
 {
-       int ret = 0;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_reg evict_mem;
-       uint32_t proposed_placement;
-
-       if (bo->mem.mem_type != mem_type)
-               goto out;
+       struct ttm_placement placement;
+       int ret = 0;
 
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
@@ -584,14 +583,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
 
-       proposed_placement = bdev->driver->evict_flags(bo);
-
-       ret = ttm_bo_mem_space(bo, proposed_placement,
-                              &evict_mem, interruptible, no_wait);
-       if (unlikely(ret != 0 && ret != -ERESTART))
-               ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
-                                      &evict_mem, interruptible, no_wait);
-
+       bdev->driver->evict_flags(bo, &placement);
+       ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+                               no_wait);
        if (ret) {
                if (ret != -ERESTART)
                        printk(KERN_ERR TTM_PFX
@@ -605,95 +599,118 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
        if (ret) {
                if (ret != -ERESTART)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+               spin_lock(&glob->lru_lock);
+               if (evict_mem.mm_node) {
+                       evict_mem.mm_node->private = NULL;
+                       drm_mm_put_block(evict_mem.mm_node);
+                       evict_mem.mm_node = NULL;
+               }
+               spin_unlock(&glob->lru_lock);
                goto out;
        }
+       bo->evicted = true;
+out:
+       return ret;
+}
+
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+                               uint32_t mem_type,
+                               bool interruptible, bool no_wait)
+{
+       struct ttm_bo_global *glob = bdev->glob;
+       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+       struct ttm_buffer_object *bo;
+       int ret, put_count = 0;
 
        spin_lock(&glob->lru_lock);
-       if (evict_mem.mm_node) {
-               drm_mm_put_block(evict_mem.mm_node);
-               evict_mem.mm_node = NULL;
-       }
+       bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
+       kref_get(&bo->list_kref);
+       ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
+       if (likely(ret == 0))
+               put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);
-       bo->evicted = true;
-out:
+       if (unlikely(ret != 0))
+               return ret;
+       while (put_count--)
+               kref_put(&bo->list_kref, ttm_bo_ref_bug);
+       ret = ttm_bo_evict(bo, interruptible, no_wait);
+       ttm_bo_unreserve(bo);
+       kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
 }
 
+static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
+                               struct ttm_mem_type_manager *man,
+                               struct ttm_placement *placement,
+                               struct ttm_mem_reg *mem,
+                               struct drm_mm_node **node)
+{
+       struct ttm_bo_global *glob = bo->glob;
+       unsigned long lpfn;
+       int ret;
+
+       lpfn = placement->lpfn;
+       if (!lpfn)
+               lpfn = man->size;
+       *node = NULL;
+       do {
+               ret = drm_mm_pre_get(&man->manager);
+               if (unlikely(ret))
+                       return ret;
+
+               spin_lock(&glob->lru_lock);
+               *node = drm_mm_search_free_in_range(&man->manager,
+                                       mem->num_pages, mem->page_alignment,
+                                       placement->fpfn, lpfn, 1);
+               if (unlikely(*node == NULL)) {
+                       spin_unlock(&glob->lru_lock);
+                       return 0;
+               }
+               *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
+                                                       mem->page_alignment,
+                                                       placement->fpfn,
+                                                       lpfn);
+               spin_unlock(&glob->lru_lock);
+       } while (*node == NULL);
+       return 0;
+}
+
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
  */
-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
-                                 struct ttm_mem_reg *mem,
-                                 uint32_t mem_type,
-                                 bool interruptible, bool no_wait)
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+                                       uint32_t mem_type,
+                                       struct ttm_placement *placement,
+                                       struct ttm_mem_reg *mem,
+                                       bool interruptible, bool no_wait)
 {
+       struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bdev->glob;
-       struct drm_mm_node *node;
-       struct ttm_buffer_object *entry;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-       struct list_head *lru;
-       unsigned long num_pages = mem->num_pages;
-       int put_count = 0;
+       struct drm_mm_node *node;
        int ret;
 
-retry_pre_get:
-       ret = drm_mm_pre_get(&man->manager);
-       if (unlikely(ret != 0))
-               return ret;
-
-       spin_lock(&glob->lru_lock);
        do {
-               node = drm_mm_search_free(&man->manager, num_pages,
-                                         mem->page_alignment, 1);
+               ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+               if (unlikely(ret != 0))
+                       return ret;
                if (node)
                        break;
-
-               lru = &man->lru;
-               if (list_empty(lru))
+               spin_lock(&glob->lru_lock);
+               if (list_empty(&man->lru)) {
+                       spin_unlock(&glob->lru_lock);
                        break;
-
-               entry = list_first_entry(lru, struct ttm_buffer_object, lru);
-               kref_get(&entry->list_kref);
-
-               ret =
-                   ttm_bo_reserve_locked(entry, interruptible, no_wait,
-                                         false, 0);
-
-               if (likely(ret == 0))
-                       put_count = ttm_bo_del_from_lru(entry);
-
+               }
                spin_unlock(&glob->lru_lock);
-
+               ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
+                                               no_wait);
                if (unlikely(ret != 0))
                        return ret;
-
-               while (put_count--)
-                       kref_put(&entry->list_kref, ttm_bo_ref_bug);
-
-               ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
-
-               ttm_bo_unreserve(entry);
-
-               kref_put(&entry->list_kref, ttm_bo_release_list);
-               if (ret)
-                       return ret;
-
-               spin_lock(&glob->lru_lock);
        } while (1);
-
-       if (!node) {
-               spin_unlock(&glob->lru_lock);
+       if (node == NULL) {
                return -ENOMEM;
        }
-
-       node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
-       if (unlikely(!node)) {
-               spin_unlock(&glob->lru_lock);
-               goto retry_pre_get;
-       }
-
-       spin_unlock(&glob->lru_lock);
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
@@ -724,7 +741,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
        return result;
 }
 
-
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
@@ -757,33 +773,28 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  * space.
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
-                    uint32_t proposed_placement,
-                    struct ttm_mem_reg *mem,
-                    bool interruptible, bool no_wait)
+                       struct ttm_placement *placement,
+                       struct ttm_mem_reg *mem,
+                       bool interruptible, bool no_wait)
 {
        struct ttm_bo_device *bdev = bo->bdev;
-       struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_type_manager *man;
-
-       uint32_t num_prios = bdev->driver->num_mem_type_prio;
-       const uint32_t *prios = bdev->driver->mem_type_prio;
-       uint32_t i;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_eagain = false;
        struct drm_mm_node *node = NULL;
-       int ret;
+       int i, ret;
 
        mem->mm_node = NULL;
-       for (i = 0; i < num_prios; ++i) {
-               mem_type = prios[i];
+       for (i = 1; i <= (placement->placements & 0xF); ++i) {
+               mem_type = (placement->placements >> (i * 4)) & 0xF;
                man = &bdev->man[mem_type];
 
                type_ok = ttm_bo_mt_compatible(man,
                                               bo->type == ttm_bo_type_user,
-                                              mem_type, proposed_placement,
+                                              mem_type, placement->flags,
                                               &cur_flags);
 
                if (!type_ok)
@@ -797,26 +808,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
                if (man->has_type && man->use_type) {
                        type_found = true;
-                       do {
-                               ret = drm_mm_pre_get(&man->manager);
-                               if (unlikely(ret))
-                                       return ret;
-
-                               spin_lock(&glob->lru_lock);
-                               node = drm_mm_search_free(&man->manager,
-                                                         mem->num_pages,
-                                                         mem->page_alignment,
-                                                         1);
-                               if (unlikely(!node)) {
-                                       spin_unlock(&glob->lru_lock);
-                                       break;
-                               }
-                               node = drm_mm_get_block_atomic(node,
-                                                              mem->num_pages,
-                                                              mem->
-                                                              page_alignment);
-                               spin_unlock(&glob->lru_lock);
-                       } while (!node);
+                       ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+                       if (unlikely(ret))
+                               return ret;
                }
                if (node)
                        break;
@@ -826,43 +820,38 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
+               if (node)
+                       node->private = bo;
                return 0;
        }
 
        if (!type_found)
                return -EINVAL;
 
-       num_prios = bdev->driver->num_mem_busy_prio;
-       prios = bdev->driver->mem_busy_prio;
-
-       for (i = 0; i < num_prios; ++i) {
-               mem_type = prios[i];
+       for (i = 1; i <= (placement->placements & 0xF); ++i) {
+               mem_type = (placement->placements >> (i * 4)) & 0xF;
                man = &bdev->man[mem_type];
-
                if (!man->has_type)
                        continue;
-
                if (!ttm_bo_mt_compatible(man,
                                          bo->type == ttm_bo_type_user,
                                          mem_type,
-                                         proposed_placement, &cur_flags))
+                                         placement->flags, &cur_flags))
                        continue;
 
                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
 
-               ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
-                                            interruptible, no_wait);
-
+               ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+                                               interruptible, no_wait);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
+                       mem->mm_node->private = bo;
                        return 0;
                }
-
                if (ret == -ERESTART)
                        has_eagain = true;
        }
-
        ret = (has_eagain) ? -ERESTART : -ENOMEM;
        return ret;
 }
@@ -885,8 +874,8 @@ int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
 }
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
-                      uint32_t proposed_placement,
-                      bool interruptible, bool no_wait)
+                       struct ttm_placement *placement,
+                       bool interruptible, bool no_wait)
 {
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
@@ -899,87 +888,73 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */
-
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);
-
        if (ret)
                return ret;
-
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
-
        /*
         * Determine where to move the buffer.
         */
-
-       ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
-                              interruptible, no_wait);
+       ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
        if (ret)
                goto out_unlock;
-
        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
-
 out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&glob->lru_lock);
+               mem.mm_node->private = NULL;
                drm_mm_put_block(mem.mm_node);
                spin_unlock(&glob->lru_lock);
        }
        return ret;
 }
 
-static int ttm_bo_mem_compat(uint32_t proposed_placement,
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
                             struct ttm_mem_reg *mem)
 {
-       if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
-               return 0;
-       if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
-               return 0;
+       int i, m = 0;
 
-       return 1;
+       if ((placement->flags & mem->placement & TTM_PL_MASK_CACHING) == 0)
+               return 0;
+       for (i = 1; i <= (placement->placements & 0xF); i++) {
+               m |= (1 << ((placement->placements >> (i * 4)) & 0xF)) &
+                       mem->placement;
+       }
+       return !!m;
 }
 
 int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
-                              uint32_t proposed_placement,
-                              bool interruptible, bool no_wait)
+                               struct ttm_placement *placement,
+                               bool interruptible, bool no_wait)
 {
-       int ret;
+       int i, ret;
 
        BUG_ON(!atomic_read(&bo->reserved));
-       bo->proposed_placement = proposed_placement;
-
-       TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
-                 (unsigned long)proposed_placement,
-                 (unsigned long)bo->mem.placement);
-
+       /* Check that range is valid */
+       if (placement->lpfn || placement->fpfn)
+               if (placement->fpfn > placement->lpfn ||
+                       (placement->lpfn - placement->fpfn) < bo->num_pages)
+                       return -EINVAL;
+       for (i = 1; i <= (placement->placements & 0xF); i++) {
+               ret = (placement->placements >> (i * 4)) & 0xF;
+               if (ret == TTM_PL_SWAPPED)
+                       return -EINVAL;
+       }
        /*
         * Check whether we need to move buffer.
         */
-
-       if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
-               ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
-                                        interruptible, no_wait);
-               if (ret) {
-                       if (ret != -ERESTART)
-                               printk(KERN_ERR TTM_PFX
-                                      "Failed moving buffer. "
-                                      "Proposed placement 0x%08x\n",
-                                      bo->proposed_placement);
-                       if (ret == -ENOMEM)
-                               printk(KERN_ERR TTM_PFX
-                                      "Out of aperture space or "
-                                      "DRM memory quota.\n");
+       if (!ttm_bo_mem_compat(placement, &bo->mem)) {
+               ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+               if (ret)
                        return ret;
-               }
        }
-
        /*
         * We might need to add a TTM.
         */
-
        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
@@ -990,10 +965,8 @@ int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
         * non-mapping-related flag bits from the proposed flags to
         * the active flags
         */
-
-       ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
+       ttm_flag_masked(&bo->mem.placement, placement->flags,
                        ~TTM_PL_MASK_MEMTYPE);
-
        return 0;
 }
 EXPORT_SYMBOL(ttm_buffer_object_validate);
@@ -1041,8 +1014,9 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
                           size_t acc_size,
                           void (*destroy) (struct ttm_buffer_object *))
 {
-       int ret = 0;
+       int i, c, ret = 0;
        unsigned long num_pages;
+       struct ttm_placement placement;
 
        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1099,7 +1073,18 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
                        goto out_err;
        }
 
-       ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+       placement.fpfn = 0;
+       placement.lpfn = 0;
+       placement.placements = 0;
+       for (i = 0, c = 0; i < TTM_PL_SWAPPED; i++)
+               if (flags & (1 << i))
+                       placement.placements |= i << ((++c) * 4);
+       placement.placements |= c;
+       /* Flags specify no memory type, so ask for system ram */
+       if (!c)
+               placement.placements = 1;
+       placement.flags = flags;
+       ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
        if (ret)
                goto out_err;
 
@@ -1134,8 +1119,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
                             struct ttm_buffer_object **p_bo)
 {
        struct ttm_buffer_object *bo;
-       int ret;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+       int ret;
 
        size_t acc_size =
            ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1160,66 +1145,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
        return ret;
 }
 
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
-                            uint32_t mem_type, bool allow_errors)
-{
-       int ret;
-
-       spin_lock(&bo->lock);
-       ret = ttm_bo_wait(bo, false, false, false);
-       spin_unlock(&bo->lock);
-
-       if (ret && allow_errors)
-               goto out;
-
-       if (bo->mem.mem_type == mem_type)
-               ret = ttm_bo_evict(bo, mem_type, false, false);
-
-       if (ret) {
-               if (allow_errors) {
-                       goto out;
-               } else {
-                       ret = 0;
-                       printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
-               }
-       }
-
-out:
-       return ret;
-}
-
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-                                  struct list_head *head,
-                                  unsigned mem_type, bool allow_errors)
+                                       unsigned mem_type, bool allow_errors)
 {
+       struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_bo_global *glob = bdev->glob;
-       struct ttm_buffer_object *entry;
        int ret;
-       int put_count;
 
        /*
         * Can't use standard list traversal since we're unlocking.
         */
 
        spin_lock(&glob->lru_lock);
-
-       while (!list_empty(head)) {
-               entry = list_first_entry(head, struct ttm_buffer_object, lru);
-               kref_get(&entry->list_kref);
-               ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
-               put_count = ttm_bo_del_from_lru(entry);
+       while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
-               while (put_count--)
-                       kref_put(&entry->list_kref, ttm_bo_ref_bug);
-               BUG_ON(ret);
-               ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
-               ttm_bo_unreserve(entry);
-               kref_put(&entry->list_kref, ttm_bo_release_list);
+               ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+               if (ret) {
+                       if (allow_errors) {
+                               return ret;
+                       } else {
+                               printk(KERN_ERR TTM_PFX
+                                       "Cleanup eviction failed\n");
+                       }
+               }
                spin_lock(&glob->lru_lock);
        }
-
        spin_unlock(&glob->lru_lock);
-
        return 0;
 }
 
@@ -1246,7 +1197,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 
        ret = 0;
        if (mem_type > 0) {
-               ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+               ttm_bo_force_list_clean(bdev, mem_type, false);
 
                spin_lock(&glob->lru_lock);
                if (drm_mm_clean(&man->manager))
@@ -1279,12 +1230,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
                return 0;
        }
 
-       return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+       return ttm_bo_force_list_clean(bdev, mem_type, true);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-                  unsigned long p_offset, unsigned long p_size)
+                       unsigned long p_size)
 {
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;
@@ -1314,7 +1265,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                               type);
                        return ret;
                }
-               ret = drm_mm_init(&man->manager, p_offset, p_size);
+               ret = drm_mm_init(&man->manager, 0, p_size);
                if (ret)
                        return ret;
        }
@@ -1463,7 +1414,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
-       ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+       ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
        if (unlikely(ret != 0))
                goto out_no_sys;
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 4911461..76cc92c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -44,6 +44,28 @@ struct ttm_bo_device;
 
 struct drm_mm_node;
 
+
+/**
+ * struct ttm_placement
+ *
+ * @fpfn:              first valid page frame number to put the object
+ * @lpfn:              last valid page frame number to put the object
+ * @placements:                preferred placements, 4 bits per placement,
+ * first 4 bits are the number of placements
+ * @busy_placements:   preferred placements when eviction is needed,
+ * 4 bits per placement, first 4 bits are the number of placements
+ * @flags:     Additional placement flags
+ *
+ * Structure indicating the placement you request for an object.
+ */
+struct ttm_placement {
+       unsigned        fpfn;
+       unsigned        lpfn;
+       u64             placements;
+       u32             flags;
+};
+
+
 /**
  * struct ttm_mem_reg
  *
@@ -177,7 +199,7 @@ struct ttm_buffer_object {
         * Members protected by the bo::reserved lock.
         */
 
-       uint32_t proposed_placement;
+//     uint32_t proposed_placement;
        struct ttm_mem_reg mem;
        struct file *persistant_swap_storage;
        struct ttm_tt *ttm;
@@ -293,21 +315,22 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool 
lazy,
  * ttm_buffer_object_validate
  *
  * @bo: The buffer object.
- * @proposed_placement: Proposed_placement for the buffer object.
+ * @placement: Proposed placement for the buffer object.
  * @interruptible: Sleep interruptible if sleeping.
  * @no_wait: Return immediately if the buffer is busy.
  *
  * Changes placement and caching policy of the buffer object
  * according to bo::proposed_flags.
  * Returns
- * -EINVAL on invalid proposed_flags.
+ * -EINVAL on invalid proposed placement.
  * -ENOMEM on out-of-memory condition.
  * -EBUSY if no_wait is true and buffer busy.
  * -ERESTART if interrupted by a signal.
  */
 extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
-                                     uint32_t proposed_placement,
-                                     bool interruptible, bool no_wait);
+                                       struct ttm_placement *placement,
+                                       bool interruptible, bool no_wait);
+
 /**
  * ttm_bo_unref
  *
@@ -445,7 +468,6 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object 
*bo,
  *
  * @bdev: Pointer to a ttm_bo_device struct.
  * @mem_type: The memory type.
- * @p_offset: offset for managed area in pages.
  * @p_size: size managed area in pages.
  *
  * Initialize a manager for a given memory type.
@@ -458,7 +480,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object 
*bo,
  */
 
 extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-                         unsigned long p_offset, unsigned long p_size);
+                               unsigned long p_size);
 /**
  * ttm_bo_clean_mm
  *
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e8cd6d2..1dd1dbc 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -242,12 +242,6 @@ struct ttm_mem_type_manager {
 /**
  * struct ttm_bo_driver
  *
- * @mem_type_prio: Priority array of memory types to place a buffer object in
- * if it fits without evicting buffers from any of these memory types.
- * @mem_busy_prio: Priority array of memory types to place a buffer object in
- * if it needs to evict buffers to make room.
- * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
- * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
  * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
  * @invalidate_caches: Callback to invalidate read caches when a buffer object
  * has been evicted.
@@ -265,11 +259,6 @@ struct ttm_mem_type_manager {
  */
 
 struct ttm_bo_driver {
-       const uint32_t *mem_type_prio;
-       const uint32_t *mem_busy_prio;
-       uint32_t num_mem_type_prio;
-       uint32_t num_mem_busy_prio;
-
        /**
         * struct ttm_bo_driver member create_ttm_backend_entry
         *
@@ -306,7 +295,8 @@ struct ttm_bo_driver {
         * finished, they'll end up in bo->mem.flags
         */
 
-        uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
+        void(*evict_flags) (struct ttm_buffer_object *bo,
+                               struct ttm_placement *placement);
        /**
         * struct ttm_bo_driver member move:
         *
@@ -642,9 +632,9 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
  * -ERESTART: An interruptible sleep was interrupted by a signal.
  */
 extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
-                           uint32_t proposed_placement,
-                           struct ttm_mem_reg *mem,
-                           bool interruptible, bool no_wait);
+                               struct ttm_placement *placement,
+                               struct ttm_mem_reg *mem,
+                               bool interruptible, bool no_wait);
 /**
  * ttm_bo_wait_for_cpu
  *
-- 
1.6.5.2

