[PATCH] drm/ttm: ttm_fault callback to allow driver to handle bo placement V3

2010-03-24 Thread Jerome Glisse
On fault the driver is given the opportunity to perform any operation
it sees fit in order to place the buffer into a CPU visible area of
memory. This patch doesn't break TTM users; nouveau, vmwgfx and radeon
should keep working properly. A future patch will take advantage of this
infrastructure and remove the old path from TTM once drivers are
converted.

V2 return VM_FAULT_NOPAGE if callback return -EBUSY or -ERESTARTSYS
V3 balance io_mem_reserve and io_mem_free call, fault_reserve_notify
   is responsible to perform any necessary task for mapping to succeed

Signed-off-by: Jerome Glisse jgli...@redhat.com
---
 drivers/gpu/drm/ttm/ttm_bo.c  |7 ++-
 drivers/gpu/drm/ttm/ttm_bo_util.c |   95 ++---
 drivers/gpu/drm/ttm/ttm_bo_vm.c   |   46 --
 include/drm/ttm/ttm_bo_api.h  |   21 
 include/drm/ttm/ttm_bo_driver.h   |   16 ++-
 5 files changed, 108 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6f51b30..3131416 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -632,6 +632,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool 
interruptible,
 
	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
+	atomic_set(&evict_mem.bus.use_count, 0);
 
placement.fpfn = 0;
placement.lpfn = 0;
@@ -1005,6 +1006,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
+	atomic_set(&mem.bus.use_count, 0);
/*
 * Determine where to move the buffer.
 */
@@ -1147,6 +1149,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
+	atomic_set(&bo->mem.bus.use_count, 0);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(bo-ddestroy);
@@ -1574,7 +1577,7 @@ int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
	if (ttm_mem_reg_is_pci(bdev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
-		*bus_base = man->io_offset;
+		*bus_base = man->io_offset + (uintptr_t)man->io_addr;
}
 
return 0;
@@ -1588,8 +1591,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
	if (!bdev->dev_mapping)
		return;
-
	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+	ttm_mem_io_free(bdev, &bo->mem);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c 
b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 865b2a8..f8ed48e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -81,30 +81,56 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	int ret;
+
+	if (bdev->driver->io_mem_reserve) {
+		if (!atomic_xchg(&mem->bus.use_count, 1)) {
+			ret = bdev->driver->io_mem_reserve(bdev, mem);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+	} else {
+		ret = ttm_bo_pci_offset(bdev, mem, &mem->bus.base,
+					&mem->bus.offset, &mem->bus.size);
+		if (unlikely(ret != 0))
+			return ret;
+		mem->bus.is_iomem = (mem->bus.size > 0) ? 1 : 0;
+	}
+	return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	if (bdev->driver->io_mem_reserve) {
+		atomic_set(&mem->bus.use_count, 0);
+		bdev->driver->io_mem_free(bdev, mem);
+	}
+}
+
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
		void **virtual)
 {
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
	int ret;
	void *addr;

	*virtual = NULL;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-	if (ret || bus_size == 0)
+	ret = ttm_mem_io_reserve(bdev, mem);
+	if (ret)
		return ret;

-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-	else {
+	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+		addr = (void *)(mem->bus.base + mem->bus.offset);
+	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
-			addr = ioremap_wc(bus_base + bus_offset, bus_size);
+			addr = ioremap_wc(mem->bus.base + mem->bus.offset,
+					  mem->bus.size);
		else
-

Re: [PATCH] drm/ttm: ttm_fault callback to allow driver to handle bo placement V3

2010-03-24 Thread Thomas Hellstrom
Jerome Glisse wrote:
 On fault the driver is given the opportunity to perform any operation
 it sees fit in order to place the buffer into a CPU visible area of
 memory. This patch doesn't break TTM users, nouveau, vmwgfx and radeon
 should keep working properly. Future patch will take advantage of this
 infrastructure and remove the old path from TTM once driver are
 converted.

 V2 return VM_FAULT_NOPAGE if callback return -EBUSY or -ERESTARTSYS
 V3 balance io_mem_reserve and io_mem_free call, fault_reserve_notify
is responsible to perform any necessary task for mapping to succeed

 Signed-off-by: Jerome Glisse jgli...@redhat.com
 ---
  drivers/gpu/drm/ttm/ttm_bo.c  |7 ++-
  drivers/gpu/drm/ttm/ttm_bo_util.c |   95 
 ++---
  drivers/gpu/drm/ttm/ttm_bo_vm.c   |   46 --
  include/drm/ttm/ttm_bo_api.h  |   21 
  include/drm/ttm/ttm_bo_driver.h   |   16 ++-
  5 files changed, 108 insertions(+), 77 deletions(-)

 @@ -1588,8 +1591,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
  
   if (!bdev-dev_mapping)
   return;
 -
   

Still a lot of these. Please remove.
  
 +int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 +{
 + int ret;
 +
 + if (bdev-driver-io_mem_reserve) {
 + if (!atomic_xchg(mem-bus.use_count, 1)) {
 + ret = bdev-driver-io_mem_reserve(bdev, mem);
 + if (unlikely(ret != 0))
 + return ret;
 + }
 + } else {
 + ret = ttm_bo_pci_offset(bdev, mem, mem-bus.base, 
 mem-bus.offset, mem-bus.size);
 + if (unlikely(ret != 0))
 + return ret;
 + mem-bus.is_iomem = (mem-bus.size  0) ? 1 : 0;
 + }
 + return 0;
 +}
 +
 +void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 +{
 + if (bdev-driver-io_mem_reserve) {
 + atomic_set(mem-bus.use_count, 0);
   

Shouldn't there be a test for zero before calling this?
 + bdev-driver-io_mem_free(bdev, mem);
 + }
 +}
 +
   

Hmm. I don't get the logic of the above refcounting. First, the kernel 
can preempt between refcounting and driver calls:

Thread a sets use_count to 1 and preempts.
Thread b sees use_count 1 and calls ttm_bo_pci_offset(), but we haven't 
yet done an io_mem_reserve??

Otoh, from the last section it seems we always will hold bo::reserved 
around these calls, so instead we could make use_count a non-atomic 
variable.

Then, for the rest please consider the following use cases:

1) We want to temporarily map a bo within the kernel. We do:
reserve_bo().
make_mappable().
kmap().
kunmap().
free_mappable_resources().  // This is just a hint. When the bo is 
unreserved, the manager is free to evict the mappable region.
unreserve_bo().

2) We want to permanently map a bo within the kernel (kernel map for fbdev).
We do (bo is not reserved).
pin_mappable().
kmap().
access
kunmap().
unpin_mappable().

3) Fault.
reserve_bo().
make_mappable().
set_up user_space_map().
unreserve_bo().

/// Here the manager is free to evict the mappable range by reserving 
and then calling ttm_bo_unmap_virtual().

4) Unmap Virtual. (Called reserved).
unmap_user_space_mappings().
free_mappable_resources().

It looks to me like you've implemented make_mappable() = 
ttm_mem_io_reserve() and free_mappable_resources() = ttm_mem_io_free(), 
and from the above use cases we can see that they always will be called 
when the bo is reserved. Hence no need for an atomic variable and we can 
ignore the race scenarios above.

but what about pin_mappable() and unpin_mappable()? A general mapping 
manager must be able to perform these operations. Perhaps



Finally, consider a very simple mapping manager that uses the two ttm 
regions VRAM and VRAM_MAPPABLE. We'd implement the driver operations as 
follows, assuming we add io_mem_pin and io_mem_unpin:

io_mem_reserve:
ttm_bo_validate(VRAM_MAPPABLE); (Evicting as needed).

io_mem_unreserve:
noop().

io_mem_pin:
ttm_bo_reserve()
if (pin_count++ == 0)
   ttm_bo_validate(VRAM_MAPPABLE | NO_EVICT);
ttm_bo_unreserve()

io_mem_unpin:
ttm_bo_reserve()
if (--pin_count == 0)
   ttm_bo_validate(VRAM_MAPPABLE);
ttm_bo_unreserve()

This simple mapping manager would need a struct ttm_buffer_object as an 
argument. Not just the mem argument.

/Thomas




--
Download Intel#174; Parallel Studio Eval
Try the new software tools for yourself. Speed compiling, find bugs
proactively, and fine-tune applications for parallel performance.
See why Intel Parallel Studio got high marks during beta.
http://p.sf.net/sfu/intel-sw-dev
--
___
Dri-devel mailing list
Dri-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/dri-devel


Re: [PATCH] drm/ttm: ttm_fault callback to allow driver to handle bo placement V3

2010-03-24 Thread Jerome Glisse
On Wed, Mar 24, 2010 at 07:27:57PM +0100, Thomas Hellstrom wrote:
 Jerome Glisse wrote:
  On fault the driver is given the opportunity to perform any operation
  it sees fit in order to place the buffer into a CPU visible area of
  memory. This patch doesn't break TTM users, nouveau, vmwgfx and radeon
  should keep working properly. Future patch will take advantage of this
  infrastructure and remove the old path from TTM once driver are
  converted.
 
  V2 return VM_FAULT_NOPAGE if callback return -EBUSY or -ERESTARTSYS
  V3 balance io_mem_reserve and io_mem_free call, fault_reserve_notify
 is responsible to perform any necessary task for mapping to succeed
 
  Signed-off-by: Jerome Glisse jgli...@redhat.com
  ---
   drivers/gpu/drm/ttm/ttm_bo.c  |7 ++-
   drivers/gpu/drm/ttm/ttm_bo_util.c |   95 
  ++---
   drivers/gpu/drm/ttm/ttm_bo_vm.c   |   46 --
   include/drm/ttm/ttm_bo_api.h  |   21 
   include/drm/ttm/ttm_bo_driver.h   |   16 ++-
   5 files changed, 108 insertions(+), 77 deletions(-)
 
  @@ -1588,8 +1591,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object 
  *bo)
   
  if (!bdev-dev_mapping)
  return;
  -

 
 Still a lot of these. Please remove.
   
  +int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
  +{
  +   int ret;
  +
  +   if (bdev-driver-io_mem_reserve) {
  +   if (!atomic_xchg(mem-bus.use_count, 1)) {
  +   ret = bdev-driver-io_mem_reserve(bdev, mem);
  +   if (unlikely(ret != 0))
  +   return ret;
  +   }
  +   } else {
  +   ret = ttm_bo_pci_offset(bdev, mem, mem-bus.base, 
  mem-bus.offset, mem-bus.size);
  +   if (unlikely(ret != 0))
  +   return ret;
  +   mem-bus.is_iomem = (mem-bus.size  0) ? 1 : 0;
  +   }
  +   return 0;
  +}
  +
  +void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
  +{
  +   if (bdev-driver-io_mem_reserve) {
  +   atomic_set(mem-bus.use_count, 0);

 
 Shouldn't there be a test for zero before calling this?
  +   bdev-driver-io_mem_free(bdev, mem);
  +   }
  +}
  +

 
 Hmm. I don't get the logic of the above refcounting. First, the kernel 
 can preempt between refcounting and driver calls:
 
 Thread a sets use_count to 1 and preempts.
 Thread b sees use_count 1 and calls ttm_bo_pci_offset(), but we haven't 
 yet done an io_mem_reserve??
 
 Otoh, from the last section it seems we always will hold bo::reserved 
 around these calls, so instead we could make use_count a non-atomic 
 variable.
 

use_count could become a bool io_reserved; it's atomic only for patch-historical
reasons. I will respin the patch without the atomic and with the blank lines removed.

 Then, for the rest please consider the following use cases:
 
 1) We want to temporarily map a bo within the kernel. We do:
 reserve_bo().
 make_mappable().
 kmap().
 kunmap().
 free_mappable_resources().  // This is just a hint. When the bo is 
 unreserved, the manager is free to evict the mappable region.
 unreserve_bo().
 
 2) We want to permanently map a bo within the kernel (kernel map for fbdev).
 We do (bo is not reserved).
 pin_mappable().
 kmap().
 access
 kunmap().
 unpin_mappable().
 
 3) Fault.
 reserve_bo().
 make_mappable().
 set_up user_space_map().
 unreserve_bo().
 
 /// Here the manager is free to evict the mappable range by reserving 
 and then calling ttm_bo_unmap_virtual().
 
 4) Unmap Virtual. (Called reserved).
 unmap_user_space_mappings().
 free_mappable_resources().
 
 It looks to me like you've implemented make_mappable() = 
 ttm_mem_io_reserve() and free_mappable_resources() = ttm_mem_io_free(), 
 and from the above use cases we can see that they always will be called 
 when the bo is reserved. Hence no need for an atomic variable and we can 
 ignore the race scenarios above.
 
 but what about pin_mappable() and unpin_mappable()? A general mapping 
 manager must be able to perform these operations. Perhaps

The idea is that a buffer which will be mapped the whole time will also be
set no-evict, so unmap_virtual is never called on it (at least
that is my feeling from the code). So io_mem_reserve also works for
pinned buffers, and I don't separate the pinned/not-pinned case from
the driver io manager (if the driver has one).

 
 
 Finally, consider a very simple mapping manager that uses the two ttm 
 regions VRAM and VRAM_MAPPABLE. We'd implement the driver operations as 
 follows, assuming we add io_mem_pin and io_mem_unpin:
 
 io_mem_reserve:
 ttm_bo_validate(VRAM_MAPPABLE); (Evicting as needed).
 
 io_mem_unreserve:
 noop().
 
 io_mem_pin:
 ttm_bo_reserve()
 if (pin_count++ == 0)
ttm_bo_validate(VRAM_MAPPABLE | NO_EVICT);
 ttm_bo_unreserve()
 
 io_mem_unpin:
 ttm_bo_reserve()
 if (--pin_count == 0)
ttm_bo_validate(VRAM_MAPPABLE);
 ttm_bo_unreserve()
 
 This simple mapping manager would need a struct ttm_buffer_object as an 
 

Re: [PATCH] drm/ttm: ttm_fault callback to allow driver to handle bo placement V3

2010-03-24 Thread Thomas Hellstrom
Jerome Glisse wrote:
 On Wed, Mar 24, 2010 at 07:27:57PM +0100, Thomas Hellstrom wrote:
   
 Jerome Glisse wrote:
 
 On fault the driver is given the opportunity to perform any operation
 it sees fit in order to place the buffer into a CPU visible area of
 memory. This patch doesn't break TTM users, nouveau, vmwgfx and radeon
 should keep working properly. Future patch will take advantage of this
 infrastructure and remove the old path from TTM once driver are
 converted.

 V2 return VM_FAULT_NOPAGE if callback return -EBUSY or -ERESTARTSYS
 V3 balance io_mem_reserve and io_mem_free call, fault_reserve_notify
is responsible to perform any necessary task for mapping to succeed

 Signed-off-by: Jerome Glisse jgli...@redhat.com
 ---
  drivers/gpu/drm/ttm/ttm_bo.c  |7 ++-
  drivers/gpu/drm/ttm/ttm_bo_util.c |   95 
 ++---
  drivers/gpu/drm/ttm/ttm_bo_vm.c   |   46 --
  include/drm/ttm/ttm_bo_api.h  |   21 
  include/drm/ttm/ttm_bo_driver.h   |   16 ++-
  5 files changed, 108 insertions(+), 77 deletions(-)

 @@ -1588,8 +1591,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object 
 *bo)
  
 if (!bdev-dev_mapping)
 return;
 -
   
   
 Still a lot of these. Please remove.
 
  
 +int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 +{
 +   int ret;
 +
 +   if (bdev-driver-io_mem_reserve) {
 +   if (!atomic_xchg(mem-bus.use_count, 1)) {
 +   ret = bdev-driver-io_mem_reserve(bdev, mem);
 +   if (unlikely(ret != 0))
 +   return ret;
 +   }
 +   } else {
 +   ret = ttm_bo_pci_offset(bdev, mem, mem-bus.base, 
 mem-bus.offset, mem-bus.size);
 +   if (unlikely(ret != 0))
 +   return ret;
 +   mem-bus.is_iomem = (mem-bus.size  0) ? 1 : 0;
 +   }
 +   return 0;
 +}
 +
 +void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 +{
 +   if (bdev-driver-io_mem_reserve) {
 +   atomic_set(mem-bus.use_count, 0);
   
   
 Shouldn't there be a test for zero before calling this?
 
 +   bdev-driver-io_mem_free(bdev, mem);
 +   }
 +}
 +
   
   
 Hmm. I don't get the logic of the above refcounting. First, the kernel 
 can preempt between refcounting and driver calls:

 Thread a sets use_count to 1 and preempts.
 Thread b sees use_count 1 and calls ttm_bo_pci_offset(), but we haven't 
 yet done an io_mem_reserve??

 Otoh, from the last section it seems we always will hold bo::reserved 
 around these calls, so instead we could make use_count a non-atomic 
 variable.

 

 use_count could become bool io_reserved it's atomic for patch historical
 reason, will respawn a patch without atomic and blank line removal.
   

Great. Please also consider the test for 0 on unreserve.

   
 Then, for the rest please consider the following use cases:

 1) We want to temporarily map a bo within the kernel. We do:
 reserve_bo().
 make_mappable().
 kmap().
 kunmap().
 free_mappable_resources().  // This is just a hint. When the bo is 
 unreserved, the manager is free to evict the mappable region.
 unreserve_bo().

 2) We want to permanently map a bo within the kernel (kernel map for fbdev).
 We do (bo is not reserved).
 pin_mappable().
 kmap().
 access
 kunmap().
 unpin_mappable().

 3) Fault.
 reserve_bo().
 make_mappable().
 set_up user_space_map().
 unreserve_bo().

 /// Here the manager is free to evict the mappable range by reserving 
 and then calling ttm_bo_unmap_virtual().

 4) Unmap Virtual. (Called reserved).
 unmap_user_space_mappings().
 free_mappable_resources().

 It looks to me like you've implemented make_mappable() = 
 ttm_mem_io_reserve() and free_mappable_resources() = ttm_mem_io_free(), 
 and from the above use cases we can see that they always will be called 
 when the bo is reserved. Hence no need for an atomic variable and we can 
 ignore the race scenarios above.

 but what about pin_mappable() and unpin_mappable()? A general mapping 
 manager must be able to perform these operations. Perhaps
 

 Idea is that buffer that will be mapped the whole time will also be
 set with no evict so unmap virtual is never call on them (at least
 that is my feeling from the code). So iomeme_reserve works also for
 pinned buffer and i don't separate the pined/not pinned case from
 the driver io manager (if driver has any).
   

Yes, That's the case for simple io managers, where the mappable range is 
simply a (sub)TTM region. Then TTM NO_EVICT is equivalent to io manager 
NO_EVICT. However, if the IO manager is not a TTM region manager but 
something the driver implements with its own LRU list, the IO manager 
must be informed about this. Admitted, we have no driver like this yet, 
and the common code won't be using that API, so I'm OK with leaving it 
for now. I might add a comment about this close to the io manager hooks 
later on.

   
 Finally, consider a 

Re: [PATCH] drm/ttm: ttm_fault callback to allow driver to handle bo placement V3

2010-03-24 Thread Jerome Glisse
On Wed, Mar 24, 2010 at 09:08:08PM +0100, Thomas Hellstrom wrote:
 Jerome Glisse wrote:
 On Wed, Mar 24, 2010 at 07:27:57PM +0100, Thomas Hellstrom wrote:
 Jerome Glisse wrote:
 On fault the driver is given the opportunity to perform any operation
 it sees fit in order to place the buffer into a CPU visible area of
 memory. This patch doesn't break TTM users, nouveau, vmwgfx and radeon
 should keep working properly. Future patch will take advantage of this
 infrastructure and remove the old path from TTM once driver are
 converted.
 
 V2 return VM_FAULT_NOPAGE if callback return -EBUSY or -ERESTARTSYS
 V3 balance io_mem_reserve and io_mem_free call, fault_reserve_notify
is responsible to perform any necessary task for mapping to succeed
 
 Signed-off-by: Jerome Glisse jgli...@redhat.com
 ---
  drivers/gpu/drm/ttm/ttm_bo.c  |7 ++-
  drivers/gpu/drm/ttm/ttm_bo_util.c |   95 
  ++---
  drivers/gpu/drm/ttm/ttm_bo_vm.c   |   46 --
  include/drm/ttm/ttm_bo_api.h  |   21 
  include/drm/ttm/ttm_bo_driver.h   |   16 ++-
  5 files changed, 108 insertions(+), 77 deletions(-)
 
 @@ -1588,8 +1591,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object 
 *bo)
if (!bdev-dev_mapping)
return;
 -
 Still a lot of these. Please remove.
 +int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg 
 *mem)
 +{
 +  int ret;
 +
 +  if (bdev-driver-io_mem_reserve) {
 +  if (!atomic_xchg(mem-bus.use_count, 1)) {
 +  ret = bdev-driver-io_mem_reserve(bdev, mem);
 +  if (unlikely(ret != 0))
 +  return ret;
 +  }
 +  } else {
 +  ret = ttm_bo_pci_offset(bdev, mem, mem-bus.base, 
 mem-bus.offset, mem-bus.size);
 +  if (unlikely(ret != 0))
 +  return ret;
 +  mem-bus.is_iomem = (mem-bus.size  0) ? 1 : 0;
 +  }
 +  return 0;
 +}
 +
 +void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 +{
 +  if (bdev-driver-io_mem_reserve) {
 +  atomic_set(mem-bus.use_count, 0);
 Shouldn't there be a test for zero before calling this?
 +  bdev-driver-io_mem_free(bdev, mem);
 +  }
 +}
 +
 Hmm. I don't get the logic of the above refcounting. First, the
 kernel can preempt between refcounting and driver calls:
 
 Thread a sets use_count to 1 and preempts.
 Thread b sees use_count 1 and calls ttm_bo_pci_offset(), but we
 haven't yet done an io_mem_reserve??
 
 Otoh, from the last section it seems we always will hold
 bo::reserved around these calls, so instead we could make
 use_count a non-atomic variable.
 
 
 use_count could become bool io_reserved it's atomic for patch historical
 reason, will respawn a patch without atomic and blank line removal.
 
 Great. Please also consider the test for 0 on unreserve.
 
 Then, for the rest please consider the following use cases:
 
 1) We want to temporarily map a bo within the kernel. We do:
 reserve_bo().
 make_mappable().
 kmap().
 kunmap().
 free_mappable_resources().  // This is just a hint. When the bo
 is unreserved, the manager is free to evict the mappable region.
 unreserve_bo().
 
 2) We want to permanently map a bo within the kernel (kernel map for fbdev).
 We do (bo is not reserved).
 pin_mappable().
 kmap().
 access
 kunmap().
 unpin_mappable().
 
 3) Fault.
 reserve_bo().
 make_mappable().
 set_up user_space_map().
 unreserve_bo().
 
 /// Here the manager is free to evict the mappable range by
 reserving and then calling ttm_bo_unmap_virtual().
 
 4) Unmap Virtual. (Called reserved).
 unmap_user_space_mappings().
 free_mappable_resources().
 
 It looks to me like you've implemented make_mappable() =
 ttm_mem_io_reserve() and free_mappable_resources() =
 ttm_mem_io_free(), and from the above use cases we can see that
 they always will be called when the bo is reserved. Hence no
 need for an atomic variable and we can ignore the race scenarios
 above.
 
 but what about pin_mappable() and unpin_mappable()? A general
 mapping manager must be able to perform these operations.
 Perhaps
 
 Idea is that buffer that will be mapped the whole time will also be
 set with no evict so unmap virtual is never call on them (at least
 that is my feeling from the code). So iomeme_reserve works also for
 pinned buffer and i don't separate the pined/not pinned case from
 the driver io manager (if driver has any).
 
 Yes, That's the case for simple io managers, where the mappable
 range is simply a (sub)TTM region. Then TTM NO_EVICT is equivalent
 to io manager NO_EVICT. However, if the IO manager is not a TTM
 region manager but something the driver implements with its own LRU
 list, the IO manager must be informed about this. Admitted, we have
 no driver like this yet, and the common code won't be using that
 API, so I'm OK with leaving it for now. I might add a comment about
 this close to the io manager hooks later on.
 
 Finally, consider a very simple mapping manager