Re: [PATCH 05/13] drm/ttm: ttm_fault callback to allow driver to handle bo placement V4

2010-04-05 Thread Thomas Hellstrom
Jerome Glisse wrote:
 On fault the driver is given the opportunity to perform any operation
 it sees fit in order to place the buffer into a CPU visible area of
 memory. This patch doesn't break TTM users; nouveau, vmwgfx and radeon
 should keep working properly. A future patch will take advantage of this
 infrastructure and remove the old path from TTM once drivers are
 converted.

 V2 return VM_FAULT_NOPAGE if the callback returns -EBUSY or -ERESTARTSYS
 V3 balance io_mem_reserve and io_mem_free calls; fault_reserve_notify
    is responsible for performing any task necessary for the mapping to succeed
 V4 minor cleanup, atomic_t -> bool as the member is protected from
    concurrent access by the reserve mechanism

 Signed-off-by: Jerome Glisse jgli...@redhat.com
   

Reviewed-by: Thomas Hellstrom thellst...@vmware.com

[PATCH 05/13] drm/ttm: ttm_fault callback to allow driver to handle bo placement V4

2010-03-25 Thread Jerome Glisse
On fault the driver is given the opportunity to perform any operation
it sees fit in order to place the buffer into a CPU visible area of
memory. This patch doesn't break TTM users; nouveau, vmwgfx and radeon
should keep working properly. A future patch will take advantage of this
infrastructure and remove the old path from TTM once drivers are
converted.

V2 return VM_FAULT_NOPAGE if the callback returns -EBUSY or -ERESTARTSYS
V3 balance io_mem_reserve and io_mem_free calls; fault_reserve_notify
   is responsible for performing any task necessary for the mapping to succeed
V4 minor cleanup, atomic_t -> bool as the member is protected from
   concurrent access by the reserve mechanism

Signed-off-by: Jerome Glisse jgli...@redhat.com
---
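For readers new to the interface, a minimal, illustrative sketch of what a
driver-side io_mem_reserve()/io_mem_free() pair could look like for hardware
whose VRAM is CPU visible through a single PCI BAR. This is not part of the
patch: only the callback signatures and the mem->bus fields come from this
series, while struct hypothetical_dev and its members are invented for the
example.

/*
 * Illustrative sketch only -- not part of this patch.  A driver-side
 * io_mem_reserve()/io_mem_free() pair for hardware whose VRAM is CPU
 * visible through a single PCI BAR.  struct hypothetical_dev and its
 * vram_bar_base field are assumptions made for the example.
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

struct hypothetical_dev {			/* assumed driver container */
	struct ttm_bo_device bdev;
	unsigned long vram_bar_base;		/* physical base of the VRAM BAR */
	unsigned long visible_vram_size;	/* CPU visible part of VRAM */
};

static int hypothetical_io_mem_reserve(struct ttm_bo_device *bdev,
				       struct ttm_mem_reg *mem)
{
	struct hypothetical_dev *hdev =
		container_of(bdev, struct hypothetical_dev, bdev);

	/* Note: mem->bus.io_reserved is managed by TTM, not by the driver. */
	mem->bus.offset = 0;
	mem->bus.base = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.is_iomem = false;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System RAM is always CPU accessible, nothing to reserve. */
		return 0;
	case TTM_PL_VRAM:
		/* VRAM is reached through the BAR: report the bus address. */
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		mem->bus.base = hdev->vram_bar_base;
		mem->bus.is_iomem = true;
		return 0;
	default:
		return -EINVAL;
	}
}

static void hypothetical_io_mem_free(struct ttm_bo_device *bdev,
				     struct ttm_mem_reg *mem)
{
	/* Nothing was ioremapped or pinned in reserve, so nothing to undo. */
}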
 drivers/gpu/drm/ttm/ttm_bo.c  |7 ++-
 drivers/gpu/drm/ttm/ttm_bo_util.c |   98 ++--
 drivers/gpu/drm/ttm/ttm_bo_vm.c   |   41 
 include/drm/ttm/ttm_bo_api.h  |   21 
 include/drm/ttm/ttm_bo_driver.h   |   16 ++-
 5 files changed, 111 insertions(+), 72 deletions(-)
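The V2 note above is the interesting part of the fault path: a driver's
fault_reserve_notify() can react to a buffer that is not CPU visible by simply
starting a move and returning the resulting error, since -EBUSY and
-ERESTARTSYS are converted into VM_FAULT_NOPAGE and the fault is retried.
Another rough sketch, again not from this patch: hypothetical_dev_from_bo(),
hypothetical_move_to_gtt() and the visible_vram_size field are assumptions
standing in for driver-specific code (typically a ttm_bo_validate() call with
a GTT placement).

/*
 * Illustrative sketch only -- not part of this patch.  Example
 * fault_reserve_notify() for hardware where only the first part of VRAM
 * is mapped through the PCI BAR.  The helpers and the visible_vram_size
 * field are assumptions standing in for the driver's own code.
 */
static int hypothetical_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct hypothetical_dev *hdev = hypothetical_dev_from_bo(bo);
	unsigned long offset, size;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	offset = bo->mem.mm_node->start << PAGE_SHIFT;
	size = bo->mem.num_pages << PAGE_SHIFT;
	if (offset + size <= hdev->visible_vram_size)
		return 0;	/* already CPU visible, nothing to do */

	/*
	 * Not CPU visible: move the buffer somewhere mappable.  Returning
	 * -EBUSY or -ERESTARTSYS from here makes the TTM fault handler
	 * return VM_FAULT_NOPAGE, so the faulting process simply retries.
	 */
	return hypothetical_move_to_gtt(bo);
}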

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 6f51b30..2171f86 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -632,6 +632,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
	evict_mem = bo->mem;
evict_mem.mm_node = NULL;
+   evict_mem.bus.io_reserved = false;
 
placement.fpfn = 0;
placement.lpfn = 0;
@@ -1005,6 +1006,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
+   mem.bus.io_reserved = false;
/*
 * Determine where to move the buffer.
 */
@@ -1160,6 +1162,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
+   bo->mem.bus.io_reserved = false;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1574,7 +1577,7 @@ int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
if (ttm_mem_reg_is_pci(bdev, mem)) {
	*bus_offset = mem->mm_node->start << PAGE_SHIFT;
	*bus_size = mem->num_pages << PAGE_SHIFT;
-   *bus_base = man->io_offset;
+   *bus_base = man->io_offset + (uintptr_t)man->io_addr;
}
 
return 0;
@@ -1588,8 +1591,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
	if (!bdev->dev_mapping)
return;
-
	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+   ttm_mem_io_free(bdev, &bo->mem);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 865b2a8..878dc49 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -81,30 +81,59 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+   int ret;
+
+   if (bdev->driver->io_mem_reserve) {
+   if (!mem->bus.io_reserved) {
+   mem->bus.io_reserved = true;
+   ret = bdev->driver->io_mem_reserve(bdev, mem);
+   if (unlikely(ret != 0))
+   return ret;
+   }
+   } else {
+   ret = ttm_bo_pci_offset(bdev, mem, &mem->bus.base, &mem->bus.offset, &mem->bus.size);
+   if (unlikely(ret != 0))
+   return ret;
+   mem->bus.is_iomem = (mem->bus.size > 0) ? 1 : 0;
+   }
+   return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+   if (bdev->driver->io_mem_reserve) {
+   if (mem->bus.io_reserved) {
+   mem->bus.io_reserved = false;
+   bdev->driver->io_mem_free(bdev, mem);
+   }
+   }
+}
+
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
 {
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-   unsigned long bus_offset;
-   unsigned long bus_size;
-   unsigned long bus_base;
int ret;
void *addr;
 
*virtual = NULL;
-   ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-   if (ret || bus_size == 0)
+   ret = ttm_mem_io_reserve(bdev, mem);
+   if (ret)
return ret;
 
-   if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-   addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-   else {
+   if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+   addr = (void *)(mem->bus.base + mem->bus.offset);
+   } else {