Here's an updated set of GTT mapping patches against Eric's drm-gem-merge 
branch from earlier today and DRM master for the libdrm bits.  I fixed up a 
couple of bugs, but it looks like UXA/EXA still don't work against GEM even 
w/o these changes applied.  I'm not quite sure about the zap_vma_ptes code, 
that'll need review from one of the MM guys (Nick can you take a look at the 
i915-gtt-mapping patch?).

Other than that, I think the code is looking fairly complete, so at this point 
we just need to stabilize GEM and play with using GTT mapping for the general 
dri_bo_map case to see if we want to do it by default.

Thanks,
-- 
Jesse Barnes, Intel Open Source Technology Center
diff --git a/src/i830_exa.c b/src/i830_exa.c
index fd29df1..8213242 100644
--- a/src/i830_exa.c
+++ b/src/i830_exa.c
@@ -767,7 +767,7 @@ i830_uxa_prepare_access (PixmapPtr pixmap, uxa_access_t access)
 	    I830Sync(scrn);
 	    i830->need_sync = FALSE;
 	}
-	if (dri_bo_map (bo, access == UXA_ACCESS_RW) != 0)
+	if (dri_gem_bo_map_gtt(bo, 4096))
 	    return FALSE;
         pixmap->devPrivate.ptr = bo->virtual;
     }
diff --git a/libdrm/intel/intel_bufmgr.h b/libdrm/intel/intel_bufmgr.h
index c44d596..51b08a4 100644
--- a/libdrm/intel/intel_bufmgr.h
+++ b/libdrm/intel/intel_bufmgr.h
@@ -123,5 +123,7 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo,
 void intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
 void intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr);
 
+int dri_gem_bo_map_gtt(dri_bo *bo, uint32_t alignment);
+
 #endif /* INTEL_BUFMGR_H */
 
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index 97e387f..af90a4f 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -39,6 +39,7 @@
 #endif
 
 #include <xf86drm.h>
+#include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -46,6 +47,8 @@
 #include <assert.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
 
 #include "errno.h"
 #include "intel_bufmgr.h"
@@ -517,6 +520,83 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
     return 0;
 }
 
+int
+dri_gem_bo_map_gtt(dri_bo *bo, uint32_t alignment)
+{
+    dri_bufmgr_gem *bufmgr_gem;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    struct drm_i915_gem_set_domain set_domain;
+    int ret;
+
+    bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+    /* Allow recursive mapping. Mesa may recursively map buffers with
+     * nested display loops.
+     */
+    if (!bo_gem->mapped) {
+
+	assert(bo->virtual == NULL);
+
+	DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
+
+	if (bo_gem->virtual == NULL) {
+		struct drm_i915_gem_mmap_gtt mmap_arg;
+
+		memset(&mmap_arg, 0, sizeof(mmap_arg));
+		mmap_arg.handle = bo_gem->gem_handle;
+		/* Just map the whole object */
+		mmap_arg.offset = 0;
+		mmap_arg.size = bo->size;
+		mmap_arg.alignment = alignment;
+
+		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
+			    &mmap_arg);
+		if (ret != 0) {
+			fprintf(stderr,
+				"%s:%d: Error preparing buffer map %d (%s): %s .\n",
+				__FILE__, __LINE__,
+				bo_gem->gem_handle, bo_gem->name,
+				strerror(errno));
+			return errno;
+		}
+		bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
+				       MAP_SHARED, bufmgr_gem->fd,
+				       mmap_arg.addr_ptr);
+		if (bo_gem->virtual == MAP_FAILED) {
+			bo_gem->virtual = NULL;
+			fprintf(stderr,
+				"%s:%d: Error mapping buffer %d (%s): %s .\n",
+				__FILE__, __LINE__, bo_gem->gem_handle,
+				bo_gem->name, strerror(errno));
+			return errno;
+		}
+	}
+
+	bo->virtual = bo_gem->virtual;
+	bo_gem->swrast = 0;
+	bo_gem->mapped = 1;
+	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+	    bo_gem->virtual);
+    }
+
+    if (!bo_gem->swrast) {
+	set_domain.handle = bo_gem->gem_handle;
+	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+	do {
+	    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+			&set_domain);
+	} while (ret == -1 && errno == EINTR);
+	if (ret != 0) {
+	    fprintf (stderr, "%s:%d: Error setting GTT domain %d: %s\n",
+		     __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
+	}
+	bo_gem->swrast = 1;
+    }
+
+    return 0;
+}
+
 static int
 dri_gem_bo_unmap(dri_bo *bo)
 {
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index bde64b8..9916366 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -262,6 +262,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
 		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
 
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to addmap GEM object\n");
+		break;
 	}
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
@@ -419,6 +422,9 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 		dmah.size = map->size;
 		__drm_pci_free(dev, &dmah);
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
+		break;
 	}
 	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index f934414..43aacca 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -315,6 +315,7 @@ static void drm_cleanup(struct drm_device * dev)
 		dev->driver->unload(dev);
 
 	drm_ht_remove(&dev->map_hash);
+	drm_mm_takedown(&dev->offset_manager);
 	drm_ctxbitmap_cleanup(dev);
 
 	drm_put_minor(&dev->primary);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ccd1afd..9079d88 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -419,3 +419,20 @@ drm_gem_object_handle_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_handle_free);
 
+int
+drm_gem_mmap(struct vm_area_struct *vma, struct file *filp, struct drm_map *map)
+{
+	struct drm_gem_object *obj = map->handle;
+
+	if (!obj->dev->driver->gem_vm_ops)
+		return -EINVAL;
+
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP;
+	vma->vm_ops = obj->dev->driver->gem_vm_ops;
+	vma->vm_private_data = map->handle;
+	vma->vm_file = obj->filp;
+
+	obj->vma = vma;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 3316067..af539f7 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -127,6 +127,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_insert_item);
 
 /*
  * Just insert an item and return any "bits" bit key that hasn't been
@@ -188,6 +189,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	ht->fill--;
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_remove_item);
 
 void drm_ht_remove(struct drm_open_hash *ht)
 {
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 82f4657..498463f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -113,6 +113,12 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 		return -ENOMEM;
 	}
 
+	if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+			DRM_FILE_PAGE_OFFSET_SIZE)) {
+		drm_ht_remove(&dev->map_hash);
+		return -ENOMEM;
+	}
+
 	/* the DRM has 6 basic counters */
 	dev->counters = 6;
 	dev->types[0] = _DRM_STAT_LOCK;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c234c6f..f4dfe8e 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -267,6 +267,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 				dmah.size = map->size;
 				__drm_pci_free(dev, &dmah);
 				break;
+			case _DRM_GEM:
+				DRM_ERROR("tried to rmmap GEM object\n");
+				break;
 			}
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 		}
@@ -547,6 +550,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 	struct drm_map *map = NULL;
 	unsigned long offset = 0;
 	struct drm_hash_item *hash;
+	int ret;
 
 	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
 		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
@@ -566,6 +570,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 	    )
 		return drm_mmap_dma(filp, vma);
 
+	DRM_ERROR("looking for object at 0x%08lx\n", vma->vm_pgoff);
 	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
 		DRM_ERROR("Could not find map\n");
 		return -EINVAL;
@@ -647,6 +652,12 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		vma->vm_flags |= VM_RESERVED;
 		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("found GEM object, setting up vm_ops\n");
+		ret = drm_gem_mmap(vma, filp, map);
+		if (ret)
+			return ret;
+		break;
 	default:
 		return -EINVAL;	/* This should never happen. */
 	}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 93ca81b..0568c0e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -954,6 +954,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e7c05d8..dde8d36 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -539,6 +539,14 @@ static int i915_resume(struct drm_device *dev)
 	return 0;
 }
 
+static struct vm_operations_struct i915_gem_vm_ops = {
+	.open = i915_gem_vm_open,
+	.close = i915_gem_vm_close,
+	.fault = i915_gem_fault,
+	.page_mkwrite = i915_gem_vm_mkwrite,
+	.access = i915_gem_vm_access,
+};
+
 static struct drm_driver driver = {
 	/* don't use mtrr's here, the Xserver or user space app should
 	 * deal with them for intel hardware.
@@ -569,6 +577,7 @@ static struct drm_driver driver = {
 	.proc_cleanup = i915_gem_proc_cleanup,
 	.gem_init_object = i915_gem_init_object,
 	.gem_free_object = i915_gem_free_object,
+	.gem_vm_ops = &i915_gem_vm_ops,
 	.ioctls = i915_ioctls,
 	.fops = {
 		 .owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5e64b21..7135566 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -350,6 +350,8 @@ struct drm_i915_gem_object {
 	 * This is the same as gtt_space->start
 	 */
 	uint32_t gtt_offset;
+	uint32_t gtt_alignment;
+	uint64_t mmap_offset;
 
 	/** Boolean whether this object has a valid gtt offset. */
 	int gtt_bound;
@@ -466,6 +468,8 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv);
 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -500,6 +504,12 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev);
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
+void i915_gem_vm_open(struct vm_area_struct *vma);
+void i915_gem_vm_close(struct vm_area_struct *vma);
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int i915_gem_vm_mkwrite(struct vm_area_struct *vma, struct page *page);
+int i915_gem_vm_access(struct vm_area_struct *vma, unsigned long addr,
+		       void *buf, int len, int write);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 29d9d21..4965b2b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -30,6 +30,7 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include <linux/swap.h>
+#include <linux/pci.h>
 
 static int
 i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -49,6 +50,8 @@ i915_gem_set_domain(struct drm_gem_object *obj,
 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+				       unsigned alignment);
 
 int
 i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -467,6 +470,91 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+void i915_gem_vm_open(struct vm_area_struct *vma)
+{
+}
+
+void i915_gem_vm_close(struct vm_area_struct *vma)
+{
+}
+
+int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	unsigned long page_offset;
+	unsigned long pfn;
+	int ret = 0;
+
+	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	DRM_ERROR("faulting in object %d offset 0x%08lx\n", obj->name,
+		  page_offset);
+
+	/* Now bind it into the GTT */
+	if (i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment))
+		return VM_FAULT_SIGBUS;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+		page_offset;
+	/* Finally, remap it using the new GTT offset */
+	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+
+	return ret == -EAGAIN ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
+}
+
+int i915_gem_vm_mkwrite(struct vm_area_struct *vma, struct page *page)
+{
+	return 0;
+}
+
+int i915_gem_vm_access(struct vm_area_struct *vma, unsigned long addr,
+		       void *buf, int len, int write)
+{
+	return 0;
+}
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file_priv: GEM object info
+ *
+ * Allocate a fake mmap offset for the given object and add it
+ * to the DRM map hash.  This allows drm_mmap() to find it so that
+ * the vm_ops can be overridden with GEM fault & access handlers.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_i915_gem_mmap_gtt *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+	obj_priv = obj->driver_private;
+	mutex_lock(&dev->struct_mutex);
+	/* Touch the object only while we still hold our reference:
+	 * the unreference below may free obj and obj_priv.
+	 */
+	args->addr_ptr = obj_priv->mmap_offset;
+	obj_priv->gtt_alignment = args->alignment;
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static void
 i915_gem_object_free_page_list(struct drm_gem_object *obj)
 {
@@ -897,6 +985,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct vm_area_struct *vma = obj->vma;
 	int ret = 0;
 
 #if WATCH_BUF
@@ -940,6 +1029,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
 	BUG_ON(obj_priv->active);
 
+	/* blow away mappings if mapped through GTT */
+	if (vma)
+		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+
 	i915_gem_object_free_page_list(obj);
 
 	if (obj_priv->gtt_space) {
@@ -2120,7 +2213,11 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 
 int i915_gem_init_object(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv;
+	struct drm_map_list *list;
+	struct drm_map *map;
+	int ret = 0;
 
 	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
 	if (obj_priv == NULL)
@@ -2138,11 +2235,61 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	obj->driver_private = obj_priv;
 	obj_priv->obj = obj;
 	INIT_LIST_HEAD(&obj_priv->list);
+
+	/* Set the object up for mmap'ing */
+	list = &obj->map_list;
+	list->map = drm_calloc(1, sizeof(struct drm_map_list),
+			       DRM_MEM_DRIVER);
+	if (!list->map)
+		return -ENOMEM;
+
+	map = list->map;
+	map->type = _DRM_GEM;
+	map->size = obj->size;
+	map->handle = obj;
+
+	/* Get a DRM GEM mmap offset allocated... */
+	list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
+						    obj->size / PAGE_SIZE, 0, 0);
+	if (!list->file_offset_node) {
+		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+						  obj->size / PAGE_SIZE, 0);
+	if (!list->file_offset_node) {
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->hash.key = list->file_offset_node->start;
+	if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
+		DRM_ERROR("failed to add to map hash\n");
+		ret = -ENOMEM;
+		goto out_free_mm;
+	}
+
+	/* By now we should be all set, any drm_mmap request on the offset
+	 * below will get to our mmap & fault handler */
+	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
+
 	return 0;
+
+out_free_mm:
+	drm_mm_put_block(list->file_offset_node);
+out_free_list:
+	drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
+
+	return ret;
 }
 
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
+	struct drm_map_list *list;
+	struct drm_map *map;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	while (obj_priv->pin_count > 0)
@@ -2150,6 +2297,20 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_object_unbind(obj);
 
+	list = &obj->map_list;
+	drm_ht_remove_item(&dev->map_hash, &list->hash);
+
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	map = list->map;
+	if (map) {
+		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
+		list->map = NULL;
+	}
+
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
 }
@@ -2265,7 +2426,15 @@ i915_gem_idle(struct drm_device *dev)
 	 * waited for a sequence higher than any pending execbuffer
 	 */
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+	if (!list_empty(&dev_priv->mm.flushing_list)) {
+		struct drm_i915_gem_object *obj_priv;
+		DRM_ERROR("flushing list not empty, still has:\n");
+		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+				    list) {
+			DRM_ERROR("    %p: %08x\n", obj_priv,
+				  obj_priv->last_rendering_seqno);
+		}
+	}
 
 	/* Request should now be empty as we've also waited
 	 * for the last request in the list
@@ -2278,7 +2447,6 @@ i915_gem_idle(struct drm_device *dev)
 		return ret;
 
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	BUG_ON(!list_empty(&dev_priv->mm.request_list));
 	return 0;
@@ -2369,7 +2537,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	dev_priv->ring.map.flags = 0;
 	dev_priv->ring.map.mtrr = 0;
 
-	drm_core_ioremap(&dev_priv->ring.map, dev);
+	DRM_ERROR("remapping ring, offset 0x%08lx size %ld\n",
+		  dev_priv->ring.map.offset, obj->size);
+	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
 	if (dev_priv->ring.map.handle == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
diff --git a/include/drm/drm.h b/include/drm/drm.h
index dc347ef..8b3c126 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -190,6 +190,7 @@ enum drm_map_type {
 	_DRM_AGP = 3,		  /**< AGP/GART */
 	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
 	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
+	_DRM_GEM = 6,		  /**< GEM object */
 };
 
 /**
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index eb2a05f..538bb7c 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -150,6 +150,13 @@ struct drm_device;
 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
 #define DRM_MAP_HASH_OFFSET 0x10000000
 
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+
 /[EMAIL PROTECTED]/
 
 /***********************************************************************/
@@ -523,6 +530,7 @@ struct drm_map_list {
 	struct drm_hash_item hash;
 	struct drm_map *map;			/**< mapping */
 	uint64_t user_token;
+	struct drm_mm_node *file_offset_node;	/**< fake offset */
 };
 
 typedef struct drm_map drm_local_map_t;
@@ -579,6 +587,12 @@ struct drm_gem_object {
 	/** File representing the shmem storage */
 	struct file *filp;
 
+	/* Mapping info for this object */
+	struct drm_map_list map_list;
+
+	/* VMA containing this object's GTT mapping (if any) */
+	struct vm_area_struct *vma;
+
 	/**
 	 * Size of the object, in bytes.  Immutable over the object's
 	 * lifetime.
@@ -681,6 +695,9 @@ struct drm_driver {
 	int (*gem_init_object) (struct drm_gem_object *obj);
 	void (*gem_free_object) (struct drm_gem_object *obj);
 
+	/* Driver private ops for this object */
+	struct vm_operations_struct *gem_vm_ops;
+
 	int major;
 	int minor;
 	int patchlevel;
@@ -757,6 +774,7 @@ struct drm_device {
 	struct list_head maplist;	/**< Linked list of regions */
 	int map_count;			/**< Number of mappable regions */
 	struct drm_open_hash map_hash;	/**< User token hash table for maps */
+	struct drm_mm offset_manager;	/**< Offset mgmt for buffer objects */
 
 	/** \name Context handle management */
 	/[EMAIL PROTECTED] */
@@ -1186,6 +1204,8 @@ void drm_gem_object_free(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
 					    size_t size);
 void drm_gem_object_handle_free(struct kref *kref);
+int drm_gem_mmap(struct vm_area_struct *vma, struct file *filp,
+		 struct drm_map *map);
 
 static inline void
 drm_gem_object_reference(struct drm_gem_object *obj)
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index eb4b350..fed1d7c 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -159,6 +159,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_SW_FINISH	0x20
 #define DRM_I915_GEM_SET_TILING	0x21
 #define DRM_I915_GEM_GET_TILING	0x22
+#define DRM_I915_GEM_MMAP_GTT	0x23
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -186,6 +187,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
 #define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
 #define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
 #define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
 #define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
 #define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
@@ -380,6 +382,24 @@ struct drm_i915_gem_mmap {
 	uint64_t addr_ptr;
 };
 
+struct drm_i915_gem_mmap_gtt {
+	/** Handle for the object being mapped. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset in the object to map. */
+	uint64_t offset;
+	/**
+	 * Length of data to map.
+	 *
+	 * The value will be page-aligned.
+	 */
+	uint64_t size;
+	/** Returned pointer the data was mapped at */
+	uint64_t addr_ptr;	/* void *, but pointers are not 32/64 compatible */
+	uint32_t flags;
+	uint32_t alignment;
+};
+
 struct drm_i915_gem_set_domain {
 	/** Handle for the object */
 	uint32_t handle;
-------------------------------------------------------------------------
This SF.Net email is sponsored by the Moblin Your Move Developer's challenge
Build the coolest Linux based applications with Moblin SDK & win great prizes
Grand prize is a trip for two to an Open Source event anywhere in the world
http://moblin-contest.org/redirect.php?banner_id=100&url=/
--
_______________________________________________
Dri-devel mailing list
Dri-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/dri-devel

Reply via email to