Module Name:    src
Committed By:   riastradh
Date:           Sun Sep  8 15:52:20 UTC 2013

Modified Files:
        src/sys/external/bsd/drm2/dist/drm/i915 [riastradh-drm2]: i915_drv.h
            i915_gem.c i915_gem_execbuffer.c i915_gem_tiling.c
        src/sys/modules/i915drm2 [riastradh-drm2]: Makefile
Added Files:
        src/sys/external/bsd/drm2/i915drm [riastradh-drm2]: i915_gem_gtt.c
            intel_gtt.c
Removed Files:
        src/sys/external/bsd/drm2/i915drm [riastradh-drm2]: i915_gem.c

Log Message:
Adapt the i915 GEM code to NetBSD.

In drm_i915_gem_object, replace the Linux sg_table page bookkeeping with a
NetBSD page queue, bus_dma(9) segment array, and DMA map; implement
i915_gem_fault and the GTT mmap path with UVM (including a copy of
udv_fault from uvm_device.c for GTT faults); wire, load, and unload object
pages with bus_dmamem_wire_uvm_object and bus_dmamap_load_raw; add a
NetBSD implementation of the global GTT code (i915_gem_gtt.c) and an
intel_gtt.c stub; enable more GEM sources (context, debug, evict,
execbuffer, gtt, stolen, tiling) plus intel_gtt.c in the i915drm2 module
Makefile; and retire the temporary i915_gem.c override.
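
For readers skimming the diff: the new i915_gem_gtt.c below programs gen6+
global GTT entries directly through bus_space(9), packing each physical
page address into a 32-bit PTE by folding the address bits above bit 31
into the low-order bits left free by page alignment.  A minimal,
standalone illustration of that packing follows (not part of the commit;
only the helper name matches the diff, everything else is assumed for the
example):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Same expression as gen6_pte_addr_encode() in the new
	 * i915_gem_gtt.c: shift the high address bits down by 28 so they
	 * land in PTE bits 4..11, then truncate to 32 bits. */
	static uint32_t
	gen6_pte_addr_encode(uint64_t addr)
	{
		return (uint32_t)(addr | ((addr >> 28) & 0xff0));
	}

	int
	main(void)
	{
		const uint64_t addr = 0x3ffff0000ULL;	/* page-aligned, above 4GB */

		/* Prints pte=0xffff0030: address bits 32..33 reappear as
		 * PTE bits 4..5; validity/cache flags are OR'd in later. */
		printf("pte=0x%08" PRIx32 "\n", gen6_pte_addr_encode(addr));
		return 0;
	}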


To generate a diff of this commit:
cvs rdiff -u -r1.1.1.1.2.14 -r1.1.1.1.2.15 \
    src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h
cvs rdiff -u -r1.1.1.1.2.7 -r1.1.1.1.2.8 \
    src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c
cvs rdiff -u -r1.1.1.1.2.3 -r1.1.1.1.2.4 \
    src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_execbuffer.c
cvs rdiff -u -r1.1.1.1.2.2 -r1.1.1.1.2.3 \
    src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_tiling.c
cvs rdiff -u -r1.1.2.2 -r0 src/sys/external/bsd/drm2/i915drm/i915_gem.c
cvs rdiff -u -r0 -r1.1.2.1 src/sys/external/bsd/drm2/i915drm/i915_gem_gtt.c \
    src/sys/external/bsd/drm2/i915drm/intel_gtt.c
cvs rdiff -u -r1.1.2.8 -r1.1.2.9 src/sys/modules/i915drm2/Makefile

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h:1.1.1.1.2.14 src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h:1.1.1.1.2.15
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h:1.1.1.1.2.14	Wed Jul 24 03:40:29 2013
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h	Sun Sep  8 15:52:20 2013
@@ -1084,7 +1084,14 @@ struct drm_i915_gem_object {
 	unsigned int has_global_gtt_mapping:1;
 	unsigned int has_dma_mapping:1;
 
+#ifdef __NetBSD__
+	struct pglist igo_pageq;
+	bus_dma_segment_t *pages; /* `pages' is an expedient misnomer.  */
+	int igo_nsegs;
+	bus_dmamap_t igo_dmamap;
+#else
 	struct sg_table *pages;
+#endif
 	int pages_pin_count;
 
 	/* prime dma-buf support */
@@ -1436,7 +1443,23 @@ void i915_gem_release_mmap(struct drm_i9
 void i915_gem_lastclose(struct drm_device *dev);
 
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-#ifndef __NetBSD__		/* XXX */
+#ifdef __NetBSD__		/* XXX */
+static inline struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+
+	/*
+	 * Pages must be pinned so that we need not hold the lock to
+	 * prevent them from disappearing.
+	 */
+	KASSERT(obj->pages != NULL);
+	mutex_enter(obj->base.gemo_uvmobj.vmobjlock);
+	struct vm_page *const page = uvm_pagelookup(obj->base.gemo_shm_uao, n);
+	mutex_exit(obj->base.gemo_uvmobj.vmobjlock);
+
+	return container_of(page, struct page, p_vmp);
+}
+#else
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
 	struct scatterlist *sg = obj->pages->sgl;
@@ -1534,7 +1557,10 @@ int i915_add_request(struct intel_ring_b
 		     u32 *seqno);
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
 				 uint32_t seqno);
-#ifndef __NetBSD__		/* XXX */
+#ifdef __NetBSD__		/* XXX */
+int i915_gem_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
+    int, int, vm_prot_t, int);
+#else
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 #endif
 int __must_check

Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c:1.1.1.1.2.7 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c:1.1.1.1.2.8
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c:1.1.1.1.2.7	Sun Sep  8 15:41:41 2013
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c	Sun Sep  8 15:52:20 2013
@@ -25,6 +25,21 @@
  *
  */
 
+#ifdef __NetBSD__
+#if 0				/* XXX uvmhist option?  */
+#include "opt_uvmhist.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/param.h>
+
+#include <uvm/uvm.h>
+#include <uvm/uvm_fault.h>
+#include <uvm/uvm_page.h>
+#include <uvm/uvm_pmap.h>
+#include <uvm/uvm_prot.h>
+#endif
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -350,6 +365,9 @@ shmem_pread_fast(struct page *page, int 
 		 char __user *user_data,
 		 bool page_do_bit17_swizzling, bool needs_clflush)
 {
+#ifdef __NetBSD__		/* XXX atomic shmem fast path */
+	return -EFAULT;
+#else
 	char *vaddr;
 	int ret;
 
@@ -366,6 +384,7 @@ shmem_pread_fast(struct page *page, int 
 	kunmap_atomic(vaddr);
 
 	return ret ? -EFAULT : 0;
+#endif
 }
 
 static void
@@ -431,10 +450,14 @@ i915_gem_shmem_pread(struct drm_device *
 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int hit_slowpath = 0;
+#ifndef __NetBSD__		/* XXX */
 	int prefaulted = 0;
+#endif
 	int needs_clflush = 0;
+#ifndef __NetBSD__
 	struct scatterlist *sg;
 	int i;
+#endif
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -463,6 +486,50 @@ i915_gem_shmem_pread(struct drm_device *
 
 	offset = args->offset;
 
+#ifdef __NetBSD__
+	/*
+	 * XXX This is a big #ifdef with a lot of duplicated code, but
+	 * factoring out the loop head -- which is all that
+	 * substantially differs -- is probably more trouble than it's
+	 * worth at the moment.
+	 */
+	while (0 < remain) {
+		/* Get the next page.  */
+		shmem_page_offset = offset_in_page(offset);
+		KASSERT(shmem_page_offset < PAGE_SIZE);
+		page_length = MIN(remain, (PAGE_SIZE - shmem_page_offset));
+		struct page *const page = i915_gem_object_get_page(obj,
+		    (offset & ~(PAGE_SIZE-1)));
+
+		/* Decide whether to swizzle bit 17.  */
+		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+		    (page_to_phys(page) & (1 << 17)) != 0;
+
+		/* Try the fast path.  */
+		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
+		    user_data, page_do_bit17_swizzling, needs_clflush);
+		if (ret == 0)
+			goto next_page;
+
+		/* Fast path failed.  Try the slow path.  */
+		hit_slowpath = 1;
+		mutex_unlock(&dev->struct_mutex);
+		/* XXX prefault */
+		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
+		    user_data, page_do_bit17_swizzling, needs_clflush);
+		mutex_lock(&dev->struct_mutex);
+
+next_page:
+		/* XXX mark page accessed */
+		if (ret)
+			goto out;
+
+		KASSERT(page_length <= remain);
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+#else
 	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
 		struct page *page;
 
@@ -521,6 +588,7 @@ next_page:
 		user_data += page_length;
 		offset += page_length;
 	}
+#endif
 
 out:
 	i915_gem_object_unpin_pages(obj);
@@ -572,6 +640,7 @@ i915_gem_pread_ioctl(struct drm_device *
 		goto out;
 	}
 
+#ifndef __NetBSD__		/* XXX drm prime */
 	/* prime objects have no backing filp to GEM pread/pwrite
 	 * pages from.
 	 */
@@ -579,6 +648,7 @@ i915_gem_pread_ioctl(struct drm_device *
 		ret = -EINVAL;
 		goto out;
 	}
+#endif
 
 	trace_i915_gem_object_pread(obj, args->offset, args->size);
 
@@ -601,6 +671,9 @@ fast_user_write(struct io_mapping *mappi
 		char __user *user_data,
 		int length)
 {
+#ifdef __NetBSD__		/* XXX atomic shmem fast path */
+	return -EFAULT;
+#else
 	void __iomem *vaddr_atomic;
 	void *vaddr;
 	unsigned long unwritten;
@@ -612,6 +685,7 @@ fast_user_write(struct io_mapping *mappi
 						      user_data, length);
 	io_mapping_unmap_atomic(vaddr_atomic);
 	return unwritten;
+#endif
 }
 
 /**
@@ -692,6 +766,9 @@ shmem_pwrite_fast(struct page *page, int
 		  bool needs_clflush_before,
 		  bool needs_clflush_after)
 {
+#ifdef __NetBSD__
+	return -EFAULT;
+#else
 	char *vaddr;
 	int ret;
 
@@ -711,6 +788,7 @@ shmem_pwrite_fast(struct page *page, int
 	kunmap_atomic(vaddr);
 
 	return ret ? -EFAULT : 0;
+#endif
 }
 
 /* Only difference to the fast-path function is that this can handle bit17
@@ -761,8 +839,10 @@ i915_gem_shmem_pwrite(struct drm_device 
 	int hit_slowpath = 0;
 	int needs_clflush_after = 0;
 	int needs_clflush_before = 0;
+#ifndef __NetBSD__
 	int i;
 	struct scatterlist *sg;
+#endif
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -797,6 +877,49 @@ i915_gem_shmem_pwrite(struct drm_device 
 	offset = args->offset;
 	obj->dirty = 1;
 
+#ifdef __NetBSD__
+	while (0 < remain) {
+		/* Get the next page.  */
+		shmem_page_offset = offset_in_page(offset);
+		KASSERT(shmem_page_offset < PAGE_SIZE);
+		page_length = MIN(remain, (PAGE_SIZE - shmem_page_offset));
+		struct page *const page = i915_gem_object_get_page(obj,
+		    (offset & ~(PAGE_SIZE-1)));
+
+		/* Decide whether to flush the cache or swizzle bit 17.  */
+		const bool partial_cacheline_write = needs_clflush_before &&
+		    ((shmem_page_offset | page_length)
+			& (cpu_info_primary.ci_cflush_lsize - 1));
+		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+		    (page_to_phys(page) & (1 << 17)) != 0;
+
+		/* Try the fast path.  */
+		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
+		    user_data, page_do_bit17_swizzling,
+		    partial_cacheline_write, needs_clflush_after);
+		if (ret == 0)
+			goto next_page;
+
+		/* Fast path failed.  Try the slow path.  */
+		hit_slowpath = 1;
+		mutex_unlock(&dev->struct_mutex);
+		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+		    user_data, page_do_bit17_swizzling,
+		    partial_cacheline_write, needs_clflush_after);
+		mutex_lock(&dev->struct_mutex);
+
+next_page:
+		page->p_vmp.flags &= ~PG_CLEAN;
+		/* XXX mark page accessed */
+		if (ret)
+			goto out;
+
+		KASSERT(page_length <= remain);
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+#else
 	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
 		struct page *page;
 		int partial_cacheline_write;
@@ -856,6 +979,7 @@ next_page:
 		user_data += page_length;
 		offset += page_length;
 	}
+#endif
 
 out:
 	i915_gem_object_unpin_pages(obj);
@@ -899,10 +1023,12 @@ i915_gem_pwrite_ioctl(struct drm_device 
 		       args->size))
 		return -EFAULT;
 
+#ifndef __NetBSD__		/* XXX prefault */
 	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
 					   args->size);
 	if (ret)
 		return -EFAULT;
+#endif
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
@@ -921,6 +1047,7 @@ i915_gem_pwrite_ioctl(struct drm_device 
 		goto out;
 	}
 
+#ifndef __NetBSD__		/* XXX drm prime */
 	/* prime objects have no backing filp to GEM pread/pwrite
 	 * pages from.
 	 */
@@ -928,6 +1055,7 @@ i915_gem_pwrite_ioctl(struct drm_device 
 		ret = -EINVAL;
 		goto out;
 	}
+#endif
 
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
@@ -969,12 +1097,24 @@ i915_gem_check_wedge(struct drm_i915_pri
 	if (atomic_read(&dev_priv->mm.wedged)) {
 		struct completion *x = &dev_priv->error_completion;
 		bool recovery_complete;
+#ifndef __NetBSD__
 		unsigned long flags;
+#endif
 
+#ifdef __NetBSD__
+		/*
+		 * XXX This is a horrible kludge.  Reading internal
+		 * fields is no good, nor is reading them unlocked, and
+		 * neither is locking it and then unlocking it before
+		 * making a decision.
+		 */
+		recovery_complete = x->c_done > 0;
+#else
 		/* Give the error handler a chance to run. */
 		spin_lock_irqsave(&x->wait.lock, flags);
 		recovery_complete = x->done > 0;
 		spin_unlock_irqrestore(&x->wait.lock, flags);
+#endif
 
 		/* Non-interruptible callers can't handle -EAGAIN, hence return
 		 * -EIO unconditionally for these. */
@@ -1316,11 +1456,15 @@ i915_gem_mmap_ioctl(struct drm_device *d
 	struct drm_i915_gem_mmap *args = data;
 	struct drm_gem_object *obj;
 	unsigned long addr;
+#ifdef __NetBSD__
+	int ret;
+#endif
 
 	obj = drm_gem_object_lookup(dev, file, args->handle);
 	if (obj == NULL)
 		return -ENOENT;
 
+#ifndef __NetBSD__    /* XXX drm prime */
 	/* prime objects have no backing filp to GEM mmap
 	 * pages from.
 	 */
@@ -1328,19 +1472,174 @@ i915_gem_mmap_ioctl(struct drm_device *d
 		drm_gem_object_unreference_unlocked(obj);
 		return -EINVAL;
 	}
+#endif
 
+#ifdef __NetBSD__
+	addr = (*curproc->p_emul->e_vm_default_addr)(curproc,
+	    (vaddr_t)curproc->p_vmspace->vm_daddr, args->size);
+	/* XXX errno NetBSD->Linux */
+	ret = -uvm_map(&curproc->p_vmspace->vm_map, &addr, args->size,
+	    obj->gemo_shm_uao, args->offset, 0,
+	    UVM_MAPFLAG((VM_PROT_READ | VM_PROT_WRITE),
+		(VM_PROT_READ | VM_PROT_WRITE), UVM_INH_COPY, UVM_ADV_NORMAL,
+		UVM_FLAG_COPYONW));
+	if (ret)
+		return ret;
+#else
 	addr = vm_mmap(obj->filp, 0, args->size,
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       args->offset);
 	drm_gem_object_unreference_unlocked(obj);
 	if (IS_ERR((void *)addr))
 		return addr;
+#endif
 
 	args->addr_ptr = (uint64_t) addr;
 
 	return 0;
 }
 
+#ifdef __NetBSD__		/* XXX gem gtt fault */
+static int	i915_udv_fault(struct uvm_faultinfo *, vaddr_t,
+		    struct vm_page **, int, int, vm_prot_t, int, paddr_t);
+
+int
+i915_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
+    int npages, int centeridx, vm_prot_t access_type, int flags)
+{
+	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
+	struct drm_gem_object *gem_obj =
+	    container_of(uobj, struct drm_gem_object, gemo_uvmobj);
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	pgoff_t page_offset;
+	int ret = 0;
+	bool write = ISSET(access_type, VM_PROT_WRITE)? 1 : 0;
+
+	page_offset = (ufi->entry->offset + (vaddr - ufi->entry->start)) >>
+	    PAGE_SHIFT;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto out;
+
+	trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+	/* Now bind it into the GTT if needed */
+	ret = i915_gem_object_pin(obj, 0, true, false);
+	if (ret)
+		goto unlock;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto unpin;
+
+	ret = i915_gem_object_get_fence(obj);
+	if (ret)
+		goto unpin;
+
+	obj->fault_mappable = true;
+
+	/* Finally, remap it using the new GTT offset */
+	/* XXX errno NetBSD->Linux */
+	ret = -i915_udv_fault(ufi, vaddr, pps, npages, centeridx, access_type,
+	    flags, (dev_priv->mm.gtt_base_addr + obj->gtt_offset));
+unpin:
+	i915_gem_object_unpin(obj);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+out:
+	return ret;
+}
+
+/*
+ * XXX i915_udv_fault is copypasta of udv_fault from uvm_device.c.
+ */
+static int
+i915_udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
+    int npages, int centeridx, vm_prot_t access_type, int flags,
+    paddr_t gtt_paddr)
+{
+	struct vm_map_entry *entry = ufi->entry;
+	struct uvm_object *uobj = entry->object.uvm_obj;
+	vaddr_t curr_va;
+	off_t curr_offset;
+	paddr_t paddr;
+	u_int mmapflags;
+	int lcv, retval;
+	vm_prot_t mapprot;
+	UVMHIST_FUNC("i915_udv_fault"); UVMHIST_CALLED(maphist);
+	UVMHIST_LOG(maphist,"  flags=%d", flags,0,0,0);
+
+	/*
+	 * we do not allow device mappings to be mapped copy-on-write
+	 * so we kill any attempt to do so here.
+	 */
+
+	if (UVM_ET_ISCOPYONWRITE(entry)) {
+		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%x)",
+		entry->etype, 0,0,0);
+		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
+		return(EIO);
+	}
+
+	/*
+	 * now we must determine the offset in udv to use and the VA to
+	 * use for pmap_enter.  note that we always use orig_map's pmap
+	 * for pmap_enter (even if we have a submap).   since virtual
+	 * addresses in a submap must match the main map, this is ok.
+	 */
+
+	/* udv offset = (offset from start of entry) + entry's offset */
+	curr_offset = entry->offset + (vaddr - entry->start);
+	/* pmap va = vaddr (virtual address of pps[0]) */
+	curr_va = vaddr;
+
+	/*
+	 * loop over the page range entering in as needed
+	 */
+
+	retval = 0;
+	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
+	    curr_va += PAGE_SIZE) {
+		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
+			continue;
+
+		if (pps[lcv] == PGO_DONTCARE)
+			continue;
+
+		paddr = (gtt_paddr + curr_offset);
+		mmapflags = 0;
+		mapprot = ufi->entry->protection;
+		UVMHIST_LOG(maphist,
+		    "  MAPPING: device: pm=0x%x, va=0x%x, pa=0x%lx, at=%d",
+		    ufi->orig_map->pmap, curr_va, paddr, mapprot);
+		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr, mapprot,
+		    PMAP_CANFAIL | mapprot | mmapflags) != 0) {
+			/*
+			 * pmap_enter() didn't have the resource to
+			 * enter this mapping.  Unlock everything,
+			 * wait for the pagedaemon to free up some
+			 * pages, and then tell uvm_fault() to start
+			 * the fault again.
+			 *
+			 * XXX Needs some rethinking for the PGO_ALLPAGES
+			 * XXX case.
+			 */
+			pmap_update(ufi->orig_map->pmap);	/* sync what we have so far */
+			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
+			    uobj);
+			uvm_wait("i915flt");
+			return (ERESTART);
+		}
+	}
+
+	pmap_update(ufi->orig_map->pmap);
+	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
+	return (retval);
+}
+#else
 /**
  * i915_gem_fault - fault a page into the GTT
  * vma: VMA in question
@@ -1436,6 +1735,7 @@ out:
 		return VM_FAULT_SIGBUS;
 	}
 }
+#endif
 
 /**
  * i915_gem_release_mmap - remove physical page mappings
@@ -1457,10 +1757,21 @@ i915_gem_release_mmap(struct drm_i915_ge
 	if (!obj->fault_mappable)
 		return;
 
+#ifdef __NetBSD__		/* XXX gem gtt fault */
+	{
+		struct vm_page *page;
+
+		KASSERT(obj->pages != NULL);
+		/* Force a fresh fault for each page.  */
+		TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue)
+			pmap_page_protect(page, VM_PROT_NONE);
+	}
+#else
 	if (obj->base.dev->dev_mapping)
 		unmap_mapping_range(obj->base.dev->dev_mapping,
 				    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
 				    obj->base.size, 1);
+#endif
 
 	obj->fault_mappable = false;
 }
@@ -1656,10 +1967,22 @@ i915_gem_mmap_gtt_ioctl(struct drm_devic
 static void
 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
+#ifndef __NetBSD__
 	struct inode *inode;
+#endif
 
 	i915_gem_object_free_mmap_offset(obj);
 
+#ifdef __NetBSD__
+	{
+		struct uvm_object *const uobj = obj->base.gemo_shm_uao;
+
+		if (uobj != NULL)
+			/* XXX Calling pgo_put like this is bogus.  */
+			(*uobj->pgops->pgo_put)(uobj, 0, obj->base.size,
+			    (PGO_ALLPAGES | PGO_FREE));
+	}
+#else
 	if (obj->base.filp == NULL)
 		return;
 
@@ -1670,6 +1993,7 @@ i915_gem_object_truncate(struct drm_i915
 	 */
 	inode = obj->base.filp->f_path.dentry->d_inode;
 	shmem_truncate_range(inode, 0, (loff_t)-1);
+#endif
 
 	obj->madv = __I915_MADV_PURGED;
 }
@@ -1680,6 +2004,37 @@ i915_gem_object_is_purgeable(struct drm_
 	return obj->madv == I915_MADV_DONTNEED;
 }
 
+#ifdef __NetBSD__
+static void
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *const dev = obj->base.dev;
+	int ret;
+
+	/* XXX Cargo-culted from the Linux code.  */
+	BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret) {
+		WARN_ON(ret != -EIO);
+		i915_gem_clflush_object(obj);
+		obj->base.read_domains = obj->base.write_domain =
+		    I915_GEM_DOMAIN_CPU;
+	}
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_save_bit_17_swizzle(obj);
+
+	/* XXX Maintain dirty flag?  */
+
+	bus_dmamap_unload(dev->dmat, obj->igo_dmamap);
+	bus_dmamap_destroy(dev->dmat, obj->igo_dmamap);
+	bus_dmamem_unwire_uvm_object(dev->dmat, obj->base.gemo_shm_uao, 0,
+	    obj->base.size, obj->pages, obj->igo_nsegs);
+
+	kfree(obj->pages);
+}
+#else
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
@@ -1721,6 +2076,7 @@ i915_gem_object_put_pages_gtt(struct drm
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
 }
+#endif
 
 static int
 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
@@ -1799,6 +2155,63 @@ i915_gem_shrink_all(struct drm_i915_priv
 		i915_gem_object_put_pages(obj);
 }
 
+#ifdef __NetBSD__
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *const dev = obj->base.dev;
+	int error;
+
+	/* XXX Cargo-culted from the Linux code.  */
+	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
+	KASSERT(obj->pages == NULL);
+	TAILQ_INIT(&obj->igo_pageq);
+	obj->pages = kcalloc((obj->base.size / PAGE_SIZE),
+	    sizeof(obj->pages[0]), GFP_KERNEL);
+	if (obj->pages == NULL) {
+		error = -ENOMEM;
+		goto fail0;
+	}
+
+	/* XXX errno NetBSD->Linux */
+	error = -bus_dmamem_wire_uvm_object(dev->dmat, obj->base.gemo_shm_uao,
+	    0, obj->base.size, &obj->igo_pageq, PAGE_SIZE, 0, obj->pages,
+	    (obj->base.size / PAGE_SIZE), &obj->igo_nsegs, BUS_DMA_NOWAIT);
+	if (error)
+		/* XXX Try i915_gem_purge, i915_gem_shrink_all.  */
+		goto fail1;
+	KASSERT(0 < obj->igo_nsegs);
+	KASSERT(obj->igo_nsegs <= (obj->base.size / PAGE_SIZE));
+
+	/* XXX errno NetBSD->Linux */
+	error = -bus_dmamap_create(dev->dmat, obj->base.size, obj->igo_nsegs,
+	    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &obj->igo_dmamap);
+	if (error)
+		goto fail2;
+
+	/* XXX errno NetBSD->Linux */
+	error = -bus_dmamap_load_raw(dev->dmat, obj->igo_dmamap, obj->pages,
+	    obj->igo_nsegs, obj->base.size, BUS_DMA_NOWAIT);
+	if (error)
+		goto fail3;
+
+	/* XXX Cargo-culted from the Linux code.  */
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		i915_gem_object_do_bit_17_swizzle(obj);
+
+	/* Success!  */
+	return 0;
+
+fail3:	bus_dmamap_destroy(dev->dmat, obj->igo_dmamap);
+fail2:	bus_dmamem_unwire_uvm_object(dev->dmat, obj->base.gemo_shm_uao, 0,
+	    obj->base.size, obj->pages, (obj->base.size / PAGE_SIZE));
+fail1:	kfree(obj->pages);
+	obj->pages = NULL;
+fail0:	return error;
+}
+#else
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
@@ -1877,6 +2290,7 @@ err_pages:
 	kfree(st);
 	return PTR_ERR(page);
 }
+#endif
 
 /* Ensure that the associated pages are gathered from the backing storage
  * and pinned into our object. i915_gem_object_get_pages() may be called
@@ -3065,7 +3479,11 @@ i915_gem_clflush_object(struct drm_i915_
 
 	trace_i915_gem_object_clflush(obj);
 
+#ifdef __NetBSD__
+	drm_clflush_pglist(&obj->igo_pageq);
+#else
 	drm_clflush_sg(obj->pages);
+#endif
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -3738,8 +4156,10 @@ struct drm_i915_gem_object *i915_gem_all
 						  size_t size)
 {
 	struct drm_i915_gem_object *obj;
+#ifndef __NetBSD__		/* XXX >32bit dma?  */
 	struct address_space *mapping;
 	u32 mask;
+#endif
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL)
@@ -3750,6 +4170,7 @@ struct drm_i915_gem_object *i915_gem_all
 		return NULL;
 	}
 
+#ifndef __NetBSD__		/* XXX >32bit dma?  */
 	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
 	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
 		/* 965gm cannot relocate objects above 4GiB. */
@@ -3759,6 +4180,7 @@ struct drm_i915_gem_object *i915_gem_all
 
 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	mapping_set_gfp_mask(mapping, mask);
+#endif
 
 	i915_gem_object_init(obj, &i915_gem_object_ops);
 
@@ -3821,8 +4243,10 @@ void i915_gem_free_object(struct drm_gem
 
 	BUG_ON(obj->pages);
 
+#ifndef __NetBSD__		/* XXX drm prime */
 	if (obj->base.import_attach)
 		drm_prime_gem_destroy(&obj->base, NULL);
+#endif
 
 	drm_gem_object_release(&obj->base);
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -3997,6 +4421,9 @@ cleanup_render_ring:
 static bool
 intel_enable_ppgtt(struct drm_device *dev)
 {
+#ifdef __NetBSD__		/* XXX ppgtt */
+	return false;
+#else
 	if (i915_enable_ppgtt >= 0)
 		return i915_enable_ppgtt;
 
@@ -4007,6 +4434,7 @@ intel_enable_ppgtt(struct drm_device *de
 #endif
 
 	return true;
+#endif
 }
 
 int i915_gem_init(struct drm_device *dev)
@@ -4183,7 +4611,11 @@ i915_gem_load(struct drm_device *dev)
 	i915_gem_reset_fences(dev);
 
 	i915_gem_detect_bit_6_swizzle(dev);
+#ifdef __NetBSD__
+	DRM_INIT_WAITQUEUE(&dev_priv->pending_flip_queue, "i915flip");
+#else
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
+#endif
 
 	dev_priv->mm.interruptible = true;
 
@@ -4217,9 +4649,11 @@ static int i915_gem_init_phys_object(str
 		ret = -ENOMEM;
 		goto kfree_obj;
 	}
+#ifndef __NetBSD__		/* XXX x86 wc?  */
 #ifdef CONFIG_X86
 	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
 #endif
+#endif
 
 	dev_priv->mm.phys_objs[id - 1] = phys_obj;
 
@@ -4242,9 +4676,11 @@ static void i915_gem_free_phys_object(st
 		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
 	}
 
+#ifndef __NetBSD__		/* XXX x86 wb?  */
 #ifdef CONFIG_X86
 	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
 #endif
+#endif
 	drm_pci_free(dev, phys_obj->handle);
 	kfree(phys_obj);
 	dev_priv->mm.phys_objs[id - 1] = NULL;
@@ -4261,7 +4697,9 @@ void i915_gem_free_all_phys_object(struc
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_i915_gem_object *obj)
 {
+#ifndef __NetBSD__
 	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+#endif
 	char *vaddr;
 	int i;
 	int page_count;
@@ -4272,6 +4710,36 @@ void i915_gem_detach_phys_object(struct 
 
 	page_count = obj->base.size / PAGE_SIZE;
 	for (i = 0; i < page_count; i++) {
+#ifdef __NetBSD__
+		/* XXX Just use ubc_uiomove?  */
+		struct pglist pages;
+		int error;
+
+		TAILQ_INIT(&pages);
+		error = uvm_obj_wirepages(obj->base.gemo_shm_uao, i*PAGE_SIZE,
+		    (i+1)*PAGE_SIZE, &pages);
+		if (error) {
+			printf("unable to map page %d of i915 gem obj: %d\n",
+			    i, error);
+			continue;
+		}
+
+		KASSERT(!TAILQ_EMPTY(&pages));
+		struct vm_page *const page = TAILQ_FIRST(&pages);
+		TAILQ_REMOVE(&pages, page, pageq.queue);
+		KASSERT(TAILQ_EMPTY(&pages));
+
+		char *const dst = kmap_atomic(container_of(page, struct page,
+			p_vmp));
+		(void)memcpy(dst, vaddr + (i*PAGE_SIZE), PAGE_SIZE);
+		kunmap_atomic(dst);
+
+		drm_clflush_page(container_of(page, struct page, p_vmp));
+		page->flags &= ~PG_CLEAN;
+		/* XXX mark page accessed */
+		uvm_obj_unwirepages(obj->base.gemo_shm_uao, i*PAGE_SIZE,
+		    (i+1)*PAGE_SIZE);
+#else
 		struct page *page = shmem_read_mapping_page(mapping, i);
 		if (!IS_ERR(page)) {
 			char *dst = kmap_atomic(page);
@@ -4284,6 +4752,7 @@ void i915_gem_detach_phys_object(struct 
 			mark_page_accessed(page);
 			page_cache_release(page);
 		}
+#endif
 	}
 	i915_gem_chipset_flush(dev);
 
@@ -4297,7 +4766,9 @@ i915_gem_attach_phys_object(struct drm_d
 			    int id,
 			    int align)
 {
+#ifndef __NetBSD__
 	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+#endif
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret = 0;
 	int page_count;
@@ -4330,6 +4801,32 @@ i915_gem_attach_phys_object(struct drm_d
 	page_count = obj->base.size / PAGE_SIZE;
 
 	for (i = 0; i < page_count; i++) {
+#ifdef __NetBSD__
+		char *const vaddr = obj->phys_obj->handle->vaddr;
+		struct pglist pages;
+		int error;
+
+		TAILQ_INIT(&pages);
+		error = uvm_obj_wirepages(obj->base.gemo_shm_uao, i*PAGE_SIZE,
+		    (i+1)*PAGE_SIZE, &pages);
+		if (error)
+			/* XXX errno NetBSD->Linux */
+			return -error;
+
+		KASSERT(!TAILQ_EMPTY(&pages));
+		struct vm_page *const page = TAILQ_FIRST(&pages);
+		TAILQ_REMOVE(&pages, page, pageq.queue);
+		KASSERT(TAILQ_EMPTY(&pages));
+
+		char *const src = kmap_atomic(container_of(page, struct page,
+			p_vmp));
+		(void)memcpy(vaddr + (i*PAGE_SIZE), src, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		/* XXX mark page accessed */
+		uvm_obj_unwirepages(obj->base.gemo_shm_uao, i*PAGE_SIZE,
+		    (i+1)*PAGE_SIZE);
+#else
 		struct page *page;
 		char *dst, *src;
 
@@ -4344,6 +4841,7 @@ i915_gem_attach_phys_object(struct drm_d
 
 		mark_page_accessed(page);
 		page_cache_release(page);
+#endif
 	}
 
 	return 0;
@@ -4355,7 +4853,7 @@ i915_gem_phys_pwrite(struct drm_device *
 		     struct drm_i915_gem_pwrite *args,
 		     struct drm_file *file_priv)
 {
-	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
+	void *vaddr = (char *)obj->phys_obj->handle->vaddr + args->offset;
 	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
@@ -4397,6 +4895,7 @@ void i915_gem_release(struct drm_device 
 	spin_unlock(&file_priv->mm.lock);
 }
 
+#ifndef __NetBSD__		/* XXX */
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 {
 	if (!mutex_is_locked(mutex))
@@ -4409,10 +4908,14 @@ static bool mutex_is_locked_by(struct mu
 	return false;
 #endif
 }
+#endif
 
 static int
 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 {
+#ifdef __NetBSD__		/* XXX shrinkers */
+	return 0;
+#else
 	struct drm_i915_private *dev_priv =
 		container_of(shrinker,
 			     struct drm_i915_private,
@@ -4453,4 +4956,5 @@ i915_gem_inactive_shrink(struct shrinker
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
 	return cnt;
+#endif
 }

Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_execbuffer.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_execbuffer.c:1.1.1.1.2.3 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_execbuffer.c:1.1.1.1.2.4
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_execbuffer.c:1.1.1.1.2.3	Sun Sep  8 15:42:12 2013
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_execbuffer.c	Sun Sep  8 15:52:20 2013
@@ -101,6 +101,19 @@ static inline int use_cpu_reloc(struct d
 		obj->cache_level != I915_CACHE_NONE);
 }
 
+#ifdef __NetBSD__
+#  define	__gtt_iomem
+#  define	__iomem	__gtt_iomem
+
+static inline void
+iowrite32(uint32_t value, uint32_t __acpi_iomem *ptr)
+{
+
+	__insn_barrier();
+	*ptr = value;
+}
+#endif
+
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_objects *eb,
@@ -190,9 +203,11 @@ i915_gem_execbuffer_relocate_entry(struc
 		return ret;
 	}
 
+#ifndef __NetBSD__              /* XXX atomic GEM reloc fast path */
 	/* We can't wait for rendering with pagefaults disabled */
 	if (obj->active && in_atomic())
 		return -EFAULT;
+#endif
 
 	reloc->delta += target_offset;
 	if (use_cpu_reloc(obj)) {
@@ -225,9 +240,13 @@ i915_gem_execbuffer_relocate_entry(struc
 		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 						      reloc->offset & PAGE_MASK);
 		reloc_entry = (uint32_t __iomem *)
-			(reloc_page + (reloc->offset & ~PAGE_MASK));
+			((char *)reloc_page + (reloc->offset & ~PAGE_MASK));
 		iowrite32(reloc->delta, reloc_entry);
+#ifdef __NetBSD__               /* XXX io mapping */
+                io_mapping_unmap_atomic(dev_priv->mm.gtt_mapping, reloc_page);
+#else
 		io_mapping_unmap_atomic(reloc_page);
+#endif
 	}
 
 	/* and update the user's relocation entry */
@@ -236,6 +255,12 @@ i915_gem_execbuffer_relocate_entry(struc
 	return 0;
 }
 
+#ifdef __NetBSD__
+#  undef	__gtt_iomem
+#  undef	__iomem
+#endif
+
+#ifndef __NetBSD__              /* XXX atomic GEM reloc fast path */
 static int
 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 				    struct eb_objects *eb)
@@ -281,6 +306,7 @@ i915_gem_execbuffer_relocate_object(stru
 	return 0;
 #undef N_RELOC
 }
+#endif
 
 static int
 i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
@@ -304,9 +330,14 @@ i915_gem_execbuffer_relocate(struct drm_
 			     struct eb_objects *eb,
 			     struct list_head *objects)
 {
+#ifndef __NetBSD__
 	struct drm_i915_gem_object *obj;
+#endif
 	int ret = 0;
 
+#ifdef __NetBSD__              /* XXX atomic GEM reloc fast path */
+        ret = -EFAULT;
+#else
 	/* This is the fast path and we cannot handle a pagefault whilst
 	 * holding the struct mutex lest the user pass in the relocations
 	 * contained within a mmaped bo. For in such a case we, the page
@@ -321,6 +352,7 @@ i915_gem_execbuffer_relocate(struct drm_
 			break;
 	}
 	pagefault_enable();
+#endif
 
 	return ret;
 }
@@ -826,8 +858,13 @@ i915_gem_do_execbuffer(struct drm_device
 
 	flags = 0;
 	if (args->flags & I915_EXEC_SECURE) {
+#ifdef __NetBSD__
+		if (!file->is_master || !DRM_SUSER())
+		    return -EPERM;
+#else
 		if (!file->is_master || !capable(CAP_SYS_ADMIN))
 		    return -EPERM;
+#endif
 
 		flags |= I915_DISPATCH_SECURE;
 	}

Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_tiling.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_tiling.c:1.1.1.1.2.2 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_tiling.c:1.1.1.1.2.3
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_tiling.c:1.1.1.1.2.2	Tue Jul 23 21:28:22 2013
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_tiling.c	Sun Sep  8 15:52:20 2013
@@ -472,13 +472,30 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
+#ifdef __NetBSD__
+	struct vm_page *page;
+#else
 	struct scatterlist *sg;
 	int page_count = obj->base.size >> PAGE_SHIFT;
+#endif
 	int i;
 
 	if (obj->bit_17 == NULL)
 		return;
 
+#ifdef __NetBSD__
+	i = 0;
+	TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue) {
+		unsigned char new_bit_17 = VM_PAGE_TO_PHYS(page) >> 17;
+		if ((new_bit_17 & 0x1) !=
+		    (test_bit(i, obj->bit_17) != 0)) {
+			i915_gem_swizzle_page(container_of(page, struct page,
+				p_vmp));
+			page->flags &= ~PG_CLEAN;
+		}
+		i += 1;
+	}
+#else
 	for_each_sg(obj->pages->sgl, sg, page_count, i) {
 		struct page *page = sg_page(sg);
 		char new_bit_17 = page_to_phys(page) >> 17;
@@ -488,12 +505,17 @@ i915_gem_object_do_bit_17_swizzle(struct
 			set_page_dirty(page);
 		}
 	}
+#endif
 }
 
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
+#ifdef __NetBSD__
+	struct vm_page *page;
+#else
 	struct scatterlist *sg;
+#endif
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
@@ -507,6 +529,16 @@ i915_gem_object_save_bit_17_swizzle(stru
 		}
 	}
 
+#ifdef __NetBSD__
+	i = 0;
+	TAILQ_FOREACH(page, &obj->igo_pageq, pageq.queue) {
+		if (ISSET(VM_PAGE_TO_PHYS(page), __BIT(17)))
+			__set_bit(i, obj->bit_17);
+		else
+			__clear_bit(i, obj->bit_17);
+		i += 1;
+	}
+#else
 	for_each_sg(obj->pages->sgl, sg, page_count, i) {
 		struct page *page = sg_page(sg);
 		if (page_to_phys(page) & (1 << 17))
@@ -514,4 +546,5 @@ i915_gem_object_save_bit_17_swizzle(stru
 		else
 			__clear_bit(i, obj->bit_17);
 	}
+#endif
 }

Index: src/sys/modules/i915drm2/Makefile
diff -u src/sys/modules/i915drm2/Makefile:1.1.2.8 src/sys/modules/i915drm2/Makefile:1.1.2.9
--- src/sys/modules/i915drm2/Makefile:1.1.2.8	Wed Jul 24 03:52:13 2013
+++ src/sys/modules/i915drm2/Makefile	Sun Sep  8 15:52:20 2013
@@ -1,4 +1,4 @@
-# $NetBSD: Makefile,v 1.1.2.8 2013/07/24 03:52:13 riastradh Exp $
+# $NetBSD: Makefile,v 1.1.2.9 2013/09/08 15:52:20 riastradh Exp $
 
 .include "../Makefile.inc"
 .include "../drm2/Makefile.inc"
@@ -20,15 +20,15 @@ SRCS+=	dvo_tfp410.c
 #SRCS+=	i915_debugfs.c		# XXX No debugfs in NetBSD.
 SRCS+=	i915_dma.c
 SRCS+=	i915_drv.c
-SRCS+=	i915_gem.c		# XXX overridden for now
-#SRCS+=	i915_gem_context.c
-#SRCS+=	i915_gem_debug.c
+SRCS+=	i915_gem.c
+SRCS+=	i915_gem_context.c
+SRCS+=	i915_gem_debug.c
 #SRCS+=	i915_gem_dmabuf.c
-#SRCS+=	i915_gem_evict.c
-#SRCS+=	i915_gem_execbuffer.c
-#SRCS+=	i915_gem_gtt.c
-#SRCS+=	i915_gem_stolen.c
-#SRCS+=	i915_gem_tiling.c
+SRCS+=	i915_gem_evict.c
+SRCS+=	i915_gem_execbuffer.c
+SRCS+=	i915_gem_gtt.c
+SRCS+=	i915_gem_stolen.c
+SRCS+=	i915_gem_tiling.c
 #SRCS+=	i915_ioc32.c
 SRCS+=	i915_irq.c
 SRCS+=	i915_suspend.c
@@ -55,5 +55,6 @@ SRCS+=	intel_tv.c
 
 SRCS+=	i915_module.c
 SRCS+=	i915_pci.c
+SRCS+=	intel_gtt.c
 
 .include <bsd.kmodule.mk>

Added files:

Index: src/sys/external/bsd/drm2/i915drm/i915_gem_gtt.c
diff -u /dev/null src/sys/external/bsd/drm2/i915drm/i915_gem_gtt.c:1.1.2.1
--- /dev/null	Sun Sep  8 15:52:20 2013
+++ src/sys/external/bsd/drm2/i915drm/i915_gem_gtt.c	Sun Sep  8 15:52:20 2013
@@ -0,0 +1,432 @@
+/*	$NetBSD: i915_gem_gtt.c,v 1.1.2.1 2013/09/08 15:52:20 riastradh Exp $	*/
+
+/*-
+ * Copyright (c) 2013 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Taylor R. Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_gtt.c,v 1.1.2.1 2013/09/08 15:52:20 riastradh Exp $");
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kmem.h>
+#include <sys/systm.h>
+
+#include <dev/pci/pcivar.h>
+
+#include <drm/drmP.h>
+
+#include "i915_drv.h"
+
+static void	i915_gtt_color_adjust(struct drm_mm_node *, unsigned long,
+		    unsigned long *, unsigned long *);
+static void	i915_ggtt_clear_range(struct drm_device *, unsigned, unsigned);
+static void	gen6_ggtt_bind_object(struct drm_i915_gem_object *,
+		    enum i915_cache_level);
+static void	gen6_ggtt_clear_range(struct drm_device *, unsigned, unsigned);
+
+#define	SNB_GMCH_GGMS	(SNB_GMCH_GGMS_MASK << SNB_GMCH_GGMS_SHIFT)
+#define	SNB_GMCH_GMS	(SNB_GMCH_GMS_MASK << SNB_GMCH_GMS_SHIFT)
+#define	IVB_GMCH_GMS	(IVB_GMCH_GMS_MASK << IVB_GMCH_GMS_SHIFT)
+
+typedef uint32_t gtt_pte_t;
+
+#define	GEN6_PTE_VALID		__BIT(0)
+#define	GEN6_PTE_UNCACHED	__BIT(1)
+#define	HSW_PTE_UNCACHED	(0)
+#define	GEN6_PTE_CACHE_LLC	__BIT(2)
+#define	GEN6_PTE_CACHE_LLC_MLC	__BIT(3)
+
+static uint32_t
+gen6_pte_addr_encode(bus_addr_t addr)
+{
+	/* XXX KASSERT bounds?  Must be at most 36-bit, it seems.  */
+	return (addr | ((addr >> 28) & 0xff0));
+}
+
+static gtt_pte_t
+pte_encode(struct drm_device *dev, bus_addr_t addr,
+    enum i915_cache_level level)
+{
+	uint32_t flags = GEN6_PTE_VALID;
+
+	switch (level) {
+	case I915_CACHE_LLC_MLC:
+		flags |= (IS_HASWELL(dev)? GEN6_PTE_CACHE_LLC
+		    : GEN6_PTE_CACHE_LLC_MLC);
+		break;
+
+	case I915_CACHE_LLC:
+		flags |= GEN6_PTE_CACHE_LLC;
+		break;
+
+	case I915_CACHE_NONE:
+		flags |= (IS_HASWELL(dev)? HSW_PTE_UNCACHED
+		    : GEN6_PTE_UNCACHED);
+		break;
+
+	default:
+		panic("invalid i915 GTT cache level: %d", (int)level);
+		break;
+	}
+
+	return (gen6_pte_addr_encode(addr) | flags);
+}
+
+int
+i915_gem_gtt_init(struct drm_device *dev)
+{
+	struct drm_i915_private *const dev_priv = dev->dev_private;
+	struct pci_attach_args *const pa = &dev->pdev->pd_pa;
+	struct intel_gtt *gtt;
+	uint16_t snb_gmch_ctl, ggms, gms;
+	int nsegs;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen < 6) {
+		/* XXX gen<6 */
+		DRM_ERROR("device is too old for drm2 for now!\n");
+		return -ENODEV;
+	}
+
+	gtt = kmem_zalloc(sizeof(*gtt), KM_NOSLEEP);
+
+	/* XXX pci_set_dma_mask?  pci_set_consistent_dma_mask?  */
+	drm_limit_dma_space(dev, 0, 0x0000000fffffffffULL);
+
+	gtt->gma_bus_addr = dev->bus_maps[2].bm_base;
+
+	snb_gmch_ctl = pci_conf_read(pa->pa_pc, pa->pa_tag, SNB_GMCH_CTRL);
+
+	/* GMS: Graphics Mode Select.  */
+	if (INTEL_INFO(dev)->gen < 7) {
+		gms = __SHIFTOUT(snb_gmch_ctl, SNB_GMCH_GMS);
+		gtt->stolen_size = (gms << 25);
+	} else {
+		gms = __SHIFTOUT(snb_gmch_ctl, IVB_GMCH_GMS);
+		static const unsigned sizes[] = {
+			0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352
+		};
+		gtt->stolen_size = sizes[gms] << 20;
+	}
+
+	/* GGMS: GTT Graphics Memory Size.  */
+	ggms = __SHIFTOUT(snb_gmch_ctl, SNB_GMCH_GGMS) << 20;
+	gtt->gtt_total_entries = (ggms << 20) / sizeof(gtt_pte_t);
+
+	gtt->gtt_mappable_entries = (dev->bus_maps[2].bm_size >> PAGE_SHIFT);
+	if (((gtt->gtt_mappable_entries >> 8) < 64) ||
+	    (gtt->gtt_total_entries < gtt->gtt_mappable_entries)) {
+		DRM_ERROR("unknown GMADR entries: %d\n",
+		    gtt->gtt_mappable_entries);
+		ret = -ENXIO;
+		goto fail0;
+	}
+
+	/* XXX errno NetBSD->Linux */
+	ret = -bus_dmamem_alloc(dev->dmat, PAGE_SIZE, PAGE_SIZE, 0,
+	    &gtt->gtt_scratch_seg, 1, &nsegs, 0);
+	if (ret)
+		goto fail0;
+	KASSERT(nsegs == 1);
+
+	/* XXX errno NetBSD->Linux */
+	ret = -bus_dmamap_create(dev->dmat, PAGE_SIZE, 1, PAGE_SIZE, 0, 0,
+	    &gtt->gtt_scratch_map);
+	if (ret)
+		goto fail1;
+
+	/* XXX errno NetBSD->Linux */
+	ret = -bus_dmamap_load_raw(dev->dmat, gtt->gtt_scratch_map,
+	    &gtt->gtt_scratch_seg, 1, PAGE_SIZE, BUS_DMA_NOCACHE);
+	if (ret)
+		goto fail2;
+
+	/* Linux sez:  For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+	if (dev->bus_maps[0].bm_size < (gtt->gtt_total_entries *
+		sizeof(gtt_pte_t))) {
+		DRM_ERROR("BAR0 too small for GTT: 0x%"PRIxMAX" < 0x%"PRIxMAX
+		    "\n",
+		    dev->bus_maps[0].bm_size,
+		    (gtt->gtt_total_entries * sizeof(gtt_pte_t)));
+		ret = -ENODEV;
+		goto fail3;
+	}
+	if (bus_space_map(dev->bst, (dev->bus_maps[0].bm_base + (2<<20)),
+		(gtt->gtt_total_entries * sizeof(gtt_pte_t)),
+		0,
+		&gtt->gtt_bsh)) {
+		DRM_ERROR("unable to map GTT\n");
+		ret = -ENODEV;
+		goto fail3;
+	}
+
+	DRM_INFO("Memory usable by graphics device = %dM\n",
+	    gtt->gtt_total_entries >> 8);
+	DRM_DEBUG_DRIVER("GMADR size = %dM\n", gtt->gtt_mappable_entries >> 8);
+	DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", gtt->stolen_size >> 20);
+
+	/* Success!  */
+	dev_priv->mm.gtt = gtt;
+	return 0;
+
+fail3:	bus_dmamap_unload(dev->dmat, gtt->gtt_scratch_map);
+fail2:	bus_dmamap_destroy(dev->dmat, gtt->gtt_scratch_map);
+fail1:	bus_dmamem_free(dev->dmat, &gtt->gtt_scratch_seg, 1);
+fail0:	kmem_free(gtt, sizeof(*gtt));
+	return ret;
+}
+
+void
+i915_gem_gtt_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *const dev_priv = dev->dev_private;
+	struct intel_gtt *const gtt = dev_priv->mm.gtt;
+
+	bus_space_unmap(dev->bst, gtt->gtt_bsh,
+	    (gtt->gtt_total_entries * sizeof(gtt_pte_t)));
+	bus_dmamap_unload(dev->dmat, gtt->gtt_scratch_map);
+	bus_dmamap_destroy(dev->dmat, gtt->gtt_scratch_map);
+	bus_dmamem_free(dev->dmat, &gtt->gtt_scratch_seg, 1);
+	kmem_free(gtt, sizeof(*gtt));
+}
+
+void
+i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
+    unsigned long mappable_end, unsigned long end)
+{
+	struct drm_i915_private *const dev_priv = dev->dev_private;
+
+	KASSERT(start <= end);
+	KASSERT(start <= mappable_end);
+	KASSERT(PAGE_SIZE <= (end - start));
+	drm_mm_init(&dev_priv->mm.gtt_space, start, (end - start - PAGE_SIZE));
+	if (!HAS_LLC(dev))
+		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
+
+	dev_priv->mm.gtt_start = start;
+	dev_priv->mm.gtt_mappable_end = mappable_end;
+	dev_priv->mm.gtt_end = end;
+	dev_priv->mm.gtt_total = (end - start);
+	dev_priv->mm.mappable_gtt_total = (MIN(end, mappable_end) - start);
+
+	i915_ggtt_clear_range(dev, (start >> PAGE_SHIFT),
+	    ((end - start) >> PAGE_SHIFT));
+}
+
+static void
+i915_gtt_color_adjust(struct drm_mm_node *node, unsigned long color,
+    unsigned long *start, unsigned long *end)
+{
+
+	if (node->color != color)
+		*start += PAGE_SIZE;
+	if (list_empty(&node->node_list))
+		return;
+	node = list_entry(node->node_list.next, struct drm_mm_node, node_list);
+	if (node->allocated && (node->color != color))
+		*end -= PAGE_SIZE;
+}
+
+void
+i915_gem_init_ppgtt(struct drm_device *dev __unused)
+{
+}
+
+int
+i915_gem_init_aliasing_ppgtt(struct drm_device *dev __unused)
+{
+	return -ENODEV;
+}
+
+void
+i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev __unused)
+{
+}
+
+void
+i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt __unused,
+    struct drm_i915_gem_object *obj __unused,
+    enum i915_cache_level cache_level __unused)
+{
+	panic("%s: not implemented", __func__);
+}
+
+void
+i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt __unused,
+    struct drm_i915_gem_object *obj __unused)
+{
+	panic("%s: not implemented", __func__);
+}
+
+int
+i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *const dev = obj->base.dev;
+	int error;
+
+	if (obj->has_dma_mapping)
+		return 0;
+
+	/* XXX errno NetBSD->Linux */
+	error = -bus_dmamap_load_raw(dev->dmat, obj->igo_dmamap, obj->pages,
+	    obj->igo_nsegs, obj->base.size, BUS_DMA_NOWAIT);
+	if (error)
+		goto fail0;
+
+	/* Success!  */
+	return 0;
+
+fail0:	return error;
+}
+
+void
+i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+    enum i915_cache_level cache_level)
+{
+
+	KASSERT(6 < INTEL_INFO(obj->base.dev)->gen); /* XXX gen<6 */
+	gen6_ggtt_bind_object(obj, cache_level);
+	obj->has_global_gtt_mapping = 1;
+}
+
+void
+i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+{
+
+	i915_ggtt_clear_range(obj->base.dev,
+	    (obj->gtt_space->start >> PAGE_SHIFT),
+	    (obj->base.size >> PAGE_SHIFT));
+	obj->has_global_gtt_mapping = 0;
+}
+
+void
+i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *const dev = obj->base.dev;
+
+	/* XXX Idle, for gen<6.  */
+
+	if (obj->has_dma_mapping)
+		return;
+
+	bus_dmamap_unload(dev->dmat, obj->igo_dmamap);
+}
+
+void
+i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+	struct drm_i915_private *const dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+
+	i915_ggtt_clear_range(dev, (dev_priv->mm.gtt_start >> PAGE_SHIFT),
+	    ((dev_priv->mm.gtt_start - dev_priv->mm.gtt_end) >> PAGE_SHIFT));
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
+		i915_gem_clflush_object(obj);
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
+	}
+
+	i915_gem_chipset_flush(dev);
+}
+
+static void
+i915_ggtt_clear_range(struct drm_device *dev, unsigned start_page,
+    unsigned npages)
+{
+
+	KASSERT(6 <= INTEL_INFO(dev)->gen);
+
+	gen6_ggtt_clear_range(dev, start_page, npages);
+}
+
+static void
+gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
+    enum i915_cache_level cache_level)
+{
+	struct drm_device *const dev = obj->base.dev;
+	struct drm_i915_private *const dev_priv = dev->dev_private;
+	const bus_space_tag_t bst = dev->bst;
+	const bus_space_handle_t bsh = dev_priv->mm.gtt->gtt_bsh;
+	const unsigned first_entry = obj->gtt_space->start >> PAGE_SHIFT;
+	bus_addr_t addr;
+	bus_size_t len;
+	unsigned int seg, i = 0;
+
+	for (seg = 0; seg < obj->igo_dmamap->dm_nsegs; seg++) {
+		addr = obj->igo_dmamap->dm_segs[seg].ds_addr;
+		len = obj->igo_dmamap->dm_segs[seg].ds_len;
+		do {
+			KASSERT(PAGE_SIZE <= len);
+			bus_space_write_4(bst, bsh, 4*(first_entry + i),
+			    pte_encode(dev, addr, cache_level));
+			addr += PAGE_SIZE;
+			len -= PAGE_SIZE;
+			i += 1;
+		} while (0 < len);
+	}
+
+	KASSERT(first_entry <= dev_priv->mm.gtt->gtt_total_entries);
+	KASSERT(i <= (dev_priv->mm.gtt->gtt_total_entries - first_entry));
+	KASSERT(i == (obj->base.size >> PAGE_SHIFT));
+
+	/* XXX Why could i ever be zero?  */
+	if (0 < i) {
+		/* Posting read and sanity check.  */
+		/* XXX Shouldn't there be a bus_space_sync?  */
+		const uint32_t expected = pte_encode(dev, addr, cache_level);
+		const uint32_t actual = bus_space_read_4(bst, bsh,
+		    (first_entry + (4*(i-1))));
+		if (actual != expected)
+			aprint_error_dev(dev->dev, "mismatched PTE"
+			    ": 0x%"PRIxMAX" != 0x%"PRIxMAX"\n",
+			    (uintmax_t)actual, (uintmax_t)expected);
+	}
+
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+static void
+gen6_ggtt_clear_range(struct drm_device *dev, unsigned start_page,
+    unsigned npages)
+{
+	struct drm_i915_private *const dev_priv = dev->dev_private;
+	const bus_space_tag_t bst = dev->bst;
+	const bus_space_handle_t bsh = dev_priv->mm.gtt->gtt_bsh;
+	const unsigned n = (dev_priv->mm.gtt->gtt_total_entries - start_page);
+	const gtt_pte_t scratch_pte = pte_encode(dev,
+	    dev_priv->mm.gtt->gtt_scratch_map->dm_segs[0].ds_addr,
+	    I915_CACHE_LLC);
+	unsigned int i;
+
+	for (i = 0; i < n; i++)
+		bus_space_write_4(bst, bsh, 4*(start_page + i), scratch_pte);
+	bus_space_read_4(bst, bsh, 4*start_page);
+}
Index: src/sys/external/bsd/drm2/i915drm/intel_gtt.c
diff -u /dev/null src/sys/external/bsd/drm2/i915drm/intel_gtt.c:1.1.2.1
--- /dev/null	Sun Sep  8 15:52:20 2013
+++ src/sys/external/bsd/drm2/i915drm/intel_gtt.c	Sun Sep  8 15:52:20 2013
@@ -0,0 +1,48 @@
+/*	$NetBSD: intel_gtt.c,v 1.1.2.1 2013/09/08 15:52:20 riastradh Exp $	*/
+
+/*-
+ * Copyright (c) 2013 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Taylor R. Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Intel GTT stubs */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: intel_gtt.c,v 1.1.2.1 2013/09/08 15:52:20 riastradh Exp $");
+
+#include "drm/intel-gtt.h"
+
+bool
+intel_enable_gtt(void)
+{
+	return false;
+}
+
+void
+intel_gtt_chipset_flush(void)
+{
+}

Reply via email to