Author: pebender
Date: Sat Apr 25 17:59:25 2009
New Revision: 4662

Added:
    trunk/gar-minimyth/script/kernel-2.6.29/linux/files/linux-2.6.29.1-intel_2009q1.patch
Modified:
    trunk/gar-minimyth/script/kernel-2.6.29/linux/Makefile
    trunk/gar-minimyth/script/kernel-2.6.29/linux/checksums

Log:
- Patched the 2.6.29 kernel with the Intel Linux Graphics 2009Q1 patch.
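
Most of the 2009Q1 patch below reworks the i915 GEM pread/pwrite and cliprect paths: the old vfs_read/vfs_write and DRM_COPY_FROM_USER_UNCHECKED calls are replaced by paired fast/slow routines, where the fast path copies with non-faulting atomic helpers (__copy_from_user_inatomic and friends) while struct_mutex is held, and an -EFAULT from that path triggers a retry through a slow path that first pins the user pages with get_user_pages. The small userspace C sketch below only illustrates that dispatch shape under those assumptions; pwrite_fast() and pwrite_slow() are hypothetical stand-ins, not functions from the driver.

/* Illustrative sketch only: the fast-path/slow-path fallback pattern the
 * patch introduces for the GEM pread/pwrite ioctls.  Not driver code. */
#include <errno.h>
#include <stdio.h>

static int pwrite_fast(void)
{
        /* Fast path: non-faulting copy while the lock is held
         * (__copy_from_user_inatomic in the driver); pretend the
         * source page was not resident. */
        return -EFAULT;
}

static int pwrite_slow(void)
{
        /* Slow path: pin the user pages up front (get_user_pages in the
         * driver), then copy via kmap_atomic so no fault can happen
         * while the lock is held. */
        return 0;
}

int main(void)
{
        int ret = pwrite_fast();
        if (ret == -EFAULT)     /* only a fault triggers the fallback */
                ret = pwrite_slow();
        printf("pwrite returned %d\n", ret);
        return ret ? 1 : 0;
}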



Modified: trunk/gar-minimyth/script/kernel-2.6.29/linux/Makefile
==============================================================================
--- trunk/gar-minimyth/script/kernel-2.6.29/linux/Makefile      (original)
+++ trunk/gar-minimyth/script/kernel-2.6.29/linux/Makefile      Sat Apr 25 17:59:25 2009
@@ -6,6 +6,7 @@
  CONFIGFILE = $(DISTNAME)-$(GARCH_FAMILY)-$(GARCH).config
  DISTFILES = $(DISTNAME).tar.bz2 $(CONFIGFILE)
  PATCHFILES  = $(DISTNAME)-perl.patch.gar
+PATCHFILES += $(DISTNAME)-intel_2009q1.patch
  PATCHFILES += $(DISTNAME)-bug_11154.patch
  PATCHFILES += $(DISTNAME)-bttv_lirc.patch
  PATCHFILES += $(DISTNAME)-unionfs_2.5.1.patch

Modified: trunk/gar-minimyth/script/kernel-2.6.29/linux/checksums
==============================================================================
--- trunk/gar-minimyth/script/kernel-2.6.29/linux/checksums     (original)
+++ trunk/gar-minimyth/script/kernel-2.6.29/linux/checksums     Sat Apr 25 17:59:25 2009
@@ -1,5 +1,6 @@
  4ada43caecb08fe2af71b416b6f586d8  download/linux-2.6.29.1.tar.bz2
  fa49c46c3825660ffb1598a67470b1cb  download/linux-2.6.29.1-perl.patch.gar
+a7c173b189eb35aef3698140e84c6df6  download/linux-2.6.29.1-intel_2009q1.patch
  30630fd3c470ff32067797152bb3ab08  download/linux-2.6.29.1-bug_11154.patch
  80c6af64e0c8f0231b157ec4e6255e78  download/linux-2.6.29.1-bttv_lirc.patch
 79b640d592459762af3c673127a5e1be  download/linux-2.6.29.1-unionfs_2.5.1.patch

Added: trunk/gar-minimyth/script/kernel-2.6.29/linux/files/linux-2.6.29.1-intel_2009q1.patch
==============================================================================
--- (empty file)
+++ trunk/gar-minimyth/script/kernel-2.6.29/linux/files/linux-2.6.29.1-intel_2009q1.patch      Sat Apr 25 17:59:25 2009
@@ -0,0 +1,1561 @@
+diff -Naur linux-2.6.29.1-old/drivers/gpu/drm/drm_gem.c linux-2.6.29.1-new/drivers/gpu/drm/drm_gem.c
+--- linux-2.6.29.1-old/drivers/gpu/drm/drm_gem.c       2009-04-02 13:55:27.000000000 -0700
++++ linux-2.6.29.1-new/drivers/gpu/drm/drm_gem.c       2009-04-25 06:29:46.000000000 -0700
+@@ -505,7 +505,6 @@
+       struct drm_map *map = NULL;
+       struct drm_gem_object *obj;
+       struct drm_hash_item *hash;
+-      unsigned long prot;
+       int ret = 0;
+
+       mutex_lock(&dev->struct_mutex);
+@@ -538,11 +537,7 @@
+       vma->vm_ops = obj->dev->driver->gem_vm_ops;
+       vma->vm_private_data = map->handle;
+       /* FIXME: use pgprot_writecombine when available */
+-      prot = pgprot_val(vma->vm_page_prot);
+-#ifdef CONFIG_X86
+-      prot |= _PAGE_CACHE_WC;
+-#endif
+-      vma->vm_page_prot = __pgprot(prot);
++      vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+       /* Take a ref for this mapping of the object, so that the fault
+        * handler can dereference the mmap offset's pointer to the object.
+diff -Naur linux-2.6.29.1-old/drivers/gpu/drm/i915/i915_dma.c linux-2.6.29.1-new/drivers/gpu/drm/i915/i915_dma.c
+--- linux-2.6.29.1-old/drivers/gpu/drm/i915/i915_dma.c 2009-04-02 13:55:27.000000000 -0700
++++ linux-2.6.29.1-new/drivers/gpu/drm/i915/i915_dma.c 2009-04-25 06:29:46.000000000 -0700
+@@ -41,7 +41,6 @@
+ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+-      struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+       drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+       u32 last_acthd = I915_READ(acthd_reg);
+@@ -58,8 +57,12 @@
+               if (ring->space >= n)
+                       return 0;
+
+-              if (master_priv->sarea_priv)
+-                      master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
++              if (dev->primary->master) {
++                      struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
++                      if (master_priv->sarea_priv)
++                              master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
++              }
++
+
+               if (ring->head != last_head)
+                       i = 0;
+@@ -356,7 +359,7 @@
+       return ret;
+ }
+
+-static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
++static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int i;
+@@ -370,8 +373,7 @@
+       for (i = 0; i < dwords;) {
+               int cmd, sz;
+
+-              if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
+-                      return -EINVAL;
++              cmd = buffer[i];
+
+               if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+                       return -EINVAL;
+@@ -379,11 +381,7 @@
+               OUT_RING(cmd);
+
+               while (++i, --sz) {
+-                      if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
+-                                                       sizeof(cmd))) {
+-                              return -EINVAL;
+-                      }
+-                      OUT_RING(cmd);
++                      OUT_RING(buffer[i]);
+               }
+       }
+
+@@ -397,17 +395,13 @@
+
+ int
+ i915_emit_box(struct drm_device *dev,
+-            struct drm_clip_rect __user *boxes,
++            struct drm_clip_rect *boxes,
+             int i, int DR1, int DR4)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+-      struct drm_clip_rect box;
++      struct drm_clip_rect box = boxes[i];
+       RING_LOCALS;
+
+-      if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
+-              return -EFAULT;
+-      }
+-
+       if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+               DRM_ERROR("Bad box %d,%d..%d,%d\n",
+                         box.x1, box.y1, box.x2, box.y2);
+@@ -460,7 +454,9 @@
+ }
+
+ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+-                                 drm_i915_cmdbuffer_t * cmd)
++                                 drm_i915_cmdbuffer_t *cmd,
++                                 struct drm_clip_rect *cliprects,
++                                 void *cmdbuf)
+ {
+       int nbox = cmd->num_cliprects;
+       int i = 0, count, ret;
+@@ -476,13 +472,13 @@
+
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+-                      ret = i915_emit_box(dev, cmd->cliprects, i,
++                      ret = i915_emit_box(dev, cliprects, i,
+                                           cmd->DR1, cmd->DR4);
+                       if (ret)
+                               return ret;
+               }
+
+-              ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
++              ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
+               if (ret)
+                       return ret;
+       }
+@@ -492,10 +488,10 @@
+ }
+
+ static int i915_dispatch_batchbuffer(struct drm_device * dev,
+-                                   drm_i915_batchbuffer_t * batch)
++                                   drm_i915_batchbuffer_t * batch,
++                                   struct drm_clip_rect *cliprects)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+-      struct drm_clip_rect __user *boxes = batch->cliprects;
+       int nbox = batch->num_cliprects;
+       int i = 0, count;
+       RING_LOCALS;
+@@ -511,7 +507,7 @@
+
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+-                      int ret = i915_emit_box(dev, boxes, i,
++                      int ret = i915_emit_box(dev, cliprects, i,
+                                               batch->DR1, batch->DR4);
+                       if (ret)
+                               return ret;
+@@ -626,6 +622,7 @@
+           master_priv->sarea_priv;
+       drm_i915_batchbuffer_t *batch = data;
+       int ret;
++      struct drm_clip_rect *cliprects = NULL;
+
+       if (!dev_priv->allow_batchbuffer) {
+               DRM_ERROR("Batchbuffer ioctl disabled\n");
+@@ -637,17 +634,35 @@
+
+       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+-      if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+-                                                     batch->num_cliprects *
+-                                                     sizeof(struct drm_clip_rect)))
+-              return -EFAULT;
++      if (batch->num_cliprects < 0)
++              return -EINVAL;
++
++      if (batch->num_cliprects) {
++              cliprects = drm_calloc(batch->num_cliprects,
++                                     sizeof(struct drm_clip_rect),
++                                     DRM_MEM_DRIVER);
++              if (cliprects == NULL)
++                      return -ENOMEM;
++
++              ret = copy_from_user(cliprects, batch->cliprects,
++                                   batch->num_cliprects *
++                                   sizeof(struct drm_clip_rect));
++              if (ret != 0)
++                      goto fail_free;
++      }
+
+       mutex_lock(&dev->struct_mutex);
+-      ret = i915_dispatch_batchbuffer(dev, batch);
++      ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
+       mutex_unlock(&dev->struct_mutex);
+
+       if (sarea_priv)
+               sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++
++fail_free:
++      drm_free(cliprects,
++               batch->num_cliprects * sizeof(struct drm_clip_rect),
++               DRM_MEM_DRIVER);
++
+       return ret;
+ }
+
+@@ -659,6 +674,8 @@
+       drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+           master_priv->sarea_priv;
+       drm_i915_cmdbuffer_t *cmdbuf = data;
++      struct drm_clip_rect *cliprects = NULL;
++      void *batch_data;
+       int ret;
+
+       DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+@@ -666,25 +683,50 @@
+
+       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+-      if (cmdbuf->num_cliprects &&
+-          DRM_VERIFYAREA_READ(cmdbuf->cliprects,
+-                              cmdbuf->num_cliprects *
+-                              sizeof(struct drm_clip_rect))) {
+-              DRM_ERROR("Fault accessing cliprects\n");
+-              return -EFAULT;
++      if (cmdbuf->num_cliprects < 0)
++              return -EINVAL;
++
++      batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
++      if (batch_data == NULL)
++              return -ENOMEM;
++
++      ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
++      if (ret != 0)
++              goto fail_batch_free;
++
++      if (cmdbuf->num_cliprects) {
++              cliprects = drm_calloc(cmdbuf->num_cliprects,
++                                     sizeof(struct drm_clip_rect),
++                                     DRM_MEM_DRIVER);
++              if (cliprects == NULL)
++                      goto fail_batch_free;
++
++              ret = copy_from_user(cliprects, cmdbuf->cliprects,
++                                   cmdbuf->num_cliprects *
++                                   sizeof(struct drm_clip_rect));
++              if (ret != 0)
++                      goto fail_clip_free;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+-      ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
++      ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
+       mutex_unlock(&dev->struct_mutex);
+       if (ret) {
+               DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+-              return ret;
++              goto fail_batch_free;
+       }
+
+       if (sarea_priv)
+               sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+-      return 0;
++
++fail_batch_free:
++      drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
++fail_clip_free:
++      drm_free(cliprects,
++               cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
++               DRM_MEM_DRIVER);
++
++      return ret;
+ }
+
+ static int i915_flip_bufs(struct drm_device *dev, void *data,
+diff -Naur linux-2.6.29.1-old/drivers/gpu/drm/i915/i915_drv.h linux-2.6.29.1-new/drivers/gpu/drm/i915/i915_drv.h
+--- linux-2.6.29.1-old/drivers/gpu/drm/i915/i915_drv.h 2009-04-02 13:55:27.000000000 -0700
++++ linux-2.6.29.1-new/drivers/gpu/drm/i915/i915_drv.h 2009-04-25 06:29:46.000000000 -0700
+@@ -404,7 +404,8 @@
+       /** AGP memory structure for our GTT binding. */
+       DRM_AGP_MEM *agp_mem;
+
+-      struct page **page_list;
++      struct page **pages;
++      int pages_refcount;
+
+       /**
+        * Current offset of the object in GTT space.
+@@ -519,7 +520,7 @@
+ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+                             unsigned long arg);
+ extern int i915_emit_box(struct drm_device *dev,
+-                       struct drm_clip_rect __user *boxes,
++                       struct drm_clip_rect *boxes,
+                        int i, int DR1, int DR4);
+
+ /* i915_irq.c */
+diff -Naur linux-2.6.29.1-old/drivers/gpu/drm/i915/i915_gem.c linux-2.6.29.1-new/drivers/gpu/drm/i915/i915_gem.c
+--- linux-2.6.29.1-old/drivers/gpu/drm/i915/i915_gem.c 2009-04-02 13:55:27.000000000 -0700
++++ linux-2.6.29.1-new/drivers/gpu/drm/i915/i915_gem.c 2009-04-25 06:29:46.000000000 -0700
+@@ -43,8 +43,8 @@
+                                                    uint64_t offset,
+                                                    uint64_t size);
+ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
+-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
++static int i915_gem_object_get_pages(struct drm_gem_object *obj);
++static void i915_gem_object_put_pages(struct drm_gem_object *obj);
+ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+                                          unsigned alignment);
+@@ -136,6 +136,224 @@
+       return 0;
+ }
+
++static inline int
++fast_shmem_read(struct page **pages,
++              loff_t page_base, int page_offset,
++              char __user *data,
++              int length)
++{
++      char __iomem *vaddr;
++      int ret;
++
++      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
++      if (vaddr == NULL)
++              return -ENOMEM;
++      ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
++      kunmap_atomic(vaddr, KM_USER0);
++
++      return ret;
++}
++
++static inline int
++slow_shmem_copy(struct page *dst_page,
++              int dst_offset,
++              struct page *src_page,
++              int src_offset,
++              int length)
++{
++      char *dst_vaddr, *src_vaddr;
++
++      dst_vaddr = kmap_atomic(dst_page, KM_USER0);
++      if (dst_vaddr == NULL)
++              return -ENOMEM;
++
++      src_vaddr = kmap_atomic(src_page, KM_USER1);
++      if (src_vaddr == NULL) {
++              kunmap_atomic(dst_vaddr, KM_USER0);
++              return -ENOMEM;
++      }
++
++      memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
++
++      kunmap_atomic(src_vaddr, KM_USER1);
++      kunmap_atomic(dst_vaddr, KM_USER0);
++
++      return 0;
++}
++
++/**
++ * This is the fast shmem pread path, which attempts to copy_from_user directly
++ * from the backing pages of the object to the user's address space.  On a
++ * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
++ */
++static int
++i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
++                        struct drm_i915_gem_pread *args,
++                        struct drm_file *file_priv)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      ssize_t remain;
++      loff_t offset, page_base;
++      char __user *user_data;
++      int page_offset, page_length;
++      int ret;
++
++      user_data = (char __user *) (uintptr_t) args->data_ptr;
++      remain = args->size;
++
++      mutex_lock(&dev->struct_mutex);
++
++      ret = i915_gem_object_get_pages(obj);
++      if (ret != 0)
++              goto fail_unlock;
++
++      ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
++                                                      args->size);
++      if (ret != 0)
++              goto fail_put_pages;
++
++      obj_priv = obj->driver_private;
++      offset = args->offset;
++
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * page_base = page offset within aperture
++               * page_offset = offset within page
++               * page_length = bytes to copy for this page
++               */
++              page_base = (offset & ~(PAGE_SIZE-1));
++              page_offset = offset & (PAGE_SIZE-1);
++              page_length = remain;
++              if ((page_offset + remain) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - page_offset;
++
++              ret = fast_shmem_read(obj_priv->pages,
++                                    page_base, page_offset,
++                                    user_data, page_length);
++              if (ret)
++                      goto fail_put_pages;
++
++              remain -= page_length;
++              user_data += page_length;
++              offset += page_length;
++      }
++
++fail_put_pages:
++      i915_gem_object_put_pages(obj);
++fail_unlock:
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++/**
++ * This is the fallback shmem pread path, which allocates temporary storage
++ * in kernel space to copy_to_user into outside of the struct_mutex, so we
++ * can copy out of the object's backing pages while holding the struct mutex
++ * and not take page faults.
++ */
++static int
++i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
++                        struct drm_i915_gem_pread *args,
++                        struct drm_file *file_priv)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      struct mm_struct *mm = current->mm;
++      struct page **user_pages;
++      ssize_t remain;
++      loff_t offset, pinned_pages, i;
++      loff_t first_data_page, last_data_page, num_pages;
++      int shmem_page_index, shmem_page_offset;
++      int data_page_index,  data_page_offset;
++      int page_length;
++      int ret;
++      uint64_t data_ptr = args->data_ptr;
++
++      remain = args->size;
++
++      /* Pin the user pages containing the data.  We can't fault while
++       * holding the struct mutex, yet we want to hold it while
++       * dereferencing the user data.
++       */
++      first_data_page = data_ptr / PAGE_SIZE;
++      last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
++      num_pages = last_data_page - first_data_page + 1;
++
++      user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
++      if (user_pages == NULL)
++              return -ENOMEM;
++
++      down_read(&mm->mmap_sem);
++      pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
++                                    num_pages, 0, 0, user_pages, NULL);
++      up_read(&mm->mmap_sem);
++      if (pinned_pages < num_pages) {
++              ret = -EFAULT;
++              goto fail_put_user_pages;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++
++      ret = i915_gem_object_get_pages(obj);
++      if (ret != 0)
++              goto fail_unlock;
++
++      ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
++                                                      args->size);
++      if (ret != 0)
++              goto fail_put_pages;
++
++      obj_priv = obj->driver_private;
++      offset = args->offset;
++
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * shmem_page_index = page number within shmem file
++               * shmem_page_offset = offset within page in shmem file
++               * data_page_index = page number in get_user_pages return
++               * data_page_offset = offset with data_page_index page.
++               * page_length = bytes to copy for this page
++               */
++              shmem_page_index = offset / PAGE_SIZE;
++              shmem_page_offset = offset & ~PAGE_MASK;
++              data_page_index = data_ptr / PAGE_SIZE - first_data_page;
++              data_page_offset = data_ptr & ~PAGE_MASK;
++
++              page_length = remain;
++              if ((shmem_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - shmem_page_offset;
++              if ((data_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - data_page_offset;
++
++              ret = slow_shmem_copy(user_pages[data_page_index],
++                                    data_page_offset,
++                                    obj_priv->pages[shmem_page_index],
++                                    shmem_page_offset,
++                                    page_length);
++              if (ret)
++                      goto fail_put_pages;
++
++              remain -= page_length;
++              data_ptr += page_length;
++              offset += page_length;
++      }
++
++fail_put_pages:
++      i915_gem_object_put_pages(obj);
++fail_unlock:
++      mutex_unlock(&dev->struct_mutex);
++fail_put_user_pages:
++      for (i = 0; i < pinned_pages; i++) {
++              SetPageDirty(user_pages[i]);
++              page_cache_release(user_pages[i]);
++      }
++      kfree(user_pages);
++
++      return ret;
++}
++
+ /**
+  * Reads data from the object referenced by handle.
+  *
+@@ -148,8 +366,6 @@
+       struct drm_i915_gem_pread *args = data;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+-      ssize_t read;
+-      loff_t offset;
+       int ret;
+
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+@@ -167,33 +383,13 @@
+               return -EINVAL;
+       }
+
+-      mutex_lock(&dev->struct_mutex);
+-
+-      ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+-                                                      args->size);
+-      if (ret != 0) {
+-              drm_gem_object_unreference(obj);
+-              mutex_unlock(&dev->struct_mutex);
+-              return ret;
+-      }
+-
+-      offset = args->offset;
+-
+-      read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+-                      args->size, &offset);
+-      if (read != args->size) {
+-              drm_gem_object_unreference(obj);
+-              mutex_unlock(&dev->struct_mutex);
+-              if (read < 0)
+-                      return read;
+-              else
+-                      return -EINVAL;
+-      }
++      ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
++      if (ret != 0)
++              ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+
+       drm_gem_object_unreference(obj);
+-      mutex_unlock(&dev->struct_mutex);
+
+-      return 0;
++      return ret;
+ }
+
+ /* This is the fast write path which cannot handle
+@@ -223,29 +419,54 @@
+  */
+
+ static inline int
+-slow_user_write(struct io_mapping *mapping,
+-              loff_t page_base, int page_offset,
+-              char __user *user_data,
+-              int length)
++slow_kernel_write(struct io_mapping *mapping,
++                loff_t gtt_base, int gtt_offset,
++                struct page *user_page, int user_offset,
++                int length)
++{
++      char *src_vaddr, *dst_vaddr;
++      unsigned long unwritten;
++
++      dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
++      src_vaddr = kmap_atomic(user_page, KM_USER1);
++      unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
++                                                    src_vaddr + user_offset,
++                                                    length);
++      kunmap_atomic(src_vaddr, KM_USER1);
++      io_mapping_unmap_atomic(dst_vaddr);
++      if (unwritten)
++              return -EFAULT;
++      return 0;
++}
++
++static inline int
++fast_shmem_write(struct page **pages,
++               loff_t page_base, int page_offset,
++               char __user *data,
++               int length)
+ {
+       char __iomem *vaddr;
+       unsigned long unwritten;
+
+-      vaddr = io_mapping_map_wc(mapping, page_base);
++      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+       if (vaddr == NULL)
+-              return -EFAULT;
+-      unwritten = __copy_from_user(vaddr + page_offset,
+-                                   user_data, length);
+-      io_mapping_unmap(vaddr);
++              return -ENOMEM;
++      unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
++      kunmap_atomic(vaddr, KM_USER0);
++
+       if (unwritten)
+               return -EFAULT;
+       return 0;
+ }
+
++/**
++ * This is the fast pwrite path, where we copy the data directly from the
++ * user into the GTT, uncached.
++ */
+ static int
+-i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+-                  struct drm_i915_gem_pwrite *args,
+-                  struct drm_file *file_priv)
++i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
++                       struct drm_i915_gem_pwrite *args,
++                       struct drm_file *file_priv)
+ {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -273,7 +494,6 @@
+
+       obj_priv = obj->driver_private;
+       offset = obj_priv->gtt_offset + args->offset;
+-      obj_priv->dirty = 1;
+
+       while (remain > 0) {
+               /* Operation in this page
+@@ -292,16 +512,11 @@
+                                      page_offset, user_data, page_length);
+
+               /* If we get a fault while copying data, then (presumably) our
+-               * source page isn't available. In this case, use the
+-               * non-atomic function
++               * source page isn't available.  Return the error and we'll
++               * retry in the slow path.
+                */
+-              if (ret) {
+-                      ret = slow_user_write (dev_priv->mm.gtt_mapping,
+-                                             page_base, page_offset,
+-                                             user_data, page_length);
+-                      if (ret)
+-                              goto fail;
+-              }
++              if (ret)
++                      goto fail;
+
+               remain -= page_length;
+               user_data += page_length;
+@@ -315,39 +530,284 @@
+       return ret;
+ }
+
++/**
++ * This is the fallback GTT pwrite path, which uses get_user_pages to pin
++ * the memory and maps it using kmap_atomic for copying.
++ *
++ * This code resulted in x11perf -rgb10text consuming about 10% more CPU
++ * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
++ */
+ static int
+-i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+-                    struct drm_i915_gem_pwrite *args,
+-                    struct drm_file *file_priv)
++i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
++                       struct drm_i915_gem_pwrite *args,
++                       struct drm_file *file_priv)
+ {
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      ssize_t remain;
++      loff_t gtt_page_base, offset;
++      loff_t first_data_page, last_data_page, num_pages;
++      loff_t pinned_pages, i;
++      struct page **user_pages;
++      struct mm_struct *mm = current->mm;
++      int gtt_page_offset, data_page_offset, data_page_index, page_length;
+       int ret;
+-      loff_t offset;
+-      ssize_t written;
++      uint64_t data_ptr = args->data_ptr;
++
++      remain = args->size;
++
++      /* Pin the user pages containing the data.  We can't fault while
++       * holding the struct mutex, and all of the pwrite implementations
++       * want to hold it while dereferencing the user data.
++       */
++      first_data_page = data_ptr / PAGE_SIZE;
++      last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
++      num_pages = last_data_page - first_data_page + 1;
++
++      user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
++      if (user_pages == NULL)
++              return -ENOMEM;
++
++      down_read(&mm->mmap_sem);
++      pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
++                                    num_pages, 0, 0, user_pages, NULL);
++      up_read(&mm->mmap_sem);
++      if (pinned_pages < num_pages) {
++              ret = -EFAULT;
++              goto out_unpin_pages;
++      }
+
+       mutex_lock(&dev->struct_mutex);
++      ret = i915_gem_object_pin(obj, 0);
++      if (ret)
++              goto out_unlock;
++
++      ret = i915_gem_object_set_to_gtt_domain(obj, 1);
++      if (ret)
++              goto out_unpin_object;
++
++      obj_priv = obj->driver_private;
++      offset = obj_priv->gtt_offset + args->offset;
++
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * gtt_page_base = page offset within aperture
++               * gtt_page_offset = offset within page in aperture
++               * data_page_index = page number in get_user_pages return
++               * data_page_offset = offset with data_page_index page.
++               * page_length = bytes to copy for this page
++               */
++              gtt_page_base = offset & PAGE_MASK;
++              gtt_page_offset = offset & ~PAGE_MASK;
++              data_page_index = data_ptr / PAGE_SIZE - first_data_page;
++              data_page_offset = data_ptr & ~PAGE_MASK;
++
++              page_length = remain;
++              if ((gtt_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - gtt_page_offset;
++              if ((data_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - data_page_offset;
++
++              ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
++                                      gtt_page_base, gtt_page_offset,
++                                      user_pages[data_page_index],
++                                      data_page_offset,
++                                      page_length);
++
++              /* If we get a fault while copying data, then (presumably) our
++               * source page isn't available.  Return the error and we'll
++               * retry in the slow path.
++               */
++              if (ret)
++                      goto out_unpin_object;
++
++              remain -= page_length;
++              offset += page_length;
++              data_ptr += page_length;
++      }
++
++out_unpin_object:
++      i915_gem_object_unpin(obj);
++out_unlock:
++      mutex_unlock(&dev->struct_mutex);
++out_unpin_pages:
++      for (i = 0; i < pinned_pages; i++)
++              page_cache_release(user_pages[i]);
++      kfree(user_pages);
++
++      return ret;
++}
++
++/**
++ * This is the fast shmem pwrite path, which attempts to directly
++ * copy_from_user into the kmapped pages backing the object.
++ */
++static int
++i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
++                         struct drm_i915_gem_pwrite *args,
++                         struct drm_file *file_priv)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      ssize_t remain;
++      loff_t offset, page_base;
++      char __user *user_data;
++      int page_offset, page_length;
++      int ret;
++
++      user_data = (char __user *) (uintptr_t) args->data_ptr;
++      remain = args->size;
++
++      mutex_lock(&dev->struct_mutex);
++
++      ret = i915_gem_object_get_pages(obj);
++      if (ret != 0)
++              goto fail_unlock;
+
+       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+-      if (ret) {
+-              mutex_unlock(&dev->struct_mutex);
+-              return ret;
++      if (ret != 0)
++              goto fail_put_pages;
++
++      obj_priv = obj->driver_private;
++      offset = args->offset;
++      obj_priv->dirty = 1;
++
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * page_base = page offset within aperture
++               * page_offset = offset within page
++               * page_length = bytes to copy for this page
++               */
++              page_base = (offset & ~(PAGE_SIZE-1));
++              page_offset = offset & (PAGE_SIZE-1);
++              page_length = remain;
++              if ((page_offset + remain) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - page_offset;
++
++              ret = fast_shmem_write(obj_priv->pages,
++                                     page_base, page_offset,
++                                     user_data, page_length);
++              if (ret)
++                      goto fail_put_pages;
++
++              remain -= page_length;
++              user_data += page_length;
++              offset += page_length;
+       }
+
++fail_put_pages:
++      i915_gem_object_put_pages(obj);
++fail_unlock:
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++/**
++ * This is the fallback shmem pwrite path, which uses get_user_pages to pin
++ * the memory and maps it using kmap_atomic for copying.
++ *
++ * This avoids taking mmap_sem for faulting on the user's address while the
++ * struct_mutex is held.
++ */
++static int
++i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
++                         struct drm_i915_gem_pwrite *args,
++                         struct drm_file *file_priv)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      struct mm_struct *mm = current->mm;
++      struct page **user_pages;
++      ssize_t remain;
++      loff_t offset, pinned_pages, i;
++      loff_t first_data_page, last_data_page, num_pages;
++      int shmem_page_index, shmem_page_offset;
++      int data_page_index,  data_page_offset;
++      int page_length;
++      int ret;
++      uint64_t data_ptr = args->data_ptr;
++
++      remain = args->size;
++
++      /* Pin the user pages containing the data.  We can't fault while
++       * holding the struct mutex, and all of the pwrite implementations
++       * want to hold it while dereferencing the user data.
++       */
++      first_data_page = data_ptr / PAGE_SIZE;
++      last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
++      num_pages = last_data_page - first_data_page + 1;
++
++      user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
++      if (user_pages == NULL)
++              return -ENOMEM;
++
++      down_read(&mm->mmap_sem);
++      pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
++                                    num_pages, 0, 0, user_pages, NULL);
++      up_read(&mm->mmap_sem);
++      if (pinned_pages < num_pages) {
++              ret = -EFAULT;
++              goto fail_put_user_pages;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++
++      ret = i915_gem_object_get_pages(obj);
++      if (ret != 0)
++              goto fail_unlock;
++
++      ret = i915_gem_object_set_to_cpu_domain(obj, 1);
++      if (ret != 0)
++              goto fail_put_pages;
++
++      obj_priv = obj->driver_private;
+       offset = args->offset;
++      obj_priv->dirty = 1;
+
+-      written = vfs_write(obj->filp,
+-                          (char __user *)(uintptr_t) args->data_ptr,
+-                          args->size, &offset);
+-      if (written != args->size) {
+-              mutex_unlock(&dev->struct_mutex);
+-              if (written < 0)
+-                      return written;
+-              else
+-                      return -EINVAL;
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * shmem_page_index = page number within shmem file
++               * shmem_page_offset = offset within page in shmem file
++               * data_page_index = page number in get_user_pages return
++               * data_page_offset = offset with data_page_index page.
++               * page_length = bytes to copy for this page
++               */
++              shmem_page_index = offset / PAGE_SIZE;
++              shmem_page_offset = offset & ~PAGE_MASK;
++              data_page_index = data_ptr / PAGE_SIZE - first_data_page;
++              data_page_offset = data_ptr & ~PAGE_MASK;
++
++              page_length = remain;
++              if ((shmem_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - shmem_page_offset;
++              if ((data_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - data_page_offset;
++
++              ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
++                                    shmem_page_offset,
++                                    user_pages[data_page_index],
++                                    data_page_offset,
++                                    page_length);
++              if (ret)
++                      goto fail_put_pages;
++
++              remain -= page_length;
++              data_ptr += page_length;
++              offset += page_length;
+       }
+
++fail_put_pages:
++      i915_gem_object_put_pages(obj);
++fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
++fail_put_user_pages:
++      for (i = 0; i < pinned_pages; i++)
++              page_cache_release(user_pages[i]);
++      kfree(user_pages);
+
+-      return 0;
++      return ret;
+ }
+
+ /**
+@@ -388,10 +848,19 @@
+       if (obj_priv->phys_obj)
+               ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
+       else if (obj_priv->tiling_mode == I915_TILING_NONE &&
+-               dev->gtt_total != 0)
+-              ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
+-      else
+-              ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
++               dev->gtt_total != 0) {
++              ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
++              if (ret == -EFAULT) {
++                      ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
++                                                     file_priv);
++              }
++      } else {
++              ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
++              if (ret == -EFAULT) {
++                      ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
++                                                       file_priv);
++              }
++      }
+
+ #if WATCH_PWRITE
+       if (ret)
+@@ -603,6 +1072,7 @@
+       case -EAGAIN:
+               return VM_FAULT_OOM;
+       case -EFAULT:
++      case -EINVAL:
+               return VM_FAULT_SIGBUS;
+       default:
+               return VM_FAULT_NOPAGE;
+@@ -816,29 +1286,30 @@
+ }
+
+ static void
+-i915_gem_object_free_page_list(struct drm_gem_object *obj)
++i915_gem_object_put_pages(struct drm_gem_object *obj)
+ {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int page_count = obj->size / PAGE_SIZE;
+       int i;
+
+-      if (obj_priv->page_list == NULL)
+-              return;
++      BUG_ON(obj_priv->pages_refcount == 0);
+
++      if (--obj_priv->pages_refcount != 0)
++              return;
+
+       for (i = 0; i < page_count; i++)
+-              if (obj_priv->page_list[i] != NULL) {
++              if (obj_priv->pages[i] != NULL) {
+                       if (obj_priv->dirty)
+-                              set_page_dirty(obj_priv->page_list[i]);
+-                      mark_page_accessed(obj_priv->page_list[i]);
+-                      page_cache_release(obj_priv->page_list[i]);
++                              set_page_dirty(obj_priv->pages[i]);
++                      mark_page_accessed(obj_priv->pages[i]);
++                      page_cache_release(obj_priv->pages[i]);
+               }
+       obj_priv->dirty = 0;
+
+-      drm_free(obj_priv->page_list,
++      drm_free(obj_priv->pages,
+                page_count * sizeof(struct page *),
+                DRM_MEM_DRIVER);
+-      obj_priv->page_list = NULL;
++      obj_priv->pages = NULL;
+ }
+
+ static void
+@@ -1290,7 +1761,7 @@
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+               i915_gem_clear_fence_reg(obj);
+
+-      i915_gem_object_free_page_list(obj);
++      i915_gem_object_put_pages(obj);
+
+       if (obj_priv->gtt_space) {
+               atomic_dec(&dev->gtt_count);
+@@ -1409,7 +1880,7 @@
+ }
+
+ static int
+-i915_gem_object_get_page_list(struct drm_gem_object *obj)
++i915_gem_object_get_pages(struct drm_gem_object *obj)
+ {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int page_count, i;
+@@ -1418,18 +1889,19 @@
+       struct page *page;
+       int ret;
+
+-      if (obj_priv->page_list)
++      if (obj_priv->pages_refcount++ != 0)
+               return 0;
+
+       /* Get the list of pages out of our struct file.  They'll be pinned
+        * at this point until we release them.
+        */
+       page_count = obj->size / PAGE_SIZE;
+-      BUG_ON(obj_priv->page_list != NULL);
+-      obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
+-                                       DRM_MEM_DRIVER);
+-      if (obj_priv->page_list == NULL) {
++      BUG_ON(obj_priv->pages != NULL);
++      obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
++                                   DRM_MEM_DRIVER);
++      if (obj_priv->pages == NULL) {
+               DRM_ERROR("Faled to allocate page list\n");
++              obj_priv->pages_refcount--;
+               return -ENOMEM;
+       }
+
+@@ -1440,10 +1912,10 @@
+               if (IS_ERR(page)) {
+                       ret = PTR_ERR(page);
+                       DRM_ERROR("read_mapping_page failed: %d\n", ret);
+-                      i915_gem_object_free_page_list(obj);
++                      i915_gem_object_put_pages(obj);
+                       return ret;
+               }
+-              obj_priv->page_list[i] = page;
++              obj_priv->pages[i] = page;
+       }
+       return 0;
+ }
+@@ -1766,7 +2238,7 @@
+       DRM_INFO("Binding object of size %d at 0x%08x\n",
+                obj->size, obj_priv->gtt_offset);
+ #endif
+-      ret = i915_gem_object_get_page_list(obj);
++      ret = i915_gem_object_get_pages(obj);
+       if (ret) {
+               drm_mm_put_block(obj_priv->gtt_space);
+               obj_priv->gtt_space = NULL;
+@@ -1778,12 +2250,12 @@
+        * into the GTT.
+        */
+       obj_priv->agp_mem = drm_agp_bind_pages(dev,
+-                                             obj_priv->page_list,
++                                             obj_priv->pages,
+                                              page_count,
+                                              obj_priv->gtt_offset,
+                                              obj_priv->agp_type);
+       if (obj_priv->agp_mem == NULL) {
+-              i915_gem_object_free_page_list(obj);
++              i915_gem_object_put_pages(obj);
+               drm_mm_put_block(obj_priv->gtt_space);
+               obj_priv->gtt_space = NULL;
+               return -ENOMEM;
+@@ -1810,10 +2282,10 @@
+        * to GPU, and we can ignore the cache flush because it'll happen
+        * again at bind time.
+        */
+-      if (obj_priv->page_list == NULL)
++      if (obj_priv->pages == NULL)
+               return;
+
+-      drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
++      drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
+ }
+
+ /** Flushes any GPU write domain for the object if it's dirty. */
+@@ -2158,7 +2630,7 @@
+               for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+                       if (obj_priv->page_cpu_valid[i])
+                               continue;
+-                      drm_clflush_pages(obj_priv->page_list + i, 1);
++                      drm_clflush_pages(obj_priv->pages + i, 1);
+               }
+               drm_agp_chipset_flush(dev);
+       }
+@@ -2224,7 +2696,7 @@
+               if (obj_priv->page_cpu_valid[i])
+                       continue;
+
+-              drm_clflush_pages(obj_priv->page_list + i, 1);
++              drm_clflush_pages(obj_priv->pages + i, 1);
+
+               obj_priv->page_cpu_valid[i] = 1;
+       }
+@@ -2423,11 +2895,10 @@
+ static int
+ i915_dispatch_gem_execbuffer(struct drm_device *dev,
+                             struct drm_i915_gem_execbuffer *exec,
++                            struct drm_clip_rect *cliprects,
+                             uint64_t exec_offset)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+-      struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
+-                                           (uintptr_t) exec->cliprects_ptr;
+       int nbox = exec->num_cliprects;
+       int i = 0, count;
+       uint32_t        exec_start, exec_len;
+@@ -2448,7 +2919,7 @@
+
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+-                      int ret = i915_emit_box(dev, boxes, i,
++                      int ret = i915_emit_box(dev, cliprects, i,
+                                               exec->DR1, exec->DR4);
+                       if (ret)
+                               return ret;
+@@ -2515,6 +2986,7 @@
+       struct drm_gem_object **object_list = NULL;
+       struct drm_gem_object *batch_obj;
+       struct drm_i915_gem_object *obj_priv;
++      struct drm_clip_rect *cliprects = NULL;
+       int ret, i, pinned = 0;
+       uint64_t exec_offset;
+       uint32_t seqno, flush_domains;
+@@ -2551,6 +3023,23 @@
+               goto pre_mutex_err;
+       }
+
++      if (args->num_cliprects != 0) {
++              cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
++                                     DRM_MEM_DRIVER);
++              if (cliprects == NULL)
++                      goto pre_mutex_err;
++
++              ret = copy_from_user(cliprects,
++                                   (struct drm_clip_rect __user *)
++                                   (uintptr_t) args->cliprects_ptr,
++                                   sizeof(*cliprects) * args->num_cliprects);
++              if (ret != 0) {
++                      DRM_ERROR("copy %d cliprects failed: %d\n",
++                                args->num_cliprects, ret);
++                      goto pre_mutex_err;
++              }
++      }
++
+       mutex_lock(&dev->struct_mutex);
+
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+@@ -2687,7 +3176,7 @@
+ #endif
+
+       /* Exec the batchbuffer */
+-      ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
++      ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+       if (ret) {
+               DRM_ERROR("dispatch failed %d\n", ret);
+               goto err;
+@@ -2756,6 +3245,8 @@
+                DRM_MEM_DRIVER);
+       drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
+                DRM_MEM_DRIVER);
++      drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
++               DRM_MEM_DRIVER);
+
+       return ret;
+ }
+@@ -3192,7 +3683,7 @@
+
+       dev_priv->status_gfx_addr = obj_priv->gtt_offset;
+
+-      dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
++      dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
+       if (dev_priv->hw_status_page == NULL) {
+               DRM_ERROR("Failed to map status page.\n");
+               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+@@ -3222,7 +3713,7 @@
+       obj = dev_priv->hws_obj;
+       obj_priv = obj->driver_private;
+
+-      kunmap(obj_priv->page_list[0]);
++      kunmap(obj_priv->pages[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       dev_priv->hws_obj = NULL;
+@@ -3525,20 +4016,20 @@
+       if (!obj_priv->phys_obj)
+               return;
+
+-      ret = i915_gem_object_get_page_list(obj);
++      ret = i915_gem_object_get_pages(obj);
+       if (ret)
+               goto out;
+
+       page_count = obj->size / PAGE_SIZE;
+
+       for (i = 0; i < page_count; i++) {
+-              char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
++              char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
+               char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+
+               memcpy(dst, src, PAGE_SIZE);
+               kunmap_atomic(dst, KM_USER0);
+       }
+-      drm_clflush_pages(obj_priv->page_list, page_count);
++      drm_clflush_pages(obj_priv->pages, page_count);
+       drm_agp_chipset_flush(dev);
+ out:
+       obj_priv->phys_obj->cur_obj = NULL;
+@@ -3581,7 +4072,7 @@
+       obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
+       obj_priv->phys_obj->cur_obj = obj;
+
+-      ret = i915_gem_object_get_page_list(obj);
++      ret = i915_gem_object_get_pages(obj);
+       if (ret) {
+               DRM_ERROR("failed to get page list\n");
+               goto out;
+@@ -3590,7 +4081,7 @@
+       page_count = obj->size / PAGE_SIZE;
+
+       for (i = 0; i < page_count; i++) {
+-              char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
++              char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
+               char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+
+               memcpy(dst, src, PAGE_SIZE);
+diff -Naur linux-2.6.29.1-old/drivers/gpu/drm/i915/i915_gem_tiling.c linux-2.6.29.1-new/drivers/gpu/drm/i915/i915_gem_tiling.c
+--- linux-2.6.29.1-old/drivers/gpu/drm/i915/i915_gem_tiling.c  2009-04-02 13:55:27.000000000 -0700
++++ linux-2.6.29.1-new/drivers/gpu/drm/i915/i915_gem_tiling.c  2009-04-25 06:29:46.000000000 -0700
+@@ -96,16 +96,16 @@
+                */
+               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+-      } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
+-                 IS_GM45(dev)) {
++      } else if (IS_MOBILE(dev)) {
+               uint32_t dcc;
+
+-              /* On 915-945 and GM965, channel interleave by the CPU is
+-               * determined by DCC.  The CPU will alternate based on bit 6
+-               * in interleaved mode, and the GPU will then also alternate
+-               * on bit 6, 9, and 10 for X, but the CPU may also optionally
+-               * alternate based on bit 17 (XOR not disabled and XOR
+-               * bit == 17).
++              /* On mobile 9xx chipsets, channel interleave by the CPU is
++               * determined by DCC.  For single-channel, neither the CPU
++               * nor the GPU do swizzling.  For dual channel interleaved,
++               * the GPU's interleave is bit 9 and 10 for X tiled, and bit
++               * 9 for Y tiled.  The CPU's interleave is independent, and
++               * can be based on either bit 11 (haven't seen this yet) or
++               * bit 17 (common).
+                */
+               dcc = I915_READ(DCC);
+               switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+@@ -115,19 +115,18 @@
+                       swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+                       break;
+               case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+-                      if (IS_I915G(dev) || IS_I915GM(dev) ||
+-                          dcc & DCC_CHANNEL_XOR_DISABLE) {
++                      if (dcc & DCC_CHANNEL_XOR_DISABLE) {
++                              /* This is the base swizzling by the GPU for
++                               * tiled buffers.
++                               */
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9;
+-                      } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
+-                                 (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+-                              /* GM965/GM45 does either bit 11 or bit 17
+-                               * swizzling.
+-                               */
++                      } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
++                              /* Bit 11 swizzling by the CPU in addition. */
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+                       } else {
+-                              /* Bit 17 or perhaps other swizzling */
++                              /* Bit 17 swizzling by the CPU in addition. */
+                               swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+                               swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+                       }
+@@ -266,7 +265,6 @@
+       mutex_lock(&dev->struct_mutex);
+
+       if (args->tiling_mode == I915_TILING_NONE) {
+-              obj_priv->tiling_mode = I915_TILING_NONE;
+               args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+       } else {
+               if (args->tiling_mode == I915_TILING_X)
+diff -Naur linux-2.6.29.1-old/drivers/gpu/drm/i915/i915_reg.h linux-2.6.29.1-new/drivers/gpu/drm/i915/i915_reg.h
+--- linux-2.6.29.1-old/drivers/gpu/drm/i915/i915_reg.h 2009-04-02 13:55:27.000000000 -0700
++++ linux-2.6.29.1-new/drivers/gpu/drm/i915/i915_reg.h 2009-04-25 06:29:46.000000000 -0700
+@@ -629,6 +629,22 @@
+ #define   TV_HOTPLUG_INT_EN                   (1 << 18)
+ #define   CRT_HOTPLUG_INT_EN                  (1 << 9)
+ #define   CRT_HOTPLUG_FORCE_DETECT            (1 << 3)
++#define CRT_HOTPLUG_ACTIVATION_PERIOD_32      (0 << 8)
++/* must use period 64 on GM45 according to docs */
++#define CRT_HOTPLUG_ACTIVATION_PERIOD_64      (1 << 8)
++#define CRT_HOTPLUG_DAC_ON_TIME_2M            (0 << 7)
++#define CRT_HOTPLUG_DAC_ON_TIME_4M            (1 << 7)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_40                (0 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_50                (1 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_60                (2 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_70                (3 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK      (3 << 5)
++#define CRT_HOTPLUG_DETECT_DELAY_1G           (0 << 4)
++#define CRT_HOTPLUG_DETECT_DELAY_2G           (1 << 4)
++#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV      (0 << 2)
++#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV      (1 << 2)
++#define CRT_HOTPLUG_MASK                      (0x3fc) /* Bits 9-2 */
++
+
+ #define PORT_HOTPLUG_STAT     0x61114
+ #define   HDMIB_HOTPLUG_INT_STATUS            (1 << 29)
+@@ -1415,6 +1431,7 @@
+ #define   DISPPLANE_NO_LINE_DOUBLE            0
+ #define   DISPPLANE_STEREO_POLARITY_FIRST     0
+ #define   DISPPLANE_STEREO_POLARITY_SECOND    (1<<18)
++#define   DISPPLANE_TILED                     (1<<10)
+ #define DSPAADDR              0x70184
+ #define DSPASTRIDE            0x70188
+ #define DSPAPOS                       0x7018C /* reserved */
+diff -Naur linux-2.6.29.1-old/drivers/gpu/drm/i915/intel_crt.c linux-2.6.29.1-new/drivers/gpu/drm/i915/intel_crt.c
+--- linux-2.6.29.1-old/drivers/gpu/drm/i915/intel_crt.c        2009-04-02 13:55:27.000000000 -0700
++++ linux-2.6.29.1-new/drivers/gpu/drm/i915/intel_crt.c        2009-04-25 06:29:46.000000000 -0700
+@@ -133,20 +133,39 @@
+ {
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+-      u32 temp;
+-
+-      unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+-
+-      temp = I915_READ(PORT_HOTPLUG_EN);
+-
+-      I915_WRITE(PORT_HOTPLUG_EN,
+-                 temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
+-
+-      do {
+-              if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
+-                      break;
+-              msleep(1);
+-      } while (time_after(timeout, jiffies));
++      u32 hotplug_en;
++      int i, tries = 0;
++      /*
++       * On 4 series desktop, CRT detect sequence need to be done twice
++       * to get a reliable result.
++       */
++
++      if (IS_G4X(dev) && !IS_GM45(dev))
++              tries = 2;
++      else
++              tries = 1;
++      hotplug_en = I915_READ(PORT_HOTPLUG_EN);
++      hotplug_en &= ~(CRT_HOTPLUG_MASK);
++      hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
++
++      if (IS_GM45(dev))
++              hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
++
++      hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
++
++      for (i = 0; i < tries ; i++) {
++              unsigned long timeout;
++              /* turn on the FORCE_DETECT */
++              I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
++              timeout = jiffies + msecs_to_jiffies(1000);
++              /* wait for FORCE_DETECT to go off */
++              do {
++                      if (!(I915_READ(PORT_HOTPLUG_EN) &
++                                      CRT_HOTPLUG_FORCE_DETECT))
++                              break;
++                      msleep(1);
++              } while (time_after(timeout, jiffies));
++      }
+
+       if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
+           CRT_HOTPLUG_MONITOR_COLOR)
+diff -Naur linux-2.6.29.1-old/drivers/gpu/drm/i915/intel_display.c linux-2.6.29.1-new/drivers/gpu/drm/i915/intel_display.c
+--- linux-2.6.29.1-old/drivers/gpu/drm/i915/intel_display.c    2009-04-02 13:55:27.000000000 -0700
++++ linux-2.6.29.1-new/drivers/gpu/drm/i915/intel_display.c    2009-04-25 06:29:46.000000000 -0700
+@@ -338,6 +338,7 @@
+       int dspbase = (pipe == 0 ? DSPAADDR : DSPBADDR);
+       int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+       int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
++      int dsptileoff = (pipe == 0 ? DSPATILEOFF : DSPBTILEOFF);
+       int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+       u32 dspcntr, alignment;
+       int ret;
+@@ -414,6 +415,13 @@
+               mutex_unlock(&dev->struct_mutex);
+               return -EINVAL;
+       }
++      if (IS_I965G(dev)) {
++              if (obj_priv->tiling_mode != I915_TILING_NONE)
++                      dspcntr |= DISPPLANE_TILED;
++              else
++                      dspcntr &= ~DISPPLANE_TILED;
++      }
++
+       I915_WRITE(dspcntr_reg, dspcntr);
+
+       Start = obj_priv->gtt_offset;
+@@ -426,6 +434,7 @@
+               I915_READ(dspbase);
+               I915_WRITE(dspsurf, Start);
+               I915_READ(dspsurf);
++              I915_WRITE(dsptileoff, (y << 16) | x);
+       } else {
+               I915_WRITE(dspbase, Start + Offset);
+               I915_READ(dspbase);
+@@ -1474,13 +1483,21 @@
+
+       if (IS_I9XX(dev)) {
+               int found;
++              u32 reg;
+
+               if (I915_READ(SDVOB) & SDVO_DETECTED) {
+                       found = intel_sdvo_init(dev, SDVOB);
+                       if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+                               intel_hdmi_init(dev, SDVOB);
+               }
+-              if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
++
++              /* Before G4X SDVOC doesn't have its own detect register */
++              if (IS_G4X(dev))
++                      reg = SDVOC;
++              else
++                      reg = SDVOB;
++
++              if (I915_READ(reg) & SDVO_DETECTED) {
+                       found = intel_sdvo_init(dev, SDVOC);
+                       if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+                               intel_hdmi_init(dev, SDVOC);
+diff -Naur linux-2.6.29.1-old/drivers/gpu/drm/i915/intel_tv.c linux-2.6.29.1-new/drivers/gpu/drm/i915/intel_tv.c
+--- linux-2.6.29.1-old/drivers/gpu/drm/i915/intel_tv.c 2009-04-02 13:55:27.000000000 -0700
++++ linux-2.6.29.1-new/drivers/gpu/drm/i915/intel_tv.c 2009-04-25 06:29:46.000000000 -0700
+@@ -1558,33 +1558,49 @@
+       struct drm_device *dev = connector->dev;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_tv_priv *tv_priv = intel_output->dev_priv;
++      struct drm_encoder *encoder = &intel_output->enc;
++      struct drm_crtc *crtc = encoder->crtc;
+       int ret = 0;
++      bool changed = false;
+
+       ret = drm_connector_property_set_value(connector, property, val);
+       if (ret < 0)
+               goto out;
+
+-      if (property == dev->mode_config.tv_left_margin_property)
++      if (property == dev->mode_config.tv_left_margin_property &&
++              tv_priv->margin[TV_MARGIN_LEFT] != val) {
+               tv_priv->margin[TV_MARGIN_LEFT] = val;
+-      else if (property == dev->mode_config.tv_right_margin_property)
++              changed = true;
++      } else if (property == dev->mode_config.tv_right_margin_property &&
++              tv_priv->margin[TV_MARGIN_RIGHT] != val) {
+               tv_priv->margin[TV_MARGIN_RIGHT] = val;
+-      else if (property == dev->mode_config.tv_top_margin_property)
++              changed = true;
++      } else if (property == dev->mode_config.tv_top_margin_property &&
++              tv_priv->margin[TV_MARGIN_TOP] != val) {
+               tv_priv->margin[TV_MARGIN_TOP] = val;
+-      else if (property == dev->mode_config.tv_bottom_margin_property)
++              changed = true;
++      } else if (property == dev->mode_config.tv_bottom_margin_property &&
++              tv_priv->margin[TV_MARGIN_BOTTOM] != val) {
+               tv_priv->margin[TV_MARGIN_BOTTOM] = val;
+-      else if (property == dev->mode_config.tv_mode_property) {
++              changed = true;
++      } else if (property == dev->mode_config.tv_mode_property) {
+               if (val >= NUM_TV_MODES) {
+                       ret = -EINVAL;
+                       goto out;
+               }
++              if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
++                      goto out;
++
+               tv_priv->tv_format = tv_modes[val].name;
+-              intel_tv_mode_set(&intel_output->enc, NULL, NULL);
++              changed = true;
+       } else {
+               ret = -EINVAL;
+               goto out;
+       }
+
+-      intel_tv_mode_set(&intel_output->enc, NULL, NULL);
++      if (changed && crtc)
++              drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
++                              crtc->y, crtc->fb);
+ out:
+       return ret;
+ }
