Author: bapt
Date: Wed Aug 26 21:35:16 2015
New Revision: 287174
URL: https://svnweb.freebsd.org/changeset/base/287174

Log:
  Reduce diff against linux 3.8
  
  Reviewed by:  dumbbell
  Differential Revision:        https://reviews.freebsd.org/D3492

Modified:
  head/sys/dev/drm2/i915/i915_gem.c
  head/sys/dev/drm2/i915/i915_gem_context.c
  head/sys/dev/drm2/i915/i915_gem_execbuffer.c
  head/sys/dev/drm2/i915/i915_gem_gtt.c
  head/sys/dev/drm2/i915/i915_gem_tiling.c

Modified: head/sys/dev/drm2/i915/i915_gem.c
==============================================================================
--- head/sys/dev/drm2/i915/i915_gem.c   Wed Aug 26 21:33:43 2015        (r287173)
+++ head/sys/dev/drm2/i915/i915_gem.c   Wed Aug 26 21:35:16 2015        (r287174)
@@ -1,4 +1,4 @@
-/*-
+/*
  * Copyright © 2008 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -203,11 +203,10 @@ int i915_mutex_lock_interruptible(struct
        return 0;
 }
 
-static bool
+static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-
-       return !obj->active;
+       return obj->gtt_space && !obj->active;
 }
 
 int
@@ -1239,9 +1238,17 @@ i915_gem_set_domain_ioctl(struct drm_dev
        uint32_t write_domain = args->write_domain;
        int ret;
 
-       if ((write_domain & I915_GEM_GPU_DOMAINS) != 0 ||
-           (read_domains & I915_GEM_GPU_DOMAINS) != 0 ||
-           (write_domain != 0 && read_domains != write_domain))
+       /* Only handle setting domains to types used by the CPU. */
+       if (write_domain & I915_GEM_GPU_DOMAINS)
+               return -EINVAL;
+
+       if (read_domains & I915_GEM_GPU_DOMAINS)
+               return -EINVAL;
+
+       /* Having something in the write domain implies it's in the read
+        * domain, and only that read domain.  Enforce that in the request.
+        */
+       if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;
 
        ret = i915_mutex_lock_interruptible(dev);
@@ -1686,13 +1693,11 @@ i915_gem_get_unfenced_gtt_alignment(stru
                                    uint32_t size,
                                    int tiling_mode)
 {
-       if (tiling_mode == I915_TILING_NONE)
-               return 4096;
-
        /*
         * Minimum alignment is 4k (GTT page size) for sane hw.
         */
-       if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev))
+       if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
+           tiling_mode == I915_TILING_NONE)
                return 4096;
 
        /* Previous hardware however needs to be aligned to a power-of-two
@@ -3155,7 +3160,7 @@ i915_gem_object_set_to_gtt_domain(struct
 
        ret = i915_gem_object_flush_gpu_write_domain(obj);
        if (ret)
-               return (ret);
+               return ret;
 
        if (obj->pending_gpu_write || write) {
                ret = i915_gem_object_wait_rendering(obj);
@@ -3366,6 +3371,12 @@ i915_gem_object_finish_gpu(struct drm_i9
        return 0;
 }
 
+/**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+ * This function returns when the move is complete, including waiting on
+ * flushes to occur.
+ */
 int
 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 {
@@ -3644,7 +3655,6 @@ int
 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
 {
-
        return i915_gem_ring_throttle(dev, file_priv);
 }
 
@@ -4101,6 +4111,10 @@ i915_gem_unload(struct drm_device *dev)
        EVENTHANDLER_DEREGISTER(vm_lowmem, dev_priv->mm.i915_lowmem);
 }
 
+/*
+ * Create a physically contiguous memory object for this object
+ * e.g. for cursor + overlay regs
+ */
 static int i915_gem_init_phys_object(struct drm_device *dev,
                                     int id, int size, int align)
 {

Modified: head/sys/dev/drm2/i915/i915_gem_context.c
==============================================================================
--- head/sys/dev/drm2/i915/i915_gem_context.c   Wed Aug 26 21:33:43 2015        (r287173)
+++ head/sys/dev/drm2/i915/i915_gem_context.c   Wed Aug 26 21:35:16 2015        (r287174)
@@ -302,7 +302,7 @@ void i915_gem_context_fini(struct drm_de
        do_destroy(dev_priv->rings[RCS].default_context);
 }
 
-static int context_idr_cleanup(uint32_t id, void *p, void *data)
+static int context_idr_cleanup(int id, void *p, void *data)
 {
        struct i915_hw_context *ctx = p;
 

Modified: head/sys/dev/drm2/i915/i915_gem_execbuffer.c
==============================================================================
--- head/sys/dev/drm2/i915/i915_gem_execbuffer.c        Wed Aug 26 21:33:43 2015        (r287173)
+++ head/sys/dev/drm2/i915/i915_gem_execbuffer.c        Wed Aug 26 21:35:16 2015        (r287174)
@@ -405,10 +405,7 @@ i915_gem_execbuffer_relocate_entry(struc
                if (ret)
                        return ret;
 
-               /*
-                * Map the page containing the relocation we're going
-                * to perform.
-                */
+               /* Map the page containing the relocation we're going to perform.  */
                reloc->offset += obj->gtt_offset;
                reloc_page = pmap_mapdev_attr(dev->agp->base + (reloc->offset &
                    ~PAGE_MASK), PAGE_SIZE, PAT_WRITE_COMBINING);
@@ -436,7 +433,7 @@ i915_gem_execbuffer_relocate_entry(struc
 
 static int
 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-    struct eb_objects *eb)
+                                   struct eb_objects *eb)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
@@ -459,7 +456,7 @@ i915_gem_execbuffer_relocate_object(stru
 
                do {
                        u64 offset = r->presumed_offset;
- 
+
                        ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
                        if (ret)
                                return ret;
@@ -475,13 +472,15 @@ i915_gem_execbuffer_relocate_object(stru
                        r++;
                } while (--count);
        }
+
+       return 0;
 #undef N_RELOC
-       return (0);
 }
 
 static int
 i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-    struct eb_objects *eb, struct drm_i915_gem_relocation_entry *relocs)
+                                        struct eb_objects *eb,
+                                        struct drm_i915_gem_relocation_entry *relocs)
 {
        const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
        int i, ret;
@@ -520,11 +519,12 @@ i915_gem_execbuffer_relocate(struct drm_
 
        list_for_each_entry(obj, objects, exec_list) {
                ret = i915_gem_execbuffer_relocate_object(obj, eb);
-               if (ret != 0)
+               if (ret)
                        break;
        }
        vm_fault_enable_pagefaults(pflags);
-       return (ret);
+
+       return ret;
 }
 
 #define  __EXEC_OBJECT_HAS_FENCE (1<<31)
@@ -583,9 +583,9 @@ i915_gem_execbuffer_reserve(struct intel
 {
        drm_i915_private_t *dev_priv;
        struct drm_i915_gem_object *obj;
-       int ret, retry;
-       bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        struct list_head ordered_objects;
+       bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+       int ret, retry;
 
        dev_priv = ring->dev->dev_private;
        INIT_LIST_HEAD(&ordered_objects);
@@ -619,12 +619,11 @@ i915_gem_execbuffer_reserve(struct intel
         *
         * 1a. Unbind all objects that do not match the GTT constraints for
         *     the execbuffer (fenceable, mappable, alignment etc).
-        * 1b. Increment pin count for already bound objects and obtain
-        *     a fence register if required.
+        * 1b. Increment pin count for already bound objects.
         * 2.  Bind new objects.
         * 3.  Decrement pin count.
         *
-        * This avoid unnecessary unbinding of later objects in order to makr
+        * This avoid unnecessary unbinding of later objects in order to make
         * room for the earlier objects *unless* we need to defragment.
         */
        retry = 0;
@@ -735,9 +734,12 @@ err:
 
 static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
-    struct drm_file *file, struct intel_ring_buffer *ring,
-    struct list_head *objects, struct eb_objects *eb,
-    struct drm_i915_gem_exec_object2 *exec, int count)
+                                 struct drm_file *file,
+                                 struct intel_ring_buffer *ring,
+                                 struct list_head *objects,
+                                 struct eb_objects *eb,
+                                 struct drm_i915_gem_exec_object2 *exec,
+                                 int count)
 {
        struct drm_i915_gem_relocation_entry *reloc;
        struct drm_i915_gem_object *obj;
@@ -812,7 +814,7 @@ i915_gem_execbuffer_relocate_slow(struct
        list_for_each_entry(obj, objects, exec_list) {
                int offset = obj->exec_entry - exec;
                ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-                   reloc + reloc_offset[offset]);
+                                                              reloc + reloc_offset[offset]);
                if (ret)
                        goto err;
        }
@@ -1210,7 +1212,7 @@ i915_gem_do_execbuffer(struct drm_device
 
        if (args->num_cliprects != 0) {
                if (ring != &dev_priv->rings[RCS]) {
-       DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+                       DRM_DEBUG("clip rectangles are only valid with the render ring\n");
                        ret = -EINVAL;
                        goto pre_struct_lock_err;
                }
@@ -1256,6 +1258,7 @@ i915_gem_do_execbuffer(struct drm_device
        INIT_LIST_HEAD(&objects);
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_i915_gem_object *obj;
+
                obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                                                        exec[i].handle));
                if (&obj->base == NULL) {
@@ -1294,7 +1297,9 @@ i915_gem_do_execbuffer(struct drm_device
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
-                           &objects, eb, exec, args->buffer_count);
+                                                               &objects, eb,
+                                                               exec,
+                                                               args->buffer_count);
                        DRM_LOCK_ASSERT(dev);
                }
                if (ret)
@@ -1368,17 +1373,18 @@ i915_gem_do_execbuffer(struct drm_device
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
                        ret = i915_emit_box_p(dev, &cliprects[i],
-                           args->DR1, args->DR4);
+                                           args->DR1, args->DR4);
                        if (ret)
                                goto err;
 
-                       ret = ring->dispatch_execbuffer(ring, exec_start,
-                           exec_len);
+                       ret = ring->dispatch_execbuffer(ring,
+                                                       exec_start, exec_len);
                        if (ret)
                                goto err;
                }
        } else {
-               ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+               ret = ring->dispatch_execbuffer(ring,
+                                               exec_start, exec_len);
                if (ret)
                        goto err;
        }
@@ -1391,8 +1397,9 @@ err:
        while (!list_empty(&objects)) {
                struct drm_i915_gem_object *obj;
 
-               obj = list_first_entry(&objects, struct drm_i915_gem_object,
-                   exec_list);
+               obj = list_first_entry(&objects,
+                                      struct drm_i915_gem_object,
+                                      exec_list);
                list_del_init(&obj->exec_list);
                drm_gem_object_unreference(&obj->base);
        }
@@ -1520,7 +1527,7 @@ i915_gem_execbuffer2(struct drm_device *
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                free(exec2_list, DRM_I915_GEM);
-               return (ret);
+               return -EFAULT;
        }
 
        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);

Modified: head/sys/dev/drm2/i915/i915_gem_gtt.c
==============================================================================
--- head/sys/dev/drm2/i915/i915_gem_gtt.c       Wed Aug 26 21:33:43 2015        (r287173)
+++ head/sys/dev/drm2/i915/i915_gem_gtt.c       Wed Aug 26 21:35:16 2015        (r287174)
@@ -34,9 +34,9 @@ __FBSDID("$FreeBSD$");
 #include <sys/sf_buf.h>
 
 /* PPGTT support for Sandybdrige/Gen6 and later */
-static void
-i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
-    unsigned first_entry, unsigned num_entries)
+static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+                                  unsigned first_entry,
+                                  unsigned num_entries)
 {
        uint32_t *pt_vaddr;
        uint32_t scratch_pte;
@@ -71,20 +71,17 @@ i915_ppgtt_clear_range(struct i915_hw_pp
 
 }
 
-int
-i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt;
-       u_int first_pd_entry_in_global_pt, i;
+       unsigned first_pd_entry_in_global_pt;
+       int i;
 
-       dev_priv = dev->dev_private;
 
-       /*
-        * ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
+       /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
         * entries. For aliasing ppgtt support we just steal them at the end for
-        * now.
-        */
+        * now.  */
        first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
 
        ppgtt = malloc(sizeof(*ppgtt), DRM_I915_GEM, M_WAITOK | M_ZERO);
@@ -152,9 +149,9 @@ i915_ppgtt_insert_pages(struct i915_hw_p
        }
 }
 
-void
-i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-    struct drm_i915_gem_object *obj, enum i915_cache_level cache_level)
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+                           struct drm_i915_gem_object *obj,
+                           enum i915_cache_level cache_level)
 {
        struct drm_device *dev;
        struct drm_i915_private *dev_priv;
@@ -185,22 +182,23 @@ i915_ppgtt_bind_object(struct i915_hw_pp
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
 {
-       i915_ppgtt_clear_range(ppgtt, obj->gtt_space->start >> PAGE_SHIFT,
-           obj->base.size >> PAGE_SHIFT);
+       i915_ppgtt_clear_range(ppgtt,
+                              obj->gtt_space->start >> PAGE_SHIFT,
+                              obj->base.size >> PAGE_SHIFT);
 }
 
 void i915_gem_init_ppgtt(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv;
-       struct i915_hw_ppgtt *ppgtt;
-       uint32_t pd_offset, pd_entry;
-       vm_paddr_t pt_addr;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       uint32_t pd_offset;
        struct intel_ring_buffer *ring;
-       u_int first_pd_entry_in_global_pt, i;
+       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+       u_int first_pd_entry_in_global_pt;
+       vm_paddr_t pt_addr;
+       uint32_t pd_entry;
+       int i;
 
-       dev_priv = dev->dev_private;
-       ppgtt = dev_priv->mm.aliasing_ppgtt;
-       if (ppgtt == NULL)
+       if (!dev_priv->mm.aliasing_ppgtt)
                return;
 
        first_pd_entry_in_global_pt = 512 * 1024 - I915_PPGTT_PD_ENTRIES;
@@ -244,6 +242,28 @@ void i915_gem_init_ppgtt(struct drm_devi
        }
 }
 
+static bool do_idling(struct drm_i915_private *dev_priv)
+{
+       bool ret = dev_priv->mm.interruptible;
+
+       if (dev_priv->mm.gtt.do_idle_maps) {
+               dev_priv->mm.interruptible = false;
+               if (i915_gpu_idle(dev_priv->dev)) {
+                       DRM_ERROR("Couldn't idle GPU\n");
+                       /* Wait a bit, in hopes it avoids the hang */
+                       DELAY(10);
+               }
+       }
+
+       return ret;
+}
+
+static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
+{
+       if (dev_priv->mm.gtt.do_idle_maps)
+               dev_priv->mm.interruptible = interruptible;
+}
+
 void
 i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 {
@@ -293,42 +313,14 @@ cache_level_to_agp_type(struct drm_devic
        }
 }
 
-static bool
-do_idling(struct drm_i915_private *dev_priv)
-{
-       bool ret = dev_priv->mm.interruptible;
-
-       if (dev_priv->mm.gtt.do_idle_maps) {
-               dev_priv->mm.interruptible = false;
-               if (i915_gpu_idle(dev_priv->dev)) {
-                       DRM_ERROR("Couldn't idle GPU\n");
-                       /* Wait a bit, in hopes it avoids the hang */
-                       DELAY(10);
-               }
-       }
-
-       return ret;
-}
-
-static void
-undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
-{
-
-       if (dev_priv->mm.gtt.do_idle_maps)
-               dev_priv->mm.interruptible = interruptible;
-}
-
-void
-i915_gem_restore_gtt_mappings(struct drm_device *dev)
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
 
-       dev_priv = dev->dev_private;
-
        /* First fill our portion of the GTT with scratch pages */
        intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
-           (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+                             (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                i915_gem_clflush_object(obj);
@@ -338,11 +330,10 @@ i915_gem_restore_gtt_mappings(struct drm
        intel_gtt_chipset_flush();
 }
 
-int
-i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
 
-       return (0);
+       return 0;
 }
 
 void
@@ -363,8 +354,7 @@ i915_gem_gtt_bind_object(struct drm_i915
        obj->has_global_gtt_mapping = 1;
 }
 
-void
-i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
 
        intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
@@ -373,24 +363,21 @@ i915_gem_gtt_unbind_object(struct drm_i9
        obj->has_global_gtt_mapping = 0;
 }
 
-void
-i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible;
 
-       dev = obj->base.dev;
-       dev_priv = dev->dev_private;
-
        interruptible = do_idling(dev_priv);
 
        undo_idling(dev_priv, interruptible);
 }
 
-int
-i915_gem_init_global_gtt(struct drm_device *dev, unsigned long start,
-    unsigned long mappable_end, unsigned long end)
+int i915_gem_init_global_gtt(struct drm_device *dev,
+                            unsigned long start,
+                            unsigned long mappable_end,
+                            unsigned long end)
 {
        drm_i915_private_t *dev_priv;
        unsigned long mappable;

Modified: head/sys/dev/drm2/i915/i915_gem_tiling.c
==============================================================================
--- head/sys/dev/drm2/i915/i915_gem_tiling.c    Wed Aug 26 21:33:43 2015        (r287173)
+++ head/sys/dev/drm2/i915/i915_gem_tiling.c    Wed Aug 26 21:35:16 2015        (r287174)
@@ -209,7 +209,7 @@ i915_tiling_ok(struct drm_device *dev, i
 
        /* Linear is always fine */
        if (tiling_mode == I915_TILING_NONE)
-               return (true);
+               return true;
 
        if (IS_GEN2(dev) ||
            (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
@@ -222,35 +222,35 @@ i915_tiling_ok(struct drm_device *dev, i
                /* i965 stores the end address of the gtt mapping in the fence
                 * reg, so dont bother to check the size */
                if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
-                       return (false);
+                       return false;
        } else {
                if (stride > 8192)
-                       return (false);
+                       return false;
 
                if (IS_GEN3(dev)) {
                        if (size > I830_FENCE_MAX_SIZE_VAL << 20)
-                               return (false);
+                               return false;
                } else {
                        if (size > I830_FENCE_MAX_SIZE_VAL << 19)
-                               return (false);
+                               return false;
                }
        }
 
        /* 965+ just needs multiples of tile width */
        if (INTEL_INFO(dev)->gen >= 4) {
                if (stride & (tile_width - 1))
-                       return (false);
-               return (true);
+                       return false;
+               return true;
        }
 
        /* Pre-965 needs power of two tile widths */
        if (stride < tile_width)
-               return (false);
+               return false;
 
        if (stride & (stride - 1))
-               return (false);
+               return false;
 
-       return (true);
+       return true;
 }
 
 /* Is the current GTT allocation valid for the change in tiling? */
@@ -260,17 +260,17 @@ i915_gem_object_fence_ok(struct drm_i915
        u32 size;
 
        if (tiling_mode == I915_TILING_NONE)
-               return (true);
+               return true;
 
        if (INTEL_INFO(obj->base.dev)->gen >= 4)
-               return (true);
+               return true;
 
        if (INTEL_INFO(obj->base.dev)->gen == 3) {
                if (obj->gtt_offset & ~I915_FENCE_START_MASK)
-                       return (false);
+                       return false;
        } else {
                if (obj->gtt_offset & ~I830_FENCE_START_MASK)
-                       return (false);
+                       return false;
        }
 
        /*
@@ -286,12 +286,12 @@ i915_gem_object_fence_ok(struct drm_i915
                size <<= 1;
 
        if (obj->gtt_space->size != size)
-               return (false);
+               return false;
 
        if (obj->gtt_offset & (size - 1))
-               return (false);
+               return false;
 
-       return (true);
+       return true;
 }
 
 /**
@@ -305,9 +305,8 @@ i915_gem_set_tiling(struct drm_device *d
        struct drm_i915_gem_set_tiling *args = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
-       int ret;
+       int ret = 0;
 
-       ret = 0;
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (&obj->base == NULL)
                return -ENOENT;
@@ -370,15 +369,15 @@ i915_gem_set_tiling(struct drm_device *d
 
                obj->map_and_fenceable =
                        obj->gtt_space == NULL ||
-                   (obj->gtt_offset + obj->base.size <=
-                   dev_priv->mm.gtt_mappable_end &&
-                   i915_gem_object_fence_ok(obj, args->tiling_mode));
+                       (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+                        i915_gem_object_fence_ok(obj, args->tiling_mode));
 
                /* Rebind if we need a change of alignment */
                if (!obj->map_and_fenceable) {
-                       uint32_t unfenced_alignment =
-                           i915_gem_get_unfenced_gtt_alignment(dev,
-                               obj->base.size, args->tiling_mode);
+                       u32 unfenced_alignment =
+                               i915_gem_get_unfenced_gtt_alignment(dev,
+                                                                   obj->base.size,
+                                                                   args->tiling_mode);
                        if (obj->gtt_offset & (unfenced_alignment - 1))
                                ret = i915_gem_object_unbind(obj);
                }
@@ -388,7 +387,6 @@ i915_gem_set_tiling(struct drm_device *d
                                obj->fenced_gpu_access ||
                                obj->fence_reg != I915_FENCE_REG_NONE;
 
-
                        obj->tiling_mode = args->tiling_mode;
                        obj->stride = args->stride;
 
@@ -402,7 +400,7 @@ i915_gem_set_tiling(struct drm_device *d
        drm_gem_object_unreference(&obj->base);
        DRM_UNLOCK(dev);
 
-       return (ret);
+       return ret;
 }
 
 /**
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to