From: Chris Wilson <ch...@chris-wilson.co.uk>

We need to check that we avoid integer overflows when looking up a page,
and so fix all the instances where we have mistakenly used a plain
integer instead of a more suitable long. Be pedantic and add integer
typechecking to the lookups so that we can be sure that we are safe.
In addition, use pgoff_t for the page index, since our page lookups must
remain compatible with the page cache; pgoff_t is currently exactly
unsigned long.
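
For reference, the typechecking pattern amounts to shadowing each lookup
helper with a macro that rejects any index whose type is not exactly
pgoff_t. A minimal standalone sketch of the idea (GCC/Clang, gnu11;
lookup_page() is a hypothetical stand-in, and the in-tree exact_type()
uses BUILD_BUG_ON() and additionally lets integer constants through via
__builtin_constant_p()):

  typedef unsigned long pgoff_t;

  /* Reject any index whose type is not exactly pgoff_t. */
  #define exactly_pgoff_t(n) \
          _Static_assert(__builtin_types_compatible_p(pgoff_t, typeof(n)), \
                         "page index must be pgoff_t")

  static void *lookup_page(pgoff_t n)     /* hypothetical lookup helper */
  {
          (void)n;
          return 0;
  }

  /* Shadow the function with a macro so every caller is typechecked. */
  #define lookup_page(n) ({ \
          exactly_pgoff_t(n); \
          (lookup_page)(n); \
  })

  int main(void)
  {
          pgoff_t idx = 3;
          unsigned int bad = 3;

          lookup_page(idx);        /* builds: index has the exact type */
          /* lookup_page(bad); */  /* would not build: plain int index */
          (void)bad;
          return 0;
  }

With the real macros in place, passing a plain int (or any other
truncating type) to the i915_gem_object_get_*() helpers becomes a build
failure instead of a silent overflow.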

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Signed-off-by: Gwan-gyeong Mun <gwan-gyeong....@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursu...@linux.intel.com>
Cc: Matthew Auld <matthew.a...@intel.com>
Cc: Thomas Hellström <thomas.hellst...@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |  7 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.h    | 67 ++++++++++++++-----
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     | 25 ++++---
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c       |  2 +-
 .../drm/i915/gem/selftests/i915_gem_context.c | 12 ++--
 .../drm/i915/gem/selftests/i915_gem_mman.c    |  8 +--
 .../drm/i915/gem/selftests/i915_gem_object.c  |  8 +--
 drivers/gpu/drm/i915/i915_gem.c               | 18 +++--
 drivers/gpu/drm/i915/i915_utils.h             |  6 ++
 drivers/gpu/drm/i915/i915_vma.c               |  8 +--
 10 files changed, 106 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 06b1b188ce5a..a5af74f78d57 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -421,10 +421,11 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
 static void
 i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
 {
+       pgoff_t idx = offset >> PAGE_SHIFT;
        void *src_map;
        void *src_ptr;
 
-       src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));
+       src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));
 
        src_ptr = src_map + offset_in_page(offset);
        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
@@ -437,9 +438,10 @@ i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset,
 static void
 i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
 {
+       pgoff_t idx = offset >> PAGE_SHIFT;
+       dma_addr_t dma = i915_gem_object_get_dma_address(obj, idx);
        void __iomem *src_map;
        void __iomem *src_ptr;
-       dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);
 
        src_map = io_mapping_map_wc(&obj->mm.region->iomap,
                                    dma - obj->mm.region->region.start,
@@ -468,6 +470,7 @@ i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset
  */
 int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
 {
+       GEM_BUG_ON(overflows_type(offset >> PAGE_SHIFT, pgoff_t));
        GEM_BUG_ON(offset >= obj->base.size);
        GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index e11d82a9f7c3..22c4ba0cd106 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -27,8 +27,10 @@ enum intel_region_id;
  * spot such a local variable, please consider fixing!
  *
  * Aside from our own locals (for which we have no excuse!):
- * - sg_table embeds unsigned int for num_pages
- * - get_user_pages*() mixed ints with longs
+ * - sg_table embeds unsigned int for nents
+ *
+ * We can check for invalidly typed locals with typecheck(), see for example
+ * i915_gem_object_get_sg().
  */
 #define GEM_CHECK_SIZE_OVERFLOW(sz) \
        GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)
@@ -366,41 +368,70 @@ int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 struct scatterlist *
 __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
-                        unsigned int n,
-                        unsigned int *offset, bool dma);
+                        pgoff_t  n,
+                        unsigned int *offset);
+
+#define __i915_gem_object_get_sg(obj, it, n, offset) ({ \
+       exactly_pgoff_t(n); \
+       (__i915_gem_object_get_sg)(obj, it, n, offset); \
+})
 
 static inline struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
-                      unsigned int n,
+i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n,
                       unsigned int *offset)
 {
-       return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
+       return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset);
 }
 
+#define i915_gem_object_get_sg(obj, n, offset) ({ \
+       exactly_pgoff_t(n); \
+       (i915_gem_object_get_sg)(obj, n, offset); \
+})
+
 static inline struct scatterlist *
-i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
-                          unsigned int n,
+i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n,
                           unsigned int *offset)
 {
-       return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
+       return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset);
 }
 
+#define i915_gem_object_get_sg_dma(obj, n, offset) ({ \
+       exactly_pgoff_t(n); \
+       (i915_gem_object_get_sg_dma)(obj, n, offset); \
+})
+
 struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj,
-                        unsigned int n);
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);
+
+#define i915_gem_object_get_page(obj, n) ({ \
+       exactly_pgoff_t(n); \
+       (i915_gem_object_get_page)(obj, n); \
+})
 
 struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
-                              unsigned int n);
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n);
+
+#define i915_gem_object_get_dirty_page(obj, n) ({ \
+       exactly_pgoff_t(n); \
+       (i915_gem_object_get_dirty_page)(obj, n); \
+})
 
 dma_addr_t
-i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
-                                   unsigned long n,
+i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n,
                                    unsigned int *len);
 
+#define i915_gem_object_get_dma_address_len(obj, n, len) ({ \
+       exactly_pgoff_t(n); \
+       (i915_gem_object_get_dma_address_len)(obj, n, len); \
+})
+
 dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
-                               unsigned long n);
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n);
+
+#define i915_gem_object_get_dma_address(obj, n) ({ \
+       exactly_pgoff_t(n); \
+       (i915_gem_object_get_dma_address)(obj, n); \
+})
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 97c820eee115..1d1edcb3514b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -503,14 +503,16 @@ void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
 }
 
 struct scatterlist *
-__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+(__i915_gem_object_get_sg)(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
-                        unsigned int n,
-                        unsigned int *offset,
-                        bool dma)
+                        pgoff_t n,
+                        unsigned int *offset)
+
 {
-       struct scatterlist *sg;
+       const bool dma = iter == &obj->mm.get_dma_page ||
+                        iter == &obj->ttm.get_io_page;
        unsigned int idx, count;
+       struct scatterlist *sg;
 
        might_sleep();
        GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
@@ -618,7 +620,7 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 }
 
 struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+(i915_gem_object_get_page)(struct drm_i915_gem_object *obj, pgoff_t n)
 {
        struct scatterlist *sg;
        unsigned int offset;
@@ -631,8 +633,7 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
 
 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
 struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
-                              unsigned int n)
+(i915_gem_object_get_dirty_page)(struct drm_i915_gem_object *obj, pgoff_t n)
 {
        struct page *page;
 
@@ -644,9 +645,8 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
 }
 
 dma_addr_t
-i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
-                                   unsigned long n,
-                                   unsigned int *len)
+(i915_gem_object_get_dma_address_len)(struct drm_i915_gem_object *obj,
+                                     pgoff_t n, unsigned int *len)
 {
        struct scatterlist *sg;
        unsigned int offset;
@@ -660,8 +660,7 @@ i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
 }
 
 dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
-                               unsigned long n)
+(i915_gem_object_get_dma_address)(struct drm_i915_gem_object *obj, pgoff_t n)
 {
        return i915_gem_object_get_dma_address_len(obj, n, NULL);
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 4c25d9b2f138..bcdbefd8f269 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -711,7 +711,7 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
        GEM_WARN_ON(bo->ttm);
 
        base = obj->mm.region->iomap.base - obj->mm.region->region.start;
-       sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
+       sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs);
 
        return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 93a67422ca3b..84d6edd9c204 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -455,7 +455,8 @@ static int gpu_fill(struct intel_context *ce,
 static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 {
        const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
-       unsigned int n, m, need_flush;
+       unsigned int need_flush;
+       unsigned long n, m;
        int err;
 
        i915_gem_object_lock(obj, NULL);
@@ -485,7 +486,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
 static noinline int cpu_check(struct drm_i915_gem_object *obj,
                              unsigned int idx, unsigned int max)
 {
-       unsigned int n, m, needs_flush;
+       unsigned int needs_flush;
+       unsigned long n;
        int err;
 
        i915_gem_object_lock(obj, NULL);
@@ -494,7 +496,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
                goto out_unlock;
 
        for (n = 0; n < real_page_count(obj); n++) {
-               u32 *map;
+               u32 *map, m;
 
                map = kmap_atomic(i915_gem_object_get_page(obj, n));
                if (needs_flush & CLFLUSH_BEFORE)
@@ -502,7 +504,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
 
                for (m = 0; m < max; m++) {
                        if (map[m] != m) {
-                               pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
+                               pr_err("%pS: Invalid value at object %d page %ld/%ld, offset %d/%d: found %x expected %x\n",
                                       __builtin_return_address(0), idx,
                                       n, real_page_count(obj), m, max,
                                       map[m], m);
@@ -513,7 +515,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
 
                for (; m < DW_PER_PAGE; m++) {
                        if (map[m] != STACK_MAGIC) {
-                               pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
+                               pr_err("%pS: Invalid value at object %d page %ld, offset %d: found %x expected %x (uninitialised)\n",
                                       __builtin_return_address(0), idx, n, m,
                                       map[m], STACK_MAGIC);
                                err = -EINVAL;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 5bc93a1ce3e3..b674c1d7a045 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -93,11 +93,11 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt_view view;
        struct i915_vma *vma;
+       unsigned long offset;
        unsigned long page;
        u32 __iomem *io;
        struct page *p;
        unsigned int n;
-       u64 offset;
        u32 *cpu;
        int err;
 
@@ -154,7 +154,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
        cpu = kmap(p) + offset_in_page(offset);
        drm_clflush_virt_range(cpu, sizeof(*cpu));
        if (*cpu != (u32)page) {
-               pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+               pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
                       page, n,
                       view.partial.offset,
                       view.partial.size,
@@ -210,10 +210,10 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
        for_each_prime_number_from(page, 1, npages) {
                struct i915_ggtt_view view =
                        compute_partial_view(obj, page, MIN_CHUNK_PAGES);
+               unsigned long offset;
                u32 __iomem *io;
                struct page *p;
                unsigned int n;
-               u64 offset;
                u32 *cpu;
 
                GEM_BUG_ON(view.partial.size > nreal);
@@ -250,7 +250,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
                cpu = kmap(p) + offset_in_page(offset);
                drm_clflush_virt_range(cpu, sizeof(*cpu));
                if (*cpu != (u32)page) {
-                       pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+                       pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
                               page, n,
                               view.partial.offset,
                               view.partial.size,
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index fe0a890775e2..bf30763ee6bc 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -33,10 +33,10 @@ static int igt_gem_object(void *arg)
 
 static int igt_gem_huge(void *arg)
 {
-       const unsigned int nreal = 509; /* just to be awkward */
+       const unsigned long nreal = 509; /* just to be awkward */
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
-       unsigned int n;
+       unsigned long n;
        int err;
 
        /* Basic sanitycheck of our huge fake object allocation */
@@ -49,7 +49,7 @@ static int igt_gem_huge(void *arg)
 
        err = i915_gem_object_pin_pages_unlocked(obj);
        if (err) {
-               pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+               pr_err("Failed to allocate %lu pages (%lu total), err=%d\n",
                       nreal, obj->base.size / PAGE_SIZE, err);
                goto out;
        }
@@ -57,7 +57,7 @@ static int igt_gem_huge(void *arg)
        for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
                if (i915_gem_object_get_page(obj, n) !=
                    i915_gem_object_get_page(obj, n % nreal)) {
-                       pr_err("Page lookup mismatch at index %u [%u]\n",
+                       pr_err("Page lookup mismatch at index %lu [%lu]\n",
                               n, n % nreal);
                        err = -EINVAL;
                        goto out_unpin;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 702e5b89be22..dba58a3c3238 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -229,8 +229,9 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args)
 {
        unsigned int needs_clflush;
-       unsigned int idx, offset;
        char __user *user_data;
+       unsigned long offset;
+       pgoff_t idx;
        u64 remain;
        int ret;
 
@@ -383,13 +384,17 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+       unsigned long remain, offset;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        void __user *user_data;
        struct i915_vma *vma;
-       u64 remain, offset;
        int ret = 0;
 
+       if (overflows_type(args->size, remain) ||
+           overflows_type(args->offset, offset))
+               return -EINVAL;
+
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
        vma = i915_gem_gtt_prepare(obj, &node, false);
@@ -540,13 +545,17 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
+       unsigned long remain, offset;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct i915_vma *vma;
-       u64 remain, offset;
        void __user *user_data;
        int ret = 0;
 
+       if (overflows_type(args->size, remain) ||
+           overflows_type(args->offset, offset))
+               return -EINVAL;
+
        if (i915_gem_object_has_struct_page(obj)) {
                /*
                 * Avoid waking the device up if we can fallback, as
@@ -654,8 +663,9 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 {
        unsigned int partial_cacheline_write;
        unsigned int needs_clflush;
-       unsigned int offset, idx;
        void __user *user_data;
+       unsigned long offset;
+       pgoff_t idx;
        u64 remain;
        int ret;
 
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index ea7648e3aa0e..b0b24f352fa9 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -441,4 +441,10 @@ static inline bool i915_run_as_guest(void)
 
 bool i915_vtd_active(struct drm_i915_private *i915);
 
+/* A poor man's -Wconversion: only allow variables of an exact type. */
+#define exact_type(T, n) \
+       BUILD_BUG_ON(!__builtin_constant_p(n) && !__builtin_types_compatible_p(T, typeof(n)))
+
+#define exactly_pgoff_t(n) exact_type(pgoff_t, n)
+
 #endif /* !__I915_UTILS_H */
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 0bffb70b3c5f..686aa0214e68 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -902,7 +902,7 @@ rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
             struct sg_table *st, struct scatterlist *sg)
 {
        unsigned int column, row;
-       unsigned int src_idx;
+       pgoff_t src_idx;
 
        for (column = 0; column < width; column++) {
                unsigned int left;
@@ -1008,7 +1008,7 @@ add_padding_pages(unsigned int count,
 
 static struct scatterlist *
 remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
-                             unsigned int offset, unsigned int alignment_pad,
+                             unsigned long offset, unsigned int alignment_pad,
                              unsigned int width, unsigned int height,
                              unsigned int src_stride, unsigned int dst_stride,
                              struct sg_table *st, struct scatterlist *sg,
@@ -1067,7 +1067,7 @@ remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
 
 static struct scatterlist *
 remap_contiguous_pages(struct drm_i915_gem_object *obj,
-                      unsigned int obj_offset,
+                      pgoff_t obj_offset,
                       unsigned int count,
                       struct sg_table *st, struct scatterlist *sg)
 {
@@ -1100,7 +1100,7 @@ remap_contiguous_pages(struct drm_i915_gem_object *obj,
 
 static struct scatterlist *
 remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
-                              unsigned int obj_offset, unsigned int alignment_pad,
+                              pgoff_t obj_offset, unsigned int alignment_pad,
                               unsigned int size,
                               struct sg_table *st, struct scatterlist *sg,
                               unsigned int *gtt_offset)
-- 
2.34.1
