On 14.06.24 15:21, Thomas Zimmermann wrote:
Add offset and size parameters to ttm_bo_vmap() to allow for partial
mappings of a buffer object. This brings the functionality on par with
ttm_bo_kmap().

Well, the long-term plan was to remove this functionality from ttm_bo_kmap() and to nuke that function sooner or later.

What exactly is the use case for partial mappings?

Regards,
Christian.


Callers pass the byte offset and size within the buffer object and
receive a page-aligned mapping of the buffer object's memory for the
specified area.
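
For illustration, a caller of the extended interface could look like the
sketch below. This is not part of the patch; the helper name, the chosen
range and the error handling are made up, and the sub-page adjustment
follows the kerneldoc added further down:

  /* Illustrative only: map @size bytes starting at byte @offset of @bo. */
  #include <linux/iosys-map.h>
  #include <drm/ttm/ttm_bo.h>

  static int example_vmap_range(struct ttm_buffer_object *bo,
                                unsigned long offset, unsigned long size)
  {
          struct iosys_map map, range;
          int ret;

          ret = ttm_bo_vmap(bo, offset, size, &map);
          if (ret)
                  return ret;

          /*
           * Per the updated kerneldoc the mapping may start at the page
           * boundary below @offset; access the requested bytes through a
           * copy advanced by the in-page remainder and keep @map untouched
           * for ttm_bo_vunmap().
           */
          range = map;
          iosys_map_incr(&range, offset & (PAGE_SIZE - 1));

          /* ... access the range through the iosys_map helpers ... */

          ttm_bo_vunmap(bo, &map);
          return 0;
  }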

Also update all callers of ttm_bo_vmap() for the new parameters. As
before, existing callers map the buffer object's complete memory.

Signed-off-by: Thomas Zimmermann <tzimmerm...@suse.de>
---
  drivers/gpu/drm/drm_gem_ttm_helper.c  |  2 +-
  drivers/gpu/drm/drm_gem_vram_helper.c |  2 +-
  drivers/gpu/drm/loongson/lsdc_gem.c   |  2 +-
  drivers/gpu/drm/qxl/qxl_object.c      |  2 +-
  drivers/gpu/drm/ttm/ttm_bo_util.c     | 21 +++++++++++++++------
  drivers/gpu/drm/xe/xe_lrc.c           |  2 +-
  drivers/gpu/drm/xe/xe_vm.c            |  2 +-
  include/drm/ttm/ttm_bo.h              |  4 +++-
  8 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index 3734aa2d1c5b5..f26b7c9077a68 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -67,7 +67,7 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
  {
        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
-       return ttm_bo_vmap(bo, map);
+       return ttm_bo_vmap(bo, 0, gem->size, map);
  }
  EXPORT_SYMBOL(drm_gem_ttm_vmap);
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 6027584406af6..1670f9a459a9d 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -398,7 +398,7 @@ int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map)
         * no mapping present.
         */
        if (iosys_map_is_null(&gbo->map)) {
-               ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
+               ret = ttm_bo_vmap(&gbo->bo, 0, gbo->bo.base.size, &gbo->map);
                if (ret)
                        return ret;
        }
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c
index a720d8f532093..f709960c781b9 100644
--- a/drivers/gpu/drm/loongson/lsdc_gem.c
+++ b/drivers/gpu/drm/loongson/lsdc_gem.c
@@ -77,7 +77,7 @@ static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *ma
                return ret;
        }
-       ret = ttm_bo_vmap(tbo, &lbo->map);
+       ret = ttm_bo_vmap(tbo, 0, tbo->base.size, &lbo->map);
        if (ret) {
                drm_err(obj->dev, "ttm bo vmap failed\n");
                lsdc_bo_unpin(lbo);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 5893e27a7ae50..9f06d5e26a32c 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -164,7 +164,7 @@ int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
                goto out;
        }
-       r = ttm_bo_vmap(&bo->tbo, &bo->map);
+       r = ttm_bo_vmap(&bo->tbo, 0, bo->tbo.base.size, &bo->map);
        if (r) {
                qxl_bo_unpin_locked(bo);
                return r;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index a9df0deff2deb..31f9772f05dac 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -457,17 +457,23 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
   * ttm_bo_vmap
   *
   * @bo: The buffer object.
+ * @offset: Byte offset into the buffer.
+ * @size: Number of bytes to map.
   * @map: pointer to a struct iosys_map representing the map.
   *
   * Sets up a kernel virtual mapping, using ioremap or vmap to the
   * data in the buffer object. The parameter @map returns the virtual
   * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
+ * The address stored in @map will be aligned to the next lower page
+ * boundary.
   *
   * Returns
   * -ENOMEM: Out of memory.
   * -EINVAL: Invalid range.
   */
-int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
+int ttm_bo_vmap(struct ttm_buffer_object *bo,
+               unsigned long offset, unsigned long size,
+               struct iosys_map *map)
  {
        struct ttm_resource *mem = bo->resource;
        int ret;
@@ -483,18 +489,18 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
                u16 alloc_flags;
                if (mem->bus.addr) {
-                       vaddr_iomem = (void __iomem *)mem->bus.addr;
+                       vaddr_iomem = (u8 __iomem *)mem->bus.addr + offset;
                        alloc_flags = ttm_bo_map_premapped;
                } else if (mem->bus.caching == ttm_write_combined) {
-                       vaddr_iomem = ioremap_wc(mem->bus.offset, bo->base.size);
+                       vaddr_iomem = ioremap_wc(mem->bus.offset + offset, size);
                        alloc_flags = ttm_bo_map_iomap;
  #ifdef CONFIG_X86
                } else if (mem->bus.caching == ttm_cached) {
-                       vaddr_iomem = ioremap_cache(mem->bus.offset, bo->base.size);
+                       vaddr_iomem = ioremap_cache(mem->bus.offset + offset, size);
                        alloc_flags = ttm_bo_map_iomap;
  #endif
                } else {
-                       vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
+                       vaddr_iomem = ioremap(mem->bus.offset + offset, size);
                        alloc_flags = ttm_bo_map_iomap;
                }
@@ -510,6 +516,9 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
                        .no_wait_gpu = false
                };
                struct ttm_tt *ttm = bo->ttm;
+               unsigned long start_page = offset >> PAGE_SHIFT;
+               unsigned long aligned_size = size + (offset - (start_page << PAGE_SHIFT));
+               unsigned long num_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
                pgprot_t prot;
                void *vaddr;
                u16 alloc_flags;
@@ -523,7 +532,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
                 * or to make the buffer object look contiguous.
                 */
                prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
-               vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
+               vaddr = vmap(ttm->pages + start_page, num_pages, 0, prot);
                if (!vaddr)
                        return -ENOMEM;
                alloc_flags = ttm_bo_map_vmap;
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index c1bb85d2e243f..3a68fe6d592ed 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -1595,7 +1595,7 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
                goto put_bo;
        xe_bo_lock(bo, false);
-       if (!ttm_bo_vmap(&bo->ttm, &src)) {
+       if (!ttm_bo_vmap(&bo->ttm, 0, bo->ttm.base.size, &src)) {
                xe_map_memcpy_from(xe_bo_device(bo),
                                   snapshot->lrc_snapshot, &src, snapshot->lrc_offset,
                                   snapshot->lrc_size);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 99bf7412475c0..81306c32f5d09 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -3485,7 +3485,7 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
                if (bo) {
                        xe_bo_lock(bo, false);
-                       err = ttm_bo_vmap(&bo->ttm, &src);
+                       err = ttm_bo_vmap(&bo->ttm, 0, bo->ttm.base.size, &src);
                        if (!err) {
                                xe_map_memcpy_from(xe_bo_device(bo),
                                                   snap->snap[i].data,
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 6ccf96c91f3ae..c421ffe3563b1 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -375,7 +375,9 @@ int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
  int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
                unsigned long num_pages, struct ttm_bo_kmap_obj *map);
  void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
-int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
+int ttm_bo_vmap(struct ttm_buffer_object *bo,
+               unsigned long offset, unsigned long size,
+               struct iosys_map *map);
  void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
  int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
   int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
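
As an aside, the start_page/aligned_size/num_pages computation in the vmap
path above can be checked in isolation. The sketch below is not part of the
patch; it mirrors that arithmetic as a standalone program with a made-up
offset and size and assumes 4 KiB pages:

  #include <stdio.h>

  #define PAGE_SHIFT 12
  #define PAGE_SIZE  (1UL << PAGE_SHIFT)
  #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

  int main(void)
  {
          unsigned long offset = 0x1234; /* byte offset into the BO */
          unsigned long size = 0x2000;   /* bytes requested by the caller */

          unsigned long start_page = offset >> PAGE_SHIFT;
          /* bytes to map, counted from the start of the first mapped page */
          unsigned long aligned_size = size + (offset - (start_page << PAGE_SHIFT));
          unsigned long num_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);

          /* prints: start_page=1 aligned_size=0x2234 num_pages=3 */
          printf("start_page=%lu aligned_size=%#lx num_pages=%lu\n",
                 start_page, aligned_size, num_pages);
          return 0;
  }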
