- transition from "ioctl" interfaces

Expose the per-page-size capability table (shift/sparse/vram/host/comp)
directly in struct nvif_vmm_impl, instead of having the client query each
entry via the NVIF_VMM_V0_PAGE method at VMM construction time.

Signed-off-by: Ben Skeggs <bske...@nvidia.com>
---
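Review note (not part of the patch): with this change a client reads the
page-size capabilities straight out of the impl structure returned by the
VMM constructor, rather than issuing NVIF_VMM_V0_PAGE methods per index.
A minimal sketch of the consumer-side pattern, assuming only the
nvif_vmm/nvif_vmm_impl layout from this patch; the helper name is made up
purely for illustration:

static int
nvif_vmm_biggest_vram_shift(struct nvif_vmm *vmm)
{
	int i;

	/* Illustrative helper, not added by this patch.  impl->page[] is
	 * filled in by nvkm_uvmm_new(), largest page size first, and
	 * impl->page_nr entries are valid.
	 */
	for (i = 0; i < vmm->impl->page_nr; i++) {
		if (vmm->impl->page[i].vram)
			return vmm->impl->page[i].shift;
	}

	return -ENOENT; /* no VRAM-capable page size */
}

This is the same access pattern the nouveau_bo_alloc() hunks below switch
to (vmm->page[i] becomes vmm->impl->page[i]).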
 .../gpu/drm/nouveau/include/nvif/driverif.h   |  8 ++++
 drivers/gpu/drm/nouveau/include/nvif/if000c.h | 12 ------
 drivers/gpu/drm/nouveau/include/nvif/vmm.h    |  8 ----
 drivers/gpu/drm/nouveau/nouveau_bo.c          | 20 +++++-----
 drivers/gpu/drm/nouveau/nvif/vmm.c            | 32 +--------------
 .../gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c    | 40 +++++--------------
 6 files changed, 29 insertions(+), 91 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/include/nvif/driverif.h b/drivers/gpu/drm/nouveau/include/nvif/driverif.h
index 8f1410ab8256..7540fe4d02c0 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driverif.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driverif.h
@@ -101,6 +101,14 @@ struct nvif_vmm_impl {
        u64 start;
        u64 limit;
        u8 page_nr;
+
+       struct {
+               u8 shift;
+               bool sparse;
+               bool vram;
+               bool host;
+               bool comp;
+       } page[8];
 };
 
 struct nvif_mmu_impl {
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if000c.h b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
index c505c5de0088..f554062469c3 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if000c.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if000c.h
@@ -1,7 +1,6 @@
 #ifndef __NVIF_IF000C_H__
 #define __NVIF_IF000C_H__
 
-#define NVIF_VMM_V0_PAGE                                                   0x00
 #define NVIF_VMM_V0_GET                                                    0x01
 #define NVIF_VMM_V0_PUT                                                    0x02
 #define NVIF_VMM_V0_MAP                                                    0x03
@@ -11,17 +10,6 @@
 #define NVIF_VMM_V0_RAW                                                    0x07
 #define NVIF_VMM_V0_MTHD(i)                                         ((i) + 0x80)
 
-struct nvif_vmm_page_v0 {
-       __u8  version;
-       __u8  index;
-       __u8  shift;
-       __u8  sparse;
-       __u8  vram;
-       __u8  host;
-       __u8  comp;
-       __u8  pad07[1];
-};
-
 struct nvif_vmm_get_v0 {
        __u8  version;
 #define NVIF_VMM_GET_V0_ADDR                                               0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/vmm.h b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
index c4b5d53d9bfd..3019ddc98782 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/vmm.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/vmm.h
@@ -20,14 +20,6 @@ struct nvif_vmm {
        const struct nvif_vmm_impl *impl;
        struct nvif_vmm_priv *priv;
        struct nvif_object object;
-
-       struct {
-               u8 shift;
-               bool sparse:1;
-               bool vram:1;
-               bool host:1;
-               bool comp:1;
-       } *page;
 };
 
 int nvif_vmm_ctor(struct nvif_mmu *, const char *name,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 29e00f8fdd12..ed81872e2dba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -273,21 +273,21 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
                         * Skip page sizes that can't support needed domains.
                         */
                        if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
-                           (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+                           (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->impl->page[i].vram)
                                continue;
                        if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
-                           (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+                           (!vmm->impl->page[i].host || vmm->impl->page[i].shift > PAGE_SHIFT))
                                continue;
 
                        /* Select this page size if it's the first that supports
                         * the potential memory domains, or when it's compatible
                         * with the requested compression settings.
                         */
-                       if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+                       if (pi < 0 || !nvbo->comp || vmm->impl->page[i].comp)
                                pi = i;
 
                        /* Stop once the buffer is larger than the current page size. */
-                       if (*size >= 1ULL << vmm->page[i].shift)
+                       if (*size >= 1ULL << vmm->impl->page[i].shift)
                                break;
                }
 
@@ -297,12 +297,12 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
                }
 
                /* Disable compression if suitable settings couldn't be found. */
-               if (nvbo->comp && !vmm->page[pi].comp) {
+               if (nvbo->comp && !vmm->impl->page[pi].comp) {
                        if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
                                nvbo->kind = mmu->impl->kind[nvbo->kind];
                        nvbo->comp = 0;
                }
-               nvbo->page = vmm->page[pi].shift;
+               nvbo->page = vmm->impl->page[pi].shift;
        } else {
                /* reject other tile flags when in VM mode. */
                if (tile_mode)
@@ -319,24 +319,24 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
                         *
                         * Skip page sizes that can't support needed domains.
                         */
-                       if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+                       if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->impl->page[i].vram)
                                continue;
                        if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
-                           (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+                           (!vmm->impl->page[i].host || vmm->impl->page[i].shift > PAGE_SHIFT))
                                continue;
 
                        /* pick the last one as it will be smallest. */
                        pi = i;
 
                        /* Stop once the buffer is larger than the current page size. */
-                       if (*size >= 1ULL << vmm->page[i].shift)
+                       if (*size >= 1ULL << vmm->impl->page[i].shift)
                                break;
                }
                if (WARN_ON(pi < 0)) {
                        kfree(nvbo);
                        return ERR_PTR(-EINVAL);
                }
-               nvbo->page = vmm->page[pi].shift;
+               nvbo->page = vmm->impl->page[pi].shift;
        }
 
        nouveau_bo_fixup_align(nvbo, align, size);
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
index 35564e9c93ab..73c63bfd1e38 100644
--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -196,7 +196,6 @@ nvif_vmm_dtor(struct nvif_vmm *vmm)
        if (!vmm->impl)
                return;
 
-       kfree(vmm->page);
        vmm->impl->del(vmm->priv);
        vmm->impl = NULL;
 }
@@ -207,9 +206,7 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name,
              struct nvif_vmm *vmm)
 {
        const u32 oclass = mmu->impl->vmm.oclass;
-       int ret, i;
-
-       vmm->page = NULL;
+       int ret;
 
        ret = mmu->impl->vmm.new(mmu->priv, type, addr, size, argv, argc, &vmm->impl, &vmm->priv,
                                 nvif_handle(&vmm->object));
@@ -218,32 +215,5 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name,
                return ret;
 
        nvif_object_ctor(&mmu->object, name ?: "nvifVmm", 0, oclass, &vmm->object);
-
-       vmm->page = kmalloc_array(vmm->impl->page_nr, sizeof(*vmm->page),
-                                 GFP_KERNEL);
-       if (!vmm->page) {
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       for (i = 0; i < vmm->impl->page_nr; i++) {
-               struct nvif_vmm_page_v0 args = { .index = i };
-
-               ret = nvif_object_mthd(&vmm->object, NVIF_VMM_V0_PAGE,
-                                      &args, sizeof(args));
-               if (ret)
-                       break;
-
-               vmm->page[i].shift = args.shift;
-               vmm->page[i].sparse = args.sparse;
-               vmm->page[i].vram = args.vram;
-               vmm->page[i].host = args.host;
-               vmm->page[i].comp = args.comp;
-       }
-
-done:
-       if (ret)
-               nvif_vmm_dtor(vmm);
-
        return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index 28d491a03c59..15aa6a37060b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -307,34 +307,6 @@ nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
        return ret;
 }
 
-static int
-nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
-{
-       union {
-               struct nvif_vmm_page_v0 v0;
-       } *args = argv;
-       const struct nvkm_vmm_page *page;
-       int ret = -ENOSYS;
-       u8 type, index, nr;
-
-       page = uvmm->vmm->func->page;
-       for (nr = 0; page[nr].shift; nr++);
-
-       if (!(nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
-               if ((index = args->v0.index) >= nr)
-                       return -EINVAL;
-               type = page[index].type;
-               args->v0.shift = page[index].shift;
-               args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
-               args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
-               args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
-               args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
-       } else
-               return -ENOSYS;
-
-       return 0;
-}
-
 static inline int
 nvkm_uvmm_page_index(struct nvif_vmm_priv *uvmm, u64 size, u8 shift, u8 *refd)
 {
@@ -502,7 +474,6 @@ nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
 {
        struct nvif_vmm_priv *uvmm = container_of(object, typeof(*uvmm), object);
        switch (mthd) {
-       case NVIF_VMM_V0_PAGE  : return nvkm_uvmm_mthd_page  (uvmm, argv, argc);
        case NVIF_VMM_V0_GET   : return nvkm_uvmm_mthd_get   (uvmm, argv, argc);
        case NVIF_VMM_V0_PUT   : return nvkm_uvmm_mthd_put   (uvmm, argv, argc);
        case NVIF_VMM_V0_MAP   : return nvkm_uvmm_mthd_map   (uvmm, argv, argc);
@@ -595,8 +566,17 @@ nvkm_uvmm_new(struct nvkm_mmu *mmu, u8 type, u64 addr, u64 size, void *argv, u32
        uvmm->impl.limit = uvmm->vmm->limit;
 
        page = uvmm->vmm->func->page;
-       while (page && (page++)->shift)
+       for (int i = 0; page->shift; i++, page++) {
+               if (WARN_ON(i >= ARRAY_SIZE(uvmm->impl.page)))
+                       break;
+
+               uvmm->impl.page[i].shift  = page->shift;
+               uvmm->impl.page[i].sparse = !!(page->type & NVKM_VMM_PAGE_SPARSE);
+               uvmm->impl.page[i].vram   = !!(page->type & NVKM_VMM_PAGE_VRAM);
+               uvmm->impl.page[i].host   = !!(page->type & NVKM_VMM_PAGE_HOST);
+               uvmm->impl.page[i].comp   = !!(page->type & NVKM_VMM_PAGE_COMP);
                uvmm->impl.page_nr++;
+       }
 
        *pimpl = &uvmm->impl;
        *ppriv = uvmm;
-- 
2.41.0
