Module Name: src
Committed By: riastradh
Date: Sun Dec 19 11:33:31 UTC 2021
Modified Files:
src/sys/external/bsd/drm2/dist/drm: drm_prime.c
src/sys/external/bsd/drm2/dist/drm/i915: i915_gem_gtt.c i915_gem_gtt.h
i915_scatterlist.h i915_vma.h i915_vma_types.h
src/sys/external/bsd/drm2/dist/drm/i915/gem: i915_gem_clflush.c
i915_gem_dmabuf.c i915_gem_execbuffer.c i915_gem_internal.c
i915_gem_mman.c i915_gem_mman.h i915_gem_object.c i915_gem_object.h
i915_gem_object_types.h i915_gem_pages.c i915_gem_phys.c
i915_gem_pm.c i915_gem_shmem.c i915_gem_stolen.c
src/sys/external/bsd/drm2/dist/drm/i915/gt: intel_gtt.h
src/sys/external/bsd/drm2/dist/include/drm: drm_cache.h drm_prime.h
src/sys/external/bsd/drm2/drm: drm_cache.c
src/sys/external/bsd/drm2/include/asm: uaccess.h
src/sys/external/bsd/drm2/include/drm: bus_dma_hacks.h
src/sys/external/bsd/drm2/include/linux: bitmap.h dma-buf.h
dma-mapping.h mutex.h radix-tree.h scatterlist.h
src/sys/external/bsd/drm2/linux: files.drmkms_linux linux_dma_buf.c
Added Files:
src/sys/external/bsd/drm2/linux: linux_sg.c
Log Message:
drm: Another pass over i915 and some supporting logic.
This makes a shim around sg_table, which essentially represents two
things:
1. an array of pages (roughly corresponding to bus_dma_segment_t[])
2. an array of DMA addresses stored in a bus_dmamap_t
Both parts are optional; different parts of i915 use sg_tables to
pass around one or both of the two parts. This helps to reduce the
ifdefs by quite a bit, although it's not always clear which part of
an sg_table any particular interface is actually using which is why I
was reluctant to do this before.
To generate a diff of this commit:
cvs rdiff -u -r1.15 -r1.16 src/sys/external/bsd/drm2/dist/drm/drm_prime.c
cvs rdiff -u -r1.22 -r1.23 \
src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c
cvs rdiff -u -r1.10 -r1.11 \
src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.h
cvs rdiff -u -r1.5 -r1.6 \
src/sys/external/bsd/drm2/dist/drm/i915/i915_scatterlist.h \
src/sys/external/bsd/drm2/dist/drm/i915/i915_vma.h
cvs rdiff -u -r1.3 -r1.4 \
src/sys/external/bsd/drm2/dist/drm/i915/i915_vma_types.h
cvs rdiff -u -r1.4 -r1.5 \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_clflush.c \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.h \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
cvs rdiff -u -r1.5 -r1.6 \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_dmabuf.c \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.h \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object_types.h
cvs rdiff -u -r1.3 -r1.4 \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_execbuffer.c \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_internal.c \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.c \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
cvs rdiff -u -r1.6 -r1.7 \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_shmem.c
cvs rdiff -u -r1.2 -r1.3 \
src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pm.c
cvs rdiff -u -r1.10 -r1.11 \
src/sys/external/bsd/drm2/dist/drm/i915/gt/intel_gtt.h
cvs rdiff -u -r1.9 -r1.10 \
src/sys/external/bsd/drm2/dist/include/drm/drm_cache.h
cvs rdiff -u -r1.6 -r1.7 \
src/sys/external/bsd/drm2/dist/include/drm/drm_prime.h
cvs rdiff -u -r1.17 -r1.18 src/sys/external/bsd/drm2/drm/drm_cache.c
cvs rdiff -u -r1.9 -r1.10 src/sys/external/bsd/drm2/include/asm/uaccess.h
cvs rdiff -u -r1.21 -r1.22 \
src/sys/external/bsd/drm2/include/drm/bus_dma_hacks.h
cvs rdiff -u -r1.11 -r1.12 src/sys/external/bsd/drm2/include/linux/bitmap.h
cvs rdiff -u -r1.10 -r1.11 src/sys/external/bsd/drm2/include/linux/dma-buf.h
cvs rdiff -u -r1.8 -r1.9 \
src/sys/external/bsd/drm2/include/linux/dma-mapping.h
cvs rdiff -u -r1.16 -r1.17 src/sys/external/bsd/drm2/include/linux/mutex.h
cvs rdiff -u -r1.4 -r1.5 src/sys/external/bsd/drm2/include/linux/radix-tree.h
cvs rdiff -u -r1.3 -r1.4 \
src/sys/external/bsd/drm2/include/linux/scatterlist.h
cvs rdiff -u -r1.31 -r1.32 src/sys/external/bsd/drm2/linux/files.drmkms_linux
cvs rdiff -u -r1.9 -r1.10 src/sys/external/bsd/drm2/linux/linux_dma_buf.c
cvs rdiff -u -r0 -r1.1 src/sys/external/bsd/drm2/linux/linux_sg.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/external/bsd/drm2/dist/drm/drm_prime.c
diff -u src/sys/external/bsd/drm2/dist/drm/drm_prime.c:1.15 src/sys/external/bsd/drm2/dist/drm/drm_prime.c:1.16
--- src/sys/external/bsd/drm2/dist/drm/drm_prime.c:1.15 Sun Dec 19 11:32:53 2021
+++ src/sys/external/bsd/drm2/dist/drm/drm_prime.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: drm_prime.c,v 1.15 2021/12/19 11:32:53 riastradh Exp $ */
+/* $NetBSD: drm_prime.c,v 1.16 2021/12/19 11:33:30 riastradh Exp $ */
/*
* Copyright © 2012 Red Hat
@@ -29,7 +29,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.15 2021/12/19 11:32:53 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: drm_prime.c,v 1.16 2021/12/19 11:33:30 riastradh Exp $");
#include <linux/export.h>
#include <linux/dma-buf.h>
@@ -52,72 +52,6 @@ __KERNEL_RCSID(0, "$NetBSD: drm_prime.c,
#include <linux/nbsd-namespace.h>
-/*
- * We use struct sg_table just to pass around an array of pages from
- * one device to another in drm prime. Since this is _not_ a complete
- * implementation of Linux's sg table abstraction (e.g., it does not
- * remember DMA addresses and RAM pages separately, and it doesn't
- * support the nested chained iteration of Linux scatterlists), we
- * isolate it to this file and make all callers go through a few extra
- * subroutines (drm_prime_sg_size, drm_prime_sg_free, &c.) to use it.
- * Don't use this outside drm prime!
- */
-
-struct sg_table {
- paddr_t *sgt_pgs;
- unsigned sgt_npgs;
-};
-
-static int
-sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned npages, bus_size_t offset, bus_size_t size, gfp_t gfp)
-{
- unsigned i;
-
- KASSERT(offset == 0);
- KASSERT(size == npages << PAGE_SHIFT);
-
- sgt->sgt_pgs = kcalloc(npages, sizeof(sgt->sgt_pgs[0]), gfp);
- if (sgt->sgt_pgs == NULL)
- return -ENOMEM;
- sgt->sgt_npgs = npages;
-
- for (i = 0; i < npages; i++)
- sgt->sgt_pgs[i] = VM_PAGE_TO_PHYS(&pages[i]->p_vmp);
-
- return 0;
-}
-
-static int
-sg_alloc_table_from_bus_dmamem(struct sg_table *sgt, bus_dma_tag_t dmat,
- const bus_dma_segment_t *segs, int nsegs, gfp_t gfp)
-{
- int ret;
-
- KASSERT(nsegs > 0);
- sgt->sgt_pgs = kcalloc(nsegs, sizeof(sgt->sgt_pgs[0]), gfp);
- if (sgt->sgt_pgs == NULL)
- return -ENOMEM;
- sgt->sgt_npgs = nsegs;
-
- /* XXX errno NetBSD->Linux */
- ret = -bus_dmamem_export_pages(dmat, segs, nsegs, sgt->sgt_pgs,
- sgt->sgt_npgs);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void
-sg_free_table(struct sg_table *sgt)
-{
-
- kfree(sgt->sgt_pgs);
- sgt->sgt_pgs = NULL;
- sgt->sgt_npgs = 0;
-}
-
#endif /* __NetBSD__ */
/**
@@ -827,14 +761,12 @@ struct sg_table *drm_gem_map_dma_buf(str
else
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
-#ifndef __NetBSD__ /* We map/unmap elsewhere. */
if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
DMA_ATTR_SKIP_CPU_SYNC)) {
sg_free_table(sgt);
kfree(sgt);
sgt = ERR_PTR(-ENOMEM);
}
-#endif
return sgt;
}
@@ -855,10 +787,8 @@ void drm_gem_unmap_dma_buf(struct dma_bu
if (!sgt)
return;
-#ifndef __NetBSD__ /* We map/unmap elsewhere. */
dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
DMA_ATTR_SKIP_CPU_SYNC);
-#endif
sg_free_table(sgt);
kfree(sgt);
}
@@ -1111,9 +1041,15 @@ EXPORT_SYMBOL(drm_gem_prime_export);
* Drivers must arrange to call drm_prime_gem_destroy() from their
* &drm_gem_object_funcs.free hook when using this function.
*/
+#ifdef __NetBSD__
+struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
+ struct dma_buf *dma_buf,
+ bus_dma_tag_t attach_dev)
+#else
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct dma_buf *dma_buf,
struct device *attach_dev)
+#endif
{
struct dma_buf_attachment *attach;
struct sg_table *sgt;
@@ -1184,7 +1120,11 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
+#ifdef __NetBSD__
+ return drm_gem_prime_import_dev(dev, dma_buf, dev->dmat);
+#else
return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
+#endif
}
EXPORT_SYMBOL(drm_gem_prime_import);
@@ -1218,7 +1158,7 @@ bus_size_t
drm_prime_sg_size(struct sg_table *sg)
{
- return sg->sgt_npgs << PAGE_SHIFT;
+ return sg->sgl->sg_npgs << PAGE_SHIFT;
}
void
@@ -1235,8 +1175,8 @@ drm_prime_sg_to_bus_dmamem(bus_dma_tag_t
{
/* XXX errno NetBSD->Linux */
- return -bus_dmamem_import_pages(dmat, segs, nsegs, rsegs, sgt->sgt_pgs,
- sgt->sgt_npgs);
+ return -bus_dmamem_import_pages(dmat, segs, nsegs, rsegs,
+ sgt->sgl->sg_pgs, sgt->sgl->sg_npgs);
}
int
@@ -1245,10 +1185,10 @@ drm_prime_bus_dmamap_load_sgt(bus_dma_ta
{
bus_dma_segment_t *segs;
bus_size_t size = drm_prime_sg_size(sgt);
- int nsegs = sgt->sgt_npgs;
+ int nsegs = sgt->sgl->sg_npgs;
int ret;
- segs = kcalloc(sgt->sgt_npgs, sizeof(segs[0]), GFP_KERNEL);
+ segs = kcalloc(sgt->sgl->sg_npgs, sizeof(segs[0]), GFP_KERNEL);
if (segs == NULL) {
ret = -ENOMEM;
goto out0;
@@ -1257,7 +1197,7 @@ drm_prime_bus_dmamap_load_sgt(bus_dma_ta
ret = drm_prime_sg_to_bus_dmamem(dmat, segs, nsegs, &nsegs, sgt);
if (ret)
goto out1;
- KASSERT(nsegs <= sgt->sgt_npgs);
+ KASSERT(nsegs <= sgt->sgl->sg_npgs);
/* XXX errno NetBSD->Linux */
ret = -bus_dmamap_load_raw(dmat, map, segs, nsegs, size,
@@ -1274,8 +1214,9 @@ drm_prime_sg_importable(bus_dma_tag_t dm
{
unsigned i;
- for (i = 0; i < sgt->sgt_npgs; i++) {
- if (bus_dmatag_bounces_paddr(dmat, sgt->sgt_pgs[i]))
+ for (i = 0; i < sgt->sgl->sg_npgs; i++) {
+ if (bus_dmatag_bounces_paddr(dmat,
+ VM_PAGE_TO_PHYS(&sgt->sgl->sg_pgs[i]->p_vmp)))
return false;
}
return true;
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c:1.22 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c:1.23
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c:1.22 Sun Dec 19 11:32:54 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_gtt.c,v 1.22 2021/12/19 11:32:54 riastradh Exp $ */
+/* $NetBSD: i915_gem_gtt.c,v 1.23 2021/12/19 11:33:30 riastradh Exp $ */
// SPDX-License-Identifier: MIT
/*
@@ -6,7 +6,7 @@
* Copyright © 2020 Intel Corporation
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_gtt.c,v 1.22 2021/12/19 11:32:54 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_gtt.c,v 1.23 2021/12/19 11:33:30 riastradh Exp $");
#include <linux/slab.h> /* fault-inject.h is not standalone! */
@@ -41,23 +41,15 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_gtt
#define _PAGE_PAT PTE_PAT /* 0x80 page attribute table on PTE */
#endif
-#ifdef __NetBSD__
-int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
- bus_dmamap_t pages)
-#else
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
-#endif
{
do {
#ifdef __NetBSD__
- /*
- * XXX Not sure whether caller should be passing DMA
- * map or page list.
- */
- if (bus_dmamap_load_pages(obj->base.dev->dmat, pages,
- obj->mm.pagearray, obj->base.size, BUS_DMA_NOWAIT)
- == 0)
+ if (dma_map_sg_attrs(obj->base.dev->dmat,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_NO_WARN))
return 0;
#else
if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
@@ -83,17 +75,12 @@ int i915_gem_gtt_prepare_pages(struct dr
return -ENOSPC;
}
-#ifdef __NetBSD__
-void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
- bus_dmamap_t pages)
-#else
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
-#endif
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
#ifdef __NetBSD__
- bus_dma_tag_t dmat = dev_priv->drm.dmat;
+ bus_dma_tag_t kdev = dev_priv->drm.dmat;
#else
struct device *kdev = &dev_priv->drm.pdev->dev;
#endif
@@ -109,11 +96,7 @@ void i915_gem_gtt_finish_pages(struct dr
}
}
-#ifdef __NetBSD__
- bus_dmamap_unload(dmat, pages);
-#else
dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
-#endif
}
/**
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.h
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.h:1.10 src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.h:1.11
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.h:1.10 Sun Dec 19 11:31:40 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_gem_gtt.h Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_gtt.h,v 1.10 2021/12/19 11:31:40 riastradh Exp $ */
+/* $NetBSD: i915_gem_gtt.h,v 1.11 2021/12/19 11:33:30 riastradh Exp $ */
/* SPDX-License-Identifier: MIT */
/*
@@ -19,17 +19,10 @@
struct drm_i915_gem_object;
struct i915_address_space;
-#ifdef __NetBSD__
-int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
- bus_dmamap_t pages);
-void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
- bus_dmamap_t pages);
-#else
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
-#endif
int i915_gem_gtt_reserve(struct i915_address_space *vm,
struct drm_mm_node *node,
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_scatterlist.h
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_scatterlist.h:1.5 src/sys/external/bsd/drm2/dist/drm/i915/i915_scatterlist.h:1.6
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_scatterlist.h:1.5 Sun Dec 19 11:11:35 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_scatterlist.h Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_scatterlist.h,v 1.5 2021/12/19 11:11:35 riastradh Exp $ */
+/* $NetBSD: i915_scatterlist.h,v 1.6 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -15,7 +15,32 @@
#include "i915_gem.h"
-#ifdef __linux__
+#ifdef __NetBSD__
+
+struct sgt_iter {
+ unsigned i;
+};
+
+#define for_each_sgt_page(pp, iter, sgt) \
+ for ((iter)->i = 0; \
+ ((iter)->i < (sgt)->sgt_npgs \
+ ? ((pp) = (sgt)->sgt_pgs[(iter)->i], 1) \
+ : 0); \
+ (iter)->i++)
+
+static inline unsigned
+i915_sg_page_sizes(struct scatterlist *sg)
+{
+ unsigned i, page_sizes = 0;
+
+ for (i = 0; i < sg->sg_dmamap->dm_nsegs; i++)
+ page_sizes |= sg->sg_dmamap->dm_segs[i].ds_len;
+
+ return page_sizes;
+}
+
+#else
+
/*
* Optimised SGL iterator for GEM objects
*/
@@ -124,6 +149,7 @@ static inline unsigned int i915_sg_segme
return size;
}
+
#endif
bool i915_sg_trim(struct sg_table *orig_st);
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_vma.h
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_vma.h:1.5 src/sys/external/bsd/drm2/dist/drm/i915/i915_vma.h:1.6
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_vma.h:1.5 Sun Dec 19 11:12:06 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_vma.h Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_vma.h,v 1.5 2021/12/19 11:12:06 riastradh Exp $ */
+/* $NetBSD: i915_vma.h,v 1.6 2021/12/19 11:33:30 riastradh Exp $ */
/*
* Copyright © 2016 Intel Corporation
@@ -62,19 +62,21 @@ int __must_check i915_vma_move_to_active
#ifdef __linux__
#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
+#define __i915_vma_flags_const(v) ((const unsigned long *)&(v)->flags.counter)
#else
#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags)
+#define __i915_vma_flags_const(v) ((const unsigned long *)&(v)->flags)
#endif
-static inline bool i915_vma_is_ggtt(struct i915_vma *vma)
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
{
- return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
+ return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags_const(vma));
}
-static inline bool i915_vma_has_ggtt_write(struct i915_vma *vma)
+static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
{
- return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma));
+ return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags_const(vma));
}
static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
@@ -91,9 +93,9 @@ static inline bool i915_vma_unset_ggtt_w
void i915_vma_flush_writes(struct i915_vma *vma);
-static inline bool i915_vma_is_map_and_fenceable(struct i915_vma *vma)
+static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
- return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
+ return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags_const(vma));
}
static inline bool i915_vma_set_userfault(struct i915_vma *vma)
@@ -107,17 +109,17 @@ static inline void i915_vma_unset_userfa
return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
}
-static inline bool i915_vma_has_userfault(struct i915_vma *vma)
+static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
{
- return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma));
+ return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags_const(vma));
}
-static inline bool i915_vma_is_closed(struct i915_vma *vma)
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
{
return !list_empty(&vma->closed_link);
}
-static inline u32 i915_ggtt_offset(struct i915_vma *vma)
+static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -126,7 +128,7 @@ static inline u32 i915_ggtt_offset(struc
return lower_32_bits(vma->node.start);
}
-static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
+static inline u32 i915_ggtt_pin_bias(const struct i915_vma *vma)
{
return i915_vm_to_ggtt(vma->vm)->pin_bias;
}
@@ -322,9 +324,7 @@ static inline struct page *i915_vma_firs
{
GEM_BUG_ON(!vma->pages);
#ifdef __NetBSD__
- GEM_BUG_ON(!vma->segs);
- return container_of(PHYS_TO_VM_PAGE(vma->segs[0].ds_addr), struct page,
- p_vmp);
+ return vma->pages->sgl->sg_pgs[0];
#else
return sg_page(vma->pages->sgl);
#endif
Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_vma_types.h
diff -u src/sys/external/bsd/drm2/dist/drm/i915/i915_vma_types.h:1.3 src/sys/external/bsd/drm2/dist/drm/i915/i915_vma_types.h:1.4
--- src/sys/external/bsd/drm2/dist/drm/i915/i915_vma_types.h:1.3 Sun Dec 19 01:24:26 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/i915_vma_types.h Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_vma_types.h,v 1.3 2021/12/19 01:24:26 riastradh Exp $ */
+/* $NetBSD: i915_vma_types.h,v 1.4 2021/12/19 11:33:30 riastradh Exp $ */
/* SPDX-License-Identifier: MIT */
/*
@@ -183,13 +183,7 @@ struct i915_vma {
struct drm_i915_gem_object *obj;
struct dma_resv *resv; /** Alias of obj->resv */
-#ifdef __NetBSD__
- bus_dma_segment_t *segs;
- int nsegs;
- bus_dmamap_t pages;
-#else
struct sg_table *pages;
-#endif
void __iomem *iomap;
void *private; /* owned by creator */
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_clflush.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_clflush.c:1.4 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_clflush.c:1.5
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_clflush.c:1.4 Sun Dec 19 11:32:53 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_clflush.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_clflush.c,v 1.4 2021/12/19 11:32:53 riastradh Exp $ */
+/* $NetBSD: i915_gem_clflush.c,v 1.5 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_clflush.c,v 1.4 2021/12/19 11:32:53 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_clflush.c,v 1.5 2021/12/19 11:33:30 riastradh Exp $");
#include "display/intel_frontbuffer.h"
@@ -24,11 +24,7 @@ struct clflush {
static void __do_clflush(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-#ifdef __NetBSD__
- drm_clflush_pages(obj->mm.pagearray, obj->base.size >> PAGE_SHIFT);
-#else
drm_clflush_sg(obj->mm.pages);
-#endif
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.h
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.h:1.4 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.h:1.5
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.h:1.4 Sun Dec 19 11:10:56 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.h Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_object.h,v 1.4 2021/12/19 11:10:56 riastradh Exp $ */
+/* $NetBSD: i915_gem_object.h,v 1.5 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -37,11 +37,7 @@ i915_gem_object_create_shmem_from_data(s
extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
-#ifdef __NetBSD__
- bus_dmamap_t pages,
-#else
struct sg_table *pages,
-#endif
bool needs_clflush);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
@@ -280,13 +276,8 @@ i915_gem_object_get_dma_address(struct d
unsigned long n);
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-#ifdef __NetBSD__
- bus_dmamap_t pages
-#else
struct sg_table *pages,
- unsigned int sg_page_sizes
-#endif
- );
+ unsigned int sg_page_sizes);
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c:1.4 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c:1.5
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c:1.4 Sun Dec 19 01:34:08 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_pages.c,v 1.4 2021/12/19 01:34:08 riastradh Exp $ */
+/* $NetBSD: i915_gem_pages.c,v 1.5 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_pages.c,v 1.4 2021/12/19 01:34:08 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_pages.c,v 1.5 2021/12/19 11:33:30 riastradh Exp $");
#include "i915_drv.h"
#include "i915_gem_object.h"
@@ -15,14 +15,14 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_pag
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
-void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
#ifdef __NetBSD__
- bus_dmamap_t pages,
-#else
- struct sg_table *pages,
- unsigned int sg_page_sizes
+#include <sys/param.h>
+#include <uvm/uvm_extern.h>
#endif
- )
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ unsigned int sg_page_sizes)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned long supported = INTEL_INFO(i915)->page_sizes;
@@ -55,10 +55,8 @@ void __i915_gem_object_set_pages(struct
obj->mm.quirked = true;
}
-#ifndef __NetBSD__
GEM_BUG_ON(!sg_page_sizes);
obj->mm.page_sizes.phys = sg_page_sizes;
-#endif
/*
* Calculate the supported page-sizes which fit into the given
@@ -172,10 +170,14 @@ static void __i915_gem_object_reset_page
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
+#ifdef __NetBSD__
+ uvm_km_free(kernel_map, (vaddr_t)ptr, obj->base.size, UVM_KMF_VAONLY);
+#else
if (is_vmalloc_addr(ptr))
vunmap(ptr);
else
kunmap(kmap_to_page(ptr));
+#endif
}
struct sg_table *
@@ -248,17 +250,67 @@ unlock:
return err;
}
+#ifndef __NetBSD__
static inline pte_t iomap_pte(resource_size_t base,
dma_addr_t offset,
pgprot_t prot)
{
return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
}
+#endif
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
+#ifdef __NetBSD__
+ vaddr_t va;
+ struct page *page;
+ paddr_t pa;
+ unsigned i;
+ int kmflags = UVM_KMF_VAONLY|UVM_KMF_WAITVA;
+ int prot = VM_PROT_READ|VM_PROT_WRITE;
+ int flags = 0;
+
+ /*
+ * XXX Be nice if we had bus_dmamem segments so we could use
+ * bus_dmamem_map, but we don't so we can't.
+ */
+
+ /* Verify the object is reasonable to map. */
+ /* XXX sync with below */
+ if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
+ return NULL;
+
+ /* Incorporate mapping type into pmap flags. */
+ switch (type) {
+ case I915_MAP_WC:
+ flags |= PMAP_WRITE_COMBINE;
+ break;
+ case I915_MAP_WB:
+ default:
+ break;
+ }
+
+ /* Allow failure if >1 page. */
+ if (obj->base.size > PAGE_SIZE)
+ kmflags |= UVM_KMF_CANFAIL;
+
+ /* Allocate a contiguous chunk of KVA. */
+ va = uvm_km_alloc(kernel_map, obj->base.size, PAGE_SIZE, kmflags);
+ if (va == 0)
+ return NULL;
+
+ /* Wire the KVA to the right physical addresses. */
+ for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+ page = obj->mm.pages->sgl->sg_pgs[i];
+ pa = VM_PAGE_TO_PHYS(&page->p_vmp);
+ pmap_kenter_pa(va + i*PAGE_SIZE, pa, prot, flags);
+ }
+ pmap_update(pmap_kernel());
+
+ return (void *)va;
+#else
unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt = obj->mm.pages;
pte_t *stack[32], **mem;
@@ -323,6 +375,7 @@ static void *i915_gem_object_map(struct
kvfree(mem);
return area->addr;
+#endif
}
/* get, pin, and map the pages of the object into kernel space */
@@ -540,28 +593,9 @@ struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
#ifdef __NetBSD__
- struct vm_page *page;
-
- if (obj->phys_handle) {
- vaddr_t va = (vaddr_t)obj->phys_handle->vaddr;
- paddr_t pa;
- if (!pmap_extract(pmap_kernel(), va + n*PAGE_SIZE, &pa))
- panic("i915 gem object phys-attached but not mapped:"
- " obj=%p pgno=%d va=%p", obj, n,
- obj->phys_handle->vaddr);
- page = PHYS_TO_VM_PAGE(pa);
- } else {
- /*
- * Pages must be pinned so that we need not hold the
- * lock to prevent them from disappearing.
- */
- KASSERT(obj->mm.pages != NULL);
- mutex_enter(obj->base.filp->vmobjlock);
- page = uvm_pagelookup(obj->base.filp, ptoa(n));
- mutex_exit(obj->base.filp->vmobjlock);
- }
- KASSERT(page != NULL);
- return container_of(page, struct page, p_vmp);
+ GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
+ KASSERT(n < obj->mm.pages->sgl->sg_npgs);
+ return obj->mm.pages->sgl->sg_pgs[n];
#else
struct scatterlist *sg;
unsigned int offset;
@@ -593,13 +627,16 @@ i915_gem_object_get_dma_address_len(stru
unsigned int *len)
{
#ifdef __NetBSD__
+ bus_dmamap_t map = obj->mm.pages->sgl->sg_dmamap;
bus_addr_t poff = (bus_addr_t)n << PAGE_SHIFT;
unsigned seg;
- for (seg = 0; seg < obj->mm.pages->dm_nsegs; seg++) {
- if (poff <= obj->mm.pages->dm_segs[seg].ds_len)
- return obj->mm.pages->dm_segs[seg].ds_addr + poff;
- poff -= obj->mm.pages->dm_segs[seg].ds_len;
+ for (seg = 0; seg < map->dm_nsegs; seg++) {
+ if (poff < map->dm_segs[seg].ds_len) {
+ *len = map->dm_segs[seg].ds_len - poff;
+ return map->dm_segs[seg].ds_addr + poff;
+ }
+ poff -= map->dm_segs[seg].ds_len;
}
KASSERT(0);
return 0;
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_dmabuf.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_dmabuf.c:1.5 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_dmabuf.c:1.6
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_dmabuf.c:1.5 Sun Dec 19 11:32:53 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_dmabuf.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_dmabuf.c,v 1.5 2021/12/19 11:32:53 riastradh Exp $ */
+/* $NetBSD: i915_gem_dmabuf.c,v 1.6 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_dmabuf.c,v 1.5 2021/12/19 11:32:53 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_dmabuf.c,v 1.6 2021/12/19 11:33:30 riastradh Exp $");
#include <linux/dma-buf.h>
#include <linux/highmem.h>
@@ -29,23 +29,13 @@ static struct sg_table *i915_gem_map_dma
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
struct sg_table *st;
-#ifdef __NetBSD__
- int ret;
-#else
struct scatterlist *src, *dst;
int ret, i;
-#endif
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
-#ifdef __NetBSD__
- st = drm_prime_pages_to_sg(obj->mm.pagearray,
- obj->base.size >> PAGE_SHIFT);
- if (IS_ERR(st))
- goto err_unpin_pages;
-#else
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
@@ -57,6 +47,14 @@ static struct sg_table *i915_gem_map_dma
if (ret)
goto err_free;
+#ifdef __NetBSD__
+ __USE(i);
+ __USE(src);
+ __USE(dst);
+ memcpy(st->sgl->sg_pgs, obj->mm.pages->sgl->sg_pgs,
+ obj->mm.pages->nents * sizeof(st->sgl->sg_pgs[0]));
+#else
+
src = obj->mm.pages->sgl;
dst = st->sgl;
for (i = 0; i < obj->mm.pages->nents; i++) {
@@ -64,21 +62,19 @@ static struct sg_table *i915_gem_map_dma
dst = sg_next(dst);
src = sg_next(src);
}
+#endif
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
ret = -ENOMEM;
goto err_free_sg;
}
-#endif
return st;
-#ifndef __NetBSD__
err_free_sg:
sg_free_table(st);
err_free:
kfree(st);
-#endif
err_unpin_pages:
i915_gem_object_unpin_pages(obj);
err:
@@ -91,13 +87,9 @@ static void i915_gem_unmap_dma_buf(struc
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-#ifdef __NetBSD__
- drm_prime_sg_free(sg);
-#else
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
kfree(sg);
-#endif
i915_gem_object_unpin_pages(obj);
}
@@ -282,7 +274,7 @@ struct drm_gem_object *i915_gem_prime_im
}
/* need to attach */
- attach = dma_buf_attach(dma_buf, dev->dev);
+ attach = dma_buf_attach(dma_buf, dev->dmat);
if (IS_ERR(attach))
return ERR_CAST(attach);
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.h
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.h:1.5 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.h:1.6
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.h:1.5 Sun Dec 19 11:19:40 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.h Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_mman.h,v 1.5 2021/12/19 11:19:40 riastradh Exp $ */
+/* $NetBSD: i915_gem_mman.h,v 1.6 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -21,6 +21,7 @@ struct mutex;
int i915_gem_mmap_gtt_version(void);
#ifdef __NetBSD__
+const struct uvm_pagerops *const i915_gem_uvm_ops;
int i915_gem_mmap_object(struct drm_device *, off_t, size_t, int,
struct uvm_object **, voff_t *, struct file *);
#else
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object_types.h
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object_types.h:1.5 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object_types.h:1.6
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object_types.h:1.5 Sun Dec 19 11:32:53 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object_types.h Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_object_types.h,v 1.5 2021/12/19 11:32:53 riastradh Exp $ */
+/* $NetBSD: i915_gem_object_types.h,v 1.6 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -214,15 +214,21 @@ struct drm_i915_gem_object {
struct list_head region_link;
#ifdef __NetBSD__
- struct page **pagearray;/* wired pages of normal objects */
- struct sg_table *sg; /* drm prime objects */
- bus_dma_segment_t *segs;/* internal objects */
- unsigned nsegs;
- int rsegs;
- bus_dmamap_t pages; /* expedient misnomer */
-#else
- struct sg_table *pages;
+ /* internal objects */
+ union {
+ struct {
+ bus_dma_segment_t *segs;
+ int nsegs;
+ int rsegs;
+ } internal;
+ struct {
+ bus_dma_segment_t seg;
+ void *kva;
+ } phys;
+ } u;
#endif
+
+ struct sg_table *pages;
void *mapping;
struct i915_page_sizes {
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_execbuffer.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_execbuffer.c:1.3 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_execbuffer.c:1.4
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_execbuffer.c:1.3 Sun Dec 19 01:39:57 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_execbuffer.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_execbuffer.c,v 1.3 2021/12/19 01:39:57 riastradh Exp $ */
+/* $NetBSD: i915_gem_execbuffer.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_execbuffer.c,v 1.3 2021/12/19 01:39:57 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_execbuffer.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $");
#include <linux/intel-iommu.h>
#include <linux/dma-resv.h>
@@ -17,6 +17,10 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_exe
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>
+#ifdef __NetBSD__
+#include <sys/filedesc.h>
+#endif
+
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_ioctls.h"
@@ -2667,7 +2671,7 @@ i915_gem_do_execbuffer(struct drm_device
#ifdef __NetBSD__
err = -fd_allocfile(&fp, &out_fence_fd);
if (err)
- goto err_in_fence;
+ goto err_exec_fence;
#else
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
@@ -2817,7 +2821,11 @@ err_request:
if (out_fence) {
if (err == 0) {
+#ifdef __NetBSD__
+ fd_affix(curproc, fp, out_fence_fd);
+#else
fd_install(out_fence_fd, out_fence->file);
+#endif
args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
args->rsvd2 |= (u64)out_fence_fd << 32;
out_fence_fd = -1;
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_internal.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_internal.c:1.3 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_internal.c:1.4
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_internal.c:1.3 Sun Dec 19 01:38:51 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_internal.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_internal.c,v 1.3 2021/12/19 01:38:51 riastradh Exp $ */
+/* $NetBSD: i915_gem_internal.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_internal.c,v 1.3 2021/12/19 01:38:51 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_internal.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $");
#include <linux/scatterlist.h>
#include <linux/slab.h>
@@ -44,55 +44,80 @@ static int i915_gem_object_get_pages_int
struct drm_i915_private *i915 = to_i915(obj->base.dev);
#ifdef __NetBSD__
bus_dma_tag_t dmat = i915->drm.dmat;
- bus_dmamap_t map;
+ struct sg_table *sgt = NULL;
size_t nsegs;
- unsigned sizes, seg;
+ bool alloced = false, prepared = false;
int ret;
- KASSERT(obj->mm.segs == NULL);
- nsegs = obj->mm.nsegs = obj->base.size/PAGE_SIZE;
- if (nsegs > UINT_MAX || nsegs > SIZE_MAX/sizeof(obj->mm.segs[0]))
- return -ENOMEM;
- obj->mm.segs = kmem_alloc(nsegs * sizeof(obj->mm.segs[0]), KM_NOSLEEP);
- if (obj->mm.segs == NULL)
- return -ENOMEM;
+ obj->mm.u.internal.rsegs = obj->mm.u.internal.nsegs = 0;
- /* XXX errno NetBSD->Linux */
- ret = -bus_dmamem_alloc(dmat, obj->base.size, PAGE_SIZE, 0,
- obj->mm.segs, nsegs, &obj->mm.rsegs, BUS_DMA_NOWAIT);
- if (ret)
- goto out0;
+ KASSERT(obj->mm.u.internal.segs == NULL);
+ nsegs = obj->base.size >> PAGE_SHIFT;
+ if (nsegs > INT_MAX ||
+ nsegs > SIZE_MAX/sizeof(obj->mm.u.internal.segs[0])) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ obj->mm.u.internal.segs = kmem_alloc(
+ nsegs * sizeof(obj->mm.u.internal.segs[0]),
+ KM_NOSLEEP);
+ if (obj->mm.u.internal.segs == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ obj->mm.u.internal.nsegs = nsegs;
/* XXX errno NetBSD->Linux */
- ret = -bus_dmamap_create(dmat, obj->base.size, obj->mm.rsegs,
- obj->base.size, 0, BUS_DMA_NOWAIT, &map);
+ ret = -bus_dmamem_alloc(dmat, obj->base.size, PAGE_SIZE, 0,
+ obj->mm.u.internal.segs, nsegs, &obj->mm.u.internal.rsegs,
+ BUS_DMA_NOWAIT);
if (ret)
- goto out1;
+ goto out;
- /* XXX errno NetBSD->Linux */
- ret = -bus_dmamap_load_raw(dmat, map, obj->mm.segs, obj->mm.rsegs,
- obj->base.size, BUS_DMA_NOWAIT);
- if (ret)
- goto out2;
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (sgt == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (sg_alloc_table_from_bus_dmamem(sgt, dmat, obj->mm.u.internal.segs,
+ obj->mm.u.internal.rsegs, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ alloced = true;
- ret = i915_gem_gtt_prepare_pages(obj, map);
+ ret = i915_gem_gtt_prepare_pages(obj, sgt);
if (ret)
- goto out3;
-
- for (sizes = 0, seg = 0; seg < map->dm_nsegs; seg++)
- sizes |= map->dm_segs[seg].ds_len;
+ goto out;
+ prepared = true;
obj->mm.madv = I915_MADV_DONTNEED;
- __i915_gem_object_set_pages(obj, map, sizes);
+ __i915_gem_object_set_pages(obj, sgt, i915_sg_page_sizes(sgt->sgl));
return 0;
-out4: __unused
- i915_gem_gtt_finish_pages(obj, map);
-out3: bus_dmamap_unload(dmat, obj->mm.pages);
-out2: bus_dmamap_destroy(dmat, obj->mm.pages);
-out1: bus_dmamem_free(dmat, obj->mm.segs, obj->mm.rsegs);
-out0: kmem_free(obj->mm.segs, nsegs * sizeof(obj->mm.segs[0]));
+out: if (ret) {
+ if (prepared)
+ i915_gem_gtt_finish_pages(obj, sgt);
+ if (alloced)
+ sg_free_table(sgt);
+ if (sgt) {
+ kfree(sgt);
+ sgt = NULL;
+ }
+ if (obj->mm.u.internal.rsegs) {
+ bus_dmamem_free(dmat, obj->mm.u.internal.segs,
+ obj->mm.u.internal.rsegs);
+ obj->mm.u.internal.rsegs = 0;
+ }
+ if (obj->mm.u.internal.nsegs) {
+ kmem_free(obj->mm.u.internal.segs,
+ (obj->mm.u.internal.nsegs *
+ sizeof(obj->mm.u.internal.segs[0])));
+ obj->mm.u.internal.nsegs = 0;
+ obj->mm.u.internal.segs = NULL;
+ }
+ }
return ret;
#else
struct sg_table *st;
@@ -190,24 +215,20 @@ err:
#endif
}
-#ifdef __NetBSD__
-static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
- bus_dmamap_t pages)
-#else
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
struct sg_table *pages)
-#endif
{
i915_gem_gtt_finish_pages(obj, pages);
#ifdef __NetBSD__
- bus_dma_tag_t dmat = obj->base.dev->dmat;
- bus_dmamap_unload(dmat, pages);
- bus_dmamap_destroy(dmat, pages);
- bus_dmamem_free(dmat, obj->mm.segs, obj->mm.rsegs);
- obj->mm.rsegs = 0;
- kmem_free(obj->mm.segs, obj->mm.nsegs * sizeof(obj->mm.segs[0]));
- obj->mm.segs = NULL;
- obj->mm.nsegs = 0;
+ sg_free_table(pages);
+ kfree(pages);
+ bus_dmamem_free(obj->base.dev->dmat, obj->mm.u.internal.segs,
+ obj->mm.u.internal.rsegs);
+ obj->mm.u.internal.rsegs = 0;
+ kmem_free(obj->mm.u.internal.segs,
+ obj->mm.u.internal.nsegs * sizeof(obj->mm.u.internal.segs[0]));
+ obj->mm.u.internal.nsegs = 0;
+ obj->mm.u.internal.segs = NULL;
#else
internal_free_pages(pages);
#endif
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.c:1.3 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.c:1.4
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.c:1.3 Sun Dec 19 01:34:08 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_object.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_object.c,v 1.3 2021/12/19 01:34:08 riastradh Exp $ */
+/* $NetBSD: i915_gem_object.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $ */
/*
* Copyright © 2017 Intel Corporation
@@ -25,8 +25,9 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_object.c,v 1.3 2021/12/19 01:34:08 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_object.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $");
+#include <linux/bitmap.h>
#include <linux/sched/mm.h>
#include "display/intel_frontbuffer.h"
@@ -39,6 +40,8 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_obj
#include "i915_globals.h"
#include "i915_trace.h"
+#include <linux/nbsd-namespace.h>
+
static struct i915_global_object {
struct i915_global base;
struct kmem_cache *slab_objects;
@@ -68,7 +71,11 @@ void i915_gem_object_init(struct drm_i91
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->mmo.lock);
+#ifdef __NetBSD__
+ memset(obj->mmo.offsets, 0, sizeof(obj->mmo.offsets));
+#else
obj->mmo.offsets = RB_ROOT;
+#endif
init_rcu_head(&obj->rcu);
@@ -124,8 +131,17 @@ void i915_gem_close_object(struct drm_ge
i915_gem_object_unlock(obj);
spin_lock(&obj->mmo.lock);
+#ifdef __NetBSD__
+ __USE(mn);
+ for (enum i915_mmap_type t = 0; t < I915_MMAP_NTYPES; t++) {
+ if ((mmo = obj->mmo.offsets[t]) == NULL)
+ continue;
+ drm_vma_node_revoke(&mmo->vma_node, file);
+ }
+#else
rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
drm_vma_node_revoke(&mmo->vma_node, file);
+#endif
spin_unlock(&obj->mmo.lock);
list_for_each_entry_safe(lut, ln, &close, obj_link) {
@@ -204,6 +220,17 @@ static void __i915_gem_free_objects(stru
i915_gem_object_release_mmap(obj);
+#ifdef __NetBSD__
+ __USE(mn);
+ for (enum i915_mmap_type t = 0; t < I915_MMAP_NTYPES; t++) {
+ if ((mmo = obj->mmo.offsets[t]) == NULL)
+ continue;
+ drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node);
+ kfree(mmo);
+ }
+ memset(obj->mmo.offsets, 0, sizeof(obj->mmo.offsets));
+#else
rbtree_postorder_for_each_entry_safe(mmo, mn,
&obj->mmo.offsets,
offset) {
@@ -212,6 +239,7 @@ static void __i915_gem_free_objects(stru
kfree(mmo);
}
obj->mmo.offsets = RB_ROOT;
+#endif
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c:1.3 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c:1.4
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c:1.3 Sun Dec 19 01:34:08 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_phys.c,v 1.3 2021/12/19 01:34:08 riastradh Exp $ */
+/* $NetBSD: i915_gem_phys.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.3 2021/12/19 01:34:08 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $");
#include <linux/highmem.h>
#include <linux/shmem_fs.h>
@@ -24,9 +24,15 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_phy
#include "i915_gem_region.h"
#include "i915_scatterlist.h"
+#include <linux/nbsd-namespace.h>
+
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
+#ifdef __NetBSD__
+ struct uvm_object *uobj = obj->base.filp;
+#else
struct address_space *mapping = obj->base.filp->f_mapping;
+#endif
struct scatterlist *sg;
struct sg_table *st;
dma_addr_t dma;
@@ -37,16 +43,40 @@ static int i915_gem_object_get_pages_phy
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL;
+
/*
* Always aligning to the object size, allows a single allocation
* to handle all possible callers, and given typical object sizes,
* the alignment of the buddy allocation will naturally match.
*/
+#ifdef __NetBSD__
+ __USE(dma);
+ bus_dma_tag_t dmat = obj->base.dev->dmat;
+ bool loaded = false;
+ int rsegs = 0;
+ int ret;
+
+ vaddr = NULL;
+
+ /* XXX errno NetBSD->Linux */
+ ret = -bus_dmamem_alloc(dmat, roundup_pow_of_two(obj->base.size),
+ roundup_pow_of_two(obj->base.size), 0, &obj->mm.u.phys.seg, 1,
+ &rsegs, BUS_DMA_WAITOK);
+ if (ret)
+ return -ENOMEM;
+ KASSERT(rsegs == 1);
+ ret = -bus_dmamem_map(dmat, &obj->mm.u.phys.seg, 1,
+ roundup_pow_of_two(obj->base.size), &vaddr, BUS_DMA_WAITOK);
+ if (ret)
+ goto err_pci;
+ obj->mm.u.phys.kva = vaddr;
+#else
vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
roundup_pow_of_two(obj->base.size),
&dma, GFP_KERNEL);
if (!vaddr)
return -ENOMEM;
+#endif
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
@@ -56,43 +86,96 @@ static int i915_gem_object_get_pages_phy
goto err_st;
sg = st->sgl;
+#ifdef __NetBSD__
+ /* XXX errno NetBSD->Linux */
+ ret = -bus_dmamap_create(dmat, roundup_pow_of_two(obj->base.size), 1,
+ roundup_pow_of_two(obj->base.size), 0, BUS_DMA_WAITOK,
+ &sg->sg_dmamap);
+ if (ret)
+ goto err_st;
+ /* XXX errno NetBSD->Linux */
+ ret = -bus_dmamap_load_raw(dmat, sg->sg_dmamap, &obj->mm.u.phys.seg, 1,
+ roundup_pow_of_two(obj->base.size), BUS_DMA_WAITOK);
+ if (ret)
+ goto err_st;
+ loaded = true;
+#else
sg->offset = 0;
sg->length = obj->base.size;
sg_assign_page(sg, (struct page *)vaddr);
sg_dma_address(sg) = dma;
sg_dma_len(sg) = obj->base.size;
+#endif
dst = vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
void *src;
+#ifdef __NetBSD__
+ struct vm_page *vm_page;
+
+ /* XXX errno NetBSD->Linux */
+ ret = -uvm_obj_wirepages(uobj, i*PAGE_SIZE, (i + 1)*PAGE_SIZE,
+ NULL);
+ if (ret)
+ goto err_st;
+ rw_enter(uobj->vmobjlock, RW_READER);
+ vm_page = uvm_pagelookup(uobj, i*PAGE_SIZE);
+ rw_exit(uobj->vmobjlock);
+ KASSERT(vm_page);
+ page = container_of(vm_page, struct page, p_vmp);
+#else
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
goto err_st;
+#endif
src = kmap_atomic(page);
memcpy(dst, src, PAGE_SIZE);
drm_clflush_virt_range(dst, PAGE_SIZE);
kunmap_atomic(src);
+#ifdef __NetBSD__
+ uvm_obj_unwirepages(uobj, i*PAGE_SIZE, (i + 1)*PAGE_SIZE);
+#else
put_page(page);
+#endif
dst += PAGE_SIZE;
}
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
- __i915_gem_object_set_pages(obj, st, sg->length);
+ __i915_gem_object_set_pages(obj, st, obj->base.size);
return 0;
err_st:
+#ifdef __NetBSD__
+ if (loaded)
+ bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
+ if (st->sgl->sg_dmamap) {
+ bus_dmamap_destroy(dmat, st->sgl->sg_dmamap);
+ st->sgl->sg_dmamap = NULL;
+ }
+#endif
+	sg_free_table(st);
 	kfree(st);
err_pci:
+#ifdef __NetBSD__
+ if (vaddr) {
+ bus_dmamem_unmap(dmat, vaddr,
+ roundup_pow_of_two(obj->base.size));
+ }
+ obj->mm.u.phys.kva = NULL;
+ if (rsegs)
+ bus_dmamem_free(dmat, &obj->mm.u.phys.seg, rsegs);
+#else
dma_free_coherent(&obj->base.dev->pdev->dev,
roundup_pow_of_two(obj->base.size),
vaddr, dma);
+#endif
return -ENOMEM;
}
@@ -100,13 +183,23 @@ static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+#ifdef __NetBSD__
+ bus_dma_tag_t dmat = obj->base.dev->dmat;
+ void *vaddr = obj->mm.u.phys.kva;
+#else
dma_addr_t dma = sg_dma_address(pages->sgl);
void *vaddr = sg_page(pages->sgl);
+#endif
__i915_gem_object_release_shmem(obj, pages, false);
if (obj->mm.dirty) {
+#ifdef __NetBSD__
+ struct uvm_object *uobj = obj->base.filp;
+ struct vm_page *vm_page;
+#else
struct address_space *mapping = obj->base.filp->f_mapping;
+#endif
void *src = vaddr;
int i;
@@ -114,9 +207,20 @@ i915_gem_object_put_pages_phys(struct dr
struct page *page;
char *dst;
+#ifdef __NetBSD__
+ if (uvm_obj_wirepages(uobj, i*PAGE_SIZE,
+ (i + 1)*PAGE_SIZE, NULL) != 0)
+ continue;
+ rw_enter(uobj->vmobjlock, RW_READER);
+ vm_page = uvm_pagelookup(uobj, i*PAGE_SIZE);
+ rw_exit(uobj->vmobjlock);
+ KASSERT(vm_page);
+ page = container_of(vm_page, struct page, p_vmp);
+#else
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
continue;
+#endif
dst = kmap_atomic(page);
drm_clflush_virt_range(src, PAGE_SIZE);
@@ -124,26 +228,49 @@ i915_gem_object_put_pages_phys(struct dr
kunmap_atomic(dst);
set_page_dirty(page);
+#ifdef __NetBSD__
+ uvm_obj_unwirepages(uobj, i*PAGE_SIZE,
+ (i + 1)*PAGE_SIZE);
+#else
if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
+#endif
src += PAGE_SIZE;
}
obj->mm.dirty = false;
}
+#ifdef __NetBSD__
+ bus_dmamap_unload(dmat, pages->sgl->sg_dmamap);
+ bus_dmamap_destroy(dmat, pages->sgl->sg_dmamap);
+ pages->sgl->sg_dmamap = NULL;
+#endif
+
sg_free_table(pages);
kfree(pages);
+#ifdef __NetBSD__
+ bus_dmamem_unmap(dmat, obj->mm.u.phys.kva,
+ roundup_pow_of_two(obj->base.size));
+ obj->mm.u.phys.kva = NULL;
+ bus_dmamem_free(dmat, &obj->mm.u.phys.seg, 1);
+#else
dma_free_coherent(&obj->base.dev->pdev->dev,
roundup_pow_of_two(obj->base.size),
vaddr, dma);
+#endif
}
static void phys_release(struct drm_i915_gem_object *obj)
{
+#ifdef __NetBSD__
+ /* XXX Who acquires the reference? */
+ uao_detach(obj->base.filp);
+#else
fput(obj->base.filp);
+#endif
}
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
@@ -155,11 +282,7 @@ static const struct drm_i915_gem_object_
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
-#ifdef __NetBSD__
- bus_dmamap_t pages;
-#else
struct sg_table *pages;
-#endif
int err;
if (align > obj->base.size)
@@ -213,13 +336,7 @@ int i915_gem_object_attach_phys(struct d
err_xfer:
obj->ops = &i915_gem_shmem_ops;
if (!IS_ERR_OR_NULL(pages)) {
-#ifdef __NetBSD__
- unsigned int sg_page_sizes = 0, seg;
- for (seg = 0; seg < pages->dm_nsegs; seg++)
- sg_page_sizes |= pages->dm_segs[seg].ds_len;
-#else
unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
-#endif
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
}
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c:1.3 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c:1.4
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c:1.3 Sun Dec 19 01:40:12 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_stolen.c,v 1.3 2021/12/19 01:40:12 riastradh Exp $ */
+/* $NetBSD: i915_gem_stolen.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_stolen.c,v 1.3 2021/12/19 01:40:12 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_stolen.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $");
#include <linux/errno.h>
#include <linux/mutex.h>
@@ -122,7 +122,9 @@ static int i915_adjust_stolen(struct drm
}
}
-#ifndef __NetBSD__ /* XXX */
+#ifdef __NetBSD__ /* XXX */
+ __USE(r);
+#else
/*
* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
@@ -498,35 +500,45 @@ static int i915_gem_init_stolen(struct d
return 0;
}
-#ifdef __NetBSD__
-static bus_dmamap_t
-#else
static struct sg_table *
-#endif
i915_pages_create_for_stolen(struct drm_device *dev,
resource_size_t offset, resource_size_t size)
{
struct drm_i915_private *i915 = to_i915(dev);
+ struct sg_table *st;
#ifdef __NetBSD__
bus_dma_tag_t dmat = i915->drm.dmat;
- bus_dmamap_t dmamap = NULL;
- bus_dma_segment_t *seg;
- int nseg, i;
+ bus_dma_segment_t *seg = NULL;
+ int nseg = 0, i;
+ bool loaded = false;
int ret;
#else
- struct sg_table *st;
struct scatterlist *sg;
#endif
GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
+ /* We hide that we have no struct page backing our stolen object
+ * by wrapping the contiguous physical allocation with a fake
+ * dma mapping in a single scatterlist.
+ */
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+ kfree(st);
+ return ERR_PTR(-ENOMEM);
+ }
+
#ifdef __NetBSD__
KASSERT((size % PAGE_SIZE) == 0);
nseg = size / PAGE_SIZE;
seg = kmem_alloc(nseg * sizeof(seg[0]), KM_SLEEP);
/*
- * x86 bus_dmamap_load_raw fails to respect the maxsegsz we
+ * XXX x86 bus_dmamap_load_raw fails to respect the maxsegsz we
* pass to bus_dmamap_create, so we have to create page-sized
* segments to begin with.
*/
@@ -538,60 +550,50 @@ i915_pages_create_for_stolen(struct drm_
/* XXX errno NetBSD->Linux */
ret = -bus_dmamap_create(dmat, size, nseg, PAGE_SIZE, 0,
- BUS_DMA_WAITOK, &dmamap);
+ BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
if (ret) {
DRM_ERROR("failed to create DMA map for stolen object: %d\n",
ret);
-fail0: dmamap = NULL; /* paranoia */
+ st->sgl->sg_dmamap = NULL;
goto out;
}
/* XXX errno NetBSD->Liux */
- ret = -bus_dmamap_load_raw(dmat, dmamap, seg, nseg, size,
+ ret = -bus_dmamap_load_raw(dmat, st->sgl->sg_dmamap, seg, nseg, size,
BUS_DMA_WAITOK);
if (ret) {
DRM_ERROR("failed to load DMA map for stolen object: %d\n",
ret);
-fail1: __unused
- bus_dmamap_destroy(dmat, dmamap);
- goto fail0;
+ goto out;
}
+ loaded = true;
-out: kmem_free(seg, nseg*sizeof(seg[0]));
- return ret ? ERR_PTR(ret) : dmamap;
-#else
- /* We hide that we have no struct page backing our stolen object
- * by wrapping the contiguous physical allocation with a fake
- * dma mapping in a single scatterlist.
- */
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL)
- return ERR_PTR(-ENOMEM);
-
- if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+out: if (ret) {
+ if (loaded)
+ bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
+ if (st->sgl->sg_dmamap) {
+ bus_dmamap_destroy(dmat, st->sgl->sg_dmamap);
+ st->sgl->sg_dmamap = NULL;
+ }
+ sg_free_table(st);
kfree(st);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
-
+#else
sg = st->sgl;
sg->offset = 0;
sg->length = size;
sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
sg_dma_len(sg) = size;
+#endif
return st;
-#endif
}
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
-#ifdef __NetBSD__
- bus_dmamap_t pages =
-#else
struct sg_table *pages =
-#endif
i915_pages_create_for_stolen(obj->base.dev,
obj->stolen->start,
obj->stolen->size);
@@ -608,12 +610,12 @@ static void i915_gem_object_put_pages_st
{
/* Should only be called from i915_gem_object_release_stolen() */
#ifdef __NetBSD__
- bus_dmamap_unload(obj->base.dev->dmat, pages);
- bus_dmamap_destroy(obj->base.dev->dmat, pages);
-#else
+ bus_dmamap_unload(obj->base.dev->dmat, pages->sgl->sg_dmamap);
+ bus_dmamap_destroy(obj->base.dev->dmat, pages->sgl->sg_dmamap);
+ pages->sgl->sg_dmamap = NULL;
+#endif
sg_free_table(pages);
kfree(pages);
-#endif
}
static void
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c:1.6 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c:1.7
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c:1.6 Sun Dec 19 11:33:02 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_mman.c,v 1.6 2021/12/19 11:33:02 riastradh Exp $ */
+/* $NetBSD: i915_gem_mman.c,v 1.7 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_mman.c,v 1.6 2021/12/19 11:33:02 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_mman.c,v 1.7 2021/12/19 11:33:30 riastradh Exp $");
#include <linux/anon_inodes.h>
#include <linux/mman.h>
@@ -79,7 +79,7 @@ i915_gem_mmap_ioctl(struct drm_device *d
#ifdef __NetBSD__
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- if (dev->quirks & QUIRK_NETBSD_VERSION_CALLED)
+ if (i915->quirks & QUIRK_NETBSD_VERSION_CALLED)
args->flags = 0;
#endif
@@ -244,8 +244,7 @@ static int
i915_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
int npages, int centeridx, vm_prot_t access_type, int flags)
{
- struct uvm_object *uobj = ufi->entry->object.uvm_obj;
- struct ...
+ panic("NYI");
}
#else
@@ -514,7 +513,7 @@ void i915_gem_object_release_mmap_offset
(void)mmo;
(void)mn;
- for (t = 0; t < I915_MMA_NTYPES; t++) {
+ for (t = 0; t < I915_MMAP_NTYPES; t++) {
if (t == I915_MMAP_TYPE_GTT)
continue;
/*
@@ -522,7 +521,7 @@ void i915_gem_object_release_mmap_offset
* spin lock, probably?
*/
for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
- page = obj->mm.pagearray[i];
+ page = obj->mm.pages->sgl->sg_pgs[i];
vm_page = &page->p_vmp;
pmap_page_protect(vm_page, VM_PROT_NONE);
}
@@ -669,7 +668,11 @@ mmap_offset_attach(struct drm_i915_gem_o
mmo->obj = obj;
mmo->mmap_type = mmap_type;
+#ifdef __NetBSD__
+ drm_vma_node_init(&mmo->vma_node);
+#else
drm_vma_node_reset(&mmo->vma_node);
+#endif
err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
&mmo->vma_node, obj->base.size / PAGE_SIZE);
@@ -696,6 +699,7 @@ out:
return mmo;
err:
+ drm_vma_node_destroy(&mmo->vma_node);
kfree(mmo);
return ERR_PTR(err);
}
@@ -833,6 +837,7 @@ int
i915_gem_mmap_object(struct drm_device *dev, off_t byte_offset, size_t nbytes,
int prot, struct uvm_object **uobjp, voff_t *uoffsetp, struct file *fp)
{
+ __USE(i915_gem_fault);
panic("NYI");
}
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_shmem.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_shmem.c:1.6 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_shmem.c:1.7
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_shmem.c:1.6 Sun Dec 19 11:32:53 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_shmem.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_shmem.c,v 1.6 2021/12/19 11:32:53 riastradh Exp $ */
+/* $NetBSD: i915_gem_shmem.c,v 1.7 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_shmem.c,v 1.6 2021/12/19 11:32:53 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_shmem.c,v 1.7 2021/12/19 11:33:30 riastradh Exp $");
#include <linux/pagevec.h>
#include <linux/swap.h>
@@ -282,11 +282,7 @@ put:
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
-#ifdef __NetBSD__
- bus_dmamap_t pages,
-#else
struct sg_table *pages,
-#endif
bool needs_clflush)
{
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
@@ -297,27 +293,13 @@ __i915_gem_object_release_shmem(struct d
if (needs_clflush &&
(obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-#ifdef __NetBSD__
- /*
- * XXX Should maybe use bus_dmamap_sync instead --
- * shouldn't really touch obj->mm here since the caller
- * already pulled off the pages.
- */
- drm_clflush_pages(obj->mm.pagearray,
- obj->base.size >> PAGE_SHIFT);
-#else
drm_clflush_sg(pages);
-#endif
__start_cpu_write(obj);
}
static void
-#ifdef __NetBSD__
-shmem_put_pages(struct drm_i915_gem_object *obj, bus_dmamap_t map)
-#else
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
-#endif
{
struct sgt_iter sgt_iter;
struct pagevec pvec;
@@ -445,7 +427,12 @@ static void shmem_release(struct drm_i91
{
i915_gem_object_release_memory_region(obj);
+#ifdef __NetBSD__
+ /* XXX Who acquires the reference? */
+ uao_detach(obj->base.filp);
+#else
fput(obj->base.filp);
+#endif
}
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pm.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pm.c:1.2 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pm.c:1.3
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pm.c:1.2 Sat Dec 18 23:45:30 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pm.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: i915_gem_pm.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $ */
+/* $NetBSD: i915_gem_pm.c,v 1.3 2021/12/19 11:33:30 riastradh Exp $ */
/*
* SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_pm.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_pm.c,v 1.3 2021/12/19 11:33:30 riastradh Exp $");
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
@@ -16,6 +16,8 @@ __KERNEL_RCSID(0, "$NetBSD: i915_gem_pm.
#include "i915_drv.h"
+#include <linux/nbsd-namespace.h>
+
void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
Index: src/sys/external/bsd/drm2/dist/drm/i915/gt/intel_gtt.h
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gt/intel_gtt.h:1.10 src/sys/external/bsd/drm2/dist/drm/i915/gt/intel_gtt.h:1.11
--- src/sys/external/bsd/drm2/dist/drm/i915/gt/intel_gtt.h:1.10 Sun Dec 19 11:24:08 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gt/intel_gtt.h Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: intel_gtt.h,v 1.10 2021/12/19 11:24:08 riastradh Exp $ */
+/* $NetBSD: intel_gtt.h,v 1.11 2021/12/19 11:33:30 riastradh Exp $ */
/* SPDX-License-Identifier: MIT */
/*
@@ -643,7 +643,7 @@ struct sgt_dma {
static inline struct sgt_dma
sgt_dma(struct i915_vma *vma)
{
- return (struct sgt_dma) { vma->pages, 0, 0 };
+ return (struct sgt_dma) { vma->pages->sgl->sg_dmamap, 0, 0 };
}
#else
static inline struct sgt_dma {
Index: src/sys/external/bsd/drm2/dist/include/drm/drm_cache.h
diff -u src/sys/external/bsd/drm2/dist/include/drm/drm_cache.h:1.9 src/sys/external/bsd/drm2/dist/include/drm/drm_cache.h:1.10
--- src/sys/external/bsd/drm2/dist/include/drm/drm_cache.h:1.9 Sun Dec 19 11:32:54 2021
+++ src/sys/external/bsd/drm2/dist/include/drm/drm_cache.h Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: drm_cache.h,v 1.9 2021/12/19 11:32:54 riastradh Exp $ */
+/* $NetBSD: drm_cache.h,v 1.10 2021/12/19 11:33:30 riastradh Exp $ */
/**************************************************************************
*
@@ -40,13 +40,8 @@
struct page;
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
-#ifdef __NetBSD__ /* XXX drm clflush */
-void drm_clflush_page(struct page *);
-void drm_clflush_virt_range(const void *, size_t);
-#else
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);
-#endif
bool drm_need_swiotlb(int dma_bits);
Index: src/sys/external/bsd/drm2/dist/include/drm/drm_prime.h
diff -u src/sys/external/bsd/drm2/dist/include/drm/drm_prime.h:1.6 src/sys/external/bsd/drm2/dist/include/drm/drm_prime.h:1.7
--- src/sys/external/bsd/drm2/dist/include/drm/drm_prime.h:1.6 Sun Dec 19 11:32:54 2021
+++ src/sys/external/bsd/drm2/dist/include/drm/drm_prime.h Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: drm_prime.h,v 1.6 2021/12/19 11:32:54 riastradh Exp $ */
+/* $NetBSD: drm_prime.h,v 1.7 2021/12/19 11:33:30 riastradh Exp $ */
/*
* Copyright © 2012 Red Hat
@@ -102,9 +102,15 @@ struct dma_buf *drm_gem_prime_export(str
int flags);
/* helper functions for importing */
+#ifdef __NetBSD__
+struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
+ struct dma_buf *dma_buf,
+ bus_dma_tag_t attach_dev);
+#else
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct dma_buf *dma_buf,
struct device *attach_dev);
+#endif
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
Index: src/sys/external/bsd/drm2/drm/drm_cache.c
diff -u src/sys/external/bsd/drm2/drm/drm_cache.c:1.17 src/sys/external/bsd/drm2/drm/drm_cache.c:1.18
--- src/sys/external/bsd/drm2/drm/drm_cache.c:1.17 Sun Dec 19 11:32:54 2021
+++ src/sys/external/bsd/drm2/drm/drm_cache.c Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: drm_cache.c,v 1.17 2021/12/19 11:32:54 riastradh Exp $ */
+/* $NetBSD: drm_cache.c,v 1.18 2021/12/19 11:33:30 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.17 2021/12/19 11:32:54 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: drm_cache.c,v 1.18 2021/12/19 11:33:30 riastradh Exp $");
#include <sys/param.h>
#include <sys/types.h>
@@ -72,21 +72,13 @@ drm_clflush_pages(struct page **pages, u
}
void
-drm_clflush_page(struct page *page)
+drm_clflush_sg(struct sg_table *sgt)
{
-#if defined(DRM_CLFLUSH)
- if (drm_md_clflush_finegrained_p()) {
- drm_md_clflush_begin();
- drm_md_clflush_page(page);
- drm_md_clflush_commit();
- } else {
- drm_md_clflush_all();
- }
-#endif
+ drm_clflush_pages(sgt->sgl->sg_pgs, sgt->sgl->sg_npgs);
}
void
-drm_clflush_virt_range(const void *vaddr, size_t nbytes)
+drm_clflush_virt_range(void *vaddr, unsigned long nbytes)
{
#if defined(DRM_CLFLUSH)
if (drm_md_clflush_finegrained_p()) {
Index: src/sys/external/bsd/drm2/include/asm/uaccess.h
diff -u src/sys/external/bsd/drm2/include/asm/uaccess.h:1.9 src/sys/external/bsd/drm2/include/asm/uaccess.h:1.10
--- src/sys/external/bsd/drm2/include/asm/uaccess.h:1.9 Sun Dec 19 11:24:14 2021
+++ src/sys/external/bsd/drm2/include/asm/uaccess.h Sun Dec 19 11:33:30 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: uaccess.h,v 1.9 2021/12/19 11:24:14 riastradh Exp $ */
+/* $NetBSD: uaccess.h,v 1.10 2021/12/19 11:33:30 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -76,7 +76,7 @@ copy_to_user(void *user_addr, const void
#define __get_user get_user
#define __put_user put_user
-#define user_access_begin() __nothing
+#define user_access_begin(P,N) access_ok(P,N)
#define user_access_end() __nothing
#define unsafe_put_user(KERNEL_RVAL, USER_PTR, LABEL) do { \
Index: src/sys/external/bsd/drm2/include/drm/bus_dma_hacks.h
diff -u src/sys/external/bsd/drm2/include/drm/bus_dma_hacks.h:1.21 src/sys/external/bsd/drm2/include/drm/bus_dma_hacks.h:1.22
--- src/sys/external/bsd/drm2/include/drm/bus_dma_hacks.h:1.21 Sun Dec 19 11:32:54 2021
+++ src/sys/external/bsd/drm2/include/drm/bus_dma_hacks.h Sun Dec 19 11:33:31 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: bus_dma_hacks.h,v 1.21 2021/12/19 11:32:54 riastradh Exp $ */
+/* $NetBSD: bus_dma_hacks.h,v 1.22 2021/12/19 11:33:31 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -194,7 +194,7 @@ out: if (segs != stacksegs) {
static inline int
bus_dmamem_export_pages(bus_dma_tag_t dmat, const bus_dma_segment_t *segs,
- int nsegs, paddr_t *pgs, unsigned npgs)
+ int nsegs, struct page **pgs, unsigned npgs)
{
int seg;
unsigned i;
@@ -208,7 +208,8 @@ bus_dmamem_export_pages(bus_dma_tag_t dm
paddr_t paddr = BUS_MEM_TO_PHYS(dmat, baddr);
KASSERT(i < npgs);
- pgs[i++] = paddr;
+ pgs[i++] = container_of(PHYS_TO_VM_PAGE(paddr),
+ struct page, p_vmp);
baddr += PAGE_SIZE;
len -= PAGE_SIZE;
@@ -222,14 +223,14 @@ bus_dmamem_export_pages(bus_dma_tag_t dm
static inline int
bus_dmamem_import_pages(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
- int nsegs, int *rsegs, const paddr_t *pgs, unsigned npgs)
+ int nsegs, int *rsegs, struct page *const *pgs, unsigned npgs)
{
int seg;
unsigned i;
seg = 0;
for (i = 0; i < npgs; i++) {
- paddr_t paddr = pgs[i];
+ paddr_t paddr = VM_PAGE_TO_PHYS(&pgs[i]->p_vmp);
bus_addr_t baddr = PHYS_TO_BUS_MEM(dmat, paddr);
if (seg > 0 && segs[seg - 1].ds_addr + PAGE_SIZE == baddr) {
Index: src/sys/external/bsd/drm2/include/linux/bitmap.h
diff -u src/sys/external/bsd/drm2/include/linux/bitmap.h:1.11 src/sys/external/bsd/drm2/include/linux/bitmap.h:1.12
--- src/sys/external/bsd/drm2/include/linux/bitmap.h:1.11 Sun Dec 19 11:17:02 2021
+++ src/sys/external/bsd/drm2/include/linux/bitmap.h Sun Dec 19 11:33:31 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: bitmap.h,v 1.11 2021/12/19 11:17:02 riastradh Exp $ */
+/* $NetBSD: bitmap.h,v 1.12 2021/12/19 11:33:31 riastradh Exp $ */
/*-
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -226,4 +226,11 @@ bitmap_zalloc(size_t nbits, gfp_t gfp)
return kcalloc(n, sizeof(unsigned long), gfp);
}
+/*
+ * bitmap_free(bitmap)
+ *
+ *	Free a bitmap previously allocated with bitmap_zalloc.
+ *	bitmap may be NULL (kfree of NULL is a no-op).
+ */
+static inline void
+bitmap_free(unsigned long *bitmap)
+{
+
+ kfree(bitmap);
+}
+
#endif /* _LINUX_BITMAP_H_ */
Index: src/sys/external/bsd/drm2/include/linux/dma-buf.h
diff -u src/sys/external/bsd/drm2/include/linux/dma-buf.h:1.10 src/sys/external/bsd/drm2/include/linux/dma-buf.h:1.11
--- src/sys/external/bsd/drm2/include/linux/dma-buf.h:1.10 Sun Dec 19 10:38:23 2021
+++ src/sys/external/bsd/drm2/include/linux/dma-buf.h Sun Dec 19 11:33:31 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: dma-buf.h,v 1.10 2021/12/19 10:38:23 riastradh Exp $ */
+/* $NetBSD: dma-buf.h,v 1.11 2021/12/19 11:33:31 riastradh Exp $ */
/*-
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -37,7 +37,9 @@
#include <sys/mutex.h>
#include <linux/err.h>
+#include <linux/dma-mapping.h>
#include <linux/dma-resv.h>
+#include <linux/scatterlist.h>
struct device;
struct dma_buf;
@@ -50,13 +52,6 @@ struct dma_resv;
struct sg_table;
struct uvm_object;
-enum dma_data_direction {
- DMA_NONE = 0,
- DMA_TO_DEVICE = 1,
- DMA_FROM_DEVICE = 2,
- DMA_BIDIRECTIONAL = 3,
-};
-
struct dma_buf_ops {
bool cache_sgt_mapping;
int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
@@ -90,7 +85,7 @@ struct dma_buf {
struct dma_buf_attachment {
void *priv;
struct dma_buf *dmabuf;
- struct device *dev;
+ bus_dma_tag_t dev; /* XXX expedient misnomer */
};
struct dma_buf_export_info {
@@ -128,7 +123,7 @@ void get_dma_buf(struct dma_buf *);
void dma_buf_put(struct dma_buf *);
struct dma_buf_attachment *
- dma_buf_attach(struct dma_buf *, struct device *);
+ dma_buf_attach(struct dma_buf *, bus_dma_tag_t);
void dma_buf_detach(struct dma_buf *, struct dma_buf_attachment *);
struct sg_table *
Index: src/sys/external/bsd/drm2/include/linux/dma-mapping.h
diff -u src/sys/external/bsd/drm2/include/linux/dma-mapping.h:1.8 src/sys/external/bsd/drm2/include/linux/dma-mapping.h:1.9
--- src/sys/external/bsd/drm2/include/linux/dma-mapping.h:1.8 Sun Dec 19 09:57:01 2021
+++ src/sys/external/bsd/drm2/include/linux/dma-mapping.h Sun Dec 19 11:33:31 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: dma-mapping.h,v 1.8 2021/12/19 09:57:01 riastradh Exp $ */
+/* $NetBSD: dma-mapping.h,v 1.9 2021/12/19 11:33:31 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -39,6 +39,23 @@
#include <linux/types.h>
+enum dma_data_direction {
+ DMA_NONE = 0,
+ DMA_TO_DEVICE = 1,
+ DMA_FROM_DEVICE = 2,
+ DMA_BIDIRECTIONAL = 3,
+
+ PCI_DMA_NONE = DMA_NONE,
+ PCI_TO_DEVICE = DMA_TO_DEVICE,
+ PCI_FROM_DEVICE = DMA_FROM_DEVICE,
+ PCI_DMA_BIDIRECTIONAL = DMA_BIDIRECTIONAL,
+};
+
+enum {
+ DMA_ATTR_NO_WARN = __BIT(0),
+ DMA_ATTR_SKIP_CPU_SYNC = __BIT(1),
+};
+
static inline uintmax_t
DMA_BIT_MASK(unsigned nbits)
{
Index: src/sys/external/bsd/drm2/include/linux/mutex.h
diff -u src/sys/external/bsd/drm2/include/linux/mutex.h:1.16 src/sys/external/bsd/drm2/include/linux/mutex.h:1.17
--- src/sys/external/bsd/drm2/include/linux/mutex.h:1.16 Sun Dec 19 01:21:22 2021
+++ src/sys/external/bsd/drm2/include/linux/mutex.h Sun Dec 19 11:33:31 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: mutex.h,v 1.16 2021/12/19 01:21:22 riastradh Exp $ */
+/* $NetBSD: mutex.h,v 1.17 2021/12/19 11:33:31 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -115,6 +115,13 @@ mutex_lock_nested(struct mutex *mutex, u
mutex_lock(mutex);
}
+/*
+ * mutex_lock_interruptible_nested(mutex, subclass)
+ *
+ *	Interruptible lock; the lockdep subclass is ignored on NetBSD,
+ *	mirroring mutex_lock_nested above.  Returns whatever
+ *	mutex_lock_interruptible returns (0 on success).
+ */
+static inline int
+mutex_lock_interruptible_nested(struct mutex *mutex,
+ unsigned subclass __unused)
+{
+ return mutex_lock_interruptible(mutex);
+}
+
/*
* `recursive locking is bad, do not use this ever.'
* -- linux/scripts/checkpath.pl
Index: src/sys/external/bsd/drm2/include/linux/radix-tree.h
diff -u src/sys/external/bsd/drm2/include/linux/radix-tree.h:1.4 src/sys/external/bsd/drm2/include/linux/radix-tree.h:1.5
--- src/sys/external/bsd/drm2/include/linux/radix-tree.h:1.4 Sun Dec 19 01:35:18 2021
+++ src/sys/external/bsd/drm2/include/linux/radix-tree.h Sun Dec 19 11:33:31 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: radix-tree.h,v 1.4 2021/12/19 01:35:18 riastradh Exp $ */
+/* $NetBSD: radix-tree.h,v 1.5 2021/12/19 11:33:31 riastradh Exp $ */
/*-
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -49,6 +49,7 @@ struct radix_tree_root {
};
struct radix_tree_iter {
+ unsigned long index;
};
void INIT_RADIX_TREE(struct radix_tree_root *, gfp_t);
Index: src/sys/external/bsd/drm2/include/linux/scatterlist.h
diff -u src/sys/external/bsd/drm2/include/linux/scatterlist.h:1.3 src/sys/external/bsd/drm2/include/linux/scatterlist.h:1.4
--- src/sys/external/bsd/drm2/include/linux/scatterlist.h:1.3 Sun Dec 19 10:51:24 2021
+++ src/sys/external/bsd/drm2/include/linux/scatterlist.h Sun Dec 19 11:33:31 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: scatterlist.h,v 1.3 2021/12/19 10:51:24 riastradh Exp $ */
+/* $NetBSD: scatterlist.h,v 1.4 2021/12/19 11:33:31 riastradh Exp $ */
/*-
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -32,7 +32,44 @@
#ifndef _LINUX_SCATTERLIST_H_
#define _LINUX_SCATTERLIST_H_
+#include <sys/bus.h>
+
#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/gfp.h>
#include <linux/types.h>
+/* namespace */
+#define dma_map_sg linux_dma_map_sg
+#define dma_map_sg_attrs linux_dma_map_sg_attrs
+#define dma_unmap_sg linux_dma_unmap_sg
+#define dma_unmap_sg_attrs linux_dma_unmap_sg_attrs
+#define sg_alloc_table linux_sg_alloc_table
+#define sg_alloc_table_from_bus_dmamem linux_sg_alloc_table_from_bus_dmamem
+#define sg_alloc_table_from_pages linux_sg_alloc_table_from_pages
+#define sg_free_table linux_sg_free_table
+
+struct page;
+
+/*
+ * Shim around Linux sg_table: a single scatterlist entry that can
+ * carry (a) an array of pages and/or (b) DMA addresses in a
+ * bus_dmamap_t.  Both parts are optional; different i915 call sites
+ * use one or both.
+ */
+struct sg_table {
+ struct scatterlist {
+ struct page **sg_pgs;	/* page array, or NULL */
+ unsigned sg_npgs;	/* number of entries in sg_pgs */
+ bus_dmamap_t sg_dmamap;	/* DMA map, or NULL if unmapped */
+ } sgl[1];
+ unsigned nents;	/* same as sgl->sg_npgs */
+};
+
+int sg_alloc_table(struct sg_table *, unsigned, gfp_t);
+int sg_alloc_table_from_pages(struct sg_table *, struct page **, unsigned,
+ bus_size_t, bus_size_t, gfp_t);
+int sg_alloc_table_from_bus_dmamem(struct sg_table *, bus_dma_tag_t,
+ const bus_dma_segment_t *, int, gfp_t);
+void sg_free_table(struct sg_table *);
+
+int dma_map_sg(bus_dma_tag_t, struct scatterlist *, int, int);
+int dma_map_sg_attrs(bus_dma_tag_t, struct scatterlist *, int, int, int);
+void dma_unmap_sg(bus_dma_tag_t, struct scatterlist *, int, int);
+void dma_unmap_sg_attrs(bus_dma_tag_t, struct scatterlist *, int, int, int);
+
#endif /* _LINUX_SCATTERLIST_H_ */
Index: src/sys/external/bsd/drm2/linux/files.drmkms_linux
diff -u src/sys/external/bsd/drm2/linux/files.drmkms_linux:1.31 src/sys/external/bsd/drm2/linux/files.drmkms_linux:1.32
--- src/sys/external/bsd/drm2/linux/files.drmkms_linux:1.31 Sun Dec 19 11:23:52 2021
+++ src/sys/external/bsd/drm2/linux/files.drmkms_linux Sun Dec 19 11:33:31 2021
@@ -1,4 +1,4 @@
-# $NetBSD: files.drmkms_linux,v 1.31 2021/12/19 11:23:52 riastradh Exp $
+# $NetBSD: files.drmkms_linux,v 1.32 2021/12/19 11:33:31 riastradh Exp $
define drmkms_linux: i2cexec, i2c_bitbang
@@ -21,6 +21,7 @@ file external/bsd/drm2/linux/linux_list_
file external/bsd/drm2/linux/linux_module.c drmkms_linux
file external/bsd/drm2/linux/linux_pci.c drmkms_linux
file external/bsd/drm2/linux/linux_rwsem.c drmkms_linux
+file external/bsd/drm2/linux/linux_sg.c drmkms_linux
file external/bsd/drm2/linux/linux_stop_machine.c drmkms_linux
file external/bsd/drm2/linux/linux_sync_file.c drmkms_linux
file external/bsd/drm2/linux/linux_wait_bit.c drmkms_linux
Index: src/sys/external/bsd/drm2/linux/linux_dma_buf.c
diff -u src/sys/external/bsd/drm2/linux/linux_dma_buf.c:1.9 src/sys/external/bsd/drm2/linux/linux_dma_buf.c:1.10
--- src/sys/external/bsd/drm2/linux/linux_dma_buf.c:1.9 Sun Dec 19 10:19:53 2021
+++ src/sys/external/bsd/drm2/linux/linux_dma_buf.c Sun Dec 19 11:33:31 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: linux_dma_buf.c,v 1.9 2021/12/19 10:19:53 riastradh Exp $ */
+/* $NetBSD: linux_dma_buf.c,v 1.10 2021/12/19 11:33:31 riastradh Exp $ */
/*-
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_dma_buf.c,v 1.9 2021/12/19 10:19:53 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_dma_buf.c,v 1.10 2021/12/19 11:33:31 riastradh Exp $");
#include <sys/types.h>
#include <sys/atomic.h>
@@ -177,14 +177,14 @@ dma_buf_put(struct dma_buf *dmabuf)
}
struct dma_buf_attachment *
-dma_buf_attach(struct dma_buf *dmabuf, struct device *dev)
+dma_buf_attach(struct dma_buf *dmabuf, bus_dma_tag_t dmat)
{
struct dma_buf_attachment *attach;
int ret = 0;
attach = kmem_zalloc(sizeof(*attach), KM_SLEEP);
attach->dmabuf = dmabuf;
- attach->dev = dev;
+ attach->dev = dmat;
mutex_enter(&dmabuf->db_lock);
if (dmabuf->ops->attach)
Added files:
Index: src/sys/external/bsd/drm2/linux/linux_sg.c
diff -u /dev/null src/sys/external/bsd/drm2/linux/linux_sg.c:1.1
--- /dev/null Sun Dec 19 11:33:31 2021
+++ src/sys/external/bsd/drm2/linux/linux_sg.c Sun Dec 19 11:33:31 2021
@@ -0,0 +1,187 @@
+/* $NetBSD: linux_sg.c,v 1.1 2021/12/19 11:33:31 riastradh Exp $ */
+
+/*-
+ * Copyright (c) 2021 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: linux_sg.c,v 1.1 2021/12/19 11:33:31 riastradh Exp $");
+
+#include <sys/bus.h>
+#include <sys/errno.h>
+
+#include <drm/bus_dma_hacks.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+/*
+ * sg_alloc_table(sgt, npgs, gfp)
+ *
+ *	Allocate a zeroed npgs-entry page array for sgt's single
+ *	scatterlist entry.  No DMA map is created here; sg_dmamap
+ *	starts NULL and is set up later by dma_map_sg.
+ *
+ *	Returns 0 on success, -ENOMEM on allocation failure
+ *	(negative errno, Linux convention).
+ */
+int
+sg_alloc_table(struct sg_table *sgt, unsigned npgs, gfp_t gfp)
+{
+
+ sgt->sgl->sg_pgs = kcalloc(npgs, sizeof(sgt->sgl->sg_pgs[0]), gfp);
+ if (sgt->sgl->sg_pgs == NULL)
+ return -ENOMEM;
+ sgt->sgl->sg_npgs = sgt->nents = npgs;
+ sgt->sgl->sg_dmamap = NULL;
+
+ return 0;
+}
+
+/*
+ * sg_alloc_table_from_pages(sgt, pgs, npgs, offset, size, gfp)
+ *
+ *	Populate sgt with a copy of the caller's page array.  The shim
+ *	only supports offset 0 and a size covering exactly npgs whole
+ *	pages; the KASSERTs enforce that restriction.
+ *
+ *	Returns 0 on success, negative errno on failure.
+ */
+int
+sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pgs,
+ unsigned npgs, bus_size_t offset, bus_size_t size, gfp_t gfp)
+{
+ unsigned i;
+ int ret;
+
+ KASSERT(offset == 0);
+ KASSERT(size == (bus_size_t)npgs << PAGE_SHIFT);
+
+ ret = sg_alloc_table(sgt, npgs, gfp);
+ if (ret)
+ return ret;
+
+ /* Copy, rather than alias, so the caller's array can go away. */
+ for (i = 0; i < npgs; i++)
+ sgt->sgl->sg_pgs[i] = pgs[i];
+
+ return 0;
+}
+
+/*
+ * sg_alloc_table_from_bus_dmamem(sgt, dmat, seg, nseg, gfp)
+ *
+ *	Populate sgt's page array from bus_dmamem segments via
+ *	bus_dmamem_export_pages.
+ *
+ *	NOTE(review): the page array is sized to nseg entries, but
+ *	bus_dmamem_export_pages emits one entry per PAGE_SIZE of each
+ *	segment -- this assumes every segment is exactly one page long
+ *	(otherwise its internal KASSERT(i < npgs) fires).  Confirm
+ *	callers only pass page-sized segments.
+ *
+ *	Returns 0 on success, negative errno on failure; on failure
+ *	the table is freed again.
+ */
+int
+sg_alloc_table_from_bus_dmamem(struct sg_table *sgt, bus_dma_tag_t dmat,
+ const bus_dma_segment_t *seg, int nseg, gfp_t gfp)
+{
+ int ret;
+
+ KASSERT(nseg >= 1);
+
+ ret = sg_alloc_table(sgt, nseg, gfp);
+ if (ret)
+ return ret;
+
+ /* XXX errno NetBSD->Linux */
+ ret = -bus_dmamem_export_pages(dmat, seg, nseg, sgt->sgl->sg_pgs,
+ sgt->sgl->sg_npgs);
+ if (ret)
+ goto out;
+
+ /* Success! */
+ ret = 0;
+
+out: if (ret)
+ sg_free_table(sgt);
+ return ret;
+}
+
+/*
+ * sg_free_table(sgt)
+ *
+ *	Release sgt's page array.  The caller must have already torn
+ *	down any DMA map with dma_unmap_sg -- the KASSERT enforces
+ *	that sg_dmamap is NULL here.
+ */
+void
+sg_free_table(struct sg_table *sgt)
+{
+
+ KASSERT(sgt->sgl->sg_dmamap == NULL);
+ kfree(sgt->sgl->sg_pgs);
+ sgt->sgl->sg_pgs = NULL;
+ sgt->sgl->sg_npgs = 0;
+}
+
+/*
+ * dma_map_sg(dmat, sg, nents, dir)
+ *
+ *	Convenience wrapper: dma_map_sg_attrs with no attributes.
+ */
+int
+dma_map_sg(bus_dma_tag_t dmat, struct scatterlist *sg, int nents, int dir)
+{
+
+ return dma_map_sg_attrs(dmat, sg, nents, dir, 0);
+}
+
+/*
+ * dma_map_sg_attrs(dmat, sg, nents, dir, attrs)
+ *
+ *	Create a bus_dmamap for sg's page array and load the pages
+ *	into it.  attrs is ignored by this shim.
+ *
+ *	Returns the number of DMA segments (>= 1) on success, or 0 on
+ *	failure -- the Linux dma_map_sg convention, NOT a negative
+ *	errno.  On failure the map is unloaded/destroyed and
+ *	sg_dmamap is left NULL.
+ */
+int
+dma_map_sg_attrs(bus_dma_tag_t dmat, struct scatterlist *sg, int nents,
+ int dir, int attrs)
+{
+ int flags = 0;
+ bool loaded = false;
+ int ret, error = 0;
+
+ KASSERT(sg->sg_dmamap == NULL);
+ KASSERT(nents >= 1);
+
+ /*
+ * Translate the Linux direction to bus_dma flags; leaving both
+ * READ and WRITE unset covers the bidirectional case.
+ */
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ flags |= BUS_DMA_WRITE;
+ break;
+ case DMA_FROM_DEVICE:
+ flags |= BUS_DMA_READ;
+ break;
+ case DMA_BIDIRECTIONAL:
+ break;
+ case DMA_NONE:
+ panic("invalid DMA direction %d", dir);
+ }
+
+ error = bus_dmamap_create(dmat, (bus_size_t)sg->sg_npgs << PAGE_SHIFT,
+ nents, PAGE_SIZE, 0, BUS_DMA_WAITOK, &sg->sg_dmamap);
+ if (error)
+ goto out;
+ KASSERT(sg->sg_dmamap);
+
+ error = bus_dmamap_load_pages(dmat, sg->sg_dmamap, sg->sg_pgs,
+ (bus_size_t)sg->sg_npgs << PAGE_SHIFT, BUS_DMA_WAITOK|flags);
+ if (error)
+ goto out;
+ loaded = true;
+
+ /* Success! */
+ KASSERT(sg->sg_dmamap->dm_nsegs > 0);
+ KASSERT(sg->sg_dmamap->dm_nsegs <= nents);
+ ret = sg->sg_dmamap->dm_nsegs;
+
+out: if (error) {
+ if (loaded)
+ bus_dmamap_unload(dmat, sg->sg_dmamap);
+ loaded = false;		/* dead store; kept for symmetry */
+ if (sg->sg_dmamap)
+ bus_dmamap_destroy(dmat, sg->sg_dmamap);
+ sg->sg_dmamap = NULL;
+ ret = 0;
+ }
+ return ret;
+}
+
+/*
+ * dma_unmap_sg(dmat, sg, nents, dir)
+ *
+ *	Convenience wrapper: dma_unmap_sg_attrs with no attributes.
+ */
+void
+dma_unmap_sg(bus_dma_tag_t dmat, struct scatterlist *sg, int nents, int dir)
+{
+
+ dma_unmap_sg_attrs(dmat, sg, nents, dir, 0);
+}
+
+/*
+ * dma_unmap_sg_attrs(dmat, sg, nents, dir, attrs)
+ *
+ *	Undo dma_map_sg_attrs: unload and destroy sg's bus_dmamap and
+ *	clear sg_dmamap so sg_free_table's KASSERT passes.  nents,
+ *	dir, and attrs are ignored by this shim; the map must have
+ *	been successfully created (sg_dmamap != NULL).
+ */
+void
+dma_unmap_sg_attrs(bus_dma_tag_t dmat, struct scatterlist *sg, int nents,
+ int dir, int attrs)
+{
+
+ bus_dmamap_unload(dmat, sg->sg_dmamap);
+ bus_dmamap_destroy(dmat, sg->sg_dmamap);
+ sg->sg_dmamap = NULL;
+}