Re: [PATCH 07/12] dma-mapping: move CONFIG_DMA_CMA to kernel/dma/Kconfig

2019-02-11 Thread Greg Kroah-Hartman
On Mon, Feb 11, 2019 at 02:35:49PM +0100, Christoph Hellwig wrote:
> This is where all the related code already lives.
> 
> Signed-off-by: Christoph Hellwig 
> ---
>  drivers/base/Kconfig | 77 
>  kernel/dma/Kconfig   | 77 
>  2 files changed, 77 insertions(+), 77 deletions(-)

Much nicer, thanks!

Reviewed-by: Greg Kroah-Hartman 
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 09/12] dma-mapping: remove the DMA_MEMORY_EXCLUSIVE flag

2019-02-11 Thread Greg Kroah-Hartman
On Mon, Feb 11, 2019 at 02:35:51PM +0100, Christoph Hellwig wrote:
> All users of dma_declare_coherent want their allocations to be
> exclusive, so default to exclusive allocations.
> 
> Signed-off-by: Christoph Hellwig 
> ---
>  Documentation/DMA-API.txt |  9 +--
>  arch/arm/mach-imx/mach-imx27_visstrim_m10.c   | 12 +++--
>  arch/arm/mach-imx/mach-mx31moboard.c  |  3 +--
>  arch/sh/boards/mach-ap325rxa/setup.c  |  5 ++--
>  arch/sh/boards/mach-ecovec24/setup.c  |  6 ++---
>  arch/sh/boards/mach-kfr2r09/setup.c   |  5 ++--
>  arch/sh/boards/mach-migor/setup.c |  5 ++--
>  arch/sh/boards/mach-se/7724/setup.c   |  6 ++---
>  arch/sh/drivers/pci/fixups-dreamcast.c|  3 +--
>  .../soc_camera/sh_mobile_ceu_camera.c |  3 +--
>  drivers/usb/host/ohci-sm501.c |  3 +--
>  drivers/usb/host/ohci-tmio.c  |  2 +-
>  include/linux/dma-mapping.h   |  7 ++
>  kernel/dma/coherent.c | 25 ++-
>  14 files changed, 29 insertions(+), 65 deletions(-)

Reviewed-by: Greg Kroah-Hartman 
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 02/12] device.h: dma_mem is only needed for HAVE_GENERIC_DMA_COHERENT

2019-02-11 Thread Greg Kroah-Hartman
On Mon, Feb 11, 2019 at 02:35:44PM +0100, Christoph Hellwig wrote:
> No need to carry an unused field around.
> 
> Signed-off-by: Christoph Hellwig 
> ---
>  include/linux/device.h | 2 ++
>  1 file changed, 2 insertions(+)

Reviewed-by: Greg Kroah-Hartman 
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 01/18] MIPS: lantiq: pass struct device to DMA API functions

2019-02-11 Thread Christoph Hellwig
On Thu, Feb 07, 2019 at 11:29:14PM +, Paul Burton wrote:
> Would you like this to go through the MIPS tree or elsewhere? If the
> latter:
> 
> Acked-by: Paul Burton 

Please pick it up through the mips tree!
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 11/14] mmc: sh_mmcif: handle highmem pages

2019-02-11 Thread Christoph Hellwig
Instead of setting up a kernel pointer to track the current PIO address,
track the offset in the current page, and do an atomic kmap for the page
while doing the actual PIO operations.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/host/sh_mmcif.c | 59 +++--
 1 file changed, 37 insertions(+), 22 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 81bd9afb0980..24c3f13bafdb 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -228,7 +228,7 @@ struct sh_mmcif_host {
bool dying;
long timeout;
void __iomem *addr;
-   u32 *pio_ptr;
+   u32 pio_offset;
spinlock_t lock;/* protect sh_mmcif_host::state */
enum sh_mmcif_state state;
enum sh_mmcif_wait_for wait_for;
@@ -595,7 +595,7 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
return ret;
 }
 
-static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
+static bool sh_mmcif_next_block(struct sh_mmcif_host *host)
 {
struct mmc_data *data = host->mrq->data;
 
@@ -606,10 +606,10 @@ static bool sh_mmcif_next_block(struct sh_mmcif_host 
*host, u32 *p)
 
if (host->sg_blkidx == data->sg->length) {
host->sg_blkidx = 0;
-   if (++host->sg_idx < data->sg_len)
-   host->pio_ptr = sg_virt(++data->sg);
-   } else {
-   host->pio_ptr = p;
+   if (++host->sg_idx < data->sg_len) {
+   data->sg++;
+   host->pio_offset = data->sg->offset / 4;
+   }
}
 
return host->sg_idx != data->sg_len;
@@ -631,8 +631,8 @@ static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
 {
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
-   u32 *p = sg_virt(data->sg);
-   int i;
+   u32 *p;
+   int off, i;
 
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
@@ -640,8 +640,11 @@ static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
return false;
}
 
+   p = sg_kmap_atomic(data->sg);
+   off = data->sg->offset / 4;
for (i = 0; i < host->blocksize / 4; i++)
-   *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+   p[off++] = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+   sg_kunmap_atomic(data->sg, p);
 
/* buffer read end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
@@ -664,7 +667,7 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
host->wait_for = MMCIF_WAIT_FOR_MREAD;
host->sg_idx = 0;
host->sg_blkidx = 0;
-   host->pio_ptr = sg_virt(data->sg);
+   host->pio_offset = data->sg->offset / 4;
 
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
 }
@@ -673,7 +676,7 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
 {
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
-   u32 *p = host->pio_ptr;
+   u32 *p;
int i;
 
if (host->sd_error) {
@@ -684,10 +687,14 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host 
*host)
 
BUG_ON(!data->sg->length);
 
-   for (i = 0; i < host->blocksize / 4; i++)
-   *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+   p = sg_kmap_atomic(data->sg);
+   for (i = 0; i < host->blocksize / 4; i++) {
+   p[host->pio_offset++] =
+   sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+   }
+   sg_kunmap_atomic(data->sg, p);
 
-   if (!sh_mmcif_next_block(host, p))
+   if (!sh_mmcif_next_block(host))
return false;
 
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
@@ -711,8 +718,8 @@ static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
 {
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
-   u32 *p = sg_virt(data->sg);
-   int i;
+   u32 *p;
+   int off, i;
 
if (host->sd_error) {
data->error = sh_mmcif_error_manage(host);
@@ -720,8 +727,11 @@ static bool sh_mmcif_write_block(struct sh_mmcif_host 
*host)
return false;
}
 
+   p = sg_kmap_atomic(data->sg);
+   off = data->sg->offset / 4;
for (i = 0; i < host->blocksize / 4; i++)
-   sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+   sh_mmcif_writel(host->addr, MMCIF_CE_DATA, p[off++]);
+   sg_kunmap_atomic(data->sg, p);
 
/* buffer write end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
@@ -744,7 +754,7 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
host->wait_for = MMCIF_WAIT_FOR_MWRITE;
host->sg_idx = 0;
host->sg_blkidx = 0;
-   host->pio_ptr = sg_virt(data->sg);
+   

[PATCH 07/14] mmc: omap: handle chained sglists

2019-02-11 Thread Christoph Hellwig
Use the proper sg_next() helper to move to the next scatterlist element
to support chained scatterlists.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/host/omap.c | 16 ++--
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 6741c95f2281..8cd39bc087fa 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -147,7 +147,7 @@ struct mmc_omap_host {
struct mmc_data *stop_data;
 
unsigned intsg_len;
-   int sg_idx;
+   struct scatterlist  *cur_sg;
u32 buffer_offset;
u32 buffer_bytes_left;
u32 total_bytes_left;
@@ -645,11 +645,8 @@ mmc_omap_cmd_timer(struct timer_list *t)
 static void
 mmc_omap_sg_to_buf(struct mmc_omap_host *host)
 {
-   struct scatterlist *sg;
-
-   sg = host->data->sg + host->sg_idx;
-   host->buffer_bytes_left = sg->length;
-   host->buffer_offset = sg->offset;
+   host->buffer_bytes_left = host->cur_sg->length;
+   host->buffer_offset = host->cur_sg->offset;
if (host->buffer_bytes_left > host->total_bytes_left)
host->buffer_bytes_left = host->total_bytes_left;
 }
@@ -666,13 +663,12 @@ mmc_omap_clk_timer(struct timer_list *t)
 static void
 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
 {
-   struct scatterlist *sg = host->data->sg + host->sg_idx;
+   struct scatterlist *sg = host->cur_sg;
int n, nwords;
void *p;
 
if (host->buffer_bytes_left == 0) {
-   host->sg_idx++;
-   BUG_ON(host->sg_idx == host->sg_len);
+   host->cur_sg = sg_next(host->cur_sg);
mmc_omap_sg_to_buf(host);
}
n = 64;
@@ -984,7 +980,7 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct 
mmc_request *req)
}
}
 
-   host->sg_idx = 0;
+   host->cur_sg = host->data->sg;
if (use_dma) {
enum dma_data_direction dma_data_dir;
struct dma_async_tx_descriptor *tx;
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 14/14] dma-mapping: remove dma_max_pfn

2019-02-11 Thread Christoph Hellwig
These days the DMA mapping code must bounce buffer for any unsupported
address, and if the driver needs to optimize for natively supported
ranges it should use dma_get_required_mask.

Signed-off-by: Christoph Hellwig 
---
 arch/arm/include/asm/dma-mapping.h | 7 ---
 include/linux/dma-mapping.h| 7 ---
 2 files changed, 14 deletions(-)

diff --git a/arch/arm/include/asm/dma-mapping.h 
b/arch/arm/include/asm/dma-mapping.h
index 31d3b96f0f4b..496b36b9a7ff 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -89,13 +89,6 @@ static inline dma_addr_t virt_to_dma(struct device *dev, 
void *addr)
 }
 #endif
 
-/* The ARM override for dma_max_pfn() */
-static inline unsigned long dma_max_pfn(struct device *dev)
-{
-   return dma_to_pfn(dev, *dev->dma_mask);
-}
-#define dma_max_pfn(dev) dma_max_pfn(dev)
-
 #define arch_setup_dma_ops arch_setup_dma_ops
 extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
   const struct iommu_ops *iommu, bool coherent);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f6ded992c183..c6dbc287e466 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -710,13 +710,6 @@ static inline int dma_set_seg_boundary(struct device *dev, 
unsigned long mask)
return -EIO;
 }
 
-#ifndef dma_max_pfn
-static inline unsigned long dma_max_pfn(struct device *dev)
-{
-   return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
-}
-#endif
-
 static inline int dma_get_cache_alignment(void)
 {
 #ifdef ARCH_DMA_MINALIGN
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 13/14] mmc: core: don't use block layer bounce buffers

2019-02-11 Thread Christoph Hellwig
All MMC and SD host drivers are highmem safe now, and bounce buffering
for addressing limitations is handled in the DMA layer now.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/core/queue.c | 5 -
 1 file changed, 5 deletions(-)

diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 71cd2411329e..1c92a2a4d641 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -354,17 +354,12 @@ static const struct blk_mq_ops mmc_mq_ops = {
 static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 {
struct mmc_host *host = card->host;
-   u64 limit = BLK_BOUNCE_HIGH;
-
-   if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-   limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
if (mmc_can_erase(card))
mmc_queue_setup_discard(mq->queue, card);
 
-   blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 08/14] mmc: s3cmci: handle highmem pages

2019-02-11 Thread Christoph Hellwig
Instead of setting up a kernel pointer to track the current PIO address,
track the offset in the current page, and do an atomic kmap for the page
while doing the actual PIO operations.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/host/s3cmci.c | 107 +++---
 drivers/mmc/host/s3cmci.h |   3 +-
 2 files changed, 55 insertions(+), 55 deletions(-)

diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 10f5219b3b40..989fefea19f1 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -317,26 +317,17 @@ static void s3cmci_check_sdio_irq(struct s3cmci_host 
*host)
}
 }
 
-static inline int get_data_buffer(struct s3cmci_host *host,
- u32 *bytes, u32 **pointer)
+static inline int get_data_buffer(struct s3cmci_host *host)
 {
-   struct scatterlist *sg;
-
-   if (host->pio_active == XFER_NONE)
-   return -EINVAL;
-
-   if ((!host->mrq) || (!host->mrq->data))
-   return -EINVAL;
-
if (host->pio_sgptr >= host->mrq->data->sg_len) {
dbg(host, dbg_debug, "no more buffers (%i/%i)\n",
  host->pio_sgptr, host->mrq->data->sg_len);
return -EBUSY;
}
-   sg = &host->mrq->data->sg[host->pio_sgptr];
+   host->cur_sg = &host->mrq->data->sg[host->pio_sgptr];
 
-   *bytes = sg->length;
-   *pointer = sg_virt(sg);
+   host->pio_bytes = host->cur_sg->length;
+   host->pio_offset = host->cur_sg->offset;
 
host->pio_sgptr++;
 
@@ -422,11 +413,16 @@ static void s3cmci_disable_irq(struct s3cmci_host *host, 
bool transfer)
 
 static void do_pio_read(struct s3cmci_host *host)
 {
-   int res;
u32 fifo;
u32 *ptr;
u32 fifo_words;
void __iomem *from_ptr;
+   void *buf;
+
+   if (host->pio_active == XFER_NONE)
+   goto done;
+   if (!host->mrq || !host->mrq->data)
+   goto done;
 
/* write real prescaler to host, it might be set slow to fix */
writel(host->prescaler, host->base + S3C2410_SDIPRE);
@@ -435,20 +431,12 @@ static void do_pio_read(struct s3cmci_host *host)
 
while ((fifo = fifo_count(host))) {
if (!host->pio_bytes) {
-   res = get_data_buffer(host, &host->pio_bytes,
- &host->pio_ptr);
-   if (res) {
-   host->pio_active = XFER_NONE;
-   host->complete_what = COMPLETION_FINALIZE;
-
-   dbg(host, dbg_pio, "pio_read(): "
-   "complete (no more data).\n");
-   return;
-   }
+   if (get_data_buffer(host) < 0)
+   goto done;
 
dbg(host, dbg_pio,
-   "pio_read(): new target: [%i]@[%p]\n",
-   host->pio_bytes, host->pio_ptr);
+   "pio_read(): new target: [%i]@[%zu]\n",
+   host->pio_bytes, host->pio_offset);
}
 
dbg(host, dbg_pio,
@@ -470,63 +458,65 @@ static void do_pio_read(struct s3cmci_host *host)
host->pio_count += fifo;
 
fifo_words = fifo >> 2;
-   ptr = host->pio_ptr;
-   while (fifo_words--)
+
+   buf = (sg_kmap_atomic(host->cur_sg) + host->pio_offset);
+   ptr = buf;
+   while (fifo_words--) {
*ptr++ = readl(from_ptr);
-   host->pio_ptr = ptr;
+   host->pio_offset += 4;
+   }
 
if (fifo & 3) {
u32 n = fifo & 3;
u32 data = readl(from_ptr);
-   u8 *p = (u8 *)host->pio_ptr;
+   u8 *p = (u8 *)ptr;
 
while (n--) {
*p++ = data;
data >>= 8;
+   host->pio_offset++;
}
}
+   sg_kunmap_atomic(host->cur_sg, buf);
}
 
if (!host->pio_bytes) {
-   res = get_data_buffer(host, &host->pio_bytes, &host->pio_ptr);
-   if (res) {
-   dbg(host, dbg_pio,
-   "pio_read(): complete (no more buffers).\n");
-   host->pio_active = XFER_NONE;
-   host->complete_what = COMPLETION_FINALIZE;
-
-   return;
-   }
+   if (get_data_buffer(host) < 0)
+   goto done;
}
 
enable_imask(host,
 S3C2410_SDIIMSK_RXFIFOHALF | S3C2410_SDIIMSK_RXFIFOLAST);
+   return;
+
+done:
+   host->pio_active = XFER_NONE;
+   host->complete_what = COMPLETION_FINALIZE;
+   

[PATCH 10/14] mmc: mvsdio: handle highmem pages

2019-02-11 Thread Christoph Hellwig
Instead of setting up a kernel pointer to track the current PIO address,
track the offset in the current page, and do an atomic kmap for the page
while doing the actual PIO operations.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/host/mvsdio.c | 33 +
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index e22bbff89c8d..d04c78125a4d 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -42,7 +42,8 @@ struct mvsd_host {
unsigned int intr_en;
unsigned int ctrl;
unsigned int pio_size;
-   void *pio_ptr;
+   struct scatterlist *pio_sg;
+   unsigned int pio_offset; /* offset in words into the segment */
unsigned int sg_frags;
unsigned int ns_per_clk;
unsigned int clock;
@@ -96,9 +97,9 @@ static int mvsd_setup_data(struct mvsd_host *host, struct 
mmc_data *data)
if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX)
tmout_index = MVSD_HOST_CTRL_TMOUT_MAX;
 
-   dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u 
(%d)\n",
+   dev_dbg(host->dev, "data %s at 0x%08llx: blocks=%d blksz=%d tmout=%u 
(%d)\n",
(data->flags & MMC_DATA_READ) ? "read" : "write",
-   (u32)sg_virt(data->sg), data->blocks, data->blksz,
+   (u64)sg_phys(data->sg), data->blocks, data->blksz,
tmout, tmout_index);
 
host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK;
@@ -118,10 +119,11 @@ static int mvsd_setup_data(struct mvsd_host *host, struct 
mmc_data *data)
 * boundary.
 */
host->pio_size = data->blocks * data->blksz;
-   host->pio_ptr = sg_virt(data->sg);
+   host->pio_sg = data->sg;
+   host->pio_offset = data->sg->offset / 2;
if (!nodma)
-   dev_dbg(host->dev, "fallback to PIO for data at 0x%p 
size %d\n",
-   host->pio_ptr, host->pio_size);
+   dev_dbg(host->dev, "fallback to PIO for data at 0x%x 
size %d\n",
+   host->pio_offset, host->pio_size);
return 1;
} else {
dma_addr_t phys_addr;
@@ -291,8 +293,9 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct 
mmc_data *data,
 {
void __iomem *iobase = host->base;
 
-   if (host->pio_ptr) {
-   host->pio_ptr = NULL;
+   if (host->pio_sg) {
+   host->pio_sg = NULL;
+   host->pio_offset = 0;
host->pio_size = 0;
} else {
dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
@@ -376,8 +379,10 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
if (host->pio_size &&
(intr_status & host->intr_en &
 (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) {
-   u16 *p = host->pio_ptr;
+   u16 *base = sg_kmap_atomic(host->pio_sg);
+   u16 *p = base + host->pio_offset;
int s = host->pio_size;
+
while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) {
readsw(iobase + MVSD_FIFO, p, 16);
p += 16;
@@ -416,13 +421,15 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
}
dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
s, intr_status, mvsd_read(MVSD_HW_STATE));
-   host->pio_ptr = p;
+   host->pio_offset = p - base;
host->pio_size = s;
+   sg_kunmap_atomic(host->pio_sg, base);
irq_handled = 1;
} else if (host->pio_size &&
   (intr_status & host->intr_en &
(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) {
-   u16 *p = host->pio_ptr;
+   u16 *base = sg_kmap_atomic(host->pio_sg);
+   u16 *p = base + host->pio_offset;
int s = host->pio_size;
/*
 * The TX_FIFO_8W bit is unreliable. When set, bursting
@@ -453,8 +460,9 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
}
dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
s, intr_status, mvsd_read(MVSD_HW_STATE));
-   host->pio_ptr = p;
+   host->pio_offset = p - base;
host->pio_size = s;
+   sg_kunmap_atomic(host->pio_sg, base);
irq_handled = 1;
}
 
@@ -737,6 +745,7 @@ static int mvsd_probe(struct platform_device *pdev)
clk_prepare_enable(host->clk);
 
mmc->ops = &mvsd_ops;
+   mmc->need_kmap = 1;
 
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org

[PATCH 12/14] mmc: sh_mmcif: handle chained sglists

2019-02-11 Thread Christoph Hellwig
Use the proper sg_next() helper to move to the next scatterlist element
to support chained scatterlists.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/host/sh_mmcif.c | 13 +
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 24c3f13bafdb..9e59dbe6ef30 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -234,7 +234,6 @@ struct sh_mmcif_host {
enum sh_mmcif_wait_for wait_for;
struct delayed_work timeout_work;
size_t blocksize;
-   int sg_idx;
int sg_blkidx;
bool power;
bool ccs_enable;/* Command Completion Signal support */
@@ -606,13 +605,13 @@ static bool sh_mmcif_next_block(struct sh_mmcif_host 
*host)
 
if (host->sg_blkidx == data->sg->length) {
host->sg_blkidx = 0;
-   if (++host->sg_idx < data->sg_len) {
-   data->sg++;
-   host->pio_offset = data->sg->offset / 4;
-   }
+   data->sg = sg_next(data->sg);
+   if (!data->sg)
+   return false;
+   host->pio_offset = data->sg->offset / 4;
}
 
-   return host->sg_idx != data->sg_len;
+   return true;
 }
 
 static void sh_mmcif_single_read(struct sh_mmcif_host *host,
@@ -665,7 +664,6 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
BLOCK_SIZE_MASK;
 
host->wait_for = MMCIF_WAIT_FOR_MREAD;
-   host->sg_idx = 0;
host->sg_blkidx = 0;
host->pio_offset = data->sg->offset / 4;
 
@@ -752,7 +750,6 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
BLOCK_SIZE_MASK;
 
host->wait_for = MMCIF_WAIT_FOR_MWRITE;
-   host->sg_idx = 0;
host->sg_blkidx = 0;
host->pio_offset = data->sg->offset / 4;
 
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 09/14] mmc: s3cmci: handle chained sglists

2019-02-11 Thread Christoph Hellwig
Use the proper sg_next() helper to move to the next scatterlist element
to support chained scatterlists.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/host/s3cmci.c | 19 +--
 drivers/mmc/host/s3cmci.h |  2 +-
 2 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 989fefea19f1..df7c27f78abf 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -319,20 +319,19 @@ static void s3cmci_check_sdio_irq(struct s3cmci_host 
*host)
 
 static inline int get_data_buffer(struct s3cmci_host *host)
 {
-   if (host->pio_sgptr >= host->mrq->data->sg_len) {
-   dbg(host, dbg_debug, "no more buffers (%i/%i)\n",
- host->pio_sgptr, host->mrq->data->sg_len);
+   if (!host->next_sg) {
+   dbg(host, dbg_debug, "no more buffers (%i)\n",
+ host->mrq->data->sg_len);
return -EBUSY;
}
-   host->cur_sg = &host->mrq->data->sg[host->pio_sgptr];
+   host->cur_sg = host->next_sg;
+   host->next_sg = sg_next(host->next_sg);
 
host->pio_bytes = host->cur_sg->length;
host->pio_offset = host->cur_sg->offset;
 
-   host->pio_sgptr++;
-
-   dbg(host, dbg_sg, "new buffer (%i/%i)\n",
-   host->pio_sgptr, host->mrq->data->sg_len);
+   dbg(host, dbg_sg, "new buffer (%i)\n",
+   host->mrq->data->sg_len);
 
return 0;
 }
@@ -1051,8 +1050,8 @@ static int s3cmci_prepare_pio(struct s3cmci_host *host, 
struct mmc_data *data)
 
BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
 
-   host->pio_sgptr = 0;
-   host->cur_sg = &host->mrq->data->sg[host->pio_sgptr];
+   host->cur_sg = host->mrq->data->sg;
+   host->next_sg = sg_next(host->cur_sg);
host->pio_bytes = 0;
host->pio_count = 0;
host->pio_active = rw ? XFER_WRITE : XFER_READ;
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
index 4320f7d832dc..caf1078d07d1 100644
--- a/drivers/mmc/host/s3cmci.h
+++ b/drivers/mmc/host/s3cmci.h
@@ -51,7 +51,7 @@ struct s3cmci_host {
int dma_complete;
 
struct scatterlist  *cur_sg;
-   u32 pio_sgptr;
+   struct scatterlist  *next_sg;
u32 pio_bytes;
u32 pio_count;
u32 pio_offset;
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 06/14] mmc: omap: handle highmem pages

2019-02-11 Thread Christoph Hellwig
Instead of setting up a kernel pointer to track the current PIO address,
track the offset in the current page, and do an atomic kmap for the page
while doing the actual PIO operations.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/host/omap.c | 15 ++-
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index c60a7625b1fa..6741c95f2281 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -148,7 +148,7 @@ struct mmc_omap_host {
 
unsigned intsg_len;
int sg_idx;
-   u16 *   buffer;
+   u32 buffer_offset;
u32 buffer_bytes_left;
u32 total_bytes_left;
 
@@ -649,7 +649,7 @@ mmc_omap_sg_to_buf(struct mmc_omap_host *host)
 
sg = host->data->sg + host->sg_idx;
host->buffer_bytes_left = sg->length;
-   host->buffer = sg_virt(sg);
+   host->buffer_offset = sg->offset;
if (host->buffer_bytes_left > host->total_bytes_left)
host->buffer_bytes_left = host->total_bytes_left;
 }
@@ -666,7 +666,9 @@ mmc_omap_clk_timer(struct timer_list *t)
 static void
 mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
 {
+   struct scatterlist *sg = host->data->sg + host->sg_idx;
int n, nwords;
+   void *p;
 
if (host->buffer_bytes_left == 0) {
host->sg_idx++;
@@ -684,15 +686,17 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
host->total_bytes_left -= n;
host->data->bytes_xfered += n;
 
+   p = sg_kmap_atomic(sg);
if (write) {
__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
- host->buffer, nwords);
+ p + host->buffer_offset, nwords);
} else {
__raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
-host->buffer, nwords);
+p + host->buffer_offset, nwords);
}
+   sg_kunmap_atomic(sg, p);
 
-   host->buffer += nwords;
+   host->buffer_offset += nwords;
 }
 
 #ifdef CONFIG_MMC_DEBUG
@@ -1250,6 +1254,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, 
int id)
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_ERASE;
 
mmc->ops = &mmc_omap_ops;
+   mmc->need_kmap = 1;
mmc->f_min = 40;
 
if (mmc_omap2())
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 04/14] mmc: davinci: handle highmem pages

2019-02-11 Thread Christoph Hellwig
Instead of setting up a kernel pointer to track the current PIO address,
track the offset in the current page, and do an atomic kmap for the page
while doing the actual PIO operations.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/host/davinci_mmc.c | 22 +-
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 9e68c3645e22..6a16d7a1d5bc 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -194,11 +194,12 @@ struct mmc_davinci_host {
 #define DAVINCI_MMC_DATADIR_WRITE  2
unsigned char data_dir;
 
-   /* buffer is used during PIO of one scatterlist segment, and
-* is updated along with buffer_bytes_left.  bytes_left applies
-* to all N blocks of the PIO transfer.
+   /*
+* buffer_offset is used during PIO of one scatterlist segment, and is
+* updated along with buffer_bytes_left.  bytes_left applies to all N
+* blocks of the PIO transfer.
 */
-   u8 *buffer;
+   u32 buffer_offset;
u32 buffer_bytes_left;
u32 bytes_left;
 
@@ -229,8 +230,8 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);
 /* PIO only */
 static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
 {
+   host->buffer_offset = host->sg->offset;
host->buffer_bytes_left = sg_dma_len(host->sg);
-   host->buffer = sg_virt(host->sg);
if (host->buffer_bytes_left > host->bytes_left)
host->buffer_bytes_left = host->bytes_left;
 }
@@ -238,7 +239,7 @@ static void mmc_davinci_sg_to_buf(struct mmc_davinci_host 
*host)
 static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
unsigned int n)
 {
-   u8 *p;
+   u8 *p, *base;
unsigned int i;
 
if (host->buffer_bytes_left == 0) {
@@ -246,7 +247,8 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host 
*host,
mmc_davinci_sg_to_buf(host);
}
 
-   p = host->buffer;
+   base = sg_kmap_atomic(host->sg);
+   p = base + host->buffer_offset;
if (n > host->buffer_bytes_left)
n = host->buffer_bytes_left;
host->buffer_bytes_left -= n;
@@ -275,7 +277,8 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host 
*host,
p = p + (n & 3);
}
}
-   host->buffer = p;
+   host->buffer_offset = p - base;
+   sg_kunmap_atomic(host->sg, base);
 }
 
 static void mmc_davinci_start_command(struct mmc_davinci_host *host,
@@ -572,7 +575,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, 
struct mmc_request *req)
host->base + DAVINCI_MMCFIFOCTL);
}
 
-   host->buffer = NULL;
+   host->buffer_offset = 0;
host->bytes_left = data->blocks * data->blksz;
 
/* For now we try to use DMA whenever we won't need partial FIFO
@@ -1291,6 +1294,7 @@ static int davinci_mmcsd_probe(struct platform_device 
*pdev)
 
mmc->ops = &mmc_davinci_ops;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+   mmc->need_kmap = 1;
 
/* With no iommu coalescing pages, each phys_seg is a hw_seg.
 * Each hw_seg uses one EDMA parameter RAM slot, always one
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 05/14] mmc: moxart: handle highmem pages

2019-02-11 Thread Christoph Hellwig
Instead of setting up a kernel pointer to track the current PIO address,
track the offset in the current page, and do a kmap for the page while
doing the actual PIO operations.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/host/moxart-mmc.c | 20 
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index a0670e9cd012..116964e6506d 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -311,7 +311,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
if (host->data_len == data->bytes_xfered)
return;
 
-   sgp = sg_virt(host->cur_sg);
+   sgp = kmap(sg_page(host->cur_sg)) + host->cur_sg->offset;
remain = host->data_remain;
 
if (data->flags & MMC_DATA_WRITE) {
@@ -319,8 +319,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
if (moxart_wait_for_status(host, FIFO_URUN, &status)
 == -ETIMEDOUT) {
data->error = -ETIMEDOUT;
-   complete(&host->pio_complete);
-   return;
+   goto done;
}
for (len = 0; len < remain && len < host->fifo_width;) {
iowrite32(*sgp, host->base + REG_DATA_WINDOW);
@@ -335,8 +334,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
if (moxart_wait_for_status(host, FIFO_ORUN, &status)
== -ETIMEDOUT) {
data->error = -ETIMEDOUT;
-   complete(&host->pio_complete);
-   return;
+   goto done;
}
for (len = 0; len < remain && len < host->fifo_width;) {
/* SCR data must be read in big endian. */
@@ -356,10 +354,15 @@ static void moxart_transfer_pio(struct moxart_host *host)
data->bytes_xfered += host->data_remain - remain;
host->data_remain = remain;
 
-   if (host->data_len != data->bytes_xfered)
+   if (host->data_len != data->bytes_xfered) {
+   kunmap(sg_page(host->cur_sg));
moxart_next_sg(host);
-   else
-   complete(&host->pio_complete);
+   return;
+   }
+
+done:
+   kunmap(sg_page(host->cur_sg));
+   complete(&host->pio_complete);
 }
 
 static void moxart_prepare_data(struct moxart_host *host)
@@ -614,6 +617,7 @@ static int moxart_probe(struct platform_device *pdev)
spin_lock_init(>lock);
 
mmc->ops = &moxart_ops;
+   mmc->need_kmap = 1;
mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2);
mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2);
mmc->ocr_avail = 0x00;  /* Support 2.0v - 3.6v power. */
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 02/14] mmc: remove the unused use_blk_mq field from struct mmc_host

2019-02-11 Thread Christoph Hellwig
Signed-off-by: Christoph Hellwig 
---
 include/linux/mmc/host.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4d35ff36ceff..4eadf01b4a93 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -397,7 +397,6 @@ struct mmc_host {
unsigned intdoing_retune:1; /* re-tuning in progress */
unsigned intretune_now:1;   /* do re-tuning at next req */
unsigned intretune_paused:1; /* re-tuning is temporarily 
disabled */
-   unsigned intuse_blk_mq:1;   /* use blk-mq */
 
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable 
devices */
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 01/14] scatterlist: add sg_kmap_atomic / sg_kunmap_atomic helpers

2019-02-11 Thread Christoph Hellwig
This avoids bug prone open coding of the sg offset handling and
also helps to document the limitations of mapping scatterlist
entries.

Signed-off-by: Christoph Hellwig 
---
 include/linux/scatterlist.h | 26 ++
 1 file changed, 26 insertions(+)

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index b96f0d0b5b8f..524cd8448a48 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -2,6 +2,7 @@
 #ifndef _LINUX_SCATTERLIST_H
 #define _LINUX_SCATTERLIST_H
 
+#include 
 #include 
 #include 
 #include 
@@ -239,6 +240,31 @@ static inline void *sg_virt(struct scatterlist *sg)
return page_address(sg_page(sg)) + sg->offset;
 }
 
+/**
+ * sg_kmap_atomic - map a S/G list entry to a kernel address
+ * @sg:scatterlist entry
+ *
+ * Return a kernel address for scatterlist entry by kmapping it.  Note that
+ * this function must only be called on scatterlist entries that do not span
+ * multiple pages.
+ */
+static inline void *sg_kmap_atomic(struct scatterlist *sg)
+{
+   if (WARN_ON_ONCE(sg->offset + sg->length > PAGE_SIZE))
+   return NULL;
+   return kmap_atomic(sg_page(sg)) + sg->offset;
+}
+
+/**
+ * sg_kunmap_atomic - unmap a S/G list entry to a kernel address
+ * @sg:scatterlist entry
+ * @ptr:   address returned from sg_kmap_atomic
+ */
+static inline void sg_kunmap_atomic(struct scatterlist *sg, void *ptr)
+{
+   kunmap_atomic(ptr - sg->offset);
+}
+
 /**
  * sg_init_marker - Initialize markers in sg table
  * @sgl:  The SG table
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 03/14] mmc: add a need_kmap flag to struct mmc_host

2019-02-11 Thread Christoph Hellwig
If we want to get rid of the block layer bounce buffering for highmem we
need to ensure no segment spans multiple pages so that we can kmap it.
Add a flag to struct mmc_host so that we can handle the block and DMA
layer interactions in common code.

Signed-off-by: Christoph Hellwig 
---
 drivers/mmc/core/queue.c | 13 +
 include/linux/mmc/host.h |  1 +
 2 files changed, 14 insertions(+)

diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 35cc138b096d..71cd2411329e 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -370,6 +370,19 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct 
mmc_card *card)
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
+   /*
+* If the host requires kmapping for PIO we need to ensure
+* that no segment spans a page boundary.
+*/
+   if (host->need_kmap) {
+   unsigned int dma_boundary = host->max_seg_size - 1;
+
+   if (dma_boundary >= PAGE_SIZE)
+   dma_boundary = PAGE_SIZE - 1;
+   blk_queue_segment_boundary(mq->queue, dma_boundary);
+   dma_set_seg_boundary(mmc_dev(host), dma_boundary);
+   }
+
INIT_WORK(>recovery_work, mmc_mq_recovery_handler);
INIT_WORK(>complete_work, mmc_blk_mq_complete_work);
 
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4eadf01b4a93..87f8a89d2f70 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -397,6 +397,7 @@ struct mmc_host {
unsigned intdoing_retune:1; /* re-tuning in progress */
unsigned intretune_now:1;   /* do re-tuning at next req */
unsigned intretune_paused:1; /* re-tuning is temporarily 
disabled */
+   unsigned intneed_kmap:1;/* only allow single page 
segments */
 
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable 
devices */
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


remove block layer bounce buffering for MMC v2

2019-02-11 Thread Christoph Hellwig
Hi everyone,

this series converts the remaining MMC host drivers to properly kmap the
scatterlist entries it does PIO operations on, and then goes on to
remove the usage of block layer bounce buffering (which I plan to remove
eventually) from the MMC layer.

As a bonus I've converted various drivers to the proper scatterlist
helpers so that at least in theory they are ready for chained
scatterlists.

All the changes are compile tested only as I don't have any of the
hardware, so a careful review would be appreciated.

Changes since v1:
 - fix a missing kunmap_atomic in mvsdio
 - fix a stray whitespace in s3cmci
 - add new sg_kmap_atomic and sg_kunmap_atomic helpers
 - set the DMA and block layer dma boundary
 - use pointer arithmetics to reduce the amount of changes in
   various drivers

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: What is the meaning of PASID_MIN?

2019-02-11 Thread Lu Baolu



On 2/12/19 11:02 AM, Peter Xu wrote:

On Tue, Feb 12, 2019 at 10:44:23AM +0800, Lu Baolu wrote:

Hi,

On 2/12/19 3:28 AM, Matthew Wilcox wrote:


I'm looking at commit 562831747f6299abd481b5b00bd4fa19d5c8a259
which fails to adequately explain why we can't use PASID 0.  Commit
af39507305fb83a5d3c475c2851f4d59545d8a18 also doesn't explain why PASID
0 is no longer usable for the intel-svm driver.


Sorry that we didn't make it clear.



There are a load of simplifications that could be made to this, but I
don't know which ones to suggest without a clear understanding of the
problem you're actually trying to solve.



PASID 0 has been reserved by Intel IOMMU driver for RID_PASID purpose.

VT-d scalable mode treats all address translation as PASID granularity.
For DMA requests-with-PASID, the PASID value in the DMA request will be
used. For DMA requests-without-PASID, VT-d will use a static PASID value
specified in the RID_PASID field of the context entry. PASID 0 has been
reserved for this usage for all devices.

(Please refer to 9.4 of the spec 3.0 for more details.)


Hi, Baolu,


Hi Peter,



I have a similar confusion.

If PASID==0 is reserved for requests-without-PASID, then does it mean
that for each scalable mode context entry the RID_PASID field will
always be zero?


Yes.


 Or say, since we already have the per-context-entry
RID_PASID field which seems to be configurable, why we still need to
reserve the PASID==0?


We decided to set RID_PASID always to 0. This will make things simple
especially for virtual IOMMU case. It will also be compatible with other
arch's which reserves PASID 0 for legacy translation.

Best regards,
Lu Baolu
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: What is the meaning of PASID_MIN?

2019-02-11 Thread Peter Xu
On Tue, Feb 12, 2019 at 10:44:23AM +0800, Lu Baolu wrote:
> Hi,
> 
> On 2/12/19 3:28 AM, Matthew Wilcox wrote:
> > 
> > I'm looking at commit 562831747f6299abd481b5b00bd4fa19d5c8a259
> > which fails to adequately explain why we can't use PASID 0.  Commit
> > af39507305fb83a5d3c475c2851f4d59545d8a18 also doesn't explain why PASID
> > 0 is no longer usable for the intel-svm driver.
> 
> Sorry that we didn't make it clear.
> 
> > 
> > There are a load of simplifications that could be made to this, but I
> > don't know which ones to suggest without a clear understanding of the
> > problem you're actually trying to solve.
> > 
> 
> PASID 0 has been reserved by Intel IOMMU driver for RID_PASID purpose.
> 
> VT-d scalable mode treats all address translation as PASID granularity.
> For DMA requests-with-PASID, the PASID value in the DMA request will be
> used. For DMA requests-without-PASID, VT-d will use a static PASID value
> specified in the RID_PASID field of the context entry. PASID 0 has been
> reserved for this usage for all devices.
> 
> (Please refer to 9.4 of the spec 3.0 for more details.)

Hi, Baolu,

I have a similar confusion.

If PASID==0 is reserved for requests-without-PASID, then does it mean
that for each scalable mode context entry the RID_PASID field will
always be zero?  Or say, since we already have the per-context-entry
RID_PASID field which seems to be configurable, why we still need to
reserve the PASID==0?

Thanks,

-- 
Peter Xu
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: What is the meaning of PASID_MIN?

2019-02-11 Thread Lu Baolu

Hi,

On 2/12/19 3:28 AM, Matthew Wilcox wrote:


I'm looking at commit 562831747f6299abd481b5b00bd4fa19d5c8a259
which fails to adequately explain why we can't use PASID 0.  Commit
af39507305fb83a5d3c475c2851f4d59545d8a18 also doesn't explain why PASID
0 is no longer usable for the intel-svm driver.


Sorry that we didn't make it clear.



There are a load of simplifications that could be made to this, but I
don't know which ones to suggest without a clear understanding of the
problem you're actually trying to solve.



PASID 0 has been reserved by Intel IOMMU driver for RID_PASID purpose.

VT-d scalable mode treats all address translation as PASID granularity.
For DMA requests-with-PASID, the PASID value in the DMA request will be
used. For DMA requests-without-PASID, VT-d will use a static PASID value
specified in the RID_PASID field of the context entry. PASID 0 has been
reserved for this usage for all devices.

(Please refer to 9.4 of the spec 3.0 for more details.)

Best regards,
Lu Baolu
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 06/12] dma-mapping: improve selection of dma_declare_coherent availability

2019-02-11 Thread Paul Burton
Hi Christoph,

On Mon, Feb 11, 2019 at 02:35:48PM +0100, Christoph Hellwig wrote:
> This API is primarily used through DT entries, but two architectures
> and two drivers call it directly.  So instead of selecting the config
> symbol for random architectures pull it in implicitly for the actual
> users.  Also rename the Kconfig option to describe the feature better.
> 
> Signed-off-by: Christoph Hellwig 

Acked-by: Paul Burton  # MIPS

Thanks,
Paul
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 1/2] dma-mapping: add a kconfig symbol for arch_setup_dma_ops availability

2019-02-11 Thread Paul Burton
Hi Christoph,

On Mon, Feb 04, 2019 at 09:14:19AM +0100, Christoph Hellwig wrote:
> Signed-off-by: Christoph Hellwig 
> ---
>  arch/arc/Kconfig |  1 +
>  arch/arc/include/asm/Kbuild  |  1 +
>  arch/arc/include/asm/dma-mapping.h   | 13 -
>  arch/arm/Kconfig |  1 +
>  arch/arm/include/asm/dma-mapping.h   |  4 
>  arch/arm64/Kconfig   |  1 +
>  arch/arm64/include/asm/dma-mapping.h |  4 
>  arch/mips/Kconfig|  1 +
>  arch/mips/include/asm/dma-mapping.h  | 10 --
>  arch/mips/mm/dma-noncoherent.c   |  8 

Acked-by: Paul Burton  # MIPS

Thanks,
Paul
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH v2 0/2] Add PGR response PASID requirement check in Intel IOMMU.

2019-02-11 Thread sathyanarayanan . kuppuswamy
From: Kuppuswamy Sathyanarayanan 

Intel IOMMU responds automatically when receiving page-requests from
a PCIe endpoint and the page-request queue is full and it cannot accept
any more page-requests. When it auto-responds to page-requests with a
success to the endpoint, it automatically responds with the PASID if
the page-request had a PASID in the incoming request. IOMMU doesn't
actually have any place to check device capabilities (like whether
the device expects PASID in PGR response or not) before sending the
response message. Due to this restriction Intel IOMMU driver only
enables PASID, if the endpoint is compliant to Intel IOMMU's.

Changes since v1:
 * Changed interface name to pci_prg_resp_pasid_required().
 * Update comment header format.

Kuppuswamy Sathyanarayanan (2):
  PCI/ATS: Add pci_prg_resp_pasid_required() interface.
  iommu/vt-d: Enable PASID only if device expects PASID in PRG Response.

 drivers/iommu/intel-iommu.c   |  3 ++-
 drivers/pci/ats.c | 31 +++
 include/linux/pci-ats.h   |  5 +
 include/uapi/linux/pci_regs.h |  1 +
 4 files changed, 39 insertions(+), 1 deletion(-)

-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH v2 1/2] PCI/ATS: Add pci_prg_resp_pasid_required() interface.

2019-02-11 Thread sathyanarayanan . kuppuswamy
From: Kuppuswamy Sathyanarayanan 

Return the PRG Response PASID Required bit in the Page Request
Status Register.

As per PCIe spec r4.0, sec 10.5.2.3, if this bit is Set then the device
expects a PASID TLP Prefix on PRG Response Messages when the
corresponding Page Requests had a PASID TLP Prefix. If Clear, the device
does not expect PASID TLP Prefixes on any PRG Response Message, and the
device behavior is undefined if this bit is Clear and the device
receives a PRG Response Message with a PASID TLP Prefix. Also the device
behavior is undefined if this bit is Set and the device receives a
PRG Response Message with no PASID TLP Prefix when the corresponding
Page Requests had a PASID TLP Prefix.

This function will be used by drivers like IOMMU, if it is required to
check the status of the PRG Response PASID Required bit before enabling
the PASID support of the device.

Cc: Ashok Raj 
Cc: Jacob Pan 
Cc: Keith Busch 
Suggested-by: Ashok Raj 
Signed-off-by: Kuppuswamy Sathyanarayanan 

---
 drivers/pci/ats.c | 31 +++
 include/linux/pci-ats.h   |  5 +
 include/uapi/linux/pci_regs.h |  1 +
 3 files changed, 37 insertions(+)

diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 5b78f3b1b918..f843cd846dff 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -368,6 +368,37 @@ int pci_pasid_features(struct pci_dev *pdev)
 }
 EXPORT_SYMBOL_GPL(pci_pasid_features);
 
+/**
+ * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit
+ *  status.
+ * @pdev: PCI device structure
+ *
+ * Returns 1 if PASID is required in PRG Response message, 0 otherwise.
+ *
+ * Even though the PRG response PASID status is read from PRI status
+ * register, since this API will mainly be used by PASID users, this
+ * function is defined within #ifdef CONFIG_PCI_PASID instead of
+ * CONFIG_PCI_PRI.
+ *
+ */
+int pci_prg_resp_pasid_required(struct pci_dev *pdev)
+{
+   u16 status;
+   int pos;
+
+   pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+   if (!pos)
+   return 0;
+
+   pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+
+   if (status & PCI_PRI_STATUS_PASID)
+   return 1;
+
+   return 0;
+}
+EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required);
+
 #define PASID_NUMBER_SHIFT 8
 #define PASID_NUMBER_MASK  (0x1f << PASID_NUMBER_SHIFT)
 /**
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 7c4b8e27268c..facfd6a18fe1 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -40,6 +40,7 @@ void pci_disable_pasid(struct pci_dev *pdev);
 void pci_restore_pasid_state(struct pci_dev *pdev);
 int pci_pasid_features(struct pci_dev *pdev);
 int pci_max_pasids(struct pci_dev *pdev);
+int pci_prg_resp_pasid_required(struct pci_dev *pdev);
 
 #else  /* CONFIG_PCI_PASID */
 
@@ -66,6 +67,10 @@ static inline int pci_max_pasids(struct pci_dev *pdev)
return -EINVAL;
 }
 
+static int pci_prg_resp_pasid_required(struct pci_dev *pdev)
+{
+   return 0;
+}
 #endif /* CONFIG_PCI_PASID */
 
 
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index e1e9888c85e6..898be572b010 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -880,6 +880,7 @@
 #define  PCI_PRI_STATUS_RF 0x001   /* Response Failure */
 #define  PCI_PRI_STATUS_UPRGI  0x002   /* Unexpected PRG index */
 #define  PCI_PRI_STATUS_STOPPED0x100   /* PRI Stopped */
+#define  PCI_PRI_STATUS_PASID  0x8000  /* PRG Response PASID Required */
 #define PCI_PRI_MAX_REQ0x08/* PRI max reqs supported */
 #define PCI_PRI_ALLOC_REQ  0x0c/* PRI max reqs allowed */
 #define PCI_EXT_CAP_PRI_SIZEOF 16
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH v2 2/2] iommu/vt-d: Enable PASID only if device expects PASID in PRG Response.

2019-02-11 Thread sathyanarayanan . kuppuswamy
From: Kuppuswamy Sathyanarayanan 

In Intel IOMMU, if the Page Request Queue (PRQ) is full, it will
automatically respond to the device with a success message as a keep
alive. And when sending the success message, IOMMU will include PASID in
the Response Message when the Page Request has a PASID in Request
Message and It does not check against the PRG Response PASID requirement
of the device before sending the response. Also, If the device receives the
PRG response with PASID when its not expecting it then the device behavior
is undefined. So enable PASID support only if device expects PASID in PRG
response message.

Cc: Ashok Raj 
Cc: Jacob Pan 
Cc: Keith Busch 
Suggested-by: Ashok Raj 
Signed-off-by: Kuppuswamy Sathyanarayanan 

---
 drivers/iommu/intel-iommu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1457f931218e..af2e4a011787 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1399,7 +1399,8 @@ static void iommu_enable_dev_iotlb(struct 
device_domain_info *info)
   undefined. So always enable PASID support on devices which
   have it, even if we can't yet know if we're ever going to
   use it. */
-   if (info->pasid_supported && !pci_enable_pasid(pdev, 
info->pasid_supported & ~1))
+   if (info->pasid_supported && pci_prg_resp_pasid_required(pdev) &&
+   !pci_enable_pasid(pdev, info->pasid_supported & ~1))
info->pasid_enabled = 1;
 
if (info->pri_supported && !pci_reset_pri(pdev) && 
!pci_enable_pri(pdev, 32))
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v2 0/2] Add page alignment check in Intel IOMMU.

2019-02-11 Thread sathyanarayanan kuppuswamy



On 2/11/19 1:43 PM, sathyanarayanan.kuppusw...@linux.intel.com wrote:

From: Kuppuswamy Sathyanarayanan 

As per Intel vt-d specification, Rev 3.0 (section 7.5.1.1, title "Page Request 
Descriptor"), Intel IOMMU Page Request Descriptor only provides bits[63:12] of the 
page address. Hence its required to enforce that the device will only send page request 
with page-aligned address. So, this patch set adds support to verify whether the device 
uses page aligned address before enabling the ATS service in Intel IOMMU driver.

Please ignore this mail. This is my older copy.


Kuppuswamy Sathyanarayanan (2):
   PCI: ATS: Add function to check ATS page alignment status.
   iommu/vt-d: Enable ATS only if the device uses page aligned address.

  drivers/iommu/intel-iommu.c   |  1 +
  drivers/pci/ats.c | 24 
  include/linux/pci.h   |  2 ++
  include/uapi/linux/pci_regs.h |  1 +
  4 files changed, 28 insertions(+)


--
Sathyanarayanan Kuppuswamy
Linux kernel developer

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH v2 2/2] iommu/vt-d: Enable ATS only if the device uses page aligned address.

2019-02-11 Thread sathyanarayanan . kuppuswamy
From: Kuppuswamy Sathyanarayanan 

As per Intel vt-d specification, Rev 3.0 (section 7.5.1.1, title "Page
Request Descriptor"), Intel IOMMU page request descriptor only uses
bits[63:12] of the page address. Hence Intel IOMMU driver would only
permit devices that advertise they would only send page aligned requests
to participate in ATS service.

Cc: Ashok Raj 
Cc: Jacob Pan 
Cc: Keith Busch 
Suggested-by: Ashok Raj 
Signed-off-by: Kuppuswamy Sathyanarayanan 

---
 drivers/iommu/intel-iommu.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1457f931218e..6a0b5270cd2e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1406,6 +1406,7 @@ static void iommu_enable_dev_iotlb(struct 
device_domain_info *info)
info->pri_enabled = 1;
 #endif
if (!pdev->untrusted && info->ats_supported &&
+   pci_ats_page_aligned(pdev) &&
!pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
domain_update_iotlb(info->domain);
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH v2 0/2] Add page alignment check in Intel IOMMU.

2019-02-11 Thread sathyanarayanan . kuppuswamy
From: Kuppuswamy Sathyanarayanan 

As per Intel vt-d specification, Rev 3.0 (section 7.5.1.1, title
"Page Request Descriptor"), Intel IOMMU page request descriptor
only uses bits[63:12] of the Page Address. Hence its required to
enforce that the device will only send page request with
page-aligned address. So, this patch set adds support to verify
whether the device uses page aligned address before enabling the
ATS service in Intel IOMMU driver.

Changes since v1:
 * Fixed issue with PCI_ATS_CAP_PAGE_ALIGNED macro.
 * Fixed comments.

Kuppuswamy Sathyanarayanan (2):
  PCI/ATS: Add pci_ats_page_aligned() interface
  iommu/vt-d: Enable ATS only if the device uses page aligned address.

 drivers/iommu/intel-iommu.c   |  1 +
 drivers/pci/ats.c | 27 +++
 include/linux/pci.h   |  2 ++
 include/uapi/linux/pci_regs.h |  1 +
 4 files changed, 31 insertions(+)

-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH v2 1/2] PCI/ATS: Add pci_ats_page_aligned() interface

2019-02-11 Thread sathyanarayanan . kuppuswamy
From: Kuppuswamy Sathyanarayanan 

Return the Page Aligned Request bit in the ATS Capability Register.

As per PCIe spec r4.0, sec 10.5.1.2, If Page Aligned Request bit is
set, then it indicates the Untranslated Addresses generated by the
device are always aligned to a 4096 byte boundary.

This interface will be used by drivers like IOMMU, if it is required
to check whether the Untranslated Address generated by the device will
be aligned before enabling the ATS service.

Cc: Ashok Raj 
Cc: Jacob Pan 
Cc: Keith Busch 
Suggested-by: Ashok Raj 
Signed-off-by: Kuppuswamy Sathyanarayanan 

---
 drivers/pci/ats.c | 27 +++
 include/linux/pci.h   |  2 ++
 include/uapi/linux/pci_regs.h |  1 +
 3 files changed, 30 insertions(+)

diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 5b78f3b1b918..b3c7f1496081 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -142,6 +142,33 @@ int pci_ats_queue_depth(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
 
+/**
+ * pci_ats_page_aligned - Return Page Aligned Request bit status.
+ * @pdev: the PCI device
+ *
+ * Returns 1, if Untranslated Addresses generated by the device are
+ * always aligned or 0 otherwise.
+ *
+ * Per PCIe spec r4.0, sec 10.5.1.2, If Page Aligned Request bit is
+ * set, it indicates the Untranslated Addresses generated by the
+ * device are always aligned to a 4096 byte boundary.
+ */
+int pci_ats_page_aligned(struct pci_dev *pdev)
+{
+   u16 cap;
+
+   if (!pdev->ats_cap)
+   return 0;
+
+   pci_read_config_word(pdev, pdev->ats_cap + PCI_ATS_CAP, &cap);
+
+   if (cap & PCI_ATS_CAP_PAGE_ALIGNED)
+   return 1;
+
+   return 0;
+}
+EXPORT_SYMBOL_GPL(pci_ats_page_aligned);
+
 #ifdef CONFIG_PCI_PRI
 /**
  * pci_enable_pri - Enable PRI capability
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 65f1d8c2f082..9724a8c0496b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1524,11 +1524,13 @@ void pci_ats_init(struct pci_dev *dev);
 int pci_enable_ats(struct pci_dev *dev, int ps);
 void pci_disable_ats(struct pci_dev *dev);
 int pci_ats_queue_depth(struct pci_dev *dev);
+int pci_ats_page_aligned(struct pci_dev *dev);
 #else
 static inline void pci_ats_init(struct pci_dev *d) { }
 static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
 static inline void pci_disable_ats(struct pci_dev *d) { }
 static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
+static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
 #endif
 
 #ifdef CONFIG_PCIE_PTM
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index e1e9888c85e6..7973bb02ed4b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -866,6 +866,7 @@
 #define PCI_ATS_CAP0x04/* ATS Capability Register */
 #define  PCI_ATS_CAP_QDEP(x)   ((x) & 0x1f)/* Invalidate Queue Depth */
 #define  PCI_ATS_MAX_QDEP  32  /* Max Invalidate Queue Depth */
+#define  PCI_ATS_CAP_PAGE_ALIGNED  0x0020 /* Page Aligned Request */
 #define PCI_ATS_CTRL   0x06/* ATS Control Register */
 #define  PCI_ATS_CTRL_ENABLE   0x8000  /* ATS Enable */
 #define  PCI_ATS_CTRL_STU(x)   ((x) & 0x1f)/* Smallest Translation Unit */
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH v2 0/2] Add page alignment check in Intel IOMMU.

2019-02-11 Thread sathyanarayanan . kuppuswamy
From: Kuppuswamy Sathyanarayanan 

As per Intel vt-d specification, Rev 3.0 (section 7.5.1.1, title "Page Request 
Descriptor"), Intel IOMMU Page Request Descriptor only provides bits[63:12] of 
the page address. Hence its required to enforce that the device will only send 
page request with page-aligned address. So, this patch set adds support to 
verify whether the device uses page aligned address before enabling the ATS 
service in Intel IOMMU driver.

Kuppuswamy Sathyanarayanan (2):
  PCI: ATS: Add function to check ATS page alignment status.
  iommu/vt-d: Enable ATS only if the device uses page aligned address.

 drivers/iommu/intel-iommu.c   |  1 +
 drivers/pci/ats.c | 24 
 include/linux/pci.h   |  2 ++
 include/uapi/linux/pci_regs.h |  1 +
 4 files changed, 28 insertions(+)

-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v1 1/2] PCI: ATS: Add function to check ATS page aligned request status.

2019-02-11 Thread Sinan Kaya

On 2/11/2019 2:15 PM, Raj, Ashok wrote:

It seems rather odd we have to check for ATS version.

I always assumed unspecified bits (Reserved) must be 0. We only check
this if ATS is enabled, and this particular bit wasn't given away for another
feature.

Is it really required to check for ATS version before consuming this?


Reading again, it looks like version check is not necessary since it
is implied by the presence of this bit per this paragraph.

Page Aligned Request – If Set, indicates the Untranslated Address is 
always aligned to a 4096 byte boundary.  Setting this bit is 
recommended.  This bit permits software to distinguish between 
implementations compatible with earlier version of this specification 
that permitted a requester to supply anything in bits [11:2].

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

What is the meaning of PASID_MIN?

2019-02-11 Thread Matthew Wilcox


I'm looking at commit 562831747f6299abd481b5b00bd4fa19d5c8a259
which fails to adequately explain why we can't use PASID 0.  Commit
af39507305fb83a5d3c475c2851f4d59545d8a18 also doesn't explain why PASID
0 is no longer usable for the intel-svm driver.

There are a load of simplifications that could be made to this, but I
don't know which ones to suggest without a clear understanding of the
problem you're actually trying to solve.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v1 1/2] PCI: ATS: Add function to check ATS page aligned request status.

2019-02-11 Thread sathyanarayanan kuppuswamy



On 2/11/19 11:15 AM, Raj, Ashok wrote:

On Fri, Feb 08, 2019 at 11:49:55PM -0500, Sinan Kaya wrote:

On 2/8/2019 8:02 PM, sathyanarayanan kuppuswamy wrote:

This means that you should probably have some kind of version check
here.

There is no version field in ATS v1.0 spec. Also, If I follow the history
log in PCI spec, I think ATS if first added at v1.2. Please correct me if
I am wrong.

v1.2 was incorporated into PCIe spec at that time. However, the ATS spec
is old and there could be some HW that could claim to be ATS compatible.
I know AMD GPUs declare ATS capability.

It seems rather odd we have to check for ATS version.

I always assumed unspecified bits (Reserved) must be 0. We only check
this if ATS is enabled, and this particular bit wasn't given away for another
feature.

Is it really required to check for ATS version before consuming this?
If the version check is required then, it needs to be added before 
reading "Invalidate Queue Depth" value as well.




See this ECN

https://composter.com.ua/documents/ats_r1.1_26Jan09.pdf

You need to validate the version field from ATS capability header to be
1 before reading this register.

See Table 5-1:  ATS Extended Capability Header


--
Sathyanarayanan Kuppuswamy
Linux kernel developer

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v1 1/2] PCI: ATS: Add function to check ATS page aligned request status.

2019-02-11 Thread Raj, Ashok
On Fri, Feb 08, 2019 at 11:49:55PM -0500, Sinan Kaya wrote:
> On 2/8/2019 8:02 PM, sathyanarayanan kuppuswamy wrote:
> >>This means that you should probably have some kind of version check
> >>here.
> >
> >There is no version field in ATS v1.0 spec. Also, If I follow the history
> >log in PCI spec, I think ATS if first added at v1.2. Please correct me if
> >I am wrong.
> 
> v1.2 was incorporated into PCIe spec at that time. However, the ATS spec
> is old and there could be some HW that could claim to be ATS compatible.
> I know AMD GPUs declare ATS capability.

It seems rather odd we have to check for ATS version.

I always assumed unspecified bits (Reserved) must be 0. We only check
this if ATS is enabled, and this particular bit wasn't given away for another
feature.

Is it really required to check for ATS version before consuming this?


> 
> See this ECN
> 
> https://composter.com.ua/documents/ats_r1.1_26Jan09.pdf
> 
> You need to validate the version field from ATS capability header to be
> 1 before reading this register.
> 
> See Table 5-1:  ATS Extended Capability Header
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCHv2 1/9] mm: Introduce new vm_insert_range and vm_insert_range_buggy API

2019-02-11 Thread Souptick Joarder
On Fri, Feb 8, 2019 at 10:52 AM Souptick Joarder  wrote:
>
> On Thu, Feb 7, 2019 at 10:17 PM Matthew Wilcox  wrote:
> >
> > On Thu, Feb 07, 2019 at 09:19:47PM +0530, Souptick Joarder wrote:
> > > Just thought to take opinion for documentation before placing it in v3.
> > > Does it looks fine ?
> > >
> > > +/**
> > > + * __vm_insert_range - insert range of kernel pages into user vma
> > > + * @vma: user vma to map to
> > > + * @pages: pointer to array of source kernel pages
> > > + * @num: number of pages in page array
> > > + * @offset: user's requested vm_pgoff
> > > + *
> > > + * This allow drivers to insert range of kernel pages into a user vma.
> > > + *
> > > + * Return: 0 on success and error code otherwise.
> > > + */
> > > +static int __vm_insert_range(struct vm_area_struct *vma, struct page 
> > > **pages,
> > > +   unsigned long num, unsigned long offset)
> >
> > For static functions, I prefer to leave off the second '*', ie make it
> > formatted like a docbook comment, but not be processed like a docbook
> > comment.  That avoids cluttering the html with descriptions of internal
> > functions that people can't actually call.
> >
> > > +/**
> > > + * vm_insert_range - insert range of kernel pages starts with non zero 
> > > offset
> > > + * @vma: user vma to map to
> > > + * @pages: pointer to array of source kernel pages
> > > + * @num: number of pages in page array
> > > + *
> > > + * Maps an object consisting of `num' `pages', catering for the user's
> >
> > Rather than using `num', you should use @num.
> >
> > > + * requested vm_pgoff
> > > + *
> > > + * If we fail to insert any page into the vma, the function will return
> > > + * immediately leaving any previously inserted pages present.  Callers
> > > + * from the mmap handler may immediately return the error as their caller
> > > + * will destroy the vma, removing any successfully inserted pages. Other
> > > + * callers should make their own arrangements for calling unmap_region().
> > > + *
> > > + * Context: Process context. Called by mmap handlers.
> > > + * Return: 0 on success and error code otherwise.
> > > + */
> > > +int vm_insert_range(struct vm_area_struct *vma, struct page **pages,
> > > +   unsigned long num)
> > >
> > >
> > > +/**
> > > + * vm_insert_range_buggy - insert range of kernel pages starts with zero 
> > > offset
> > > + * @vma: user vma to map to
> > > + * @pages: pointer to array of source kernel pages
> > > + * @num: number of pages in page array
> > > + *
> > > + * Similar to vm_insert_range(), except that it explicitly sets 
> > > @vm_pgoff to
> >
> > But vm_pgoff isn't a parameter, so it's misleading to format it as such.
> >
> > > + * 0. This function is intended for the drivers that did not consider
> > > + * @vm_pgoff.
> > > + *
> > > + * Context: Process context. Called by mmap handlers.
> > > + * Return: 0 on success and error code otherwise.
> > > + */
> > > +int vm_insert_range_buggy(struct vm_area_struct *vma, struct page 
> > > **pages,
> > > +   unsigned long num)
> >
> > I don't think we should call it 'buggy'.  'zero' would make more sense
> > as a suffix.
>
> suffix can be *zero or zero_offset* whichever suits better.
>
> >
> > Given how this interface has evolved, I'm no longer sure than
> > 'vm_insert_range' makes sense as the name for it.  Is it perhaps
> > 'vm_map_object' or 'vm_map_pages'?
> >
>
> I prefer vm_map_pages. Considering it, both the interface name can be changed
> to *vm_insert_range -> vm_map_pages* and *vm_insert_range_buggy ->
> vm_map_pages_{zero/zero_offset}.
>
> As this is only change in interface name and rest of code remain same
> shall I post it in v3 ( with additional change log mentioned about interface
> name changed) ?
>
> or,
>
> It will be a new patch series ( with carry forward all the Reviewed-by
> / Tested-by on
> vm_insert_range/ vm_insert_range_buggy ) ?

Any suggestion on this minor query ?
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 1/2] dma-mapping: add a kconfig symbol for arch_setup_dma_ops availability

2019-02-11 Thread Vineet Gupta
+CC Eugeniy

As resident ARC DMA expert can you please give this a quick spin.

-Vineet

On 2/4/19 12:14 AM, Christoph Hellwig wrote:
> Signed-off-by: Christoph Hellwig 
> ---
>  arch/arc/Kconfig |  1 +
>  arch/arc/include/asm/Kbuild  |  1 +
>  arch/arc/include/asm/dma-mapping.h   | 13 -
>  arch/arm/Kconfig |  1 +
>  arch/arm/include/asm/dma-mapping.h   |  4 
>  arch/arm64/Kconfig   |  1 +
>  arch/arm64/include/asm/dma-mapping.h |  4 
>  arch/mips/Kconfig|  1 +
>  arch/mips/include/asm/dma-mapping.h  | 10 --
>  arch/mips/mm/dma-noncoherent.c   |  8 
>  include/linux/dma-mapping.h  | 12 
>  kernel/dma/Kconfig   |  3 +++
>  12 files changed, 24 insertions(+), 35 deletions(-)
>  delete mode 100644 arch/arc/include/asm/dma-mapping.h
>
> diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
> index 376366a7db81..2ab27d88eb1c 100644
> --- a/arch/arc/Kconfig
> +++ b/arch/arc/Kconfig
> @@ -11,6 +11,7 @@ config ARC
>   select ARC_TIMERS
>   select ARCH_HAS_DMA_COHERENT_TO_PFN
>   select ARCH_HAS_PTE_SPECIAL
> + select ARCH_HAS_SETUP_DMA_OPS
>   select ARCH_HAS_SYNC_DMA_FOR_CPU
>   select ARCH_HAS_SYNC_DMA_FOR_DEVICE
>   select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
> diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
> index caa270261521..b41f8881ecc8 100644
> --- a/arch/arc/include/asm/Kbuild
> +++ b/arch/arc/include/asm/Kbuild
> @@ -3,6 +3,7 @@ generic-y += bugs.h
>  generic-y += compat.h
>  generic-y += device.h
>  generic-y += div64.h
> +generic-y += dma-mapping.h
>  generic-y += emergency-restart.h
>  generic-y += extable.h
>  generic-y += ftrace.h
> diff --git a/arch/arc/include/asm/dma-mapping.h 
> b/arch/arc/include/asm/dma-mapping.h
> deleted file mode 100644
> index c946c0a83e76..
> --- a/arch/arc/include/asm/dma-mapping.h
> +++ /dev/null
> @@ -1,13 +0,0 @@
> -// SPDX-License-Identifier:  GPL-2.0
> -// (C) 2018 Synopsys, Inc. (www.synopsys.com)
> -
> -#ifndef ASM_ARC_DMA_MAPPING_H
> -#define ASM_ARC_DMA_MAPPING_H
> -
> -#include 
> -
> -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
> - const struct iommu_ops *iommu, bool coherent);
> -#define arch_setup_dma_ops arch_setup_dma_ops
> -
> -#endif
> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
> index 664e918e2624..c1cf44f00870 100644
> --- a/arch/arm/Kconfig
> +++ b/arch/arm/Kconfig
> @@ -12,6 +12,7 @@ config ARM
>   select ARCH_HAS_MEMBARRIER_SYNC_CORE
>   select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
>   select ARCH_HAS_PHYS_TO_DMA
> + select ARCH_HAS_SETUP_DMA_OPS
>   select ARCH_HAS_SET_MEMORY
>   select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
>   select ARCH_HAS_STRICT_MODULE_RWX if MMU
> diff --git a/arch/arm/include/asm/dma-mapping.h 
> b/arch/arm/include/asm/dma-mapping.h
> index 31d3b96f0f4b..a224b6e39e58 100644
> --- a/arch/arm/include/asm/dma-mapping.h
> +++ b/arch/arm/include/asm/dma-mapping.h
> @@ -96,10 +96,6 @@ static inline unsigned long dma_max_pfn(struct device *dev)
>  }
>  #define dma_max_pfn(dev) dma_max_pfn(dev)
>  
> -#define arch_setup_dma_ops arch_setup_dma_ops
> -extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
> -const struct iommu_ops *iommu, bool coherent);
> -
>  #ifdef CONFIG_MMU
>  #define arch_teardown_dma_ops arch_teardown_dma_ops
>  extern void arch_teardown_dma_ops(struct device *dev);
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index a4168d366127..63909f318d56 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -22,6 +22,7 @@ config ARM64
>   select ARCH_HAS_KCOV
>   select ARCH_HAS_MEMBARRIER_SYNC_CORE
>   select ARCH_HAS_PTE_SPECIAL
> + select ARCH_HAS_SETUP_DMA_OPS
>   select ARCH_HAS_SET_MEMORY
>   select ARCH_HAS_STRICT_KERNEL_RWX
>   select ARCH_HAS_STRICT_MODULE_RWX
> diff --git a/arch/arm64/include/asm/dma-mapping.h 
> b/arch/arm64/include/asm/dma-mapping.h
> index 95dbf3ef735a..de96507ee2c1 100644
> --- a/arch/arm64/include/asm/dma-mapping.h
> +++ b/arch/arm64/include/asm/dma-mapping.h
> @@ -29,10 +29,6 @@ static inline const struct dma_map_ops 
> *get_arch_dma_ops(struct bus_type *bus)
>   return NULL;
>  }
>  
> -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
> - const struct iommu_ops *iommu, bool coherent);
> -#define arch_setup_dma_ops   arch_setup_dma_ops
> -
>  #ifdef CONFIG_IOMMU_DMA
>  void arch_teardown_dma_ops(struct device *dev);
>  #define arch_teardown_dma_opsarch_teardown_dma_ops
> diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
> index 0d14f51d0002..dc5d70f674e0 100644
> --- a/arch/mips/Kconfig
> +++ b/arch/mips/Kconfig
> @@ -1118,6 +1118,7 @@ config DMA_MAYBE_COHERENT
>  
>  config DMA_PERDEV_COHERENT
>   bool
> + select 

Re: [PATCH 16/19] dma-iommu: don't depend on CONFIG_DMA_DIRECT_REMAP

2019-02-11 Thread Christoph Hellwig
On Wed, Feb 06, 2019 at 11:55:49AM +, Robin Murphy wrote:
> On 14/01/2019 09:41, Christoph Hellwig wrote:
>> For entirely dma coherent architectures there is no good reason to ever
>> remap dma coherent allocation.
>
> Yes there is, namely assembling large buffers without the need for massive 
> CMA areas and compaction overhead under memory fragmentation. That has 
> always been a distinct concern from the DMA_DIRECT_REMAP cases; they've 
> just been able to share a fair few code paths.

Well, I guess I need to reword this - there is no _requirement_ to
remap.  And x86 has been happy to not remap so far and I see absolutely
no reason to force anyone to remap.

>>  Move all the remap and pool code under
>> CONFIG_DMA_DIRECT_REMAP ifdefs, and drop the Kconfig dependency.
>
> As far as I'm concerned that splits things the wrong way. Logically, 
> iommu_dma_alloc() should always have done its own vmap() instead of just 
> returning the bare pages array, but that was tricky to resolve with the 
> design of having the caller handle everything to do with coherency (forcing 
> the caller to unpick that mapping just to remap it yet again in the 
> noncoherent case didn't seem sensible).

I don't parse this.  In the old code base before this series
iommu_dma_alloc is a relatively low-level helper allocating and mapping
pages.  And that one should have done the remapping, and in fact does
so since ("dma-iommu: refactor page array remap helpers").  It just
happens that the function is now called iommu_dma_alloc_remap.

The new iommu_dma_alloc is the high level entry point that handles
every possible case of different allocations, including those where
we do not have a virtual mapping.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH v2] dma-mapping: Move debug configuration options to kernel/dma

2019-02-11 Thread Andy Shevchenko
This is a follow up to the commit cf65a0f6f6ff

  ("dma-mapping: move all DMA mapping code to kernel/dma")

which moved source code of DMA API to kernel/dma folder. Since there is
no file left in the lib that require DMA API debugging options move the
latter to kernel/dma as well.

Cc: Christoph Hellwig 
Signed-off-by: Andy Shevchenko 
---

v2: Move to kernel/dma/Kconfig directly

 kernel/dma/Kconfig | 36 
 lib/Kconfig.debug  | 36 
 2 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index ca88b867e7fe..61cebea36d89 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -53,3 +53,39 @@ config DMA_REMAP
 config DMA_DIRECT_REMAP
bool
select DMA_REMAP
+
+config DMA_API_DEBUG
+   bool "Enable debugging of DMA-API usage"
+   select NEED_DMA_MAP_STATE
+   help
+ Enable this option to debug the use of the DMA API by device drivers.
+ With this option you will be able to detect common bugs in device
+ drivers like double-freeing of DMA mappings or freeing mappings that
+ were never allocated.
+
+ This also attempts to catch cases where a page owned by DMA is
+ accessed by the cpu in a way that could cause data corruption.  For
+ example, this enables cow_user_page() to check that the source page is
+ not undergoing DMA.
+
+ This option causes a performance degradation.  Use only if you want to
+ debug device drivers and dma interactions.
+
+ If unsure, say N.
+
+config DMA_API_DEBUG_SG
+   bool "Debug DMA scatter-gather usage"
+   default y
+   depends on DMA_API_DEBUG
+   help
+ Perform extra checking that callers of dma_map_sg() have respected the
+ appropriate segment length/boundary limits for the given device when
+ preparing DMA scatterlists.
+
+ This is particularly likely to have been overlooked in cases where the
+ dma_map_sg() API is used for general bulk mapping of pages rather than
+ preparing literal scatter-gather descriptors, where there is a risk of
+ unexpected behaviour from DMA API implementations if the scatterlist
+ is technically out-of-spec.
+
+ If unsure, say N.
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d4df5b24d75e..ef5d7c08e5b9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1655,42 +1655,6 @@ config PROVIDE_OHCI1394_DMA_INIT
 
  See Documentation/debugging-via-ohci1394.txt for more information.
 
-config DMA_API_DEBUG
-   bool "Enable debugging of DMA-API usage"
-   select NEED_DMA_MAP_STATE
-   help
- Enable this option to debug the use of the DMA API by device drivers.
- With this option you will be able to detect common bugs in device
- drivers like double-freeing of DMA mappings or freeing mappings that
- were never allocated.
-
- This also attempts to catch cases where a page owned by DMA is
- accessed by the cpu in a way that could cause data corruption.  For
- example, this enables cow_user_page() to check that the source page is
- not undergoing DMA.
-
- This option causes a performance degradation.  Use only if you want to
- debug device drivers and dma interactions.
-
- If unsure, say N.
-
-config DMA_API_DEBUG_SG
-   bool "Debug DMA scatter-gather usage"
-   default y
-   depends on DMA_API_DEBUG
-   help
- Perform extra checking that callers of dma_map_sg() have respected the
- appropriate segment length/boundary limits for the given device when
- preparing DMA scatterlists.
-
- This is particularly likely to have been overlooked in cases where the
- dma_map_sg() API is used for general bulk mapping of pages rather than
- preparing literal scatter-gather descriptors, where there is a risk of
- unexpected behaviour from DMA API implementations if the scatterlist
- is technically out-of-spec.
-
- If unsure, say N.
-
 menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
def_bool y
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v1] dma-mapping: Move debug configuration options to kernel/dma

2019-02-11 Thread Andy Shevchenko
On Mon, Feb 11, 2019 at 04:54:50PM +0100, Christoph Hellwig wrote:
> On Mon, Feb 11, 2019 at 05:54:09PM +0200, Andy Shevchenko wrote:
> > This is a follow up to the commit cf65a0f6f6ff
> > 
> >   ("dma-mapping: move all DMA mapping code to kernel/dma")
> > 
> > which moved source code of DMA API to kernel/dma folder. Since there is
> > no file left in the lib that require DMA API debugging options move the
> > latter to kernel/dma as well.
> 
> Can we do this without the separate Kconfig.debug file?

Sure.

> 
> Otherwise this looks good to me.

-- 
With Best Regards,
Andy Shevchenko


___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 06/19] dma-iommu: fix and refactor iommu_dma_mmap

2019-02-11 Thread Christoph Hellwig
On Tue, Feb 05, 2019 at 03:02:23PM +, Robin Murphy wrote:
> On 14/01/2019 09:41, Christoph Hellwig wrote:
>> The current iommu_dma_mmap code does not properly handle memory from the
>> page allocator that hasn't been remapped, which can happen in the rare
>> case of allocations for a coherent device that aren't allowed to block.
>>
>> Fix this by replacing iommu_dma_mmap with a slightly tweaked copy of
>> dma_common_mmap with special handling for the remapped array of
>> pages allocated from __iommu_dma_alloc.
>
> If there's an actual bugfix here, can we make that before all of the other 
> code movement? If it's at all related to other reports of weird mmap 
> behaviour it might warrant backporting, and either way I'm finding it 
> needlessly tough to follow what's going on in this patch :(

The bug fix is to handle non-vmalloc pages.  I'll see if I can do
a smaller and more bandaid-y fix first.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 03/19] dma-iommu: don't use a scatterlist in iommu_dma_alloc

2019-02-11 Thread Christoph Hellwig
On Wed, Feb 06, 2019 at 03:28:28PM +, Robin Murphy wrote:
> Because if iommu_map() only gets called at PAGE_SIZE granularity, then the 
> IOMMU PTEs will be created at PAGE_SIZE (or smaller) granularity, so any 
> effort to get higher-order allocations matching larger IOMMU block sizes is 
> wasted, and we may as well have just done this:
>
>   for (i = 0; i < count; i++) {
>   struct page *page = alloc_page(gfp);
>   ...
>   iommu_map(..., page_to_phys(page), PAGE_SIZE, ...);
>   }

True.  I've dropped this patch.

> Really, it's a shame we have to split huge pages for the CPU remap, since 
> in the common case the CPU MMU will have a matching block size, but IIRC 
> there was something in vmap() or thereabouts that explicitly chokes on 
> them.

That just needs a volunteer to fix the implementation, as there is no
fundamental reason not to remap large pages.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 02/19] dma-iommu: cleanup dma-iommu.h

2019-02-11 Thread Christoph Hellwig
On Wed, Feb 06, 2019 at 03:08:26PM +, Robin Murphy wrote:
> Other than dma-iommu.c itself, none of them *require* it - only arch/arm64 
> selects it (the one from MTK_IOMMU is just bogus), and a lot of the drivers 
> also build for at least one other architecture (and/or arm64 with 
> !IOMMU_API).
>
> Either way, I have no vehement objection to the change, I just don't see 
> any positive value in it.

I've moved the ifdef back down below the includes.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v1] dma-mapping: Move debug configuration options to kernel/dma

2019-02-11 Thread Christoph Hellwig
On Mon, Feb 11, 2019 at 05:54:09PM +0200, Andy Shevchenko wrote:
> This is a follow up to the commit cf65a0f6f6ff
> 
>   ("dma-mapping: move all DMA mapping code to kernel/dma")
> 
> which moved source code of DMA API to kernel/dma folder. Since there is
> no file left in the lib that require DMA API debugging options move the
> latter to kernel/dma as well.

Can we do this without the separate Kconfig.debug file?

Otherwise this looks good to me.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH v1] dma-mapping: Move debug configuration options to kernel/dma

2019-02-11 Thread Andy Shevchenko
This is a follow up to the commit cf65a0f6f6ff

  ("dma-mapping: move all DMA mapping code to kernel/dma")

which moved source code of DMA API to kernel/dma folder. Since there is
no file left in the lib that require DMA API debugging options move the
latter to kernel/dma as well.

Cc: Christoph Hellwig 
Signed-off-by: Andy Shevchenko 
---
 kernel/dma/Kconfig   |  2 ++
 kernel/dma/Kconfig.debug | 36 
 lib/Kconfig.debug| 36 
 3 files changed, 38 insertions(+), 36 deletions(-)
 create mode 100644 kernel/dma/Kconfig.debug

diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index ca88b867e7fe..cba73df57982 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -53,3 +53,5 @@ config DMA_REMAP
 config DMA_DIRECT_REMAP
bool
select DMA_REMAP
+
+source "kernel/dma/Kconfig.debug"
diff --git a/kernel/dma/Kconfig.debug b/kernel/dma/Kconfig.debug
new file mode 100644
index ..2015976cf4ca
--- /dev/null
+++ b/kernel/dma/Kconfig.debug
@@ -0,0 +1,36 @@
+config DMA_API_DEBUG
+   bool "Enable debugging of DMA-API usage"
+   select NEED_DMA_MAP_STATE
+   help
+ Enable this option to debug the use of the DMA API by device drivers.
+ With this option you will be able to detect common bugs in device
+ drivers like double-freeing of DMA mappings or freeing mappings that
+ were never allocated.
+
+ This also attempts to catch cases where a page owned by DMA is
+ accessed by the cpu in a way that could cause data corruption.  For
+ example, this enables cow_user_page() to check that the source page is
+ not undergoing DMA.
+
+ This option causes a performance degradation.  Use only if you want to
+ debug device drivers and dma interactions.
+
+ If unsure, say N.
+
+config DMA_API_DEBUG_SG
+   bool "Debug DMA scatter-gather usage"
+   default y
+   depends on DMA_API_DEBUG
+   help
+ Perform extra checking that callers of dma_map_sg() have respected the
+ appropriate segment length/boundary limits for the given device when
+ preparing DMA scatterlists.
+
+ This is particularly likely to have been overlooked in cases where the
+ dma_map_sg() API is used for general bulk mapping of pages rather than
+ preparing literal scatter-gather descriptors, where there is a risk of
+ unexpected behaviour from DMA API implementations if the scatterlist
+ is technically out-of-spec.
+
+ If unsure, say N.
+
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d4df5b24d75e..ef5d7c08e5b9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1655,42 +1655,6 @@ config PROVIDE_OHCI1394_DMA_INIT
 
  See Documentation/debugging-via-ohci1394.txt for more information.
 
-config DMA_API_DEBUG
-   bool "Enable debugging of DMA-API usage"
-   select NEED_DMA_MAP_STATE
-   help
- Enable this option to debug the use of the DMA API by device drivers.
- With this option you will be able to detect common bugs in device
- drivers like double-freeing of DMA mappings or freeing mappings that
- were never allocated.
-
- This also attempts to catch cases where a page owned by DMA is
- accessed by the cpu in a way that could cause data corruption.  For
- example, this enables cow_user_page() to check that the source page is
- not undergoing DMA.
-
- This option causes a performance degradation.  Use only if you want to
- debug device drivers and dma interactions.
-
- If unsure, say N.
-
-config DMA_API_DEBUG_SG
-   bool "Debug DMA scatter-gather usage"
-   default y
-   depends on DMA_API_DEBUG
-   help
- Perform extra checking that callers of dma_map_sg() have respected the
- appropriate segment length/boundary limits for the given device when
- preparing DMA scatterlists.
-
- This is particularly likely to have been overlooked in cases where the
- dma_map_sg() API is used for general bulk mapping of pages rather than
- preparing literal scatter-gather descriptors, where there is a risk of
- unexpected behaviour from DMA API implementations if the scatterlist
- is technically out-of-spec.
-
- If unsure, say N.
-
 menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
def_bool y
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH] fix flush_tlb_all typo

2019-02-11 Thread Tom Murphy
Fix typo, flush_tlb_all should be flush_iotlb_all

Signed-off-by: Tom Murphy 
---
 include/linux/iommu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a1d28f42cb77..bb4bf1269e5d 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -167,7 +167,7 @@ struct iommu_resv_region {
  * @detach_dev: detach device from an iommu domain
  * @map: map a physically contiguous memory region to an iommu domain
  * @unmap: unmap a physically contiguous memory region from an iommu domain
- * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
  * @tlb_range_add: Add a given iova range to the flush queue for this domain
  * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
  *queue
-- 
2.17.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH V4 2/3] HYPERV/IOMMU: Add Hyper-V stub IOMMU driver

2019-02-11 Thread lantianyu1986
From: Lan Tianyu 

On the bare metal, enabling X2APIC mode requires interrupt remapping
function which helps to deliver irq to cpu with 32-bit APIC ID.
Hyper-V doesn't provide interrupt remapping function so far and Hyper-V
MSI protocol already supports to deliver interrupt to the CPU whose
virtual processor index is more than 255. IO-APIC interrupt still has
8-bit APIC ID limitation.

This patch is to add Hyper-V stub IOMMU driver in order to enable
X2APIC mode successfully in Hyper-V Linux guest. The driver returns X2APIC
interrupt remapping capability when X2APIC mode is available. Otherwise,
it creates a Hyper-V irq domain to limit IO-APIC interrupts' affinity
and make sure cpus assigned with IO-APIC interrupt have 8-bit APIC ID.

Define 24 IO-APIC remapping entries because Hyper-V only exposes one
single IO-APIC, and one IO-APIC has 24 pins according to the IO-APIC spec
(https://pdos.csail.mit.edu/6.828/2016/readings/ia32/ioapic.pdf).

Signed-off-by: Lan Tianyu 
---
Change since v3:
   - Make Hyper-V IOMMU as Hyper-V default driver
   - Fix hypervisor_is_type() input parameter
   - Check possible cpu numbers during scan 0~255 cpu's apic id.

Change since v2:
   - Improve comment about why save IO-APIC entry in the irq chip data.
   - Some code improvement.
   - Improve statement in the IOMMU Kconfig.

Change since v1:
  - Remove unused pr_fmt
  - Make ioapic_ir_domain as static variable
  - Remove unused variables cfg and entry in the 
hyperv_irq_remapping_alloc()
  - Fix comments
---
 drivers/iommu/Kconfig |   9 ++
 drivers/iommu/Makefile|   1 +
 drivers/iommu/hyperv-iommu.c  | 194 ++
 drivers/iommu/irq_remapping.c |   3 +
 drivers/iommu/irq_remapping.h |   1 +
 5 files changed, 208 insertions(+)
 create mode 100644 drivers/iommu/hyperv-iommu.c

diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 45d7021..6f07f3b 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -437,4 +437,13 @@ config QCOM_IOMMU
help
  Support for IOMMU on certain Qualcomm SoCs.
 
+config HYPERV_IOMMU
+   bool "Hyper-V x2APIC IRQ Handling"
+   depends on HYPERV
+   select IOMMU_API
+   default HYPERV
+   help
+ Stub IOMMU driver to handle IRQs as to allow Hyper-V Linux
+ guests to run with x2APIC mode enabled.
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index a158a68..8c71a15 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
 obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
 obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
 obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
+obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
new file mode 100644
index 000..c61240e0
--- /dev/null
+++ b/drivers/iommu/hyperv-iommu.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Hyper-V stub IOMMU driver.
+ *
+ * Copyright (C) 2019, Microsoft, Inc.
+ *
+ * Author : Lan Tianyu 
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+#include "irq_remapping.h"
+
+#ifdef CONFIG_IRQ_REMAP
+
+/*
+ * According to the 82093AA IO-APIC spec, the IO-APIC has a 24-entry Interrupt
+ * Redirection Table. Hyper-V exposes one single IO-APIC and so we define
+ * 24 IO-APIC remapping entries.
+ */
+#define IOAPIC_REMAPPING_ENTRY 24
+
+static cpumask_t ioapic_max_cpumask = { CPU_BITS_NONE };
+static struct irq_domain *ioapic_ir_domain;
+
+static int hyperv_ir_set_affinity(struct irq_data *data,
+   const struct cpumask *mask, bool force)
+{
+   struct irq_data *parent = data->parent_data;
+   struct irq_cfg *cfg = irqd_cfg(data);
+   struct IO_APIC_route_entry *entry;
+   int ret;
+
+   /* Return error If new irq affinity is out of ioapic_max_cpumask. */
+   if (!cpumask_subset(mask, &ioapic_max_cpumask))
+   return -EINVAL;
+
+   ret = parent->chip->irq_set_affinity(parent, mask, force);
+   if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+   return ret;
+
+   entry = data->chip_data;
+   entry->dest = cfg->dest_apicid;
+   entry->vector = cfg->vector;
+   send_cleanup_vector(cfg);
+
+   return 0;
+}
+
+static struct irq_chip hyperv_ir_chip = {
+   .name   = "HYPERV-IR",
+   .irq_ack= apic_ack_irq,
+   .irq_set_affinity   = hyperv_ir_set_affinity,
+};
+
+static int hyperv_irq_remapping_alloc(struct irq_domain *domain,
+unsigned int virq, unsigned int nr_irqs,
+void *arg)
+{
+   struct irq_alloc_info *info = arg;
+   struct irq_data *irq_data;
+   struct irq_desc *desc;
+   int ret = 0;
+
+   if (!info || info->type != X86_IRQ_ALLOC_TYPE_IOAPIC || nr_irqs 

[PATCH V4 1/3] x86/Hyper-V: Set x2apic destination mode to physical when x2apic is available

2019-02-11 Thread lantianyu1986
From: Lan Tianyu 

Hyper-V doesn't provide irq remapping for IO-APIC. To enable x2apic,
set x2apic destination mode to physical mode when x2apic is available
and Hyper-V IOMMU driver makes sure cpus assigned with IO-APIC irqs have
8-bit APIC id.

Reviewed-by: Thomas Gleixner 
Signed-off-by: Lan Tianyu 
---
Change since v2:
   - Fix compile error due to x2apic_phys
   - Fix comment indent
Change since v1:
   - Remove redundant extern for x2apic_phys
---
 arch/x86/kernel/cpu/mshyperv.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index e81a2db..0c29e4e 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -328,6 +328,16 @@ static void __init ms_hyperv_init_platform(void)
 # ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
 # endif
+
+   /*
+* Hyper-V doesn't provide irq remapping for IO-APIC. To enable x2apic,
+* set x2apic destination mode to physical mode when x2apic is available
+* and Hyper-V IOMMU driver makes sure cpus assigned with IO-APIC irqs
+* have 8-bit APIC id.
+*/
+   if (IS_ENABLED(CONFIG_X86_X2APIC) && x2apic_supported())
+   x2apic_phys = 1;
+
 #endif
 }
 
-- 
2.7.4

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH V4 0/3] x86/Hyper-V/IOMMU: Add Hyper-V IOMMU driver to support x2apic mode

2019-02-11 Thread lantianyu1986
From: Lan Tianyu 

On the bare metal, enabling X2APIC mode requires interrupt remapping
function which helps to deliver irq to cpu with 32-bit APIC ID.
Hyper-V doesn't provide interrupt remapping function so far and Hyper-V
MSI protocol already supports to deliver interrupt to the CPU whose
virtual processor index is more than 255. IO-APIC interrupt still has
8-bit APIC ID limitation.

This patchset is to add Hyper-V stub IOMMU driver in order to enable
X2APIC mode successfully in Hyper-V Linux guest. The driver returns X2APIC
interrupt remapping capability when X2APIC mode is available. X2APIC
destination mode is set to physical by PATCH 1 when X2APIC is available.
Hyper-V IOMMU driver will scan cpu 0~255 and set cpu into IO-APIC MAX cpu
affinity cpumask if its APIC ID is 8-bit. Driver creates a Hyper-V irq domain
to limit IO-APIC interrupts' affinity and make sure cpus assigned with IO-APIC
interrupt are in the scope of IO-APIC MAX cpu affinity.

Lan Tianyu (3):
  x86/Hyper-V: Set x2apic destination mode to physical when x2apic is   
 available
  HYPERV/IOMMU: Add Hyper-V stub IOMMU driver
  MAINTAINERS: Add Hyper-V IOMMU driver into Hyper-V CORE AND DRIVERS
scope

 MAINTAINERS|   1 +
 arch/x86/kernel/cpu/mshyperv.c |  10 +++
 drivers/iommu/Kconfig  |   9 ++
 drivers/iommu/Makefile |   1 +
 drivers/iommu/hyperv-iommu.c   | 194 +
 drivers/iommu/irq_remapping.c  |   3 +
 drivers/iommu/irq_remapping.h  |   1 +
 7 files changed, 219 insertions(+)
 create mode 100644 drivers/iommu/hyperv-iommu.c

-- 
2.7.4

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: add config symbols for arch_{setup,teardown}_dma_ops

2019-02-11 Thread Catalin Marinas
On Mon, Feb 11, 2019 at 02:21:56PM +0100, Christoph Hellwig wrote:
> Any chance to get a quick review on this small series?

For arm64:

Acked-by: Catalin Marinas 
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 12/12] dma-mapping: remove dma_assign_coherent_memory

2019-02-11 Thread Christoph Hellwig
The only useful bit in this function was the already assigned check.
Once that is moved to dma_init_coherent_memory the rest can easily
be handled in the two callers.

Signed-off-by: Christoph Hellwig 
---
 kernel/dma/coherent.c | 47 +--
 1 file changed, 14 insertions(+), 33 deletions(-)

diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index d7a27008f228..1e3ce71cd993 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -41,6 +41,9 @@ static int dma_init_coherent_memory(phys_addr_t phys_addr,
int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
int ret;
 
+   if (*mem)
+   return -EBUSY;
+
if (!size) {
ret = -EINVAL;
goto out;
@@ -88,33 +91,11 @@ static void dma_release_coherent_memory(struct 
dma_coherent_mem *mem)
kfree(mem);
 }
 
-static int dma_assign_coherent_memory(struct device *dev,
- struct dma_coherent_mem *mem)
-{
-   if (!dev)
-   return -ENODEV;
-
-   if (dev->dma_mem)
-   return -EBUSY;
-
-   dev->dma_mem = mem;
-   return 0;
-}
-
 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size)
 {
-   struct dma_coherent_mem *mem;
-   int ret;
-
-   ret = dma_init_coherent_memory(phys_addr, device_addr, size, );
-   if (ret)
-   return ret;
-
-   ret = dma_assign_coherent_memory(dev, mem);
-   if (ret)
-   dma_release_coherent_memory(mem);
-   return ret;
+   return dma_init_coherent_memory(phys_addr, device_addr, size,
+   >dma_mem);
 }
 EXPORT_SYMBOL(dma_declare_coherent_memory);
 
@@ -238,18 +219,18 @@ static int rmem_dma_device_init(struct reserved_mem 
*rmem, struct device *dev)
struct dma_coherent_mem *mem = rmem->priv;
int ret;
 
-   if (!mem) {
-   ret = dma_init_coherent_memory(rmem->base, rmem->base,
-  rmem->size, );
-   if (ret) {
-   pr_err("Reserved memory: failed to init DMA memory pool 
at %pa, size %ld MiB\n",
-   >base, (unsigned long)rmem->size / SZ_1M);
-   return ret;
-   }
+   ret = dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
+   );
+   if (ret && ret != -EBUSY) {
+   pr_err("Reserved memory: failed to init DMA memory pool at %pa, 
size %ld MiB\n",
+   >base, (unsigned long)rmem->size / SZ_1M);
+   return ret;
}
+
mem->use_dev_dma_pfn_offset = true;
+   if (dev)
+   dev->dma_mem = mem;
rmem->priv = mem;
-   dma_assign_coherent_memory(dev, mem);
return 0;
 }
 
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 09/12] dma-mapping: remove the DMA_MEMORY_EXCLUSIVE flag

2019-02-11 Thread Christoph Hellwig
All users of dma_declare_coherent want their allocations to be
exclusive, so default to exclusive allocations.

Signed-off-by: Christoph Hellwig 
---
 Documentation/DMA-API.txt |  9 +--
 arch/arm/mach-imx/mach-imx27_visstrim_m10.c   | 12 +++--
 arch/arm/mach-imx/mach-mx31moboard.c  |  3 +--
 arch/sh/boards/mach-ap325rxa/setup.c  |  5 ++--
 arch/sh/boards/mach-ecovec24/setup.c  |  6 ++---
 arch/sh/boards/mach-kfr2r09/setup.c   |  5 ++--
 arch/sh/boards/mach-migor/setup.c |  5 ++--
 arch/sh/boards/mach-se/7724/setup.c   |  6 ++---
 arch/sh/drivers/pci/fixups-dreamcast.c|  3 +--
 .../soc_camera/sh_mobile_ceu_camera.c |  3 +--
 drivers/usb/host/ohci-sm501.c |  3 +--
 drivers/usb/host/ohci-tmio.c  |  2 +-
 include/linux/dma-mapping.h   |  7 ++
 kernel/dma/coherent.c | 25 ++-
 14 files changed, 29 insertions(+), 65 deletions(-)

diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index b9d0cba83877..38e561b773b4 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -566,8 +566,7 @@ boundaries when doing this.
 
int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-   dma_addr_t device_addr, size_t size, int
-   flags)
+   dma_addr_t device_addr, size_t size);
 
 Declare region of memory to be handed out by dma_alloc_coherent() when
 it's asked for coherent memory for this device.
@@ -581,12 +580,6 @@ dma_addr_t in dma_alloc_coherent()).
 
 size is the size of the area (must be multiples of PAGE_SIZE).
 
-flags can be ORed together and are:
-
-- DMA_MEMORY_EXCLUSIVE - only allocate memory from the declared regions.
-  Do not allow dma_alloc_coherent() to fall back to system memory when
-  it's out of memory in the declared region.
-
 As a simplification for the platforms, only *one* such region of
 memory may be declared per device.
 
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c 
b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index 5169dfba9718..07d4fcfe5c2e 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -258,8 +258,7 @@ static void __init visstrim_analog_camera_init(void)
return;
 
dma_declare_coherent_memory(>dev, mx2_camera_base,
-   mx2_camera_base, MX2_CAMERA_BUF_SIZE,
-   DMA_MEMORY_EXCLUSIVE);
+   mx2_camera_base, MX2_CAMERA_BUF_SIZE);
 }
 
 static void __init visstrim_reserve(void)
@@ -445,8 +444,7 @@ static void __init visstrim_coda_init(void)
dma_declare_coherent_memory(>dev,
mx2_camera_base + MX2_CAMERA_BUF_SIZE,
mx2_camera_base + MX2_CAMERA_BUF_SIZE,
-   MX2_CAMERA_BUF_SIZE,
-   DMA_MEMORY_EXCLUSIVE);
+   MX2_CAMERA_BUF_SIZE);
 }
 
 /* DMA deinterlace */
@@ -465,8 +463,7 @@ static void __init visstrim_deinterlace_init(void)
dma_declare_coherent_memory(>dev,
mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE,
mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE,
-   MX2_CAMERA_BUF_SIZE,
-   DMA_MEMORY_EXCLUSIVE);
+   MX2_CAMERA_BUF_SIZE);
 }
 
 /* Emma-PrP for format conversion */
@@ -485,8 +482,7 @@ static void __init visstrim_emmaprp_init(void)
 */
ret = dma_declare_coherent_memory(>dev,
mx2_camera_base, mx2_camera_base,
-   MX2_CAMERA_BUF_SIZE,
-   DMA_MEMORY_EXCLUSIVE);
+   MX2_CAMERA_BUF_SIZE);
if (ret)
pr_err("Failed to declare memory for emmaprp\n");
 }
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c 
b/arch/arm/mach-imx/mach-mx31moboard.c
index 643a3d749703..fe50f4cf00a7 100644
--- a/arch/arm/mach-imx/mach-mx31moboard.c
+++ b/arch/arm/mach-imx/mach-mx31moboard.c
@@ -475,8 +475,7 @@ static int __init mx31moboard_init_cam(void)
 
ret = dma_declare_coherent_memory(>dev,
  mx3_camera_base, mx3_camera_base,
- MX3_CAMERA_BUF_SIZE,
- DMA_MEMORY_EXCLUSIVE);
+ MX3_CAMERA_BUF_SIZE);
if (ret)
goto err;
 
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c 
b/arch/sh/boards/mach-ap325rxa/setup.c
index 8f234d0435aa..7899b4f51fdd 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ 

[PATCH 10/12] dma-mapping: simplify allocations from per-device coherent memory

2019-02-11 Thread Christoph Hellwig
All users of per-device coherent memory are exclusive, that is if we can't
allocate from the per-device pool we can't use the system memory either.
Unfold the current dma_{alloc,free}_from_dev_coherent implementation and
always use the per-device pool if it exists.

Signed-off-by: Christoph Hellwig 
---
 arch/arm/mm/dma-mapping-nommu.c | 12 ++---
 include/linux/dma-mapping.h | 14 ++
 kernel/dma/coherent.c   | 89 -
 kernel/dma/internal.h   | 19 +++
 kernel/dma/mapping.c| 12 +++--
 5 files changed, 55 insertions(+), 91 deletions(-)
 create mode 100644 kernel/dma/internal.h

diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index f304b10e23a4..c72f024f1e82 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -70,16 +70,10 @@ static void arm_nommu_dma_free(struct device *dev, size_t 
size,
   void *cpu_addr, dma_addr_t dma_addr,
   unsigned long attrs)
 {
-   if (attrs & DMA_ATTR_NON_CONSISTENT) {
+   if (attrs & DMA_ATTR_NON_CONSISTENT)
dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
-   } else {
-   int ret = dma_release_from_global_coherent(get_order(size),
-  cpu_addr);
-
-   WARN_ON_ONCE(ret == 0);
-   }
-
-   return;
+   else
+   dma_release_from_global_coherent(size, cpu_addr);
 }
 
 static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index b12fba725f19..018e37a0870e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -158,30 +158,24 @@ static inline int is_device_dma_capable(struct device 
*dev)
  * These three functions are only for dma allocator.
  * Don't use them in device drivers.
  */
-int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
-  dma_addr_t *dma_handle, void **ret);
-int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
-
 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
 
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
-int dma_release_from_global_coherent(int order, void *vaddr);
+void *dma_alloc_from_global_coherent(size_t size, dma_addr_t *dma_handle);
+void dma_release_from_global_coherent(size_t size, void *vaddr);
 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
  size_t size, int *ret);
 
 #else
-#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
 
-static inline void *dma_alloc_from_global_coherent(ssize_t size,
+static inline void *dma_alloc_from_global_coherent(size_t size,
   dma_addr_t *dma_handle)
 {
return NULL;
 }
 
-static inline int dma_release_from_global_coherent(int order, void *vaddr)
+static inline void dma_release_from_global_coherent(size_t size, void *vaddr)
 {
return 0;
 }
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 29fd6590dc1e..d1da1048e470 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -8,6 +8,7 @@
 #include 
 #include 
 #include 
+#include "internal.h"
 
 struct dma_coherent_mem {
void*virt_base;
@@ -21,13 +22,6 @@ struct dma_coherent_mem {
 
 static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
 
-static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device 
*dev)
-{
-   if (dev && dev->dma_mem)
-   return dev->dma_mem;
-   return NULL;
-}
-
 static inline dma_addr_t dma_get_device_base(struct device *dev,
 struct dma_coherent_mem * mem)
 {
@@ -135,8 +129,8 @@ void dma_release_declared_memory(struct device *dev)
 }
 EXPORT_SYMBOL(dma_release_declared_memory);
 
-static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
-   ssize_t size, dma_addr_t *dma_handle)
+void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, size_t size,
+   dma_addr_t *dma_handle)
 {
int order = get_order(size);
unsigned long flags;
@@ -165,33 +159,7 @@ static void *__dma_alloc_from_coherent(struct 
dma_coherent_mem *mem,
return NULL;
 }
 
-/**
- * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
- * @dev:   device from which we allocate memory
- * @size:  size of requested memory area
- * @dma_handle:This will be filled with the correct dma handle
- * @ret:   This pointer will be filled with the 

[PATCH 11/12] dma-mapping: handle per-device coherent memory mmap in common code

2019-02-11 Thread Christoph Hellwig
We handle allocation and freeing in common code, so we should handle
mmap the same way.  Also all users of per-device coherent memory are
exclusive, that is if we can't allocate from the per-device pool we
can't use the system memory either.  Unfold the current
dma_mmap_from_dev_coherent implementation and always use the
per-device pool if it exists.

Signed-off-by: Christoph Hellwig 
---
 arch/arm/mm/dma-mapping-nommu.c |  7 ++--
 arch/arm/mm/dma-mapping.c   |  3 --
 arch/arm64/mm/dma-mapping.c |  3 --
 include/linux/dma-mapping.h | 11 ++-
 kernel/dma/coherent.c   | 58 -
 kernel/dma/internal.h   |  2 ++
 kernel/dma/mapping.c|  8 ++---
 7 files changed, 24 insertions(+), 68 deletions(-)

diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index c72f024f1e82..4eeb7e5d9c07 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -80,11 +80,8 @@ static int arm_nommu_dma_mmap(struct device *dev, struct 
vm_area_struct *vma,
  void *cpu_addr, dma_addr_t dma_addr, size_t size,
  unsigned long attrs)
 {
-   int ret;
-
-   if (dma_mmap_from_global_coherent(vma, cpu_addr, size, ))
-   return ret;
-
+   if (!(attrs & DMA_ATTR_NON_CONSISTENT))
+   return dma_mmap_from_global_coherent(vma, cpu_addr, size);
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 3c8534904209..e2993e5a7166 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -830,9 +830,6 @@ static int __arm_dma_mmap(struct device *dev, struct 
vm_area_struct *vma,
unsigned long pfn = dma_to_pfn(dev, dma_addr);
unsigned long off = vma->vm_pgoff;
 
-   if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, ))
-   return ret;
-
if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
ret = remap_pfn_range(vma, vma->vm_start,
  pfn + off,
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 78c0a72f822c..a55be91c1d1a 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -246,9 +246,6 @@ static int __iommu_mmap_attrs(struct device *dev, struct 
vm_area_struct *vma,
 
vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
 
-   if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, ))
-   return ret;
-
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
/*
 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 018e37a0870e..ae6fe66f97b7 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -158,17 +158,12 @@ static inline int is_device_dma_capable(struct device 
*dev)
  * These three functions are only for dma allocator.
  * Don't use them in device drivers.
  */
-int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
-   void *cpu_addr, size_t size, int *ret);
-
 void *dma_alloc_from_global_coherent(size_t size, dma_addr_t *dma_handle);
 void dma_release_from_global_coherent(size_t size, void *vaddr);
 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
- size_t size, int *ret);
+ size_t size);
 
 #else
-#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
-
 static inline void *dma_alloc_from_global_coherent(size_t size,
   dma_addr_t *dma_handle)
 {
@@ -177,12 +172,10 @@ static inline void *dma_alloc_from_global_coherent(size_t 
size,
 
 static inline void dma_release_from_global_coherent(size_t size, void *vaddr)
 {
-   return 0;
 }
 
 static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
-   void *cpu_addr, size_t size,
-   int *ret)
+   void *cpu_addr, size_t size)
 {
return 0;
 }
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index d1da1048e470..d7a27008f228 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -197,60 +197,30 @@ void dma_release_from_global_coherent(size_t size, void 
*vaddr)
__dma_release_from_coherent(dma_coherent_default_memory, size, vaddr);
 }
 
-static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
-   struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+   struct vm_area_struct *vma, void *vaddr, size_t size)
 {
-   if (mem && vaddr >= mem->virt_base && vaddr + size <=
-  

[PATCH 03/12] of: mark early_init_dt_alloc_reserved_memory_arch static

2019-02-11 Thread Christoph Hellwig
This function is only used in of_reserved_mem.c, and never overridden
despite the __weak marker.

Signed-off-by: Christoph Hellwig 
---
 drivers/of/of_reserved_mem.c| 2 +-
 include/linux/of_reserved_mem.h | 7 ---
 2 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 1977ee0adcb1..9f165fc1d1a2 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -26,7 +26,7 @@
 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
 static int reserved_mem_count;
 
-int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
 {
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 67ab8d271df3..60f541912ccf 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -35,13 +35,6 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
   struct device_node *np, int idx);
 void of_reserved_mem_device_release(struct device *dev);
 
-int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
-phys_addr_t align,
-phys_addr_t start,
-phys_addr_t end,
-bool nomap,
-phys_addr_t *res_base);
-
 void fdt_init_reserved_mem(void);
 void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
   phys_addr_t base, phys_addr_t size);
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 05/12] dma-mapping: remove an incorrect __iomem annotation

2019-02-11 Thread Christoph Hellwig
memmap returns a regular void pointer, not an __iomem one.

Signed-off-by: Christoph Hellwig 
---
 kernel/dma/coherent.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 66f0fb7e9a3a..4b76aba574c2 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -43,7 +43,7 @@ static int dma_init_coherent_memory(
struct dma_coherent_mem **mem)
 {
struct dma_coherent_mem *dma_mem = NULL;
-   void __iomem *mem_base = NULL;
+   void *mem_base = NULL;
int pages = size >> PAGE_SHIFT;
int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
int ret;
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 08/12] dma-mapping: remove dma_mark_declared_memory_occupied

2019-02-11 Thread Christoph Hellwig
This API is not used anywhere, so remove it.

Signed-off-by: Christoph Hellwig 
---
 Documentation/DMA-API.txt   | 17 -
 include/linux/dma-mapping.h |  9 -
 kernel/dma/coherent.c   | 23 ---
 3 files changed, 49 deletions(-)

diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 78114ee63057..b9d0cba83877 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -605,23 +605,6 @@ unconditionally having removed all the required 
structures.  It is the
 driver's job to ensure that no parts of this memory region are
 currently in use.
 
-::
-
-   void *
-   dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-
-This is used to occupy specific regions of the declared space
-(dma_alloc_coherent() will hand out the first free region it finds).
-
-device_addr is the *device* address of the region requested.
-
-size is the size (and should be a page-sized multiple).
-
-The return value will be either a pointer to the processor virtual
-address of the memory, or an error (via PTR_ERR()) if any part of the
-region is occupied.
-
 Part III - Debug drivers use of the DMA-API
 ---
 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index fde0cfc71824..9df0f4d318c5 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -735,8 +735,6 @@ static inline int dma_get_cache_alignment(void)
 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags);
 void dma_release_declared_memory(struct device *dev);
-void *dma_mark_declared_memory_occupied(struct device *dev,
-   dma_addr_t device_addr, size_t size);
 #else
 static inline int
 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
@@ -749,13 +747,6 @@ static inline void
 dma_release_declared_memory(struct device *dev)
 {
 }
-
-static inline void *
-dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size)
-{
-   return ERR_PTR(-EBUSY);
-}
 #endif /* CONFIG_DMA_DECLARE_COHERENT */
 
 static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 4b76aba574c2..1d12a31af6d7 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -137,29 +137,6 @@ void dma_release_declared_memory(struct device *dev)
 }
 EXPORT_SYMBOL(dma_release_declared_memory);
 
-void *dma_mark_declared_memory_occupied(struct device *dev,
-   dma_addr_t device_addr, size_t size)
-{
-   struct dma_coherent_mem *mem = dev->dma_mem;
-   unsigned long flags;
-   int pos, err;
-
-   size += device_addr & ~PAGE_MASK;
-
-   if (!mem)
-   return ERR_PTR(-EINVAL);
-
-   spin_lock_irqsave(>spinlock, flags);
-   pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
-   err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
-   spin_unlock_irqrestore(>spinlock, flags);
-
-   if (err != 0)
-   return ERR_PTR(err);
-   return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
 static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
ssize_t size, dma_addr_t *dma_handle)
 {
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 06/12] dma-mapping: improve selection of dma_declare_coherent availability

2019-02-11 Thread Christoph Hellwig
This API is primarily used through DT entries, but two architectures
and two drivers call it directly.  So instead of selecting the config
symbol for random architectures pull it in implicitly for the actual
users.  Also rename the Kconfig option to describe the feature better.

Signed-off-by: Christoph Hellwig 
---
 arch/arc/Kconfig| 1 -
 arch/arm/Kconfig| 2 +-
 arch/arm64/Kconfig  | 1 -
 arch/csky/Kconfig   | 1 -
 arch/mips/Kconfig   | 1 -
 arch/riscv/Kconfig  | 1 -
 arch/sh/Kconfig | 2 +-
 arch/unicore32/Kconfig  | 1 -
 arch/x86/Kconfig| 1 -
 drivers/mfd/Kconfig | 2 ++
 drivers/of/Kconfig  | 3 ++-
 include/linux/device.h  | 2 +-
 include/linux/dma-mapping.h | 8 
 kernel/dma/Kconfig  | 2 +-
 kernel/dma/Makefile | 2 +-
 15 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 4103f23b6cea..56e9397542e0 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -30,7 +30,6 @@ config ARC
select HAVE_ARCH_TRACEHOOK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_FUTEX_CMPXCHG if FUTEX
-   select HAVE_GENERIC_DMA_COHERENT
select HAVE_IOREMAP_PROT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9395f138301a..25fbbd3cb91d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -30,6 +30,7 @@ config ARM
select CLONE_BACKWARDS
select CPU_PM if SUSPEND || CPU_IDLE
select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
+   select DMA_DECLARE_COHERENT
select DMA_REMAP if MMU
select EDAC_SUPPORT
select EDAC_ATOMIC_SCRUB
@@ -72,7 +73,6 @@ config ARM
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_GCC_PLUGINS
-   select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || 
CPU_V7)
select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_IRQ_TIME_ACCOUNTING
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1d22e969bdcb..d558461a5107 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -137,7 +137,6 @@ config ARM64
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GCC_PLUGINS
-   select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_MEMBLOCK_NODE_MAP if NUMA
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 0a9595afe9be..c009a8c63946 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -30,7 +30,6 @@ config CSKY
select HAVE_ARCH_TRACEHOOK
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
-   select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 0d14f51d0002..ba50dc2d37dc 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -56,7 +56,6 @@ config MIPS
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
-   select HAVE_GENERIC_DMA_COHERENT
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index feeeaa60697c..51b9c97751bf 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -32,7 +32,6 @@ config RISCV
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_CONTIGUOUS
select HAVE_FUTEX_CMPXCHG if FUTEX
-   select HAVE_GENERIC_DMA_COHERENT
select HAVE_PERF_EVENTS
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index a9c36f95744a..a3d2a24e75c7 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -7,11 +7,11 @@ config SUPERH
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select HAVE_PATA_PLATFORM
select CLKDEV_LOOKUP
+   select DMA_DECLARE_COHERENT
select HAVE_IDE if HAS_IOPORT_MAP
select HAVE_MEMBLOCK_NODE_MAP
select ARCH_DISCARD_MEMBLOCK
select HAVE_OPROFILE
-   select HAVE_GENERIC_DMA_COHERENT
select HAVE_ARCH_TRACEHOOK
select HAVE_PERF_EVENTS
select HAVE_DEBUG_BUGVERBOSE
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index c3a41bfe161b..6d2891d37e32 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -4,7 +4,6 @@ config UNICORE32
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
-   select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select GENERIC_ATOMIC64
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig

[PATCH 04/12] of: select OF_RESERVED_MEM automatically

2019-02-11 Thread Christoph Hellwig
The OF_RESERVED_MEM can be used if we have either CMA or the generic
declare coherent code built and we support the early flattened DT.

So don't bother making it a user visible options that is selected
by most configs that fit the above category, but just select it when
the requirements are met.

Signed-off-by: Christoph Hellwig 
---
 arch/arc/Kconfig | 1 -
 arch/arm/Kconfig | 1 -
 arch/arm64/Kconfig   | 1 -
 arch/csky/Kconfig| 1 -
 arch/powerpc/Kconfig | 1 -
 arch/xtensa/Kconfig  | 1 -
 drivers/of/Kconfig   | 5 ++---
 7 files changed, 2 insertions(+), 9 deletions(-)

diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 376366a7db81..4103f23b6cea 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -44,7 +44,6 @@ config ARC
select MODULES_USE_ELF_RELA
select OF
select OF_EARLY_FLATTREE
-   select OF_RESERVED_MEM
select PCI_SYSCALL if PCI
select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 664e918e2624..9395f138301a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -101,7 +101,6 @@ config ARM
select MODULES_USE_ELF_REL
select NEED_DMA_MAP_STATE
select OF_EARLY_FLATTREE if OF
-   select OF_RESERVED_MEM if OF
select OLD_SIGACTION
select OLD_SIGSUSPEND3
select PCI_SYSCALL if PCI
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a4168d366127..1d22e969bdcb 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -163,7 +163,6 @@ config ARM64
select NEED_SG_DMA_LENGTH
select OF
select OF_EARLY_FLATTREE
-   select OF_RESERVED_MEM
select PCI_DOMAINS_GENERIC if PCI
select PCI_ECAM if (ACPI && PCI)
select PCI_SYSCALL if PCI
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 398113c845f5..0a9595afe9be 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -42,7 +42,6 @@ config CSKY
select MODULES_USE_ELF_RELA if MODULES
select OF
select OF_EARLY_FLATTREE
-   select OF_RESERVED_MEM
select PERF_USE_VMALLOC if CPU_CK610
select RTC_LIB
select TIMER_OF
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2890d36eb531..5cc4eea362c6 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -233,7 +233,6 @@ config PPC
select NEED_SG_DMA_LENGTH
select OF
select OF_EARLY_FLATTREE
-   select OF_RESERVED_MEM
select OLD_SIGACTIONif PPC32
select OLD_SIGSUSPEND
select PCI_DOMAINS  if PCI
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 20a0756f27ef..e242a405151e 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -447,7 +447,6 @@ config USE_OF
bool "Flattened Device Tree support"
select OF
select OF_EARLY_FLATTREE
-   select OF_RESERVED_MEM
help
  Include support for flattened device tree machine descriptions.
 
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index ad3fcad4d75b..3607fd2810e4 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -81,10 +81,9 @@ config OF_MDIO
  OpenFirmware MDIO bus (Ethernet PHY) accessors
 
 config OF_RESERVED_MEM
-   depends on OF_EARLY_FLATTREE
bool
-   help
- Helpers to allow for reservation of memory regions
+   depends on OF_EARLY_FLATTREE
+   default y if HAVE_GENERIC_DMA_COHERENT || DMA_CMA
 
 config OF_RESOLVE
bool
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 02/12] device.h: dma_mem is only needed for HAVE_GENERIC_DMA_COHERENT

2019-02-11 Thread Christoph Hellwig
No need to carry an unused field around.

Signed-off-by: Christoph Hellwig 
---
 include/linux/device.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/include/linux/device.h b/include/linux/device.h
index 6cb4640b6160..be544400acdd 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1017,8 +1017,10 @@ struct device {
 
struct list_headdma_pools;  /* dma pools (if dma'ble) */
 
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
 override */
+#endif
 #ifdef CONFIG_DMA_CMA
struct cma *cma_area;   /* contiguous memory area for dma
   allocations */
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 01/12] mfd/sm501: depend on HAS_DMA

2019-02-11 Thread Christoph Hellwig
Currently the sm501 mfd driver can be compiled without any dependencies,
but through the use of dma_declare_coherent it really depends on
having DMA and iomem support.  Normally we don't explicitly require DMA
support as we have stubs for it if on UML, but in this case the driver
selects support for dma_declare_coherent and thus also requires
memmap support.  Guard this by an explicit dependency.

Signed-off-by: Christoph Hellwig 
---
 drivers/mfd/Kconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f461460a2aeb..f15f6489803d 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1066,6 +1066,7 @@ config MFD_SI476X_CORE
 
 config MFD_SM501
tristate "Silicon Motion SM501"
+   depends on HAS_DMA
 ---help---
  This is the core driver for the Silicon Motion SM501 multimedia
  companion chip. This device is a multifunction device which may
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 07/12] dma-mapping: move CONFIG_DMA_CMA to kernel/dma/Kconfig

2019-02-11 Thread Christoph Hellwig
This is where all the related code already lives.

Signed-off-by: Christoph Hellwig 
---
 drivers/base/Kconfig | 77 
 kernel/dma/Kconfig   | 77 
 2 files changed, 77 insertions(+), 77 deletions(-)

diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 3e63a900b330..059700ea3521 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -191,83 +191,6 @@ config DMA_FENCE_TRACE
  lockup related problems for dma-buffers shared across multiple
  devices.
 
-config DMA_CMA
-   bool "DMA Contiguous Memory Allocator"
-   depends on HAVE_DMA_CONTIGUOUS && CMA
-   help
- This enables the Contiguous Memory Allocator which allows drivers
- to allocate big physically-contiguous blocks of memory for use with
- hardware components that do not support I/O map nor scatter-gather.
-
- You can disable CMA by specifying "cma=0" on the kernel's command
- line.
-
- For more information see <include/linux/dma-contiguous.h>.
- If unsure, say "n".
-
-if  DMA_CMA
-comment "Default contiguous memory area size:"
-
-config CMA_SIZE_MBYTES
-   int "Size in Mega Bytes"
-   depends on !CMA_SIZE_SEL_PERCENTAGE
-   default 0 if X86
-   default 16
-   help
- Defines the size (in MiB) of the default memory area for Contiguous
- Memory Allocator.  If the size of 0 is selected, CMA is disabled by
- default, but it can be enabled by passing cma=size[MG] to the kernel.
-
-
-config CMA_SIZE_PERCENTAGE
-   int "Percentage of total memory"
-   depends on !CMA_SIZE_SEL_MBYTES
-   default 0 if X86
-   default 10
-   help
- Defines the size of the default memory area for Contiguous Memory
- Allocator as a percentage of the total memory in the system.
- If 0 percent is selected, CMA is disabled by default, but it can be
- enabled by passing cma=size[MG] to the kernel.
-
-choice
-   prompt "Selected region size"
-   default CMA_SIZE_SEL_MBYTES
-
-config CMA_SIZE_SEL_MBYTES
-   bool "Use mega bytes value only"
-
-config CMA_SIZE_SEL_PERCENTAGE
-   bool "Use percentage value only"
-
-config CMA_SIZE_SEL_MIN
-   bool "Use lower value (minimum)"
-
-config CMA_SIZE_SEL_MAX
-   bool "Use higher value (maximum)"
-
-endchoice
-
-config CMA_ALIGNMENT
-   int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
-   range 4 12
-   default 8
-   help
- DMA mapping framework by default aligns all buffers to the smallest
- PAGE_SIZE order which is greater than or equal to the requested buffer
- size. This works well for buffers up to a few hundreds kilobytes, but
- for larger buffers it just a memory waste. With this parameter you can
- specify the maximum PAGE_SIZE order for contiguous buffers. Larger
- buffers will be aligned only to this specified order. The order is
- expressed as a power of two multiplied by the PAGE_SIZE.
-
- For example, if your system defaults to 4KiB pages, the order value
- of 8 means that the buffers will be aligned up to 1MiB only.
-
- If unsure, leave the default value "8".
-
-endif
-
 config GENERIC_ARCH_TOPOLOGY
bool
help
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index b122ab100d66..d785286ad868 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -53,3 +53,80 @@ config DMA_REMAP
 config DMA_DIRECT_REMAP
bool
select DMA_REMAP
+
+config DMA_CMA
+   bool "DMA Contiguous Memory Allocator"
+   depends on HAVE_DMA_CONTIGUOUS && CMA
+   help
+ This enables the Contiguous Memory Allocator which allows drivers
+ to allocate big physically-contiguous blocks of memory for use with
+ hardware components that do not support I/O map nor scatter-gather.
+
+ You can disable CMA by specifying "cma=0" on the kernel's command
+ line.
+
+ For more information see <include/linux/dma-contiguous.h>.
+ If unsure, say "n".
+
+if  DMA_CMA
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+   int "Size in Mega Bytes"
+   depends on !CMA_SIZE_SEL_PERCENTAGE
+   default 0 if X86
+   default 16
+   help
+ Defines the size (in MiB) of the default memory area for Contiguous
+ Memory Allocator.  If the size of 0 is selected, CMA is disabled by
+ default, but it can be enabled by passing cma=size[MG] to the kernel.
+
+
+config CMA_SIZE_PERCENTAGE
+   int "Percentage of total memory"
+   depends on !CMA_SIZE_SEL_MBYTES
+   default 0 if X86
+   default 10
+   help
+ Defines the size of the default memory area for Contiguous Memory
+ Allocator as a percentage of the total memory in the system.
+ If 0 percent is selected, CMA is disabled by default, but it can be
+ enabled by passing cma=size[MG] 

dma_declare_coherent spring cleaning

2019-02-11 Thread Christoph Hellwig
Hi all,

this series removes various bits of dead code and refactors the
remaining functionality around dma_declare_coherent to be a somewhat
more coherent code base.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 12/18] fotg210-udc: remove a bogus dma_sync_single_for_device call

2019-02-11 Thread Felipe Balbi

Hi,

Christoph Hellwig  writes:
> On Fri, Feb 01, 2019 at 05:10:26PM +0100, Christoph Hellwig wrote:
>> On Fri, Feb 01, 2019 at 03:19:41PM +0200, Felipe Balbi wrote:
>> > Christoph Hellwig  writes:
>> > 
>> > > dma_map_single already transfers ownership to the device.
>> > >
>> > > Signed-off-by: Christoph Hellwig 
>> > 
>> > Do you want me to take the USB bits or will you take the entire series?
>> > In case you're taking the entire series:
>> 
>> If you want to take the USB feel free.  I just want most of this in
>> this merge window if possible.
>
> I didn't see in the USB tree yet, so please let me know if you want to
> take it.

sorry for the delay, just took it to my tree.

-- 
balbi


signature.asc
Description: PGP signature
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

[PATCH 1/3] da8xx-fb: pass struct device to DMA API functions

2019-02-11 Thread Christoph Hellwig
The DMA API generally relies on a struct device to work properly, and
only barely works without one for legacy reasons.  Pass the easily
available struct device from the platform_device to remedy this.

Signed-off-by: Christoph Hellwig 
---
 drivers/video/fbdev/da8xx-fb.c | 13 +++--
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 43f2a4816860..ec62274b914b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1097,9 +1097,9 @@ static int fb_remove(struct platform_device *dev)
 
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
-   dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base,
+   dma_free_coherent(par->dev, PALETTE_SIZE, par->v_palette_base,
  par->p_palette_base);
-   dma_free_coherent(NULL, par->vram_size, par->vram_virt,
+   dma_free_coherent(par->dev, par->vram_size, par->vram_virt,
  par->vram_phys);
pm_runtime_put_sync(&dev->dev);
pm_runtime_disable(&dev->dev);
@@ -1425,7 +1425,7 @@ static int fb_probe(struct platform_device *device)
par->vram_size = roundup(par->vram_size/8, ulcm);
par->vram_size = par->vram_size * LCD_NUM_BUFFERS;
 
-   par->vram_virt = dma_alloc_coherent(NULL,
+   par->vram_virt = dma_alloc_coherent(par->dev,
par->vram_size,
&par->vram_phys,
GFP_KERNEL | GFP_DMA);
@@ -1446,7 +1446,7 @@ static int fb_probe(struct platform_device *device)
da8xx_fb_fix.line_length - 1;
 
/* allocate palette buffer */
-   par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE,
+   par->v_palette_base = dma_alloc_coherent(par->dev, PALETTE_SIZE,
 &par->p_palette_base,
 GFP_KERNEL | GFP_DMA);
if (!par->v_palette_base) {
@@ -1532,11 +1532,12 @@ static int fb_probe(struct platform_device *device)
fb_dealloc_cmap(&da8xx_fb_info->cmap);
 
 err_release_pl_mem:
-   dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base,
+   dma_free_coherent(par->dev, PALETTE_SIZE, par->v_palette_base,
  par->p_palette_base);
 
 err_release_fb_mem:
-   dma_free_coherent(NULL, par->vram_size, par->vram_virt, par->vram_phys);
+   dma_free_coherent(par->dev, par->vram_size, par->vram_virt,
+ par->vram_phys);
 
 err_release_fb:
framebuffer_release(da8xx_fb_info);
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


fbdev: don't pass a NULL struct device to DMA API functions v2

2019-02-11 Thread Christoph Hellwig
We still have a few drivers which pass a NULL struct device pointer
to DMA API functions, which generally is a bad idea as the API
implementations rely on the device not only for ops selection, but
also the dma mask and various other attributes.

This series contains all easy conversions to pass a struct device,
besides that there also is some arch code that needs separate handling,
a driver that should not use the DMA API at all, and one that is
a complete basket case to be deal with separately.

Changes since v1:
 - split the series, this only contains the fbdev patches
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 3/3] pxa3xx-gcu: pass struct device to dma_mmap_coherent

2019-02-11 Thread Christoph Hellwig
Just like we do for all other DMA operations.

Signed-off-by: Christoph Hellwig 
---
 drivers/video/fbdev/pxa3xx-gcu.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 69cfb337c857..047a2fa4b87e 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -96,6 +96,7 @@ struct pxa3xx_gcu_batch {
 };
 
 struct pxa3xx_gcu_priv {
+   struct device*dev;
void __iomem *mmio_base;
struct clk   *clk;
struct pxa3xx_gcu_shared *shared;
@@ -493,7 +494,7 @@ pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct 
*vma)
if (size != SHARED_SIZE)
return -EINVAL;
 
-   return dma_mmap_coherent(NULL, vma,
+   return dma_mmap_coherent(priv->dev, vma,
priv->shared, priv->shared_phys, size);
 
case SHARED_SIZE >> PAGE_SHIFT:
@@ -670,6 +671,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
 
platform_set_drvdata(pdev, priv);
priv->resource_mem = r;
+   priv->dev = dev;
pxa3xx_gcu_reset(priv);
pxa3xx_gcu_init_debug_timer(priv);
 
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 2/3] gbefb: switch to managed version of the DMA allocator

2019-02-11 Thread Christoph Hellwig
gbefb uses managed resources, so it should do the same for DMA
allocations.

Signed-off-by: Christoph Hellwig 
---
 drivers/video/fbdev/gbefb.c | 24 
 1 file changed, 8 insertions(+), 16 deletions(-)

diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index 1a242b1338e9..3fcb33232ba3 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1162,9 +1162,9 @@ static int gbefb_probe(struct platform_device *p_dev)
}
gbe_revision = gbe->ctrlstat & 15;
 
-   gbe_tiles.cpu =
-   dma_alloc_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
-  &gbe_tiles.dma, GFP_KERNEL);
+   gbe_tiles.cpu = dmam_alloc_coherent(&p_dev->dev,
+   GBE_TLB_SIZE * sizeof(uint16_t),
+   &gbe_tiles.dma, GFP_KERNEL);
if (!gbe_tiles.cpu) {
printk(KERN_ERR "gbefb: couldn't allocate tiles table\n");
ret = -ENOMEM;
@@ -1178,19 +1178,20 @@ static int gbefb_probe(struct platform_device *p_dev)
if (!gbe_mem) {
printk(KERN_ERR "gbefb: couldn't map framebuffer\n");
ret = -ENOMEM;
-   goto out_tiles_free;
+   goto out_release_mem_region;
}
 
gbe_dma_addr = 0;
} else {
/* try to allocate memory with the classical allocator
 * this has high chance to fail on low memory machines */
-   gbe_mem = dma_alloc_wc(NULL, gbe_mem_size, &gbe_dma_addr,
-  GFP_KERNEL);
+   gbe_mem = dmam_alloc_attrs(&p_dev->dev, gbe_mem_size,
+   &gbe_dma_addr, GFP_KERNEL,
+   DMA_ATTR_WRITE_COMBINE);
if (!gbe_mem) {
printk(KERN_ERR "gbefb: couldn't allocate framebuffer 
memory\n");
ret = -ENOMEM;
-   goto out_tiles_free;
+   goto out_release_mem_region;
}
 
gbe_mem_phys = (unsigned long) gbe_dma_addr;
@@ -1237,11 +1238,6 @@ static int gbefb_probe(struct platform_device *p_dev)
 
 out_gbe_unmap:
arch_phys_wc_del(par->wc_cookie);
-   if (gbe_dma_addr)
-   dma_free_wc(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
-out_tiles_free:
-   dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
- (void *)gbe_tiles.cpu, gbe_tiles.dma);
 out_release_mem_region:
release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
 out_release_framebuffer:
@@ -1258,10 +1254,6 @@ static int gbefb_remove(struct platform_device* p_dev)
unregister_framebuffer(info);
gbe_turn_off();
arch_phys_wc_del(par->wc_cookie);
-   if (gbe_dma_addr)
-   dma_free_wc(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
-   dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
- (void *)gbe_tiles.cpu, gbe_tiles.dma);
release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
gbefb_remove_sysfs(_dev->dev);
framebuffer_release(info);
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: add config symbols for arch_{setup,teardown}_dma_ops

2019-02-11 Thread Christoph Hellwig
Any chance to get a quick review on this small series?

On Mon, Feb 04, 2019 at 09:14:18AM +0100, Christoph Hellwig wrote:
> Hi all,
> 
> this series adds kconfig symbols to indicate that the architecture
> provides the arch_setup_dma_ops and arch_teardown_dma_ops hooks.
> 
> This avoids polluting dma-mapping.h which is included by just about
> every driver with implementation details, and also removes some
> clutter.
> ___
> iommu mailing list
> iommu@lists.linux-foundation.org
> https://lists.linuxfoundation.org/mailman/listinfo/iommu
---end quoted text---
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 8/8] smc911x: pass struct device to DMA API functions

2019-02-11 Thread Christoph Hellwig
The DMA API generally relies on a struct device to work properly, and
only barely works without one for legacy reasons.  Pass the easily
available struct device from the platform_device to remedy this.

Note that smc911x apparently is a PIO chip with an external DMA
handshake, and we probably use the wrong device here.  But at least
it matches the mapping side, which apparently works or at least
worked in the not too distant past.

Signed-off-by: Christoph Hellwig 
---
 drivers/net/ethernet/smsc/smc911x.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/smsc/smc911x.c 
b/drivers/net/ethernet/smsc/smc911x.c
index 8355dfbb8ec3..b550e624500d 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1188,7 +1188,7 @@ smc911x_tx_dma_irq(void *data)
 
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
BUG_ON(skb == NULL);
-   dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
+   dma_unmap_single(lp->dev, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
netif_trans_update(dev);
dev_kfree_skb_irq(skb);
lp->current_tx_skb = NULL;
@@ -1219,7 +1219,7 @@ smc911x_rx_dma_irq(void *data)
 
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
-   dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
+   dma_unmap_single(lp->dev, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
BUG_ON(skb == NULL);
lp->current_rx_skb = NULL;
PRINT_PKT(skb->data, skb->len);
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 4/8] lantiq_etop: pass struct device to DMA API functions

2019-02-11 Thread Christoph Hellwig
The DMA API generally relies on a struct device to work properly, and
only barely works without one for legacy reasons.  Pass the easily
available struct device from the platform_device to remedy this.

Note this driver seems to lack dma_unmap_* calls entirely, but fixing
that is left for another time.

Signed-off-by: Christoph Hellwig 
---
 drivers/net/ethernet/lantiq_etop.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/lantiq_etop.c 
b/drivers/net/ethernet/lantiq_etop.c
index 32ac9045cdae..f9bb890733b5 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -112,10 +112,12 @@ struct ltq_etop_priv {
 static int
 ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
 {
+   struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+
ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
if (!ch->skb[ch->dma.desc])
return -ENOMEM;
-   ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
+   ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev,
ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
DMA_FROM_DEVICE);
ch->dma.desc_base[ch->dma.desc].addr =
@@ -487,7 +489,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
netif_trans_update(dev);
 
spin_lock_irqsave(&priv->lock, flags);
-   desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
+   desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, 
skb->data, len,
DMA_TO_DEVICE)) - byte_offset;
wmb();
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 7/8] meth: pass struct device to DMA API functions

2019-02-11 Thread Christoph Hellwig
The DMA API generally relies on a struct device to work properly, and
only barely works without one for legacy reasons.  Pass the easily
available struct device from the platform_device to remedy this.

Also use GFP_KERNEL instead of GFP_ATOMIC as the gfp_t for the memory
allocation, as we aren't in interrupt context or under a lock.

Signed-off-by: Christoph Hellwig 
---
 drivers/net/ethernet/sgi/meth.c | 25 +++--
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 0e1b7e960b98..67954a9e3675 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -68,6 +68,8 @@ module_param(timeout, int, 0);
  * packets in and out, so there is place for a packet
  */
 struct meth_private {
+   struct platform_device *pdev;
+
/* in-memory copy of MAC Control register */
u64 mac_ctrl;
 
@@ -211,8 +213,8 @@ static void meth_check_link(struct net_device *dev)
 static int meth_init_tx_ring(struct meth_private *priv)
 {
/* Init TX ring */
-   priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
-  &priv->tx_ring_dma, GFP_ATOMIC);
+   priv->tx_ring = dma_alloc_coherent(&priv->pdev->dev,
+   TX_RING_BUFFER_SIZE, &priv->tx_ring_dma, GFP_KERNEL);
if (!priv->tx_ring)
return -ENOMEM;
 
@@ -236,7 +238,7 @@ static int meth_init_rx_ring(struct meth_private *priv)
priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head);
/* I'll need to re-sync it after each RX */
priv->rx_ring_dmas[i] =
-   dma_map_single(NULL, priv->rx_ring[i],
dma_map_single(&priv->pdev->dev, priv->rx_ring[i],
   METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
mace->eth.rx_fifo = priv->rx_ring_dmas[i];
}
@@ -253,7 +255,7 @@ static void meth_free_tx_ring(struct meth_private *priv)
dev_kfree_skb(priv->tx_skbs[i]);
priv->tx_skbs[i] = NULL;
}
-   dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring,
-   dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring,
  priv->tx_ring_dma);
 }
 
@@ -263,7 +265,7 @@ static void meth_free_rx_ring(struct meth_private *priv)
int i;
 
for (i = 0; i < RX_RING_ENTRIES; i++) {
-   dma_unmap_single(NULL, priv->rx_ring_dmas[i],
+   dma_unmap_single(&priv->pdev->dev, priv->rx_ring_dmas[i],
 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
priv->rx_ring[i] = 0;
priv->rx_ring_dmas[i] = 0;
@@ -393,7 +395,8 @@ static void meth_rx(struct net_device* dev, unsigned long 
int_status)
fifo_rptr = (fifo_rptr - 1) & 0x0f;
}
while (priv->rx_write != fifo_rptr) {
-   dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write],
+   dma_unmap_single(&priv->pdev->dev,
+priv->rx_ring_dmas[priv->rx_write],
 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
status = priv->rx_ring[priv->rx_write]->status.raw;
 #if MFE_DEBUG
@@ -454,7 +457,8 @@ static void meth_rx(struct net_device* dev, unsigned long 
int_status)
priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head;
priv->rx_ring[priv->rx_write]->status.raw = 0;
priv->rx_ring_dmas[priv->rx_write] =
-   dma_map_single(NULL, priv->rx_ring[priv->rx_write],
+   dma_map_single(&priv->pdev->dev,
+  priv->rx_ring[priv->rx_write],
   METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
ADVANCE_RX_PTR(priv->rx_write);
@@ -637,7 +641,7 @@ static void meth_tx_1page_prepare(struct meth_private *priv,
}
 
/* first page */
-   catbuf = dma_map_single(NULL, buffer_data, buffer_len,
+   catbuf = dma_map_single(&priv->pdev->dev, buffer_data, buffer_len,
DMA_TO_DEVICE);
desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
desc->data.cat_buf[0].form.len = buffer_len - 1;
@@ -663,12 +667,12 @@ static void meth_tx_2page_prepare(struct meth_private 
*priv,
}
 
/* first page */
+   catbuf1 = dma_map_single(&priv->pdev->dev, buffer1_data, buffer1_len,
+   catbuf1 = dma_map_single(>pdev->dev, buffer1_data, buffer1_len,
 DMA_TO_DEVICE);
desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
desc->data.cat_buf[0].form.len = buffer1_len - 1;
/* second page */
-   catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len,
+   catbuf2 = dma_map_single(&priv->pdev->dev, buffer2_data, buffer2_len,
   

[PATCH 6/8] moxart_ether: pass struct device to DMA API functions

2019-02-11 Thread Christoph Hellwig
The DMA API generally relies on a struct device to work properly, and
only barely works without one for legacy reasons.  Pass the easily
available struct device from the platform_device to remedy this.

Signed-off-by: Christoph Hellwig 
---
 drivers/net/ethernet/moxa/moxart_ether.c | 11 +++
 drivers/net/ethernet/moxa/moxart_ether.h |  1 +
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/moxa/moxart_ether.c 
b/drivers/net/ethernet/moxa/moxart_ether.c
index b34055ac476f..00dec0ffb11b 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -81,11 +81,13 @@ static void moxart_mac_free_memory(struct net_device *ndev)
 priv->rx_buf_size, DMA_FROM_DEVICE);
 
if (priv->tx_desc_base)
-   dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
+   dma_free_coherent(&priv->pdev->dev,
+ TX_REG_DESC_SIZE * TX_DESC_NUM,
  priv->tx_desc_base, priv->tx_base);
 
if (priv->rx_desc_base)
-   dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
+   dma_free_coherent(&priv->pdev->dev,
+ RX_REG_DESC_SIZE * RX_DESC_NUM,
  priv->rx_desc_base, priv->rx_base);
 
kfree(priv->tx_buf_base);
@@ -476,6 +478,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
 
priv = netdev_priv(ndev);
priv->ndev = ndev;
+   priv->pdev = pdev;
 
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ndev->base_addr = res->start;
@@ -491,7 +494,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE;
 
-   priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
+   priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->tx_desc_base) {
@@ -499,7 +502,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
goto init_fail;
}
 
+   priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
RX_DESC_NUM, &priv->rx_base,
RX_DESC_NUM, >rx_base,
GFP_DMA | GFP_KERNEL);
if (!priv->rx_desc_base) {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h 
b/drivers/net/ethernet/moxa/moxart_ether.h
index bee608b547d1..bf4c3029cd0c 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.h
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -292,6 +292,7 @@
 #define LINK_STATUS0x4
 
 struct moxart_mac_priv_t {
+   struct platform_device *pdev;
void __iomem *base;
unsigned int reg_maccr;
unsigned int reg_imr;
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 3/8] macb_main: pass struct device to DMA API functions

2019-02-11 Thread Christoph Hellwig
The DMA API generally relies on a struct device to work properly, and
only barely works without one for legacy reasons.  Pass the easily
available struct device from the platform_device to remedy this.

Signed-off-by: Christoph Hellwig 
Acked-by: Nicolas Ferre 
---
 drivers/net/ethernet/cadence/macb_main.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/cadence/macb_main.c 
b/drivers/net/ethernet/cadence/macb_main.c
index eaabe8c278ec..4d1509f431d7 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3673,9 +3673,9 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff 
*skb,
/* Store packet information (to free when Tx completed) */
lp->skb = skb;
lp->skb_length = skb->len;
-   lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
-   DMA_TO_DEVICE);
-   if (dma_mapping_error(NULL, lp->skb_physaddr)) {
+   lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+   if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
netdev_err(dev, "%s: DMA mapping error\n", __func__);
@@ -3765,7 +3765,7 @@ static irqreturn_t at91ether_interrupt(int irq, void 
*dev_id)
if (lp->skb) {
dev_kfree_skb_irq(lp->skb);
lp->skb = NULL;
-   dma_unmap_single(NULL, lp->skb_physaddr,
+   dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
 lp->skb_length, DMA_TO_DEVICE);
dev->stats.tx_packets++;
dev->stats.tx_bytes += lp->skb_length;
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 2/8] au1000_eth: pass struct device to DMA API functions

2019-02-11 Thread Christoph Hellwig
The DMA API generally relies on a struct device to work properly, and
only barely works without one for legacy reasons.  Pass the easily
available struct device from the platform_device to remedy this.

Signed-off-by: Christoph Hellwig 
---
 drivers/net/ethernet/amd/au1000_eth.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/amd/au1000_eth.c 
b/drivers/net/ethernet/amd/au1000_eth.c
index e833d1b3fe18..e5073aeea06a 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1167,7 +1167,7 @@ static int au1000_probe(struct platform_device *pdev)
/* Allocate the data buffers
 * Snooping works fine with eth on all au1xxx
 */
-   aup->vaddr = (u32)dma_alloc_attrs(NULL, MAX_BUF_SIZE *
+   aup->vaddr = (u32)dma_alloc_attrs(&pdev->dev, MAX_BUF_SIZE *
  (NUM_TX_BUFFS + NUM_RX_BUFFS),
  &aup->dma_addr, 0,
  DMA_ATTR_NON_CONSISTENT);
@@ -1349,7 +1349,7 @@ static int au1000_probe(struct platform_device *pdev)
 err_remap2:
iounmap(aup->mac);
 err_remap1:
-   dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+   dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
(void *)aup->vaddr, aup->dma_addr,
DMA_ATTR_NON_CONSISTENT);
 err_vaddr:
@@ -1383,7 +1383,7 @@ static int au1000_remove(struct platform_device *pdev)
if (aup->tx_db_inuse[i])
au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
 
-   dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+   dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
(void *)aup->vaddr, aup->dma_addr,
DMA_ATTR_NON_CONSISTENT);
 
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 5/8] pxa168_eth: pass struct device to DMA API functions

2019-02-11 Thread Christoph Hellwig
The DMA API generally relies on a struct device to work properly, and
only barely works without one for legacy reasons.  Pass the easily
available struct device from the platform_device to remedy this.

Note that this driver seems to entirely lack dma_map_single error
handling, but that is left for another time.

Signed-off-by: Christoph Hellwig 
---
 drivers/net/ethernet/marvell/pxa168_eth.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c 
b/drivers/net/ethernet/marvell/pxa168_eth.c
index f8a6d6e3cb7a..35f2142aac5e 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -201,6 +201,7 @@ struct tx_desc {
 };
 
 struct pxa168_eth_private {
+   struct platform_device *pdev;
int port_num;   /* User Ethernet port number*/
int phy_addr;
int phy_speed;
@@ -331,7 +332,7 @@ static void rxq_refill(struct net_device *dev)
used_rx_desc = pep->rx_used_desc_q;
p_used_rx_desc = >p_rx_desc_area[used_rx_desc];
size = skb_end_pointer(skb) - skb->data;
-   p_used_rx_desc->buf_ptr = dma_map_single(NULL,
+   p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
 skb->data,
 size,
 DMA_FROM_DEVICE);
@@ -743,7 +744,7 @@ static int txq_reclaim(struct net_device *dev, int force)
netdev_err(dev, "Error in TX\n");
dev->stats.tx_errors++;
}
-   dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
+   dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
if (skb)
dev_kfree_skb_irq(skb);
released++;
@@ -805,7 +806,7 @@ static int rxq_process(struct net_device *dev, int budget)
if (rx_next_curr_desc == rx_used_desc)
pep->rx_resource_err = 1;
pep->rx_desc_count--;
-   dma_unmap_single(NULL, rx_desc->buf_ptr,
+   dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
 rx_desc->buf_size,
 DMA_FROM_DEVICE);
received_packets++;
@@ -1274,7 +1275,8 @@ pxa168_eth_start_xmit(struct sk_buff *skb, struct 
net_device *dev)
length = skb->len;
pep->tx_skb[tx_index] = skb;
desc->byte_cnt = length;
-   desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+   desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
+   DMA_TO_DEVICE);
 
skb_tx_timestamp(skb);
 
@@ -1528,6 +1530,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
if (err)
goto err_free_mdio;
 
+   pep->pdev = pdev;
SET_NETDEV_DEV(dev, >dev);
pxa168_init_hw(pep);
err = register_netdev(dev);
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


net: don't pass a NULL struct device to DMA API functions v2

2019-02-11 Thread Christoph Hellwig
We still have a few drivers which pass a NULL struct device pointer
to DMA API functions, which generally is a bad idea as the API
implementations rely on the device not only for ops selection, but
also the dma mask and various other attributes.

This series contains all easy conversions to pass a struct device,
besides that there also is some arch code that needs separate handling,
a driver that should not use the DMA API at all, and one that is
a complete basket case to be deal with separately.

Changes since v1:
 - fix an inverted ifdef in CAIF
 - update the smc911x changelog
 - split the series, this only contains the networking patches
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


[PATCH 1/8] net: caif: pass struct device to DMA API functions

2019-02-11 Thread Christoph Hellwig
The DMA API generally relies on a struct device to work properly, and
only barely works without one for legacy reasons.  Pass the easily
available struct device from the platform_device to remedy this.

Also use the proper Kconfig symbol to check for DMA API availability.

Signed-off-by: Christoph Hellwig 
---
 drivers/net/caif/caif_spi.c | 30 --
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index d28a1398c091..7608bc3e00df 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -73,35 +73,37 @@ MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail 
alignment.");
 #define LOW_WATER_MARK   100
 #define HIGH_WATER_MARK  (LOW_WATER_MARK*5)
 
-#ifdef CONFIG_UML
+#ifndef CONFIG_HAS_DMA
 
 /*
  * We sometimes use UML for debugging, but it cannot handle
  * dma_alloc_coherent so we have to wrap it.
  */
-static inline void *dma_alloc(dma_addr_t *daddr)
+static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
 {
return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
 }
 
-static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
+   dma_addr_t handle)
 {
kfree(cpu_addr);
 }
 
 #else
 
-static inline void *dma_alloc(dma_addr_t *daddr)
+static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
 {
-   return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr,
+   return dma_alloc_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, daddr,
GFP_KERNEL);
 }
 
-static inline void dma_free(void *cpu_addr, dma_addr_t handle)
+static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
+   dma_addr_t handle)
 {
-   dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle);
+   dma_free_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, cpu_addr, handle);
 }
-#endif /* CONFIG_UML */
+#endif /* CONFIG_HAS_DMA */
 
 #ifdef CONFIG_DEBUG_FS
 
@@ -610,13 +612,13 @@ static int cfspi_init(struct net_device *dev)
}
 
/* Allocate DMA buffers. */
-   cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+   cfspi->xfer.va_tx[0] = dma_alloc(cfspi, &cfspi->xfer.pa_tx[0]);
if (!cfspi->xfer.va_tx[0]) {
res = -ENODEV;
goto err_dma_alloc_tx_0;
}
 
-   cfspi->xfer.va_rx = dma_alloc(>xfer.pa_rx);
+   cfspi->xfer.va_rx = dma_alloc(cfspi, >xfer.pa_rx);
 
if (!cfspi->xfer.va_rx) {
res = -ENODEV;
@@ -665,9 +667,9 @@ static int cfspi_init(struct net_device *dev)
return 0;
 
  err_create_wq:
-   dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+   dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
  err_dma_alloc_rx:
-   dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+   dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
  err_dma_alloc_tx_0:
return res;
 }
@@ -683,8 +685,8 @@ static void cfspi_uninit(struct net_device *dev)
 
cfspi->ndev = NULL;
/* Free DMA buffers. */
-   dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
-   dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+   dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+   dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
set_bit(SPI_TERMINATE, &cfspi->state);
wake_up_interruptible(&cfspi->wait);
destroy_workqueue(cfspi->wq);
-- 
2.20.1

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH 12/18] fotg210-udc: remove a bogus dma_sync_single_for_device call

2019-02-11 Thread Christoph Hellwig
On Fri, Feb 01, 2019 at 05:10:26PM +0100, Christoph Hellwig wrote:
> On Fri, Feb 01, 2019 at 03:19:41PM +0200, Felipe Balbi wrote:
> > Christoph Hellwig  writes:
> > 
> > > dma_map_single already transfers ownership to the device.
> > >
> > > Signed-off-by: Christoph Hellwig 
> > 
> > Do you want me to take the USB bits or will you take the entire series?
> > In case you're taking the entire series:
> 
> If you want to take the USB feel free.  I just want most of this in
> this merge window if possible.

I didn't see in the USB tree yet, so please let me know if you want to
take it.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH v1 0/7] iommu: Minor cleanups and dev_printk() usage

2019-02-11 Thread Joerg Roedel
On Fri, Feb 08, 2019 at 04:05:33PM -0600, Bjorn Helgaas wrote:
> I've had these minor iommu cleanups lying around for a while, but the ugly
> dmesg logs from [1] prompted me to finally post them.  Take what you like,
> ignore the rest, and tell me so I can clear out my queue of old stuff.
> 
> These fix no actual bugs.
> 
> [1] 
> https://lore.kernel.org/linux-pci/1547649064-19019-1-git-send-email-liudongdo...@huawei.com

Applied patches 1-6 to their respective branches, thanks Bjorn.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH] fix flush_tlb_all typo

2019-02-11 Thread Joerg Roedel
On Fri, Feb 08, 2019 at 02:20:44PM +, Tom Murphy wrote:
> ---
>  include/linux/iommu.h | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)

The patch has no Signed-off-by, please add it and a proper
commit-message and re-send.

___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH] iommu: Allow io-pgtable to be used outside of drivers/iommu/

2019-02-11 Thread Joerg Roedel
On Tue, Feb 05, 2019 at 11:20:30AM -0600, Rob Herring wrote:
> On Tue, Feb 5, 2019 at 10:55 AM Christoph Hellwig  wrote:
> >
> > On Tue, Feb 05, 2019 at 10:37:31AM -0600, Rob Herring wrote:
> > > Move io-pgtable.h to include/linux/ and export alloc_io_pgtable_ops
> > > and free_io_pgtable_ops. This enables drivers outside drivers/iommu/ to
> > > use the ARM page table library. Specifically, some ARM Mali GPUs use the
> > > ARM page table formats.
> >
> > Maybe rename it to arm-io-pgtable.h to make the usage a little more
> > clear?
> 
> I should drop the first 'ARM' in the commit message.

Dropped and applied, thanks.
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH] iommu/ipmmu-vmsa: fix device reference leaks

2019-02-11 Thread j...@8bytes.org
Adding a few more people to Cc.

On Sun, Feb 03, 2019 at 10:27:09AM +, wen yang wrote:
> Make sure to drop the reference to the device taken by
> of_find_device_by_node() on driver unbind.
> 
> Signed-off-by: Wen Yang 
> Cc: Joerg Roedel 
> Cc: iommu@lists.linux-foundation.org
> Cc: linux-ker...@vger.kernel.org
> ---
>  drivers/iommu/ipmmu-vmsa.c | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
> index 7a4529c..cebf56d 100644
> --- a/drivers/iommu/ipmmu-vmsa.c
> +++ b/drivers/iommu/ipmmu-vmsa.c
> @@ -756,6 +756,9 @@ static int ipmmu_init_platform_device(struct device *dev,
>  
>   fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
>  
> + if (!fwspec->iommu_priv)
> + put_device(&ipmmu_pdev->dev);
> +
>   return 0;
>  }
>  
> -- 
> 2.7.4
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH/RFC] driver core: Postpone DMA tear-down until after devres release

2019-02-11 Thread Robin Murphy

On 08/02/2019 18:55, Geert Uytterhoeven wrote:

Hi Robin,

On Fri, Feb 8, 2019 at 6:55 PM Robin Murphy  wrote:

On 08/02/2019 16:40, Joerg Roedel wrote:

On Thu, Feb 07, 2019 at 08:36:53PM +0100, Geert Uytterhoeven wrote:

diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 8ac10af17c0043a3..d62487d024559620 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -968,9 +968,9 @@ static void __device_release_driver(struct device *dev, 
struct device *parent)
  drv->remove(dev);

  device_links_driver_cleanup(dev);
-arch_teardown_dma_ops(dev);

  devres_release_all(dev);
+arch_teardown_dma_ops(dev);
  dev->driver = NULL;
  dev_set_drvdata(dev, NULL);
  if (dev->pm_domain && dev->pm_domain->dismiss)


Thanks for the fix! Should it also be tagged for stable and get a Fixes


FTR, Greg has added it to driver-core-testing, with a CC to stable.


So I see, great!


tag? I know it only triggers with a fix in v5.0-rc, but still...


I think so:

Fixes: 09515ef5ddad ("of/acpi: Configure dma operations at probe time
for platform/amba/pci bus devices")


Thanks! It won't backport cleanly due to commit dc3c05504d38849f
("dma-mapping: remove dma_deconfigure") in v4.20, though.


Ah yes - backports beyond that should simply be a case of moving the 
dma_deconfigure() wrapper in the same manner.


Thanks,
Robin.


There aren't many drivers using dmam_alloc_*(), let alone which would
also find themselves behind an IOMMU on an Arm system, but it turns out
I actually have another one which can reproduce the BUG() with 5.0-rc.


SATA core uses dmam_alloc_*().


I've tried a 4.12 kernel with a bit of instrumentation[1] and sure
enough the devres-managed buffer is freed with the wrong ops[2] even
then. How it manages not to blow up more catastrophically I have no
idea... I guess at best it just leaks the buffers and IOMMU mappings,
and at worst quietly frees random other pages instead.


May depend on the actual ops, and whether CMA is used or not.

Gr{oetje,eeting}s,

 Geert


___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH V3 2/3] HYPERV/IOMMU: Add Hyper-V stub IOMMU driver

2019-02-11 Thread Tianyu Lan
Hi Olaf:
 Thanks for your review.

On Fri, Feb 8, 2019 at 10:52 PM Olaf Hering  wrote:
>
> On Thu, Feb 07, lantianyu1...@gmail.com wrote:
>
> > +++ b/drivers/iommu/Kconfig
> > +config HYPERV_IOMMU
> > + bool "Hyper-V x2APIC IRQ Handling"
> > + depends on HYPERV
> > + select IOMMU_API
> > + help
>
>
> Consider adding 'default HYPERV' like some other drivers already do it.
>
> Olaf

Good suggestion and will update. Thanks.

-- 
Best regards
Tianyu Lan
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu


Re: [PATCH V3 2/3] HYPERV/IOMMU: Add Hyper-V stub IOMMU driver

2019-02-11 Thread Tianyu Lan
On Fri, Feb 8, 2019 at 1:15 AM Vitaly Kuznetsov  wrote:
>
> lantianyu1...@gmail.com writes:
>
> > From: Lan Tianyu 
> >
> > On the bare metal, enabling X2APIC mode requires interrupt remapping
> > function which helps to deliver irq to cpu with 32-bit APIC ID.
> > Hyper-V doesn't provide interrupt remapping function so far and Hyper-V
> > MSI protocol already supports to deliver interrupt to the CPU whose
> > virtual processor index is more than 255. IO-APIC interrupt still has
> > 8-bit APIC ID limitation.
> >
> > This patch is to add Hyper-V stub IOMMU driver in order to enable
> > X2APIC mode successfully in Hyper-V Linux guest. The driver returns X2APIC
> > interrupt remapping capability when X2APIC mode is available. Otherwise,
> > it creates a Hyper-V irq domain to limit IO-APIC interrupts' affinity
> > and make sure cpus assigned with IO-APIC interrupt have 8-bit APIC ID.
> >
> > Define 24 IO-APIC remapping entries because Hyper-V only expose one
> > single IO-APIC and one IO-APIC has 24 pins according IO-APIC spec(
> > https://pdos.csail.mit.edu/6.828/2016/readings/ia32/ioapic.pdf).
> >
> > Signed-off-by: Lan Tianyu 
> > ---
> > Change since v2:
> >- Improve comment about why save IO-APIC entry in the irq chip data.
> >- Some code improvement.
> >- Improve statement in the IOMMU Kconfig.
> >
> > Change since v1:
> >   - Remove unused pr_fmt
> >   - Make ioapic_ir_domain as static variable
> >   - Remove unused variables cfg and entry in the 
> > hyperv_irq_remapping_alloc()
> >   - Fix comments
> > ---
> >  drivers/iommu/Kconfig |   8 ++
> >  drivers/iommu/Makefile|   1 +
> >  drivers/iommu/hyperv-iommu.c  | 194 
> > ++
> >  drivers/iommu/irq_remapping.c |   3 +
> >  drivers/iommu/irq_remapping.h |   1 +
> >  5 files changed, 207 insertions(+)
> >  create mode 100644 drivers/iommu/hyperv-iommu.c
> >
> > diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
> > index 45d7021..6090935 100644
> > --- a/drivers/iommu/Kconfig
> > +++ b/drivers/iommu/Kconfig
> > @@ -437,4 +437,12 @@ config QCOM_IOMMU
> >   help
> > Support for IOMMU on certain Qualcomm SoCs.
> >
> > +config HYPERV_IOMMU
> > + bool "Hyper-V x2APIC IRQ Handling"
> > + depends on HYPERV
> > + select IOMMU_API
> > + help
> > +   Stub IOMMU driver to handle IRQs as to allow Hyper-V Linux
> > +   guests to run with x2APIC mode enabled.
> > +
> >  endif # IOMMU_SUPPORT
> > diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
> > index a158a68..8c71a15 100644
> > --- a/drivers/iommu/Makefile
> > +++ b/drivers/iommu/Makefile
> > @@ -32,3 +32,4 @@ obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
> >  obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
> >  obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
> >  obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
> > +obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
> > diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
> > new file mode 100644
> > index 000..d8572c5
> > --- /dev/null
> > +++ b/drivers/iommu/hyperv-iommu.c
> > @@ -0,0 +1,194 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +
> > +/*
> > + * Hyper-V stub IOMMU driver.
> > + *
> > + * Copyright (C) 2019, Microsoft, Inc.
> > + *
> > + * Author : Lan Tianyu 
> > + */
> > +
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +
> > +#include "irq_remapping.h"
> > +
> > +#ifdef CONFIG_IRQ_REMAP
> > +
> > +/*
> > + * According 82093AA IO-APIC spec , IO APIC has a 24-entry Interrupt
> > + * Redirection Table. Hyper-V exposes one single IO-APIC and so define
> > + * 24 IO APIC remmapping entries.
> > + */
> > +#define IOAPIC_REMAPPING_ENTRY 24
> > +
> > +static cpumask_t ioapic_max_cpumask = { CPU_BITS_NONE };
> > +static struct irq_domain *ioapic_ir_domain;
> > +
> > +static int hyperv_ir_set_affinity(struct irq_data *data,
> > + const struct cpumask *mask, bool force)
> > +{
> > + struct irq_data *parent = data->parent_data;
> > + struct irq_cfg *cfg = irqd_cfg(data);
> > + struct IO_APIC_route_entry *entry;
> > + int ret;
> > +
> > + /* Return error If new irq affinity is out of ioapic_max_cpumask. */
> > + if (!cpumask_subset(mask, &ioapic_max_cpumask))
> > + return -EINVAL;
> > +
> > + ret = parent->chip->irq_set_affinity(parent, mask, force);
> > + if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
> > + return ret;
> > +
> > + entry = data->chip_data;
> > + entry->dest = cfg->dest_apicid;
> > + entry->vector = cfg->vector;
> > + send_cleanup_vector(cfg);
> > +
> > + return 0;
> > +}
> > +
> > +static struct irq_chip hyperv_ir_chip = {
> > + .name   = "HYPERV-IR",
> > + .irq_ack= apic_ack_irq,
> > + .irq_set_affinity   = hyperv_ir_set_affinity,
> > +};
> > +
> > +static int 

Re: [PATCH V3 2/3] HYPERV/IOMMU: Add Hyper-V stub IOMMU driver

2019-02-11 Thread Tianyu Lan
Hi Alex:
Thanks for your review.

On Fri, Feb 8, 2019 at 2:15 AM Alex Williamson
 wrote:
>
> On Thu,  7 Feb 2019 23:33:48 +0800
> lantianyu1...@gmail.com wrote:
>
> > From: Lan Tianyu 
> >
> > On the bare metal, enabling X2APIC mode requires interrupt remapping
> > function which helps to deliver irq to cpu with 32-bit APIC ID.
> > Hyper-V doesn't provide interrupt remapping function so far and Hyper-V
> > MSI protocol already supports to deliver interrupt to the CPU whose
> > virtual processor index is more than 255. IO-APIC interrupt still has
> > 8-bit APIC ID limitation.
> >
> > This patch is to add Hyper-V stub IOMMU driver in order to enable
> > X2APIC mode successfully in Hyper-V Linux guest. The driver returns X2APIC
> > interrupt remapping capability when X2APIC mode is available. Otherwise,
> > it creates a Hyper-V irq domain to limit IO-APIC interrupts' affinity
> > and make sure cpus assigned with IO-APIC interrupt have 8-bit APIC ID.
> >
> > Define 24 IO-APIC remapping entries because Hyper-V only expose one
> > single IO-APIC and one IO-APIC has 24 pins according IO-APIC spec(
> > https://pdos.csail.mit.edu/6.828/2016/readings/ia32/ioapic.pdf).
> >
> > Signed-off-by: Lan Tianyu 
> > ---
> > Change since v2:
> >- Improve comment about why save IO-APIC entry in the irq chip data.
> >- Some code improvement.
> >- Improve statement in the IOMMU Kconfig.
> >
> > Change since v1:
> >   - Remove unused pr_fmt
> >   - Make ioapic_ir_domain as static variable
> >   - Remove unused variables cfg and entry in the 
> > hyperv_irq_remapping_alloc()
> >   - Fix comments
> > ---
> >  drivers/iommu/Kconfig |   8 ++
> >  drivers/iommu/Makefile|   1 +
> >  drivers/iommu/hyperv-iommu.c  | 194 
> > ++
> >  drivers/iommu/irq_remapping.c |   3 +
> >  drivers/iommu/irq_remapping.h |   1 +
> >  5 files changed, 207 insertions(+)
> >  create mode 100644 drivers/iommu/hyperv-iommu.c
> ...
> > diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
> > new file mode 100644
> > index 000..d8572c5
> > --- /dev/null
> > +++ b/drivers/iommu/hyperv-iommu.c
> ...
> > +static int __init hyperv_prepare_irq_remapping(void)
> > +{
> > + struct fwnode_handle *fn;
> > + int i;
> > +
> > + if (!hypervisor_is_type(x86_hyper_type) ||
> > + !x2apic_supported())
> > + return -ENODEV;
> > +
> > + fn = irq_domain_alloc_named_id_fwnode("HYPERV-IR", 0);
> > + if (!fn)
> > + return -ENOMEM;
> > +
> > + ioapic_ir_domain =
> > + irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
> > + 0, IOAPIC_REMAPPING_ENTRY, fn,
> > + &hyperv_ir_domain_ops, NULL);
> > +
> > + irq_domain_free_fwnode(fn);
> > +
> > + /*
> > +  * Hyper-V doesn't provide irq remapping function for
> > +  * IO-APIC and so IO-APIC only accepts 8-bit APIC ID.
> > +  * Cpu's APIC ID is read from ACPI MADT table and APIC IDs
> > +  * in the MADT table on Hyper-v are sorted monotonic increasingly.
> > +  * APIC ID reflects cpu topology. There maybe some APIC ID
> > +  * gaps when cpu number in a socket is not power of two. Prepare
> > +  * max cpu affinity for IOAPIC irqs. Scan cpu 0-255 and set cpu
> > +  * into ioapic_max_cpumask if its APIC ID is less than 256.
> > +  */
> > + for (i = 0; i < 256; i++)
> > + if (cpu_physical_id(i) < 256)
> > + cpumask_set_cpu(i, &ioapic_max_cpumask);
>
> This looks sketchy.  What if NR_CPUS is less than 256?  Thanks,

Nice catch. I should check NR_CPUS here. Will update. Thanks.


-- 
Best regards
Tianyu Lan
___
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu