[PATCH] ocxl: Make use of the helper macro LIST_HEAD()

2022-02-08 Thread Cai Huoqing
Replace "struct list_head head = LIST_HEAD_INIT(head)" with
"LIST_HEAD(head)" to simplify the code.

Signed-off-by: Cai Huoqing 
---
 drivers/misc/ocxl/link.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index ab039c115381..9670d02c927f 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -94,7 +94,7 @@ struct ocxl_link {
struct spa *spa;
void *platform_data;
 };
-static struct list_head links_list = LIST_HEAD_INIT(links_list);
+static LIST_HEAD(links_list);
 static DEFINE_MUTEX(links_list_lock);
 
 enum xsl_response {
-- 
2.25.1
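For reference, LIST_HEAD() simply folds the declaration and the LIST_HEAD_INIT() initializer into one statement. A minimal, paraphrased sketch of the two helpers (see include/linux/list.h and include/linux/types.h for the real definitions):

	/* Paraphrased sketch of the list helpers, not the verbatim kernel source. */
	struct list_head {
		struct list_head *next, *prev;
	};

	#define LIST_HEAD_INIT(name) { &(name), &(name) }
	#define LIST_HEAD(name) \
		struct list_head name = LIST_HEAD_INIT(name)

	/*
	 * "static LIST_HEAD(links_list);" therefore expands to
	 * "static struct list_head links_list = LIST_HEAD_INIT(links_list);",
	 * i.e. the same empty, self-pointing list, with the name written once
	 * instead of three times.
	 */
	static LIST_HEAD(links_list);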



[PATCH] soc: fsl: dpio: Make use of the helper macro LIST_HEAD()

2022-02-08 Thread Cai Huoqing
Replace "struct list_head head = LIST_HEAD_INIT(head)" with
"LIST_HEAD(head)" to simplify the code.

Signed-off-by: Cai Huoqing 
---
 drivers/soc/fsl/dpio/dpio-service.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 1d2b27e3ea63..36f0a9b799b1 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -51,7 +51,7 @@ struct dpaa2_io_store {
 
 /* keep a per cpu array of DPIOs for fast access */
 static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
-static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
+static LIST_HEAD(dpio_list);
 static DEFINE_SPINLOCK(dpio_list_lock);
 
 static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
-- 
2.25.1



[PATCH] soc/fsl/qman: test: Make use of the helper function kthread_run_on_cpu()

2021-12-02 Thread Cai Huoqing
Replace kthread_create/kthread_bind/wake_up_process() with
kthread_run_on_cpu() to simplify the code.

Signed-off-by: Cai Huoqing 
---
 drivers/soc/fsl/qbman/qman_test_stash.c | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index b7e8e5ec884c..7ab259cb139e 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -108,14 +108,12 @@ static int on_all_cpus(int (*fn)(void))
.fn = fn,
.started = ATOMIC_INIT(0)
};
-   struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
-   "hotpotato%d", cpu);
+   struct task_struct *k = kthread_run_on_cpu(bstrap_fn, &bstrap,
+  cpu, "hotpotato/%u");
int ret;
 
if (IS_ERR(k))
return -ENOMEM;
-   kthread_bind(k, cpu);
-   wake_up_process(k);
/*
 * If we call kthread_stop() before the "wake up" has had an
 * effect, then the thread may exit with -EINTR without ever
-- 
2.25.1
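kthread_run_on_cpu() (a then-new helper in include/linux/kthread.h) wraps the create, bind and wake sequence: kthread_create_on_cpu() already binds the new thread to the requested CPU and expects a "%u" in the name format for the CPU number, and the helper then wakes the thread. Its approximate shape:

	/* Approximate shape of the helper; see include/linux/kthread.h. */
	static inline struct task_struct *
	kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
			   unsigned int cpu, const char *namefmt)
	{
		struct task_struct *p;

		p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
		if (!IS_ERR(p))
			wake_up_process(p);

		return p;
	}

That is why the explicit kthread_bind()/wake_up_process() calls can be dropped, and why the name format changes from "hotpotato%d" to "hotpotato/%u".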



[PATCH v2] tpm: ibmvtpm: Make use of dma_alloc_noncoherent()

2021-10-11 Thread Cai Huoqing
Replacing kmalloc/kfree/get_zeroed_page/free_page/dma_map_single/
dma_unmap_single() with dma_alloc_noncoherent/dma_free_noncoherent()
helps to reduce code size, and simplify the code, and the hardware
can keep DMA coherent itself.

Signed-off-by: Cai Huoqing 
---
v1->v2:
*Change to dma_alloc/free_noncoherent from dma_alloc/free_coherent.
*Update changelog.

 drivers/char/tpm/tpm_ibmvtpm.c | 63 +++---
 1 file changed, 20 insertions(+), 43 deletions(-)

diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 3af4c07a9342..b4552f8400b8 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -356,15 +356,13 @@ static void tpm_ibmvtpm_remove(struct vio_dev *vdev)
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 
-   dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
-CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
-   free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);
-
-   if (ibmvtpm->rtce_buf) {
-   dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
-ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
-   kfree(ibmvtpm->rtce_buf);
-   }
+   dma_free_noncoherent(ibmvtpm->dev, CRQ_RES_BUF_SIZE, crq_q->crq_addr,
+crq_q->crq_dma_handle, DMA_BIDIRECTIONAL);
+
+   if (ibmvtpm->rtce_buf)
+   dma_free_noncoherent(ibmvtpm->dev,
+ibmvtpm->rtce_size, ibmvtpm->rtce_buf,
+ibmvtpm->rtce_dma_handle, DMA_BIDIRECTIONAL);
 
kfree(ibmvtpm);
/* For tpm_ibmvtpm_get_desired_dma */
@@ -522,23 +520,12 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
return;
}
ibmvtpm->rtce_size = be16_to_cpu(crq->len);
-   ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
-   GFP_ATOMIC);
-   if (!ibmvtpm->rtce_buf) {
-   dev_err(ibmvtpm->dev, "Failed to allocate 
memory for rtce buffer\n");
-   return;
-   }
-
-   ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
-   ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
-   DMA_BIDIRECTIONAL);
-
-   if (dma_mapping_error(ibmvtpm->dev,
- ibmvtpm->rtce_dma_handle)) {
-   kfree(ibmvtpm->rtce_buf);
-   ibmvtpm->rtce_buf = NULL;
-   dev_err(ibmvtpm->dev, "Failed to dma map rtce 
buffer\n");
-   }
+   ibmvtpm->rtce_buf = dma_alloc_noncoherent(ibmvtpm->dev,
+ ibmvtpm->rtce_size,
+ &ibmvtpm->rtce_dma_handle,
+ DMA_BIDIRECTIONAL, GFP_ATOMIC);
+   if (!ibmvtpm->rtce_buf)
+   dev_err(ibmvtpm->dev, "Failed to dma allocate 
rtce buffer\n");
 
return;
case VTPM_GET_VERSION_RES:
@@ -618,22 +605,14 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
ibmvtpm->vdev = vio_dev;
 
crq_q = &ibmvtpm->crq_queue;
-   crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
-   if (!crq_q->crq_addr) {
-   dev_err(dev, "Unable to allocate memory for crq_addr\n");
-   goto cleanup;
-   }
 
crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
init_waitqueue_head(&crq_q->wq);
-   ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
-CRQ_RES_BUF_SIZE,
-DMA_BIDIRECTIONAL);
-
-   if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
-   dev_err(dev, "dma mapping failed\n");
+   crq_q->crq_addr = dma_alloc_noncoherent(dev, CRQ_RES_BUF_SIZE,
+   &ibmvtpm->crq_dma_handle,
+   DMA_BIDIRECTIONAL, GFP_KERNEL);
+   if (!crq_q->crq_addr)
goto cleanup;
-   }
 
rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
@@ -642,7 +621,7 @@ static int tpm_ibmvtpm_probe(struc

[PATCH v2] scsi: ibmvscsi: Use dma_alloc_noncoherent() instead of get_zeroed_page/dma_map_single()

2021-10-11 Thread Cai Huoqing
Replacing get_zeroed_page/free_page/dma_map_single/dma_unmap_single()
with dma_alloc_noncoherent/dma_free_noncoherent() helps to reduce
code size, and simplify the code, and the hardware can keep DMA
coherent itself.

Signed-off-by: Cai Huoqing 
---
v1->v2:
*Change to dma_alloc/free_noncoherent from dma_alloc/free_coherent.
*Update changelog.

 drivers/scsi/ibmvscsi/ibmvfc.c   | 16 
 drivers/scsi/ibmvscsi/ibmvscsi.c | 29 +
 2 files changed, 13 insertions(+), 32 deletions(-)

diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 1f1586ad48fe..6e95fd02fd25 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -869,8 +869,8 @@ static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
 {
struct device *dev = vhost->dev;
 
-   dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
-   free_page((unsigned long)queue->msgs.handle);
+   dma_free_noncoherent(dev, PAGE_SIZE, queue->msgs.handle,
+queue->msg_token, DMA_BIDIRECTIONAL);
queue->msgs.handle = NULL;
 
ibmvfc_free_event_pool(vhost, queue);
@@ -5663,19 +5663,11 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
return -ENOMEM;
}
 
-   queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
+   queue->msgs.handle = dma_alloc_noncoherent(dev, PAGE_SIZE, &queue->msg_token,
+  DMA_BIDIRECTIONAL, GFP_KERNEL);
if (!queue->msgs.handle)
return -ENOMEM;
 
-   queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
-
-   if (dma_mapping_error(dev, queue->msg_token)) {
-   free_page((unsigned long)queue->msgs.handle);
-   queue->msgs.handle = NULL;
-   return -ENOMEM;
-   }
-
queue->cur = 0;
queue->fmt = fmt;
queue->size = PAGE_SIZE / fmt_size;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index ea8e01f49cba..68409c298c74 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -151,10 +151,8 @@ static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-   dma_unmap_single(hostdata->dev,
-queue->msg_token,
-queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
-   free_page((unsigned long)queue->msgs);
+   dma_free_noncoherent(hostdata->dev, PAGE_SIZE,
+queue->msgs, queue->msg_token, DMA_BIDIRECTIONAL);
 }
 
 /**
@@ -331,18 +329,12 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
int retrc;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 
-   queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
-
-   if (!queue->msgs)
-   goto malloc_failed;
queue->size = PAGE_SIZE / sizeof(*queue->msgs);
-
-   queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
- queue->size * sizeof(*queue->msgs),
- DMA_BIDIRECTIONAL);
-
-   if (dma_mapping_error(hostdata->dev, queue->msg_token))
-   goto map_failed;
+   queue->msgs = dma_alloc_noncoherent(hostdata->dev,
+   PAGE_SIZE, &queue->msg_token,
+   DMA_BIDIRECTIONAL, GFP_KERNEL);
+   if (!queue->msgs)
+   goto malloc_failed;
 
gather_partition_info();
set_adapter_info(hostdata);
@@ -395,11 +387,8 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
   reg_crq_failed:
-   dma_unmap_single(hostdata->dev,
-queue->msg_token,
-queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
-  map_failed:
-   free_page((unsigned long)queue->msgs);
+   dma_free_noncoherent(hostdata->dev, PAGE_SIZE, queue->msgs,
+queue->msg_token, DMA_BIDIRECTIONAL);
   malloc_failed:
return -1;
 }
-- 
2.25.1
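Both v2 patches move to the dma_alloc_noncoherent()/dma_free_noncoherent() pair, which allocates a buffer and hands back its DMA address in one call instead of allocate-then-dma_map_single(). A minimal sketch of the pattern, using placeholder names (my_ring) rather than anything from ibmvtpm or ibmvscsi:

	#include <linux/dma-mapping.h>

	struct my_ring {
		struct device *dev;
		void *buf;
		dma_addr_t buf_dma;
		size_t size;
	};

	static int my_ring_alloc(struct my_ring *r)
	{
		/* One call: allocation plus DMA address, no dma_mapping_error() step. */
		r->buf = dma_alloc_noncoherent(r->dev, r->size, &r->buf_dma,
					       DMA_BIDIRECTIONAL, GFP_KERNEL);
		return r->buf ? 0 : -ENOMEM;
	}

	static void my_ring_free(struct my_ring *r)
	{
		dma_free_noncoherent(r->dev, r->size, r->buf, r->buf_dma,
				     DMA_BIDIRECTIONAL);
	}

Note that noncoherent allocations still follow the streaming-DMA ownership rules, so dma_sync_single_for_cpu()/dma_sync_single_for_device() are needed around CPU/device handoffs just as with dma_map_single()ed memory; the conversion changes how the buffer is obtained, not how it is synchronized.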



[PATCH] tpm: ibmvtpm: Make use of dma_alloc_coherent()

2021-10-10 Thread Cai Huoqing
Replacing kmalloc/kfree/get_zeroed_page/free_page/dma_map_single/
dma_unmap_single() with dma_alloc_coherent/dma_free_coherent()
helps to reduce code size, and simplify the code, and coherent
DMA will not clear the cache every time.

Signed-off-by: Cai Huoqing 
---
 drivers/char/tpm/tpm_ibmvtpm.c | 61 ++
 1 file changed, 18 insertions(+), 43 deletions(-)

diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 3af4c07a9342..5f55a14ee824 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -356,15 +356,12 @@ static void tpm_ibmvtpm_remove(struct vio_dev *vdev)
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 
-   dma_unmap_single(ibmvtpm->dev, ibmvtpm->crq_dma_handle,
-CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL);
-   free_page((unsigned long)ibmvtpm->crq_queue.crq_addr);
-
-   if (ibmvtpm->rtce_buf) {
-   dma_unmap_single(ibmvtpm->dev, ibmvtpm->rtce_dma_handle,
-ibmvtpm->rtce_size, DMA_BIDIRECTIONAL);
-   kfree(ibmvtpm->rtce_buf);
-   }
+   dma_free_coherent(ibmvtpm->dev, CRQ_RES_BUF_SIZE,
+ crq_q->crq_addr, crq_q->crq_dma_handle);
+
+   if (ibmvtpm->rtce_buf)
+   dma_free_coherent(ibmvtpm->dev, ibmvtpm->rtce_size,
+ ibmvtpm->rtce_buf, ibmvtpm->rtce_dma_handle);
 
kfree(ibmvtpm);
/* For tpm_ibmvtpm_get_desired_dma */
@@ -522,23 +519,12 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
return;
}
ibmvtpm->rtce_size = be16_to_cpu(crq->len);
-   ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
-   GFP_ATOMIC);
-   if (!ibmvtpm->rtce_buf) {
-   dev_err(ibmvtpm->dev, "Failed to allocate 
memory for rtce buffer\n");
-   return;
-   }
-
-   ibmvtpm->rtce_dma_handle = dma_map_single(ibmvtpm->dev,
-   ibmvtpm->rtce_buf, ibmvtpm->rtce_size,
-   DMA_BIDIRECTIONAL);
-
-   if (dma_mapping_error(ibmvtpm->dev,
- ibmvtpm->rtce_dma_handle)) {
-   kfree(ibmvtpm->rtce_buf);
-   ibmvtpm->rtce_buf = NULL;
-   dev_err(ibmvtpm->dev, "Failed to dma map rtce 
buffer\n");
-   }
+   ibmvtpm->rtce_buf = dma_alloc_coherent(ibmvtpm->dev,
+  ibmvtpm->rtce_size,
+  &ibmvtpm->rtce_dma_handle,
+  GFP_ATOMIC);
+   if (!ibmvtpm->rtce_buf)
+   dev_err(ibmvtpm->dev, "Failed to dma allocate 
rtce buffer\n");
 
return;
case VTPM_GET_VERSION_RES:
@@ -618,22 +604,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
ibmvtpm->vdev = vio_dev;
 
crq_q = &ibmvtpm->crq_queue;
-   crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
-   if (!crq_q->crq_addr) {
-   dev_err(dev, "Unable to allocate memory for crq_addr\n");
-   goto cleanup;
-   }
 
crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
init_waitqueue_head(&crq_q->wq);
-   ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
-CRQ_RES_BUF_SIZE,
-DMA_BIDIRECTIONAL);
-
-   if (dma_mapping_error(dev, ibmvtpm->crq_dma_handle)) {
-   dev_err(dev, "dma mapping failed\n");
+   crq_q->crq_addr = dma_alloc_coherent(dev, CRQ_RES_BUF_SIZE,
+   &ibmvtpm->crq_dma_handle, GFP_KERNEL);
+   if (!crq_q->crq_addr)
goto cleanup;
-   }
 
rc = plpar_hcall_norets(H_REG_CRQ, vio_dev->unit_address,
ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
@@ -642,7 +619,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
 
if (rc) {
dev_err(dev, "Unable to register CRQ rc=%d\n", rc);
-   goto reg_crq_cleanup;
+   goto cleanup;
}
 
rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
@@ -704,13 +681,11 @@ static int tpm_ibmvtpm_p

[PATCH] scsi: ibmvscsi: Use dma_alloc_coherent() instead of get_zeroed_page/dma_map_single()

2021-10-10 Thread Cai Huoqing
Replacing get_zeroed_page/free_page/dma_map_single/dma_unmap_single()
with dma_alloc_coherent/dma_free_coherent() helps to reduce
code size, and simplify the code, and coherent DMA will not
clear the cache every time.

Signed-off-by: Cai Huoqing 
---
 drivers/scsi/ibmvscsi/ibmvfc.c   | 15 +++
 drivers/scsi/ibmvscsi/ibmvscsi.c | 26 ++
 2 files changed, 9 insertions(+), 32 deletions(-)

diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 1f1586ad48fe..f65d1a78b272 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -869,8 +869,7 @@ static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
 {
struct device *dev = vhost->dev;
 
-   dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
-   free_page((unsigned long)queue->msgs.handle);
+   dma_free_coherent(dev, PAGE_SIZE, queue->msgs.handle, queue->msg_token);
queue->msgs.handle = NULL;
 
ibmvfc_free_event_pool(vhost, queue);
@@ -5663,19 +5662,11 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
return -ENOMEM;
}
 
-   queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
+   queue->msgs.handle = dma_alloc_coherent(dev, PAGE_SIZE,
+   &queue->msg_token, GFP_KERNEL);
if (!queue->msgs.handle)
return -ENOMEM;
 
-   queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
-
-   if (dma_mapping_error(dev, queue->msg_token)) {
-   free_page((unsigned long)queue->msgs.handle);
-   queue->msgs.handle = NULL;
-   return -ENOMEM;
-   }
-
queue->cur = 0;
queue->fmt = fmt;
queue->size = PAGE_SIZE / fmt_size;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index ea8e01f49cba..61b315d1edbc 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -151,10 +151,7 @@ static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
-   dma_unmap_single(hostdata->dev,
-queue->msg_token,
-queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
-   free_page((unsigned long)queue->msgs);
+   dma_free_coherent(hostdata->dev, PAGE_SIZE, queue->msgs, queue->msg_token);
 }
 
 /**
@@ -331,18 +328,11 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
int retrc;
struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 
-   queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
-
-   if (!queue->msgs)
-   goto malloc_failed;
queue->size = PAGE_SIZE / sizeof(*queue->msgs);
-
-   queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
- queue->size * sizeof(*queue->msgs),
- DMA_BIDIRECTIONAL);
-
-   if (dma_mapping_error(hostdata->dev, queue->msg_token))
-   goto map_failed;
+   queue->msgs = dma_alloc_coherent(hostdata->dev, PAGE_SIZE,
+   &queue->msg_token, GFP_KERNEL);
+   if (!queue->msgs)
+   goto malloc_failed;
 
gather_partition_info();
set_adapter_info(hostdata);
@@ -395,11 +385,7 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
   reg_crq_failed:
-   dma_unmap_single(hostdata->dev,
-queue->msg_token,
-queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
-  map_failed:
-   free_page((unsigned long)queue->msgs);
+   dma_free_coherent(hostdata->dev, PAGE_SIZE, queue->msgs, queue->msg_token);
   malloc_failed:
return -1;
 }
-- 
2.25.1
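The 2021-10-10 patches use the coherent API instead, which both allocates the memory (zeroed, in current kernels) and establishes a coherent mapping, so the get_zeroed_page() + dma_map_single() + dma_mapping_error() sequence collapses into one call. A sketch of the before/after shape, with illustrative names rather than driver code:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	/* Before: allocate, map, check the mapping, and mirror that on teardown. */
	static void *crq_alloc_old(struct device *dev, dma_addr_t *token)
	{
		void *page = (void *)get_zeroed_page(GFP_KERNEL);

		if (!page)
			return NULL;
		*token = dma_map_single(dev, page, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *token)) {
			free_page((unsigned long)page);
			return NULL;
		}
		return page;
	}

	/* After: one call returns zeroed, coherently mapped memory... */
	static void *crq_alloc_new(struct device *dev, dma_addr_t *token)
	{
		return dma_alloc_coherent(dev, PAGE_SIZE, token, GFP_KERNEL);
	}

	/* ...and one call undoes it. */
	static void crq_free_new(struct device *dev, void *addr, dma_addr_t token)
	{
		dma_free_coherent(dev, PAGE_SIZE, addr, token);
	}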



[PATCH v2] ibmveth: Use dma_alloc_coherent() instead of kmalloc/dma_map_single()

2021-09-26 Thread Cai Huoqing
Replacing kmalloc/kfree/dma_map_single/dma_unmap_single()
with dma_alloc_coherent/dma_free_coherent() helps to reduce
code size, and simplify the code, and coherent DMA will not
clear the cache every time.

Signed-off-by: Cai Huoqing 
---
v1->v2: Remove extra change in Kconfig

 drivers/net/ethernet/ibm/ibmveth.c | 25 +
 1 file changed, 9 insertions(+), 16 deletions(-)

diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 3d9b4f99d357..3aedb680adb8 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -605,17 +605,13 @@ static int ibmveth_open(struct net_device *netdev)
}
 
rc = -ENOMEM;
-   adapter->bounce_buffer =
-   kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
-   if (!adapter->bounce_buffer)
-   goto out_free_irq;
 
-   adapter->bounce_buffer_dma =
-   dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
-  netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
-   if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
-   netdev_err(netdev, "unable to map bounce buffer\n");
-   goto out_free_bounce_buffer;
+   adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
+   netdev->mtu + IBMVETH_BUFF_OH,
+   &adapter->bounce_buffer_dma, GFP_KERNEL);
+   if (!adapter->bounce_buffer) {
+   netdev_err(netdev, "unable to alloc bounce buffer\n");
+   goto out_free_irq;
}
 
netdev_dbg(netdev, "initial replenish cycle\n");
@@ -627,8 +623,6 @@ static int ibmveth_open(struct net_device *netdev)
 
return 0;
 
-out_free_bounce_buffer:
-   kfree(adapter->bounce_buffer);
 out_free_irq:
free_irq(netdev->irq, netdev);
 out_free_buffer_pools:
@@ -702,10 +696,9 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
 >rx_buff_pool[i]);
 
-   dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
-adapter->netdev->mtu + IBMVETH_BUFF_OH,
-DMA_BIDIRECTIONAL);
-   kfree(adapter->bounce_buffer);
+   dma_free_coherent(&adapter->vdev->dev,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ adapter->bounce_buffer, adapter->bounce_buffer_dma);
 
netdev_dbg(netdev, "close complete\n");
 
-- 
2.25.1



[PATCH] ibmveth: Use dma_alloc_coherent() instead of kmalloc/dma_map_single()

2021-09-26 Thread Cai Huoqing
Replacing kmalloc/kfree/dma_map_single/dma_unmap_single()
with dma_alloc_coherent/dma_free_coherent() helps to reduce
code size, and simplify the code, and coherent DMA will not
clear the cache every time.

Signed-off-by: Cai Huoqing 
---
 drivers/net/ethernet/ibm/Kconfig   |  4 ++--
 drivers/net/ethernet/ibm/ibmveth.c | 25 +
 2 files changed, 11 insertions(+), 18 deletions(-)

diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index c0c112d95b89..926c0642784d 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_IBM
 
 config IBMVETH
tristate "IBM LAN Virtual Ethernet support"
-   depends on PPC_PSERIES
+   depends on PPC_PSERIES || COMPILE_TEST
help
  This driver supports virtual ethernet adapters on newer IBM iSeries
  and pSeries systems.
@@ -40,7 +40,7 @@ config EHEA
 
 config IBMVNIC
tristate "IBM Virtual NIC support"
-   depends on PPC_PSERIES
+   depends on PPC_PSERIES || COMPILE_TEST
help
  This driver supports Virtual NIC adapters on IBM i and IBM System p
  systems.
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 3d9b4f99d357..3aedb680adb8 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -605,17 +605,13 @@ static int ibmveth_open(struct net_device *netdev)
}
 
rc = -ENOMEM;
-   adapter->bounce_buffer =
-   kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
-   if (!adapter->bounce_buffer)
-   goto out_free_irq;
 
-   adapter->bounce_buffer_dma =
-   dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
-  netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
-   if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
-   netdev_err(netdev, "unable to map bounce buffer\n");
-   goto out_free_bounce_buffer;
+   adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
+   netdev->mtu + IBMVETH_BUFF_OH,
+   &adapter->bounce_buffer_dma, GFP_KERNEL);
+   if (!adapter->bounce_buffer) {
+   netdev_err(netdev, "unable to alloc bounce buffer\n");
+   goto out_free_irq;
}
 
netdev_dbg(netdev, "initial replenish cycle\n");
@@ -627,8 +623,6 @@ static int ibmveth_open(struct net_device *netdev)
 
return 0;
 
-out_free_bounce_buffer:
-   kfree(adapter->bounce_buffer);
 out_free_irq:
free_irq(netdev->irq, netdev);
 out_free_buffer_pools:
@@ -702,10 +696,9 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
 >rx_buff_pool[i]);
 
-   dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
-adapter->netdev->mtu + IBMVETH_BUFF_OH,
-DMA_BIDIRECTIONAL);
-   kfree(adapter->bounce_buffer);
+   dma_free_coherent(&adapter->vdev->dev,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ adapter->bounce_buffer, adapter->bounce_buffer_dma);
 
netdev_dbg(netdev, "close complete\n");
 
-- 
2.25.1



[PATCH 1/3] soc: mediatek: pwrap: Make use of the helper function devm_platform_ioremap_resource_byname()

2021-09-08 Thread Cai Huoqing
Use the devm_platform_ioremap_resource_byname() helper instead of
calling platform_get_resource_byname() and devm_ioremap_resource()
separately

Signed-off-by: Cai Huoqing 
---
 drivers/soc/mediatek/mtk-pmic-wrap.c | 8 ++--
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 952bc554f443..29f1bd42f7a8 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -2116,7 +2116,6 @@ static int pwrap_probe(struct platform_device *pdev)
struct pmic_wrapper *wrp;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_slave_id = NULL;
-   struct resource *res;
 
if (np->child)
of_slave_id = of_match_node(of_slave_match_tbl, np->child);
@@ -2136,8 +2135,7 @@ static int pwrap_probe(struct platform_device *pdev)
wrp->slave = of_slave_id->data;
wrp->dev = &pdev->dev;
 
-   res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrap");
-   wrp->base = devm_ioremap_resource(wrp->dev, res);
+   wrp->base = devm_platform_ioremap_resource_byname(pdev, "pwrap");
if (IS_ERR(wrp->base))
return PTR_ERR(wrp->base);
 
@@ -2151,9 +2149,7 @@ static int pwrap_probe(struct platform_device *pdev)
}
 
if (HAS_CAP(wrp->master->caps, PWRAP_CAP_BRIDGE)) {
-   res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-   "pwrap-bridge");
-   wrp->bridge_base = devm_ioremap_resource(wrp->dev, res);
+   wrp->bridge_base = devm_platform_ioremap_resource_byname(pdev, "pwrap-bridge");
if (IS_ERR(wrp->bridge_base))
return PTR_ERR(wrp->bridge_base);
 
-- 
2.25.1
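The byname helper is essentially the two calls folded together; its approximate body (see drivers/base/platform.c for the real implementation):

	#include <linux/platform_device.h>
	#include <linux/io.h>

	/* Approximate body of the helper; the error handling lives in
	 * devm_ioremap_resource(), which turns a missing or invalid
	 * resource into an ERR_PTR value. */
	void __iomem *
	devm_platform_ioremap_resource_byname(struct platform_device *pdev,
					      const char *name)
	{
		struct resource *res;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
		return devm_ioremap_resource(&pdev->dev, res);
	}

The existing IS_ERR()/PTR_ERR() handling around wrp->base and wrp->bridge_base therefore keeps working unchanged.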



[PATCH] soc: sunxi_sram: Make use of the helper function devm_platform_ioremap_resource()

2021-09-08 Thread Cai Huoqing
Use the devm_platform_ioremap_resource() helper instead of
calling platform_get_resource() and devm_ioremap_resource()
separately

Signed-off-by: Cai Huoqing 
---
 drivers/soc/sunxi/sunxi_sram.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 42833e33a96c..a8f3876963a0 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -331,7 +331,6 @@ static struct regmap_config sunxi_sram_emac_clock_regmap = {
 
 static int sunxi_sram_probe(struct platform_device *pdev)
 {
-   struct resource *res;
struct dentry *d;
struct regmap *emac_clock;
const struct sunxi_sramc_variant *variant;
@@ -342,8 +341,7 @@ static int sunxi_sram_probe(struct platform_device *pdev)
if (!variant)
return -EINVAL;
 
-   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-   base = devm_ioremap_resource(&pdev->dev, res);
+   base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
 
-- 
2.25.1



[PATCH] soc: ixp4xx/qmgr: Make use of the helper function devm_platform_ioremap_resource()

2021-09-08 Thread Cai Huoqing
Use the devm_platform_ioremap_resource() helper instead of
calling platform_get_resource() and devm_ioremap_resource()
separately

Signed-off-by: Cai Huoqing 
---
 drivers/soc/ixp4xx/ixp4xx-qmgr.c | 6 +-
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
index 9154c7029b05..72b5a10e3104 100644
--- a/drivers/soc/ixp4xx/ixp4xx-qmgr.c
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -377,13 +377,9 @@ static int ixp4xx_qmgr_probe(struct platform_device *pdev)
int i, err;
irq_handler_t handler1, handler2;
struct device *dev = &pdev->dev;
-   struct resource *res;
int irq1, irq2;
 
-   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-   if (!res)
-   return -ENODEV;
-   qmgr_regs = devm_ioremap_resource(dev, res);
+   qmgr_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(qmgr_regs))
return PTR_ERR(qmgr_regs);
 
-- 
2.25.1
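In the qmgr case the explicit "if (!res) return -ENODEV;" check also goes away, because devm_ioremap_resource(), which the helper calls internally, already rejects a missing resource with an ERR_PTR (reported as -EINVAL in current kernels, so the error code a caller sees does change slightly). A sketch of the resulting probe shape, with illustrative names (my_probe, my_regs) rather than anything from ixp4xx-qmgr:

	#include <linux/platform_device.h>
	#include <linux/io.h>
	#include <linux/err.h>

	static void __iomem *my_regs;

	static int my_probe(struct platform_device *pdev)
	{
		/* Looks up MEM resource 0 and ioremaps it; a missing resource
		 * already comes back as an ERR_PTR, so no separate NULL check. */
		my_regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(my_regs))
			return PTR_ERR(my_regs);

		return 0;
	}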



[PATCH 1/2] soc: bcm: bcm-pmb: Make use of the helper function devm_platform_ioremap_resource()

2021-09-08 Thread Cai Huoqing
Use the devm_platform_ioremap_resource() helper instead of
calling platform_get_resource() and devm_ioremap_resource()
separately

Signed-off-by: Cai Huoqing 
---
 drivers/soc/bcm/bcm63xx/bcm-pmb.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/soc/bcm/bcm63xx/bcm-pmb.c b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
index 774465c119be..7bbe46ea5f94 100644
--- a/drivers/soc/bcm/bcm63xx/bcm-pmb.c
+++ b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
@@ -276,7 +276,6 @@ static int bcm_pmb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct bcm_pmb_pd_data *table;
const struct bcm_pmb_pd_data *e;
-   struct resource *res;
struct bcm_pmb *pmb;
int max_id;
int err;
@@ -287,8 +286,7 @@ static int bcm_pmb_probe(struct platform_device *pdev)
 
pmb->dev = dev;
 
-   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-   pmb->base = devm_ioremap_resource(&pdev->dev, res);
+   pmb->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmb->base))
return PTR_ERR(pmb->base);
 
-- 
2.25.1



[PATCH 1/2] soc: amlogic: canvas: Make use of the helper function devm_platform_ioremap_resource()

2021-09-08 Thread Cai Huoqing
Use the devm_platform_ioremap_resource() helper instead of
calling platform_get_resource() and devm_ioremap_resource()
separately

Signed-off-by: Cai Huoqing 
---
 drivers/soc/amlogic/meson-canvas.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index d0329ad170d1..383b0cfc584e 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -168,7 +168,6 @@ EXPORT_SYMBOL_GPL(meson_canvas_free);
 
 static int meson_canvas_probe(struct platform_device *pdev)
 {
-   struct resource *res;
struct meson_canvas *canvas;
struct device *dev = &pdev->dev;
 
@@ -176,8 +175,7 @@ static int meson_canvas_probe(struct platform_device *pdev)
if (!canvas)
return -ENOMEM;
 
-   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-   canvas->reg_base = devm_ioremap_resource(dev, res);
+   canvas->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(canvas->reg_base))
return PTR_ERR(canvas->reg_base);
 
-- 
2.25.1



[PATCH 1/2] soc: fsl: guts: Make use of the helper function devm_platform_ioremap_resource()

2021-09-08 Thread Cai Huoqing
Use the devm_platform_ioremap_resource() helper instead of
calling platform_get_resource() and devm_ioremap_resource()
separately

Signed-off-by: Cai Huoqing 
---
 drivers/soc/fsl/guts.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index d5e9a5f2c087..072473a16f4d 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -140,7 +140,6 @@ static int fsl_guts_probe(struct platform_device *pdev)
 {
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
-   struct resource *res;
const struct fsl_soc_die_attr *soc_die;
const char *machine;
u32 svr;
@@ -152,8 +151,7 @@ static int fsl_guts_probe(struct platform_device *pdev)
 
guts->little_endian = of_property_read_bool(np, "little-endian");
 
-   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-   guts->regs = devm_ioremap_resource(dev, res);
+   guts->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(guts->regs))
return PTR_ERR(guts->regs);
 
-- 
2.25.1



[PATCH 2/2] soc: fsl: rcpm: Make use of the helper function devm_platform_ioremap_resource()

2021-09-08 Thread Cai Huoqing
Use the devm_platform_ioremap_resource() helper instead of
calling platform_get_resource() and devm_ioremap_resource()
separately

Signed-off-by: Cai Huoqing 
---
 drivers/soc/fsl/rcpm.c | 7 +--
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
index 90d3f4060b0c..3d0cae30c769 100644
--- a/drivers/soc/fsl/rcpm.c
+++ b/drivers/soc/fsl/rcpm.c
@@ -146,7 +146,6 @@ static const struct dev_pm_ops rcpm_pm_ops = {
 static int rcpm_probe(struct platform_device *pdev)
 {
struct device   *dev = &pdev->dev;
-   struct resource *r;
struct rcpm *rcpm;
int ret;
 
@@ -154,11 +153,7 @@ static int rcpm_probe(struct platform_device *pdev)
if (!rcpm)
return -ENOMEM;
 
-   r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-   if (!r)
-   return -ENODEV;
-
-   rcpm->ippdexpcr_base = devm_ioremap_resource(&pdev->dev, r);
+   rcpm->ippdexpcr_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rcpm->ippdexpcr_base)) {
ret =  PTR_ERR(rcpm->ippdexpcr_base);
return ret;
-- 
2.25.1


