From: Zi Yan <z...@nvidia.com>

The src and dst page lists must have the same length, and the pages at
each index of the two lists must have the same size.
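
For illustration only, a minimal caller sketch is shown below. The
copy_batch() helper and its CPU fallback are hypothetical (they are not
added by this patch) and assume mm context, where copy_highpage() and
hpage_nr_pages() are available:

    /* Try the DMA engine first; fall back to CPU copies on any error. */
    static void copy_batch(struct page **dst, struct page **src, int nr)
    {
            int i, j;

            if (!copy_page_lists_dma_always(dst, src, nr))
                    return;

            /* Copy each (possibly compound) page subpage by subpage. */
            for (i = 0; i < nr; i++)
                    for (j = 0; j < hpage_nr_pages(src[i]); j++)
                            copy_highpage(dst[i] + j, src[i] + j);
    }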

Signed-off-by: Zi Yan <z...@nvidia.com>
---
 mm/copy_page.c | 166 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/internal.h  |   4 ++
 2 files changed, 170 insertions(+)

diff --git a/mm/copy_page.c b/mm/copy_page.c
index 5e7a797..84f1c02 100644
--- a/mm/copy_page.c
+++ b/mm/copy_page.c
@@ -417,3 +417,169 @@ int copy_page_dma(struct page *to, struct page *from, int nr_pages)
 
        return copy_page_dma_always(to, from, nr_pages);
 }
+
+/*
+ * Use the DMA engine to copy a list of pages to their new locations.
+ *
+ * The list is split as evenly as possible across the available DMA
+ * channels and each page is submitted as its own transfer: the pages
+ * are DMA-mapped, the copies are issued on each channel, and then the
+ * function waits for every transfer to complete.
+ */
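+/*
+ * Illustration of the split (comment only, no functional change): with
+ * nr_items = 10 and 4 available channels, nr_items / 4 = 2 with a
+ * remainder of 2, so channels 0 and 1 handle 3 pages each and channels
+ * 2 and 3 handle 2 pages each (3 + 3 + 2 + 2 = 10).
+ */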
+int copy_page_lists_dma_always(struct page **to, struct page **from,
+               int nr_items)
+{
+       struct dma_async_tx_descriptor **tx = NULL;
+       dma_cookie_t *cookie = NULL;
+       enum dma_ctrl_flags flags[NUM_AVAIL_DMA_CHAN] = {0};
+       struct dmaengine_unmap_data *unmap[NUM_AVAIL_DMA_CHAN] = {0};
+       int ret_val = 0;
+       int total_available_chans = NUM_AVAIL_DMA_CHAN;
+       int i;
+       int page_idx;
+
+       for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i) {
+               if (!copy_chan[i]) {
+                       total_available_chans = i;
+                       break;
+               }
+       }
+       if (total_available_chans != NUM_AVAIL_DMA_CHAN) {
+               pr_err("%d channels are missing\n",
+                      NUM_AVAIL_DMA_CHAN - total_available_chans);
+       }
+       if (limit_dma_chans < total_available_chans)
+               total_available_chans = limit_dma_chans;
+
+       /* round down to the nearest power of two */
+       total_available_chans = 1 << ilog2(total_available_chans);
+
+       total_available_chans = min_t(int, total_available_chans, nr_items);
+
+       tx = kcalloc(nr_items, sizeof(*tx), GFP_KERNEL);
+       if (!tx) {
+               ret_val = -ENOMEM;
+               goto out;
+       }
+       cookie = kcalloc(nr_items, sizeof(*cookie), GFP_KERNEL);
+       if (!cookie) {
+               ret_val = -ENOMEM;
+               goto out_free_tx;
+       }
+
+       for (i = 0; i < total_available_chans; ++i) {
+               int num_xfer_per_dev = nr_items / total_available_chans;
+
+               if (i < (nr_items % total_available_chans))
+                       num_xfer_per_dev += 1;
+
+               if (num_xfer_per_dev > 128) {
+                       ret_val = -ENOMEM;
+                       pr_err("%s: too many pages to be transferred\n",
+                              __func__);
+                       goto out_free_both;
+               }
+
+               unmap[i] = dmaengine_get_unmap_data(copy_dev[i]->dev,
+                                               2 * num_xfer_per_dev, GFP_NOWAIT);
+               if (!unmap[i]) {
+                       pr_err("%s: no unmap data at chan %d\n", __func__, i);
+                       ret_val = -ENODEV;
+                       goto unmap_dma;
+               }
+       }
+
+       page_idx = 0;
+       for (i = 0; i < total_available_chans; ++i) {
+               int num_xfer_per_dev = nr_items / total_available_chans;
+               int xfer_idx;
+
+               if (i < (nr_items % total_available_chans))
+                       num_xfer_per_dev += 1;
+
+               unmap[i]->to_cnt = num_xfer_per_dev;
+               unmap[i]->from_cnt = num_xfer_per_dev;
+               /* every page in this channel's batch must have this size */
+               unmap[i]->len = hpage_nr_pages(from[page_idx]) * PAGE_SIZE;
+
+               for (xfer_idx = 0; xfer_idx < num_xfer_per_dev;
+                               ++xfer_idx, ++page_idx) {
+                       size_t page_len = hpage_nr_pages(from[page_idx]) * PAGE_SIZE;
+
+                       BUG_ON(page_len != hpage_nr_pages(to[page_idx]) * PAGE_SIZE);
+                       BUG_ON(unmap[i]->len != page_len);
+
+                       unmap[i]->addr[xfer_idx] =
+                                dma_map_page(copy_dev[i]->dev, from[page_idx],
+                                                         0,
+                                                         page_len,
+                                                         DMA_TO_DEVICE);
+
+                       unmap[i]->addr[xfer_idx+num_xfer_per_dev] =
+                                dma_map_page(copy_dev[i]->dev, to[page_idx],
+                                                         0,
+                                                         page_len,
+                                                         DMA_FROM_DEVICE);
+               }
+       }
+
+       page_idx = 0;
+       for (i = 0; i < total_available_chans; ++i) {
+               int num_xfer_per_dev = nr_items / total_available_chans;
+               int xfer_idx;
+
+               if (i < (nr_items % total_available_chans))
+                       num_xfer_per_dev += 1;
+
+               for (xfer_idx = 0; xfer_idx < num_xfer_per_dev;
+                               ++xfer_idx, ++page_idx) {
+
+                       tx[page_idx] = copy_dev[i]->device_prep_dma_memcpy(
+                                       copy_chan[i],
+                                       unmap[i]->addr[xfer_idx + num_xfer_per_dev],
+                                       unmap[i]->addr[xfer_idx],
+                                       unmap[i]->len, flags[i]);
+                       if (!tx[page_idx]) {
+                               pr_err("%s: no tx descriptor at chan %d xfer %d\n",
+                                      __func__, i, xfer_idx);
+                               ret_val = -ENODEV;
+                               goto unmap_dma;
+                       }
+
+                       cookie[page_idx] = tx[page_idx]->tx_submit(tx[page_idx]);
+
+                       if (dma_submit_error(cookie[page_idx])) {
+                               pr_err("%s: submission error at chan %d xfer %d\n",
+                                      __func__, i, xfer_idx);
+                               ret_val = -ENODEV;
+                               goto unmap_dma;
+                       }
+               }
+
+               dma_async_issue_pending(copy_chan[i]);
+       }
+
+       page_idx = 0;
+       for (i = 0; i < total_available_chans; ++i) {
+               int num_xfer_per_dev = nr_items / total_available_chans;
+               int xfer_idx;
+
+               if (i < (nr_items % total_available_chans))
+                       num_xfer_per_dev += 1;
+
+               for (xfer_idx = 0; xfer_idx < num_xfer_per_dev;
+                               ++xfer_idx, ++page_idx) {
+
+                       if (dma_sync_wait(copy_chan[i], cookie[page_idx]) !=
+                                       DMA_COMPLETE) {
+                               ret_val = -ENXIO;
+                               pr_err("%s: dma does not complete at chan %d, xfer %d\n",
+                                      __func__, i, xfer_idx);
+                       }
+               }
+       }
+
+unmap_dma:
+       for (i = 0; i < total_available_chans; ++i) {
+               if (unmap[i])
+                       dmaengine_unmap_put(unmap[i]);
+       }
+
+out_free_both:
+       kfree(cookie);
+out_free_tx:
+       kfree(tx);
+out:
+
+       return ret_val;
+}
diff --git a/mm/internal.h b/mm/internal.h
index 9eeaf2b..cb1a610 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -555,4 +555,8 @@ static inline bool is_migrate_highatomic_page(struct page *page)
 
 void setup_zone_pageset(struct zone *zone);
 extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
+
+extern int copy_page_lists_dma_always(struct page **to,
+                       struct page **from, int nr_items);
+
 #endif /* __MM_INTERNAL_H */
-- 
2.7.4
