From: Zi Yan <zi.yan@sent.com>
To: linux-mm@kvack.org
Cc: dnellans@nvidia.com, apopple@au1.ibm.com,
	paulmck@linux.vnet.ibm.com, khandual@linux.vnet.ibm.com,
	zi.yan@cs.rutgers.edu
Subject: [RFC PATCH 14/14] mm: Add copy_page_lists_dma_always to support copying a list of pages.
Date: Fri, 17 Feb 2017 10:05:51 -0500	[thread overview]
Message-ID: <20170217150551.117028-15-zi.yan@sent.com> (raw)
In-Reply-To: <20170217150551.117028-1-zi.yan@sent.com>

From: Zi Yan <ziy@nvidia.com>

The src and dst page lists must have the same length, and the pages at
each index in the two lists must be the same size.

Signed-off-by: Zi Yan <ziy@nvidia.com>
---
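
Note for reviewers, not part of the patch: a minimal sketch of the
caller-side contract stated above. The helper name is illustrative only
and assumes hpage_nr_pages() from <linux/huge_mm.h>; it shows what a
caller would verify before handing the two arrays to
copy_page_lists_dma_always():

/* Illustrative sketch only, not applied by this patch. */
static bool page_lists_match(struct page **to, struct page **from,
			     int nr_pages)
{
	int i;

	/* same length is implied by nr_pages; sizes must match pairwise */
	for (i = 0; i < nr_pages; i++)
		if (hpage_nr_pages(to[i]) != hpage_nr_pages(from[i]))
			return false;
	return true;
}
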
 mm/copy_pages.c | 158 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 mm/internal.h   |   3 ++
 mm/migrate.c    |   5 +-
 3 files changed, 165 insertions(+), 1 deletion(-)
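
Also for reviewers, not part of the patch: the page-to-channel mapping
used by copy_page_lists_dma_always(), written out on its own. With
nchans usable channels after the clamping done in the function, channel
i copies pages i, i + nchans, i + 2*nchans, ..., so the first
nr_pages % nchans channels handle one extra page. A sketch, with
hypothetical helper names:

/* Illustrative sketch of the round-robin split, not applied by this patch. */
static inline int xfers_for_chan(int chan, int nchans, int nr_pages)
{
	/* base share, plus one extra for the first (nr_pages % nchans) channels */
	return nr_pages / nchans + (chan < nr_pages % nchans ? 1 : 0);
}

static inline int page_idx_for(int chan, int xfer, int nchans)
{
	/* channel "chan" handles pages chan, chan + nchans, chan + 2*nchans, ... */
	return chan + xfer * nchans;
}
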

diff --git a/mm/copy_pages.c b/mm/copy_pages.c
index f135bf505183..cf674840a830 100644
--- a/mm/copy_pages.c
+++ b/mm/copy_pages.c
@@ -560,3 +560,161 @@ int copy_page_dma(struct page *to, struct page *from, int nr_pages)
 
 	return copy_page_dma_always(to, from, nr_pages);
 }
+
+/*
+ * Use DMA engines to copy a list of pages to a new location.
+ *
+ * Each page is handed to one DMA channel, round-robin across the
+ * available channels.
+ */
+int copy_page_lists_dma_always(struct page **to, struct page **from, int nr_pages)
+{
+	struct dma_async_tx_descriptor **tx = NULL;
+	dma_cookie_t *cookie = NULL;
+	enum dma_ctrl_flags flags[NUM_AVAIL_DMA_CHAN] = {0};
+	struct dmaengine_unmap_data *unmap[NUM_AVAIL_DMA_CHAN] = {0};
+	int ret_val = 0;
+	int total_available_chans = NUM_AVAIL_DMA_CHAN;
+	int i;
+
+	for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i) {
+		if (!copy_chan[i])
+			total_available_chans = i;
+	}
+	if (total_available_chans != NUM_AVAIL_DMA_CHAN)
+		pr_err("%d channels are missing\n", NUM_AVAIL_DMA_CHAN - total_available_chans);
+
+	if (limit_dma_chans < total_available_chans)
+		total_available_chans = limit_dma_chans;
+
+	/* round down to the nearest power of 2 */
+	total_available_chans = 1<<ilog2(total_available_chans);
+
+	total_available_chans = min_t(int, total_available_chans, nr_pages);
+
+
+	tx = kzalloc(sizeof(struct dma_async_tx_descriptor*)*nr_pages, GFP_KERNEL);
+	if (!tx) {
+		ret_val = -ENOMEM;
+		goto out;
+	}
+	cookie = kzalloc(sizeof(dma_cookie_t)*nr_pages, GFP_KERNEL);
+	if (!cookie) {
+		ret_val = -ENOMEM;
+		goto out_free_tx;
+	}
+
+
+	for (i = 0; i < total_available_chans; ++i) {
+		int num_xfer_per_dev = nr_pages / total_available_chans;
+
+		if (i < (nr_pages % total_available_chans))
+			num_xfer_per_dev += 1;
+
+		unmap[i] = dmaengine_get_unmap_data(copy_dev[i]->dev, 
+						2*num_xfer_per_dev, GFP_NOWAIT);
+		if (!unmap[i]) {
+			pr_err("%s: no unmap data at chan %d\n", __func__, i);
+			ret_val = -ENODEV;
+			goto unmap_dma;
+		}
+	}
+
+	for (i = 0; i < total_available_chans; ++i) {
+		int num_xfer_per_dev = nr_pages / total_available_chans;
+		int xfer_idx;
+
+		if (i < (nr_pages % total_available_chans))
+			num_xfer_per_dev += 1;
+
+		unmap[i]->to_cnt = num_xfer_per_dev;
+		unmap[i]->from_cnt = num_xfer_per_dev;
+		unmap[i]->len = hpage_nr_pages(from[i]) * PAGE_SIZE; 
+
+		for (xfer_idx = 0; xfer_idx < num_xfer_per_dev; ++xfer_idx) {
+			int page_idx = i + xfer_idx * total_available_chans;
+			size_t page_len = hpage_nr_pages(from[page_idx]) * PAGE_SIZE;
+
+			BUG_ON(page_len != hpage_nr_pages(to[page_idx]) * PAGE_SIZE);
+			BUG_ON(unmap[i]->len != page_len);
+
+			unmap[i]->addr[xfer_idx] = 
+				 dma_map_page(copy_dev[i]->dev, from[page_idx], 
+							  0,
+							  page_len,
+							  DMA_TO_DEVICE);
+
+			unmap[i]->addr[xfer_idx+num_xfer_per_dev] = 
+				 dma_map_page(copy_dev[i]->dev, to[page_idx], 
+							  0,
+							  page_len,
+							  DMA_FROM_DEVICE);
+		}
+	}
+
+	for (i = 0; i < total_available_chans; ++i) {
+		int num_xfer_per_dev = nr_pages / total_available_chans;
+		int xfer_idx;
+
+		if (i < (nr_pages % total_available_chans))
+			num_xfer_per_dev += 1;
+
+		for (xfer_idx = 0; xfer_idx < num_xfer_per_dev; ++xfer_idx) {
+			int page_idx = i + xfer_idx * total_available_chans;
+
+			tx[page_idx] = copy_dev[i]->device_prep_dma_memcpy(copy_chan[i], 
+								unmap[i]->addr[xfer_idx + num_xfer_per_dev],
+								unmap[i]->addr[xfer_idx], 
+								unmap[i]->len,
+								flags[i]);
+			if (!tx[page_idx]) {
+				pr_err("%s: no tx descriptor at chan %d xfer %d\n", 
+					   __func__, i, xfer_idx);
+				ret_val = -ENODEV;
+				goto unmap_dma;
+			}
+
+			cookie[page_idx] = tx[page_idx]->tx_submit(tx[page_idx]);
+
+			if (dma_submit_error(cookie[page_idx])) {
+				pr_err("%s: submission error at chan %d xfer %d\n",
+					   __func__, i, xfer_idx);
+				ret_val = -ENODEV;
+				goto unmap_dma;
+			}
+		}
+
+		dma_async_issue_pending(copy_chan[i]);
+	}
+
+	for (i = 0; i < total_available_chans; ++i) {
+		int num_xfer_per_dev = nr_pages / total_available_chans;
+		int xfer_idx;
+
+		if (i < (nr_pages % total_available_chans))
+			num_xfer_per_dev += 1;
+
+		for (xfer_idx = 0; xfer_idx < num_xfer_per_dev; ++xfer_idx) {
+			int page_idx = i + xfer_idx * total_available_chans;
+
+			if (dma_sync_wait(copy_chan[i], cookie[page_idx]) != DMA_COMPLETE) {
+				ret_val = -EFAULT;
+				pr_err("%s: dma does not complete at chan %d, xfer %d\n",
+					   __func__, i, xfer_idx);
+			}
+		}
+	}
+
+unmap_dma:
+	for (i = 0; i < total_available_chans; ++i) {
+		if (unmap[i])
+			dmaengine_unmap_put(unmap[i]);
+	}
+
+	kfree(cookie);
+out_free_tx:
+	kfree(tx);
+out:
+
+	return ret_val;
+}
diff --git a/mm/internal.h b/mm/internal.h
index b99a634b4d09..32048e89bfda 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -500,10 +500,13 @@ extern const struct trace_print_flags gfpflag_names[];
 
 extern int copy_page_lists_mthread(struct page **to,
 			struct page **from, int nr_pages);
+extern int copy_page_lists_dma_always(struct page **to,
+			struct page **from, int nr_pages);
 
 extern int exchange_page_mthread(struct page *to, struct page *from,
 			int nr_pages);
 extern int exchange_page_lists_mthread(struct page **to,
 						  struct page **from, 
 						  int nr_pages);
+
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/migrate.c b/mm/migrate.c
index 464bc9ba8083..63e44ac65184 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1611,7 +1611,10 @@ static int copy_to_new_pages_concur(struct list_head *unmapped_list_ptr,
 
 	BUG_ON(idx != num_pages);
 	
-	if (mode & MIGRATE_MT)
+	if (mode & MIGRATE_DMA)
+		rc = copy_page_lists_dma_always(dst_page_list, src_page_list,
+							num_pages);
+	else if (mode & MIGRATE_MT)
 		rc = copy_page_lists_mthread(dst_page_list, src_page_list,
 							num_pages);
 
-- 
2.11.0

Thread overview: 25+ messages
2017-02-17 15:05 [RFC PATCH 00/14] Accelerating page migrations Zi Yan
2017-02-17 15:05 ` [RFC PATCH 01/14] mm/migrate: Add new mode parameter to migrate_page_copy() function Zi Yan
2017-02-17 15:05 ` [RFC PATCH 02/14] mm/migrate: Make migrate_mode types non-exclussive Zi Yan
2017-02-17 15:05 ` [RFC PATCH 03/14] mm/migrate: Add copy_pages_mthread function Zi Yan
2017-02-23  6:06   ` Naoya Horiguchi
2017-02-23  7:50     ` Anshuman Khandual
2017-02-23  8:02       ` Naoya Horiguchi
2017-03-09  5:35         ` Anshuman Khandual
2017-02-17 15:05 ` [RFC PATCH 04/14] mm/migrate: Add new migrate mode MIGRATE_MT Zi Yan
2017-02-23  6:54   ` Naoya Horiguchi
2017-02-23  7:54     ` Anshuman Khandual
2017-02-17 15:05 ` [RFC PATCH 05/14] mm/migrate: Add new migration flag MPOL_MF_MOVE_MT for syscalls Zi Yan
2017-02-17 15:05 ` [RFC PATCH 06/14] sysctl: Add global tunable mt_page_copy Zi Yan
2017-02-17 15:05 ` [RFC PATCH 07/14] migrate: Add copy_page_lists_mthread() function Zi Yan
2017-02-23  8:54   ` Naoya Horiguchi
2017-03-09 13:02     ` Anshuman Khandual
2017-02-17 15:05 ` [RFC PATCH 08/14] mm: migrate: Add concurrent page migration into move_pages syscall Zi Yan
2017-02-24  8:25   ` Naoya Horiguchi
2017-02-24 15:05     ` Zi Yan
2017-02-17 15:05 ` [RFC PATCH 09/14] mm: migrate: Add exchange_page_mthread() and exchange_page_lists_mthread() to exchange two pages or two page lists Zi Yan
2017-02-17 15:05 ` [RFC PATCH 10/14] mm: Add exchange_pages and exchange_pages_concur functions to exchange two lists of pages instead of two migrate_pages() Zi Yan
2017-02-17 15:05 ` [RFC PATCH 11/14] mm: migrate: Add exchange_pages syscall to exchange two page lists Zi Yan
2017-02-17 15:05 ` [RFC PATCH 12/14] migrate: Add copy_page_dma to use DMA Engine to copy pages Zi Yan
2017-02-17 15:05 ` [RFC PATCH 13/14] mm: migrate: Add copy_page_dma into migrate_page_copy Zi Yan
2017-02-17 15:05 ` Zi Yan [this message]
