From: Zi Yan <zi.yan@sent.com>
To: linux-mm@kvack.org
Cc: dnellans@nvidia.com, apopple@au1.ibm.com,
paulmck@linux.vnet.ibm.com, khandual@linux.vnet.ibm.com,
zi.yan@cs.rutgers.edu
Subject: [RFC PATCH 12/14] migrate: Add copy_page_dma to use DMA Engine to copy pages.
Date: Fri, 17 Feb 2017 10:05:49 -0500
Message-ID: <20170217150551.117028-13-zi.yan@sent.com>
In-Reply-To: <20170217150551.117028-1-zi.yan@sent.com>
From: Zi Yan <ziy@nvidia.com>
Add copy_page_dma() so page migration can offload page copies to a DMA
engine instead of the CPU. Two new sysctls control the DMA path:

  vm.use_all_dma_chans - when set to 1, grab all usable DMA_MEMCPY
                         channels up front; when cleared, release them
  vm.limit_dma_chans   - cap how many of the grabbed channels are used
                         for a single page copy
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
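A minimal sketch of how this copy path is expected to be consumed:
patch 13/14 in this series wires copy_page_dma() into
migrate_page_copy(). The wrapper name and the MIGRATE_DMA mode bit
below are illustrative assumptions, not part of this patch; the
fallback mirrors the multi-threaded CPU copy added earlier in the
series:

	/*
	 * Illustrative sketch only: try the DMA engine first, fall back
	 * to the multi-threaded CPU copy when the DMA path returns an
	 * error.
	 */
	static void copy_huge_page_accel(struct page *dst, struct page *src,
					 int nr_pages, enum migrate_mode mode)
	{
		/* MIGRATE_DMA is a hypothetical mode bit, for illustration */
		if ((mode & MIGRATE_DMA) && !copy_page_dma(dst, src, nr_pages))
			return;	/* DMA engine copied all nr_pages pages */

		copy_pages_mthread(dst, src, nr_pages);
	}

Both knobs live under /proc/sys/vm: writing 1 to use_all_dma_chans
grabs the DMA_MEMCPY channels up front so a copy does not have to
request a channel each time, and limit_dma_chans caps how many of the
grabbed channels a single copy fans out across (copy_page_dma_always()
rounds that down to a power of two).
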
include/linux/highmem.h | 1 +
include/linux/sched/sysctl.h | 4 +
kernel/sysctl.c | 21 ++++
mm/copy_pages.c | 281 +++++++++++++++++++++++++++++++++++++++++++
4 files changed, 307 insertions(+)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index e1f4f1b82812..1388ff5d0e53 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -237,6 +237,7 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
#endif
int copy_pages_mthread(struct page *to, struct page *from, int nr_pages);
+int copy_page_dma(struct page *to, struct page *from, int nr_pages);
static inline void copy_highpage(struct page *to, struct page *from)
{
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 22db1e63707e..d5efb4093386 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -78,4 +78,8 @@ extern int sysctl_schedstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
+extern int sysctl_dma_page_migration(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
#endif /* _SCHED_SYSCTL_H */
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 70a654146519..55c812c313b8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -99,6 +99,10 @@
extern int mt_page_copy;
+extern int use_all_dma_chans;
+extern int limit_dma_chans;
+
+
/* External variables not in a header file. */
extern int suid_dumpable;
#ifdef CONFIG_COREDUMP
@@ -1372,6 +1376,23 @@ static struct ctl_table vm_table[] = {
.extra2 = &one,
},
{
+ .procname = "use_all_dma_chans",
+ .data = &use_all_dma_chans,
+ .maxlen = sizeof(use_all_dma_chans),
+ .mode = 0644,
+ .proc_handler = sysctl_dma_page_migration,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
+ .procname = "limit_dma_chans",
+ .data = &limit_dma_chans,
+ .maxlen = sizeof(limit_dma_chans),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ },
+ {
.procname = "hugetlb_shm_group",
.data = &sysctl_hugetlb_shm_group,
.maxlen = sizeof(gid_t),
diff --git a/mm/copy_pages.c b/mm/copy_pages.c
index 879e2d944ad0..f135bf505183 100644
--- a/mm/copy_pages.c
+++ b/mm/copy_pages.c
@@ -10,7 +10,16 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/freezer.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#define NUM_AVAIL_DMA_CHAN 16
+
+int use_all_dma_chans = 0;
+int limit_dma_chans = NUM_AVAIL_DMA_CHAN;
+
+struct dma_chan *copy_chan[NUM_AVAIL_DMA_CHAN] = {0};
+struct dma_device *copy_dev[NUM_AVAIL_DMA_CHAN] = {0};
/*
* nr_copythreads can be the highest number of threads for given node
* on any architecture. The actual number of copy threads will be
@@ -279,3 +288,275 @@ int exchange_page_lists_mthread(struct page **to, struct page **from,
return err;
}
+
+#ifdef CONFIG_PROC_SYSCTL
+int sysctl_dma_page_migration(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int err = 0;
+ int use_all_dma_chans_prior_val = use_all_dma_chans;
+ dma_cap_mask_t copy_mask;
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (err < 0)
+ return err;
+ if (write) {
+ /* Grab all DMA channels */
+ if (use_all_dma_chans_prior_val == 0 && use_all_dma_chans == 1) {
+ int i;
+
+ dma_cap_zero(copy_mask);
+ dma_cap_set(DMA_MEMCPY, copy_mask);
+
+ dmaengine_get();
+ for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i) {
+ if (!copy_chan[i])
+ copy_chan[i] = dma_request_channel(copy_mask, NULL, NULL);
+ if (!copy_chan[i]) {
+ pr_err("%s: cannot grab channel: %d\n", __func__, i);
+ continue;
+ }
+
+ copy_dev[i] = copy_chan[i]->device;
+
+ if (!copy_dev[i]) {
+ pr_err("%s: no device: %d\n", __func__, i);
+ continue;
+ }
+ }
+
+ }
+ /* Release all DMA channels */
+ else if (use_all_dma_chans_prior_val == 1 && use_all_dma_chans == 0) {
+ int i;
+
+ for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i) {
+ if (copy_chan[i]) {
+ dma_release_channel(copy_chan[i]);
+ copy_chan[i] = NULL;
+ copy_dev[i] = NULL;
+ }
+ }
+
+ dmaengine_put();
+ }
+
+ if (err)
+ use_all_dma_chans = use_all_dma_chans_prior_val;
+ }
+ return err;
+}
+
+#endif
+
+static int copy_page_dma_once(struct page *to, struct page *from, int nr_pages)
+{
+ static struct dma_chan *copy_chan = NULL;
+ struct dma_device *device = NULL;
+ struct dma_async_tx_descriptor *tx = NULL;
+ dma_cookie_t cookie;
+ enum dma_ctrl_flags flags = 0;
+ struct dmaengine_unmap_data *unmap = NULL;
+ dma_cap_mask_t mask;
+ int ret_val = 0;
+
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ dmaengine_get();
+
+ copy_chan = dma_request_channel(mask, NULL, NULL);
+
+ if (!copy_chan) {
+ pr_err("%s: cannot get a channel\n", __func__);
+ ret_val = -1;
+ goto no_chan;
+ }
+
+ device = copy_chan->device;
+
+ if (!device) {
+ pr_err("%s: cannot get a device\n", __func__);
+ ret_val = -2;
+ goto release;
+ }
+
+ unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
+
+ if (!unmap) {
+ pr_err("%s: cannot get unmap data\n", __func__);
+ ret_val = -3;
+ goto release;
+ }
+
+ unmap->to_cnt = 1;
+ unmap->addr[0] = dma_map_page(device->dev, from, 0, PAGE_SIZE*nr_pages,
+ DMA_TO_DEVICE);
+ unmap->from_cnt = 1;
+ unmap->addr[1] = dma_map_page(device->dev, to, 0, PAGE_SIZE*nr_pages,
+ DMA_FROM_DEVICE);
+ unmap->len = PAGE_SIZE*nr_pages;
+
+ tx = device->device_prep_dma_memcpy(copy_chan,
+ unmap->addr[1],
+ unmap->addr[0], unmap->len,
+ flags);
+
+ if (!tx) {
+ pr_err("%s: null tx descriptor\n", __func__);
+ ret_val = -4;
+ goto unmap_dma;
+ }
+
+ cookie = tx->tx_submit(tx);
+
+ if (dma_submit_error(cookie)) {
+ pr_err("%s: submission error\n", __func__);
+ ret_val = -5;
+ goto unmap_dma;
+ }
+
+ if (dma_sync_wait(copy_chan, cookie) != DMA_COMPLETE) {
+ pr_err("%s: dma does not complete properly\n", __func__);
+ ret_val = -6;
+ }
+
+unmap_dma:
+ dmaengine_unmap_put(unmap);
+release:
+ if (copy_chan) {
+ dma_release_channel(copy_chan);
+ }
+no_chan:
+ dmaengine_put();
+
+ return ret_val;
+}
+
+static int copy_page_dma_always(struct page *to, struct page *from, int nr_pages)
+{
+ struct dma_async_tx_descriptor *tx[NUM_AVAIL_DMA_CHAN] = {0};
+ dma_cookie_t cookie[NUM_AVAIL_DMA_CHAN];
+ enum dma_ctrl_flags flags[NUM_AVAIL_DMA_CHAN] = {0};
+ struct dmaengine_unmap_data *unmap[NUM_AVAIL_DMA_CHAN] = {0};
+ int ret_val = 0;
+ int total_available_chans = NUM_AVAIL_DMA_CHAN;
+ int i;
+ size_t page_offset;
+
+ for (i = 0; i < NUM_AVAIL_DMA_CHAN; ++i) {
+ if (!copy_chan[i])
+ total_available_chans = i;
+ }
+ if (total_available_chans != NUM_AVAIL_DMA_CHAN) {
+ pr_err("%d channels are missing", NUM_AVAIL_DMA_CHAN - total_available_chans);
+ }
+
+ total_available_chans = min_t(int, total_available_chans, limit_dma_chans);
+
+ /* round down to closest 2^x value */
+ total_available_chans = 1<<ilog2(total_available_chans);
+
+ if ((nr_pages != 1) && (nr_pages % total_available_chans != 0))
+ return -EFAULT;
+
+ for (i = 0; i < total_available_chans; ++i) {
+ unmap[i] = dmaengine_get_unmap_data(copy_dev[i]->dev, 2, GFP_NOWAIT);
+ if (!unmap[i]) {
+ pr_err("%s: no unmap data at chan %d\n", __func__, i);
+ ret_val = -EFAULT;
+ goto unmap_dma;
+ }
+ }
+
+ for (i = 0; i < total_available_chans; ++i) {
+ if (nr_pages == 1) {
+ page_offset = PAGE_SIZE / total_available_chans;
+
+ unmap[i]->to_cnt = 1;
+ unmap[i]->addr[0] = dma_map_page(copy_dev[i]->dev, from, page_offset*i,
+ page_offset,
+ DMA_TO_DEVICE);
+ unmap[i]->from_cnt = 1;
+ unmap[i]->addr[1] = dma_map_page(copy_dev[i]->dev, to, page_offset*i,
+ page_offset,
+ DMA_FROM_DEVICE);
+ unmap[i]->len = page_offset;
+ } else {
+ page_offset = nr_pages / total_available_chans;
+
+ unmap[i]->to_cnt = 1;
+ unmap[i]->addr[0] = dma_map_page(copy_dev[i]->dev,
+ from + page_offset*i,
+ 0,
+ PAGE_SIZE*page_offset,
+ DMA_TO_DEVICE);
+ unmap[i]->from_cnt = 1;
+ unmap[i]->addr[1] = dma_map_page(copy_dev[i]->dev,
+ to + page_offset*i,
+ 0,
+ PAGE_SIZE*page_offset,
+ DMA_FROM_DEVICE);
+ unmap[i]->len = PAGE_SIZE*page_offset;
+ }
+ }
+
+ for (i = 0; i < total_available_chans; ++i) {
+ tx[i] = copy_dev[i]->device_prep_dma_memcpy(copy_chan[i],
+ unmap[i]->addr[1],
+ unmap[i]->addr[0],
+ unmap[i]->len,
+ flags[i]);
+ if (!tx[i]) {
+ pr_err("%s: no tx descriptor at chan %d\n", __func__, i);
+ ret_val = -EFAULT;
+ goto unmap_dma;
+ }
+ }
+
+ for (i = 0; i < total_available_chans; ++i) {
+ cookie[i] = tx[i]->tx_submit(tx[i]);
+
+ if (dma_submit_error(cookie[i])) {
+ pr_err("%s: submission error at chan %d\n", __func__, i);
+ ret_val = -EFAULT;
+ goto unmap_dma;
+ }
+
+ dma_async_issue_pending(copy_chan[i]);
+ }
+
+ for (i = 0; i < total_available_chans; ++i) {
+ if (dma_sync_wait(copy_chan[i], cookie[i]) != DMA_COMPLETE) {
+ ret_val = -EFAULT;
+ pr_err("%s: dma does not complete at chan %d\n", __func__, i);
+ }
+ }
+
+unmap_dma:
+
+ for (i = 0; i < total_available_chans; ++i) {
+ if (unmap[i])
+ dmaengine_unmap_put(unmap[i]);
+ }
+
+ return ret_val;
+}
+
+int copy_page_dma(struct page *to, struct page *from, int nr_pages)
+{
+ BUG_ON(hpage_nr_pages(from) != nr_pages);
+ BUG_ON(hpage_nr_pages(to) != nr_pages);
+
+ if (!use_all_dma_chans) {
+ return copy_page_dma_once(to, from, nr_pages);
+ }
+
+ return copy_page_dma_always(to, from, nr_pages);
+}
--
2.11.0