From: Shivank Garg <shivankg@amd.com>
To: <akpm@linux-foundation.org>, <linux-kernel@vger.kernel.org>,
<linux-mm@kvack.org>
Cc: <bharata@amd.com>, <raghavendra.kodsarathimmappa@amd.com>,
<Michael.Day@amd.com>, <dmaengine@vger.kernel.org>,
<vkoul@kernel.org>, <shivankg@amd.com>,
Mike Day <michael.day@amd.com>
Subject: [RFC PATCH 4/5] mm: add support for DMA folio Migration
Date: Sat, 15 Jun 2024 03:45:24 +0530 [thread overview]
Message-ID: <20240614221525.19170-5-shivankg@amd.com> (raw)
In-Reply-To: <20240614221525.19170-1-shivankg@amd.com>
From: Mike Day <michael.day@amd.com>
DMA drivers should implement the following functions to enable folio
migration offloading:
migrate_dma() - This function takes src and dst folios list undergoing
migration. It is responsible for transfer of page content between the
src and dst folios.
can_migrate_dma() - It performs the necessary checks to determine whether
DMA migration is supported for the given src and dst folios.
The DMA driver should include a mechanism to call start_offloading() and
stop_offloading() for enabling and disabling migration offload, respectively.
Signed-off-by: Mike Day <michael.day@amd.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
---
include/linux/migrate_dma.h | 36 ++++++++++++++++++++++++++
mm/Kconfig | 8 ++++++
mm/Makefile | 1 +
mm/migrate.c | 40 +++++++++++++++++++++++++++--
mm/migrate_dma.c | 51 +++++++++++++++++++++++++++++++++++++
5 files changed, 134 insertions(+), 2 deletions(-)
create mode 100644 include/linux/migrate_dma.h
create mode 100644 mm/migrate_dma.c
diff --git a/include/linux/migrate_dma.h b/include/linux/migrate_dma.h
new file mode 100644
index 000000000000..307b234450c3
--- /dev/null
+++ b/include/linux/migrate_dma.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _MIGRATE_DMA_H
+#define _MIGRATE_DMA_H
+#include <linux/migrate_mode.h>
+
+#define MIGRATOR_NAME_LEN 32
+struct migrator {
+ char name[MIGRATOR_NAME_LEN];
+ void (*migrate_dma)(struct list_head *dst_list, struct list_head *src_list);
+ bool (*can_migrate_dma)(struct folio *dst, struct folio *src);
+ struct rcu_head srcu_head;
+ struct module *owner;
+};
+
+extern struct migrator migrator;
+extern struct mutex migrator_mut;
+extern struct srcu_struct mig_srcu;
+
+#ifdef CONFIG_DMA_MIGRATION
+void srcu_mig_cb(struct rcu_head *head);
+void dma_update_migrator(struct migrator *mig);
+unsigned char *get_active_migrator_name(void);
+bool can_dma_migrate(struct folio *dst, struct folio *src);
+void start_offloading(struct migrator *migrator);
+void stop_offloading(void);
+#else
+static inline void srcu_mig_cb(struct rcu_head *head) { };
+static inline void dma_update_migrator(struct migrator *mig) { };
+static inline unsigned char *get_active_migrator_name(void) { return NULL; };
+static inline bool can_dma_migrate(struct folio *dst, struct folio *src) {return true; };
+static inline void start_offloading(struct migrator *migrator) { };
+static inline void stop_offloading(void) { };
+#endif /* CONFIG_DMA_MIGRATION */
+
+#endif /* _MIGRATE_DMA_H */
diff --git a/mm/Kconfig b/mm/Kconfig
index ffc3a2ba3a8c..e3ff6583fedb 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -662,6 +662,14 @@ config MIGRATION
config DEVICE_MIGRATION
def_bool MIGRATION && ZONE_DEVICE
+config DMA_MIGRATION
+ bool "Migrate Pages offloading copy to DMA"
+ def_bool n
+ depends on MIGRATION
+ help
An interface allowing external modules or drivers to offload
the page copying step of page migration.
+
config ARCH_ENABLE_HUGEPAGE_MIGRATION
bool
diff --git a/mm/Makefile b/mm/Makefile
index e4b5b75aaec9..1e31fb79d700 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -87,6 +87,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_FAIL_PAGE_ALLOC) += fail_page_alloc.o
obj-$(CONFIG_MEMTEST) += memtest.o
obj-$(CONFIG_MIGRATION) += migrate.o
+obj-$(CONFIG_DMA_MIGRATION) += migrate_dma.o
obj-$(CONFIG_NUMA) += memory-tiers.o
obj-$(CONFIG_DEVICE_MIGRATION) += migrate_device.o
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o
diff --git a/mm/migrate.c b/mm/migrate.c
index fce69a494742..db826e3862a1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -50,6 +50,7 @@
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
+#include <linux/migrate_dma.h>
#include <asm/tlbflush.h>
@@ -656,6 +657,37 @@ void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
}
EXPORT_SYMBOL(folio_migrate_copy);
+DEFINE_STATIC_CALL(_folios_copy, folios_copy);
+DEFINE_STATIC_CALL(_can_dma_migrate, can_dma_migrate);
+
+#ifdef CONFIG_DMA_MIGRATION
+void srcu_mig_cb(struct rcu_head *head)
+{
+ static_call_query(_folios_copy);
+}
+
+void dma_update_migrator(struct migrator *mig)
+{
+ int index;
+
+ mutex_lock(&migrator_mut);
+ index = srcu_read_lock(&mig_srcu);
+ strscpy(migrator.name, mig ? mig->name : "kernel", MIGRATOR_NAME_LEN);
+ static_call_update(_folios_copy, mig ? mig->migrate_dma : folios_copy);
+ static_call_update(_can_dma_migrate, mig ? mig->can_migrate_dma : can_dma_migrate);
+ if (READ_ONCE(migrator.owner))
+ module_put(migrator.owner);
+ xchg(&migrator.owner, mig ? mig->owner : NULL);
+ if (READ_ONCE(migrator.owner))
+ try_module_get(migrator.owner);
+ srcu_read_unlock(&mig_srcu, index);
+ mutex_unlock(&migrator_mut);
+ call_srcu(&mig_srcu, &migrator.srcu_head, srcu_mig_cb);
+ srcu_barrier(&mig_srcu);
+}
+
+#endif /* CONFIG_DMA_MIGRATION */
+
/************************************************************
* Migration functions
***********************************************************/
@@ -1686,6 +1718,7 @@ static void migrate_folios_batch_move(struct list_head *src_folios,
struct anon_vma *anon_vma = NULL;
bool is_lru;
int is_thp = 0;
+ bool can_migrate = true;
struct migrate_folio_info *mig_info, *mig_info2;
LIST_HEAD(temp_src_folios);
LIST_HEAD(temp_dst_folios);
@@ -1720,7 +1753,10 @@ static void migrate_folios_batch_move(struct list_head *src_folios,
* This does everything except the page copy. The actual page copy
* is handled later in a batch manner.
*/
- if (likely(is_lru)) {
+ can_migrate = static_call(_can_dma_migrate)(dst, folio);
+ if (unlikely(!can_migrate))
+ rc = -EAGAIN;
+ else if (likely(is_lru)) {
struct address_space *mapping = folio_mapping(folio);
if (!mapping)
@@ -1786,7 +1822,7 @@ static void migrate_folios_batch_move(struct list_head *src_folios,
goto out;
/* Batch copy the folios */
- folios_copy(dst_folios, src_folios);
+ static_call(_folios_copy)(dst_folios, src_folios);
/*
* Iterate the folio lists to remove migration pte and restore them
diff --git a/mm/migrate_dma.c b/mm/migrate_dma.c
new file mode 100644
index 000000000000..c8b078fdff17
--- /dev/null
+++ b/mm/migrate_dma.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/migrate.h>
+#include <linux/migrate_dma.h>
+#include <linux/rculist.h>
+#include <linux/static_call.h>
+
+atomic_t dispatch_to_dma = ATOMIC_INIT(0);
+EXPORT_SYMBOL_GPL(dispatch_to_dma);
+
+DEFINE_MUTEX(migrator_mut);
+DEFINE_SRCU(mig_srcu);
+
+struct migrator migrator = {
+ .name = "kernel",
+ .migrate_dma = folios_copy,
+ .can_migrate_dma = can_dma_migrate,
+ .srcu_head.func = srcu_mig_cb,
+ .owner = NULL,
+};
+
+bool can_dma_migrate(struct folio *dst, struct folio *src)
+{
+ return true;
+}
+EXPORT_SYMBOL_GPL(can_dma_migrate);
+
+void start_offloading(struct migrator *m)
+{
+ int offloading = 0;
+
+ pr_info("starting migration offload by %s\n", m->name);
+ dma_update_migrator(m);
+ atomic_try_cmpxchg(&dispatch_to_dma, &offloading, 1);
+}
+EXPORT_SYMBOL_GPL(start_offloading);
+
+void stop_offloading(void)
+{
+ int offloading = 1;
+
+ pr_info("stopping migration offload by %s\n", migrator.name);
+ dma_update_migrator(NULL);
+ atomic_try_cmpxchg(&dispatch_to_dma, &offloading, 0);
+}
+EXPORT_SYMBOL_GPL(stop_offloading);
+
+unsigned char *get_active_migrator_name(void)
+{
+ return migrator.name;
+}
+EXPORT_SYMBOL_GPL(get_active_migrator_name);
--
2.34.1
next prev parent reply other threads:[~2024-06-14 22:21 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-06-14 22:15 [RFC PATCH 0/5] Enhancements to Page Migration with Batch Offloading via DMA Shivank Garg
2024-06-14 22:15 ` [RFC PATCH 1/5] mm: separate move/undo doing on folio list from migrate_pages_batch() Shivank Garg
2024-06-14 22:15 ` [RFC PATCH 2/5] mm: add folios_copy() for copying pages in batch during migration Shivank Garg
2024-06-14 22:15 ` [RFC PATCH 3/5] mm: add migrate_folios_batch_move to batch the folio move operations Shivank Garg
2024-06-14 22:15 ` Shivank Garg [this message]
2024-06-14 22:15 ` [RFC PATCH 5/5] dcbm: add dma core batch migrator for batch page offloading Shivank Garg
2024-06-15 4:02 ` [RFC PATCH 0/5] Enhancements to Page Migration with Batch Offloading via DMA Matthew Wilcox
2024-06-17 11:40 ` Garg, Shivank
2024-06-25 8:57 ` Garg, Shivank
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240614221525.19170-5-shivankg@amd.com \
--to=shivankg@amd.com \
--cc=Michael.Day@amd.com \
--cc=akpm@linux-foundation.org \
--cc=bharata@amd.com \
--cc=dmaengine@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=raghavendra.kodsarathimmappa@amd.com \
--cc=vkoul@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox