From: SeongJae Park <sjpark@amazon.com>
To: <akpm@linux-foundation.org>
Cc: SeongJae Park <sjpark@amazon.de>, <Jonathan.Cameron@Huawei.com>,
<aarcange@redhat.com>, <acme@kernel.org>,
<alexander.shishkin@linux.intel.com>, <amit@kernel.org>,
<benh@kernel.crashing.org>, <brendan.d.gregg@gmail.com>,
<brendanhiggins@google.com>, <cai@lca.pw>,
<colin.king@canonical.com>, <corbet@lwn.net>, <david@redhat.com>,
<dwmw@amazon.com>, <elver@google.com>, <fan.du@intel.com>,
<foersleo@amazon.de>, <gthelen@google.com>, <irogers@google.com>,
<jolsa@redhat.com>, <kirill@shutemov.name>,
<mark.rutland@arm.com>, <mgorman@suse.de>, <minchan@kernel.org>,
<mingo@redhat.com>, <namhyung@kernel.org>, <peterz@infradead.org>,
<rdunlap@infradead.org>, <riel@surriel.com>,
<rientjes@google.com>, <rostedt@goodmis.org>, <rppt@kernel.org>,
<sblbir@amazon.com>, <shakeelb@google.com>, <shuah@kernel.org>,
<sj38.park@gmail.com>, <snu@amazon.de>, <vbabka@suse.cz>,
<vdavydov.dev@gmail.com>, <yang.shi@linux.alibaba.com>,
<ying.huang@intel.com>, <zgf574564920@gmail.com>,
<linux-damon@amazon.com>, <linux-mm@kvack.org>,
<linux-doc@vger.kernel.org>, <linux-kernel@vger.kernel.org>
Subject: [RFC v10 06/13] mm/damon/vaddr: Separate commonly usable functions
Date: Wed, 16 Dec 2020 10:42:14 +0100
Message-ID: <20201216094221.11898-7-sjpark@amazon.com>
In-Reply-To: <20201216094221.11898-1-sjpark@amazon.com>
From: SeongJae Park <sjpark@amazon.de>
This commit moves the functions in the default virtual address space
monitoring primitives that are commonly usable from other address
spaces, such as the physical address space, into common source and
header files (prmtv-common.c and prmtv-common.h). Those will be
reused by the physical address space monitoring primitives in a
following commit.
Signed-off-by: SeongJae Park <sjpark@amazon.de>
---
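Note: as a quick, hypothetical illustration of the intended reuse (the
real physical address space primitives follow in a later patch of this
series), a caller that has already resolved a target to an mm_struct
and a mapped address could use the moved helpers as sketched below.
The example_*() wrappers are made-up names for illustration only and
are not part of this patch; only damon_va_mkold() and damon_va_young()
are introduced by the moved code.

	#include "prmtv-common.h"

	/*
	 * Hypothetical callers of the helpers this patch moves into
	 * prmtv-common.{h,c}.  The example_*() names are illustrative.
	 */
	static void example_mkold(struct mm_struct *mm, unsigned long addr)
	{
		/* Clear the accessed state of the mapping at @addr */
		damon_va_mkold(mm, addr);
	}

	static bool example_accessed(struct mm_struct *mm, unsigned long addr,
			unsigned long *page_sz)
	{
		/* True if the mapping at @addr was used since the last mkold */
		return damon_va_young(mm, addr, page_sz);
	}
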
mm/damon/Makefile | 2 +-
mm/damon/prmtv-common.c | 104 ++++++++++++++++++++++++++++++++++++++
mm/damon/prmtv-common.h | 21 ++++++++
mm/damon/vaddr.c | 108 +---------------------------------------
4 files changed, 128 insertions(+), 107 deletions(-)
create mode 100644 mm/damon/prmtv-common.c
create mode 100644 mm/damon/prmtv-common.h
diff --git a/mm/damon/Makefile b/mm/damon/Makefile
index fed4be3bace3..99b1bfe01ff5 100644
--- a/mm/damon/Makefile
+++ b/mm/damon/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DAMON) := core.o
-obj-$(CONFIG_DAMON_VADDR) += vaddr.o
+obj-$(CONFIG_DAMON_VADDR) += prmtv-common.o vaddr.o
obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o
diff --git a/mm/damon/prmtv-common.c b/mm/damon/prmtv-common.c
new file mode 100644
index 000000000000..6cdb96cbc9ef
--- /dev/null
+++ b/mm/damon/prmtv-common.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common Primitives for Data Access Monitoring
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#include "prmtv-common.h"
+
+static void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm,
+ unsigned long addr)
+{
+ bool referenced = false;
+ struct page *page = pte_page(*pte);
+
+ if (pte_young(*pte)) {
+ referenced = true;
+ *pte = pte_mkold(*pte);
+ }
+
+#ifdef CONFIG_MMU_NOTIFIER
+ if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE))
+ referenced = true;
+#endif /* CONFIG_MMU_NOTIFIER */
+
+ if (referenced)
+ set_page_young(page);
+
+ set_page_idle(page);
+}
+
+static void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm,
+ unsigned long addr)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ bool referenced = false;
+ struct page *page = pmd_page(*pmd);
+
+ if (pmd_young(*pmd)) {
+ referenced = true;
+ *pmd = pmd_mkold(*pmd);
+ }
+
+#ifdef CONFIG_MMU_NOTIFIER
+ if (mmu_notifier_clear_young(mm, addr,
+ addr + ((1UL) << HPAGE_PMD_SHIFT)))
+ referenced = true;
+#endif /* CONFIG_MMU_NOTIFIER */
+
+ if (referenced)
+ set_page_young(page);
+
+ set_page_idle(page);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+}
+
+void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
+{
+ pte_t *pte = NULL;
+ pmd_t *pmd = NULL;
+ spinlock_t *ptl;
+
+ if (follow_pte_pmd(mm, addr, NULL, &pte, &pmd, &ptl))
+ return;
+
+ if (pte) {
+ damon_ptep_mkold(pte, mm, addr);
+ pte_unmap_unlock(pte, ptl);
+ } else {
+ damon_pmdp_mkold(pmd, mm, addr);
+ spin_unlock(ptl);
+ }
+}
+
+bool damon_va_young(struct mm_struct *mm, unsigned long addr,
+ unsigned long *page_sz)
+{
+ pte_t *pte = NULL;
+ pmd_t *pmd = NULL;
+ spinlock_t *ptl;
+ bool young = false;
+
+ if (follow_pte_pmd(mm, addr, NULL, &pte, &pmd, &ptl))
+ return false;
+
+ *page_sz = PAGE_SIZE;
+ if (pte) {
+ young = pte_young(*pte);
+ if (!young)
+ young = !page_is_idle(pte_page(*pte));
+ pte_unmap_unlock(pte, ptl);
+ return young;
+ }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ young = pmd_young(*pmd);
+ if (!young)
+ young = !page_is_idle(pmd_page(*pmd));
+ spin_unlock(ptl);
+ *page_sz = ((1UL) << HPAGE_PMD_SHIFT);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+ return young;
+}
diff --git a/mm/damon/prmtv-common.h b/mm/damon/prmtv-common.h
new file mode 100644
index 000000000000..a66a6139b4fc
--- /dev/null
+++ b/mm/damon/prmtv-common.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common Primitives for Data Access Monitoring
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#include <linux/damon.h>
+#include <linux/mm.h>
+#include <linux/mmu_notifier.h>
+#include <linux/page_idle.h>
+#include <linux/random.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+
+/* Get a random number in [l, r) */
+#define damon_rand(l, r) (l + prandom_u32_max(r - l))
+
+void damon_va_mkold(struct mm_struct *mm, unsigned long addr);
+bool damon_va_young(struct mm_struct *mm, unsigned long addr,
+ unsigned long *page_sz);
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 2075f07f728b..915b12329c6e 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -8,22 +8,14 @@
#define pr_fmt(fmt) "damon-va: " fmt
#include <asm-generic/mman-common.h>
-#include <linux/damon.h>
-#include <linux/mm.h>
-#include <linux/mmu_notifier.h>
-#include <linux/page_idle.h>
-#include <linux/random.h>
-#include <linux/sched/mm.h>
-#include <linux/slab.h>
+
+#include "prmtv-common.h"
#ifdef CONFIG_DAMON_VADDR_KUNIT_TEST
#undef DAMON_MIN_REGION
#define DAMON_MIN_REGION 1
#endif
-/* Get a random number in [l, r) */
-#define damon_rand(l, r) (l + prandom_u32_max(r - l))
-
/*
* 't->id' should be the pointer to the relevant 'struct pid' having reference
* count. Caller must put the returned task, unless it is NULL.
@@ -370,71 +362,6 @@ void damon_va_update_regions(struct damon_ctx *ctx)
}
}
-static void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm,
- unsigned long addr)
-{
- bool referenced = false;
- struct page *page = pte_page(*pte);
-
- if (pte_young(*pte)) {
- referenced = true;
- *pte = pte_mkold(*pte);
- }
-
-#ifdef CONFIG_MMU_NOTIFIER
- if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE))
- referenced = true;
-#endif /* CONFIG_MMU_NOTIFIER */
-
- if (referenced)
- set_page_young(page);
-
- set_page_idle(page);
-}
-
-static void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm,
- unsigned long addr)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- bool referenced = false;
- struct page *page = pmd_page(*pmd);
-
- if (pmd_young(*pmd)) {
- referenced = true;
- *pmd = pmd_mkold(*pmd);
- }
-
-#ifdef CONFIG_MMU_NOTIFIER
- if (mmu_notifier_clear_young(mm, addr,
- addr + ((1UL) << HPAGE_PMD_SHIFT)))
- referenced = true;
-#endif /* CONFIG_MMU_NOTIFIER */
-
- if (referenced)
- set_page_young(page);
-
- set_page_idle(page);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-}
-
-static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
-{
- pte_t *pte = NULL;
- pmd_t *pmd = NULL;
- spinlock_t *ptl;
-
- if (follow_pte_pmd(mm, addr, NULL, &pte, &pmd, &ptl))
- return;
-
- if (pte) {
- damon_ptep_mkold(pte, mm, addr);
- pte_unmap_unlock(pte, ptl);
- } else {
- damon_pmdp_mkold(pmd, mm, addr);
- spin_unlock(ptl);
- }
-}
-
/*
* Functions for the access checking of the regions
*/
@@ -463,37 +390,6 @@ void damon_va_prepare_access_checks(struct damon_ctx *ctx)
}
}
-static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
- unsigned long *page_sz)
-{
- pte_t *pte = NULL;
- pmd_t *pmd = NULL;
- spinlock_t *ptl;
- bool young = false;
-
- if (follow_pte_pmd(mm, addr, NULL, &pte, &pmd, &ptl))
- return false;
-
- *page_sz = PAGE_SIZE;
- if (pte) {
- young = pte_young(*pte);
- if (!young)
- young = !page_is_idle(pte_page(*pte));
- pte_unmap_unlock(pte, ptl);
- return young;
- }
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- young = pmd_young(*pmd);
- if (!young)
- young = !page_is_idle(pmd_page(*pmd));
- spin_unlock(ptl);
- *page_sz = ((1UL) << HPAGE_PMD_SHIFT);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
- return young;
-}
-
/*
* Check whether the region was accessed after the last preparation
*
--
2.17.1