From: Kairui Song <ryncsn@gmail.com>
To: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	Kairui Song <kasong@tencent.com>
Subject: [RFC PATCH 2/7] mm: move check_mm to memory.c
Date: Fri, 29 Jul 2022 04:45:06 +0800	[thread overview]
Message-ID: <20220728204511.56348-3-ryncsn@gmail.com> (raw)
In-Reply-To: <20220728204511.56348-1-ryncsn@gmail.com>

From: Kairui Song <kasong@tencent.com>

No functional change. Moving check_mm() out of kernel/fork.c makes it
possible to perform extra mm operations on mm exit, in preparation for
the following commits.
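
As an illustration, here is an abridged sketch of the existing call
site in kernel/fork.c (simplified, not part of this diff). After this
move, check_mm() resolves to the definition in mm/memory.c, where
later commits can add exit-time work such as draining a per-CPU RSS
cache:

	/* kernel/fork.c, abridged: the one caller of check_mm() */
	void __mmdrop(struct mm_struct *mm)
	{
		BUG_ON(mm == &init_mm);
		mm_free_pgd(mm);
		destroy_context(mm);
		check_mm(mm);	/* now defined in mm/memory.c */
		free_mm(mm);
	}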

Signed-off-by: Kairui Song <kasong@tencent.com>
---
 include/linux/mm.h |  3 +++
 kernel/fork.c      | 33 ---------------------------------
 mm/memory.c        | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 35 insertions(+), 33 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6346f7e77dc7..81ad91621078 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1993,6 +1993,9 @@ static inline bool get_user_page_fast_only(unsigned long addr,
 {
 	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
 }
+
+void check_mm(struct mm_struct *mm);
+
 /*
  * per-process(per-mm_struct) statistics.
  */
diff --git a/kernel/fork.c b/kernel/fork.c
index c090ebd55063..86a239772208 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -128,15 +128,6 @@ int nr_threads;			/* The idle threads do not count.. */
 
 static int max_threads;		/* tunable limit on nr_threads */
 
-#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)
-
-static const char * const resident_page_types[] = {
-	NAMED_ARRAY_INDEX(MM_FILEPAGES),
-	NAMED_ARRAY_INDEX(MM_ANONPAGES),
-	NAMED_ARRAY_INDEX(MM_SWAPENTS),
-	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
-};
-
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
@@ -748,30 +739,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 #define mm_free_pgd(mm)
 #endif /* CONFIG_MMU */
 
-static void check_mm(struct mm_struct *mm)
-{
-	int i;
-
-	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
-			 "Please make sure 'struct resident_page_types[]' is updated as well");
-
-	for (i = 0; i < NR_MM_COUNTERS; i++) {
-		long x = atomic_long_read(&mm->rss_stat.count[i]);
-
-		if (unlikely(x))
-			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
-				 mm, resident_page_types[i], x);
-	}
-
-	if (mm_pgtables_bytes(mm))
-		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
-				mm_pgtables_bytes(mm));
-
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
-	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
-#endif
-}
-
 #define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
 #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
 
diff --git a/mm/memory.c b/mm/memory.c
index 6bf7826e666b..c0597214f9b3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -179,6 +179,38 @@ void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
 
+#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)
+static const char * const resident_page_types[] = {
+	NAMED_ARRAY_INDEX(MM_FILEPAGES),
+	NAMED_ARRAY_INDEX(MM_ANONPAGES),
+	NAMED_ARRAY_INDEX(MM_SWAPENTS),
+	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
+};
+
+void check_mm(struct mm_struct *mm)
+{
+	int i;
+
+	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
+			 "Please make sure 'struct resident_page_types[]' is updated as well");
+
+	for (i = 0; i < NR_MM_COUNTERS; i++) {
+		long x = atomic_long_read(&mm->rss_stat.count[i]);
+
+		if (unlikely(x))
+			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
+				 mm, resident_page_types[i], x);
+	}
+
+	if (mm_pgtables_bytes(mm))
+		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
+				mm_pgtables_bytes(mm));
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
+#endif
+}
+
 /*
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
-- 
2.35.2
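
A note on the moved NAMED_ARRAY_INDEX() macro: it builds a
designated-initializer array whose entries are the stringified enum
names, which is what lets check_mm() print a readable counter type in
its pr_alert(). A minimal standalone illustration of the same pattern
(userspace sketch; __stringify() is re-derived here instead of coming
from <linux/stringify.h>):

	#include <stdio.h>

	#define __stringify_1(x)	#x
	#define __stringify(x)		__stringify_1(x)
	#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

	enum mm_counter {
		MM_FILEPAGES,
		MM_ANONPAGES,
		MM_SWAPENTS,
		MM_SHMEMPAGES,
		NR_MM_COUNTERS,
	};

	static const char * const resident_page_types[] = {
		NAMED_ARRAY_INDEX(MM_FILEPAGES),
		NAMED_ARRAY_INDEX(MM_ANONPAGES),
		NAMED_ARRAY_INDEX(MM_SWAPENTS),
		NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
	};

	int main(void)
	{
		/* prints each counter index with its enum name */
		for (int i = 0; i < NR_MM_COUNTERS; i++)
			printf("%d -> %s\n", i, resident_page_types[i]);
		return 0;
	}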




Thread overview: 8+ messages
2022-07-28 20:45 [RFC PATCH 0/7] Replace per-task RSS cache with per-CPU RSS cache Kairui Song
2022-07-28 20:45 ` [RFC PATCH 1/7] mm: remove the per-task RSS counter cache Kairui Song
2022-07-28 20:45 ` Kairui Song [this message]
2022-07-28 20:45 ` [RFC PATCH 3/7] mm/headers: change enum order of MM_COUNTERS Kairui Song
2022-07-28 20:45 ` [RFC PATCH 4/7] mm: introduce a generic per-CPU RSS cache Kairui Song
2022-07-28 20:45 ` [RFC PATCH 5/7] mm: try use fast path for pmd setting as well Kairui Song
2022-07-28 20:45 ` [RFC PATCH 6/7] mm: introduce CONFIG_ARCH_PCP_RSS_USE_CPUMASK Kairui Song
2022-07-28 20:45 ` [RFC PATCH 7/7] x86_64/tlb, mm: enable cpumask optimization for RSS cache Kairui Song
