From: Wale Zhang <wale.zhang.ftd@gmail.com>
To: akpm@linux-foundation.org, chrisl@kernel.org, ziy@nvidia.com
Cc: lorenzo.stoakes@oracle.com, david@kernel.org, baohua@kernel.org,
	matthew.brost@intel.com, linux-mm@kvack.org,
	Wale Zhang <wale.zhang.ftd@gmail.com>
Subject: [PATCH v2] mm/swapops,rmap: remove should-never-be-compiled code
Date: Tue, 30 Dec 2025 21:01:10 +0800
Message-ID: <20251230130110.1366374-1-wale.zhang.ftd@gmail.com>

Remove code that should never be compiled: the BUILD_BUG() stubs
folio_add_return_large_mapcount(), folio_sub_return_large_mapcount(),
set_pmd_migration_entry() and remove_migration_pmd().

These stubs existed only so that call sites guarded by compile-time
constant conditions such as IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT) and
thp_migration_supported() would still parse when the option is
disabled, relying on dead-code elimination to discard the BUILD_BUG()
bodies. Convert those call sites to #ifdef blocks instead, so the
compiler never sees the calls and the stubs can be removed.
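
As a minimal userspace illustration of the difference between the two
styles (a sketch only, not kernel code: CONFIG_FEATURE, do_feature()
and the trivial IS_ENABLED() below are made-up stand-ins for the real
kernel macros):

  #include <stdio.h>

  #define CONFIG_FEATURE 0              /* pretend the option is off */
  #define IS_ENABLED(option) (option)   /* the kernel's is fancier */

  #if CONFIG_FEATURE
  static int do_feature(void) { return 42; }
  #else
  /*
   * Delete this stub and the IS_ENABLED() call below no longer
   * compiles; in the kernel such stubs carry BUILD_BUG() bodies.
   */
  static int do_feature(void) { return 0; }
  #endif

  int main(void)
  {
          /* Old style: parsed even when off, so a stub must exist. */
          if (IS_ENABLED(CONFIG_FEATURE))
                  printf("%d\n", do_feature());

          /* New style: gone after preprocessing, no stub needed. */
  #if CONFIG_FEATURE
          printf("%d\n", do_feature());
  #endif
          return 0;
  }

The price of the #ifdef style is that the disabled branch is no longer
parsed at all in configurations where the option is off.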

Link: https://lore.kernel.org/linux-mm/CAHrEdeunY-YpDC7AoTFcppAvHCJpEJRp=GTQ4psRKRi_3fhB0Q@mail.gmail.com/

Signed-off-by: Wale Zhang <wale.zhang.ftd@gmail.com>
---
 include/linux/rmap.h    | 17 ++-----
 include/linux/swapops.h | 12 -----
 mm/migrate_device.c     |  5 ++-
 mm/rmap.c               | 98 ++++++++++++++++++++---------------------
 4 files changed, 54 insertions(+), 78 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index daa92a58585d..44dccd1821eb 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -354,33 +354,24 @@ static inline void folio_add_large_mapcount(struct folio *folio,
 	atomic_add(diff, &folio->_large_mapcount);
 }
 
-static inline int folio_add_return_large_mapcount(struct folio *folio,
-		int diff, struct vm_area_struct *vma)
-{
-	BUILD_BUG();
-}
-
 static inline void folio_sub_large_mapcount(struct folio *folio,
 		int diff, struct vm_area_struct *vma)
 {
 	atomic_sub(diff, &folio->_large_mapcount);
 }
 
-static inline int folio_sub_return_large_mapcount(struct folio *folio,
-		int diff, struct vm_area_struct *vma)
-{
-	BUILD_BUG();
-}
 #endif /* CONFIG_MM_ID */
 
 #define folio_inc_large_mapcount(folio, vma) \
 	folio_add_large_mapcount(folio, 1, vma)
-#define folio_inc_return_large_mapcount(folio, vma) \
-	folio_add_return_large_mapcount(folio, 1, vma)
 #define folio_dec_large_mapcount(folio, vma) \
 	folio_sub_large_mapcount(folio, 1, vma)
+#ifdef CONFIG_NO_PAGE_MAPCOUNT
+#define folio_inc_return_large_mapcount(folio, vma) \
+	folio_add_return_large_mapcount(folio, 1, vma)
 #define folio_dec_return_large_mapcount(folio, vma) \
 	folio_sub_return_large_mapcount(folio, 1, vma)
+#endif
 
 /* RMAP flags, currently only relevant for some anon rmap operations. */
 typedef int __bitwise rmap_t;
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 8cfc966eae48..d6ca56efc489 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -339,18 +339,6 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
 }
 
 #else  /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
-static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
-		struct page *page)
-{
-	BUILD_BUG();
-}
-
-static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
-		struct page *new)
-{
-	BUILD_BUG();
-}
-
 static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
 
 static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 23379663b1e1..13b2cd12e612 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -195,8 +195,8 @@ static int migrate_vma_collect_huge_pmd(pmd_t *pmdp, unsigned long start,
 		return migrate_vma_collect_skip(start, end, walk);
 	}
 
-	if (thp_migration_supported() &&
-		(migrate->flags & MIGRATE_VMA_SELECT_COMPOUND) &&
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+	if ((migrate->flags & MIGRATE_VMA_SELECT_COMPOUND) &&
 		(IS_ALIGNED(start, HPAGE_PMD_SIZE) &&
 		 IS_ALIGNED(end, HPAGE_PMD_SIZE))) {
 
@@ -228,6 +228,7 @@ static int migrate_vma_collect_huge_pmd(pmd_t *pmdp, unsigned long start,
 	}
 
 fallback:
+#endif
 	spin_unlock(ptl);
 	if (!folio_test_large(folio))
 		goto done;
diff --git a/mm/rmap.c b/mm/rmap.c
index f955f02d570e..81c7f2becc21 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1232,7 +1232,7 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		enum pgtable_level level)
 {
-	atomic_t *mapped = &folio->_nr_pages_mapped;
+	__maybe_unused atomic_t *mapped = &folio->_nr_pages_mapped;
 	const int orig_nr_pages = nr_pages;
 	int first = 0, nr = 0, nr_pmdmapped = 0;
 
@@ -1245,16 +1245,14 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 			break;
 		}
 
-		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
-			nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma);
-			if (nr == orig_nr_pages)
-				/* Was completely unmapped. */
-				nr = folio_large_nr_pages(folio);
-			else
-				nr = 0;
-			break;
-		}
-
+#ifdef CONFIG_NO_PAGE_MAPCOUNT
+		nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma);
+		if (nr == orig_nr_pages)
+			/* Was completely unmapped. */
+			nr = folio_large_nr_pages(folio);
+		else
+			nr = 0;
+#else
 		do {
 			first += atomic_inc_and_test(&page->_mapcount);
 		} while (page++, --nr_pages > 0);
@@ -1264,22 +1262,21 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 			nr = first;
 
 		folio_add_large_mapcount(folio, orig_nr_pages, vma);
+#endif
 		break;
 	case PGTABLE_LEVEL_PMD:
 	case PGTABLE_LEVEL_PUD:
 		first = atomic_inc_and_test(&folio->_entire_mapcount);
-		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
-			if (level == PGTABLE_LEVEL_PMD && first)
-				nr_pmdmapped = folio_large_nr_pages(folio);
-			nr = folio_inc_return_large_mapcount(folio, vma);
-			if (nr == 1)
-				/* Was completely unmapped. */
-				nr = folio_large_nr_pages(folio);
-			else
-				nr = 0;
-			break;
-		}
-
+#ifdef CONFIG_NO_PAGE_MAPCOUNT
+		if (level == PGTABLE_LEVEL_PMD && first)
+			nr_pmdmapped = folio_large_nr_pages(folio);
+		nr = folio_inc_return_large_mapcount(folio, vma);
+		if (nr == 1)
+			/* Was completely unmapped. */
+			nr = folio_large_nr_pages(folio);
+		else
+			nr = 0;
+#else
 		if (first) {
 			nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped);
 			if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) {
@@ -1300,6 +1297,7 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
 			}
 		}
 		folio_inc_large_mapcount(folio, vma);
+#endif
 		break;
 	default:
 		BUILD_BUG();
@@ -1656,7 +1654,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 		struct page *page, int nr_pages, struct vm_area_struct *vma,
 		enum pgtable_level level)
 {
-	atomic_t *mapped = &folio->_nr_pages_mapped;
+	__maybe_unused atomic_t *mapped = &folio->_nr_pages_mapped;
 	int last = 0, nr = 0, nr_pmdmapped = 0;
 	bool partially_mapped = false;
 
@@ -1669,19 +1667,17 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 			break;
 		}
 
-		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
-			nr = folio_sub_return_large_mapcount(folio, nr_pages, vma);
-			if (!nr) {
-				/* Now completely unmapped. */
-				nr = folio_large_nr_pages(folio);
-			} else {
-				partially_mapped = nr < folio_large_nr_pages(folio) &&
-						   !folio_entire_mapcount(folio);
-				nr = 0;
-			}
-			break;
+#ifdef CONFIG_NO_PAGE_MAPCOUNT
+		nr = folio_sub_return_large_mapcount(folio, nr_pages, vma);
+		if (!nr) {
+			/* Now completely unmapped. */
+			nr = folio_large_nr_pages(folio);
+		} else {
+			partially_mapped = nr < folio_large_nr_pages(folio) &&
+				!folio_entire_mapcount(folio);
+			nr = 0;
 		}
-
+#else
 		folio_sub_large_mapcount(folio, nr_pages, vma);
 		do {
 			last += atomic_add_negative(-1, &page->_mapcount);
@@ -1692,25 +1688,24 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 			nr = last;
 
 		partially_mapped = nr && atomic_read(mapped);
+#endif
 		break;
 	case PGTABLE_LEVEL_PMD:
 	case PGTABLE_LEVEL_PUD:
-		if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
-			last = atomic_add_negative(-1, &folio->_entire_mapcount);
-			if (level == PGTABLE_LEVEL_PMD && last)
-				nr_pmdmapped = folio_large_nr_pages(folio);
-			nr = folio_dec_return_large_mapcount(folio, vma);
-			if (!nr) {
-				/* Now completely unmapped. */
-				nr = folio_large_nr_pages(folio);
-			} else {
-				partially_mapped = last &&
-						   nr < folio_large_nr_pages(folio);
-				nr = 0;
-			}
-			break;
+#ifdef CONFIG_NO_PAGE_MAPCOUNT
+		last = atomic_add_negative(-1, &folio->_entire_mapcount);
+		if (level == PGTABLE_LEVEL_PMD && last)
+			nr_pmdmapped = folio_large_nr_pages(folio);
+		nr = folio_dec_return_large_mapcount(folio, vma);
+		if (!nr) {
+			/* Now completely unmapped. */
+			nr = folio_large_nr_pages(folio);
+		} else {
+			partially_mapped = last &&
+				nr < folio_large_nr_pages(folio);
+			nr = 0;
 		}
-
+#else
 		folio_dec_large_mapcount(folio, vma);
 		last = atomic_add_negative(-1, &folio->_entire_mapcount);
 		if (last) {
@@ -1730,6 +1725,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 		}
 
 		partially_mapped = nr && nr < nr_pmdmapped;
+#endif
 		break;
 	default:
 		BUILD_BUG();
-- 
2.43.0


