[PATCH] mm/khugepaged: use map_anon_folio_pmd() in collapse_huge_page()
From: Wei Yang @ 2025-10-04  9:25 UTC
  To: akpm, david, lorenzo.stoakes, ziy, baolin.wang, Liam.Howlett,
	npache, ryan.roberts, dev.jain, baohua, lance.yang
  Cc: linux-mm, Wei Yang

Unify the way an anonymous PMD folio is installed by calling
map_anon_folio_pmd() from collapse_huge_page().

Since collapse is not a page fault, add an in_pf parameter to
map_anon_folio_pmd() so that the fault-specific counter and event
updates are only performed by the fault paths.

No functional change is intended.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
---
 include/linux/huge_mm.h |  6 ++++++
 mm/huge_memory.c        | 18 ++++++++++--------
 mm/khugepaged.c         |  9 +--------
 3 files changed, 17 insertions(+), 16 deletions(-)
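
For quick review, below is the resulting helper, condensed from the
hunks that follow. The three statements ahead of folio_add_lru_vma()
are unchanged context that the diff elides; they are reconstructed here
from the lines removed in mm/khugepaged.c:

	void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
			struct vm_area_struct *vma, unsigned long haddr, bool in_pf)
	{
		pmd_t entry;

		entry = folio_mk_pmd(folio, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
		folio_add_lru_vma(folio, vma);
		set_pmd_at(vma->vm_mm, haddr, pmd, entry);
		update_mmu_cache_pmd(vma, haddr, pmd);
		deferred_split_folio(folio, false);
		if (in_pf) {	/* fault paths only; collapse passes false */
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
			count_vm_event(THP_FAULT_ALLOC);
			count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
			count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
		}
	}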

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e3ed008a076a..becfba48d17e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -539,6 +539,8 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
 			   pmd_t *pmd, bool freeze);
 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
 			   pmd_t *pmdp, struct folio *folio);
+void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
+		struct vm_area_struct *vma, unsigned long haddr, bool in_pf);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -629,6 +631,10 @@ static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
 	return false;
 }
 
+static inline void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
+		struct vm_area_struct *vma, unsigned long haddr, bool in_pf)
+{}
+
 #define split_huge_pud(__vma, __pmd, __address)	\
 	do { } while (0)
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f13de93637bf..bfe38ca60a10 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1217,8 +1217,8 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
 	return folio;
 }
 
-static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
-		struct vm_area_struct *vma, unsigned long haddr)
+void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
+		struct vm_area_struct *vma, unsigned long haddr, bool in_pf)
 {
 	pmd_t entry;
 
@@ -1228,11 +1228,13 @@ static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
 	folio_add_lru_vma(folio, vma);
 	set_pmd_at(vma->vm_mm, haddr, pmd, entry);
 	update_mmu_cache_pmd(vma, haddr, pmd);
-	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
-	count_vm_event(THP_FAULT_ALLOC);
-	count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
-	count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
 	deferred_split_folio(folio, false);
+	if (in_pf) {
+		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		count_vm_event(THP_FAULT_ALLOC);
+		count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
+		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
+	}
 }
 
 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
@@ -1271,7 +1273,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 			return ret;
 		}
 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
-		map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
+		map_anon_folio_pmd(folio, vmf->pmd, vma, haddr, true);
 		mm_inc_nr_ptes(vma->vm_mm);
 		spin_unlock(vmf->ptl);
 	}
@@ -1877,7 +1879,7 @@ static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
 	if (ret)
 		goto release;
 	(void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
-	map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
+	map_anon_folio_pmd(folio, vmf->pmd, vma, haddr, true);
 	goto unlock;
 release:
 	folio_put(folio);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index f4f57ba69d72..cd187d10dee6 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1224,17 +1224,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	__folio_mark_uptodate(folio);
 	pgtable = pmd_pgtable(_pmd);
 
-	_pmd = folio_mk_pmd(folio, vma->vm_page_prot);
-	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-
 	spin_lock(pmd_ptl);
 	BUG_ON(!pmd_none(*pmd));
-	folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
-	folio_add_lru_vma(folio, vma);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
-	set_pmd_at(mm, address, pmd, _pmd);
-	update_mmu_cache_pmd(vma, address, pmd);
-	deferred_split_folio(folio, false);
+	map_anon_folio_pmd(folio, pmd, vma, address, false);
 	spin_unlock(pmd_ptl);
 
 	folio = NULL;
-- 
2.34.1
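
For anyone who wants to exercise both callers, here is a hypothetical
userspace demo (not part of this patch). It assumes x86-64 (2 MiB PMD),
anon THP enabled, and a kernel with MADV_COLLAPSE (v6.1+):

	/* demo.c: fault in small pages, then collapse them into a PMD folio. */
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	#ifndef MADV_COLLAPSE
	#define MADV_COLLAPSE 25	/* uapi value, for older headers */
	#endif

	#define PMD_SZ (2UL << 20)

	int main(void)
	{
		/* Over-allocate so a PMD-aligned region can be carved out. */
		char *raw = mmap(NULL, 2 * PMD_SZ, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		char *p;

		if (raw == MAP_FAILED)
			return 1;
		p = (char *)(((unsigned long)raw + PMD_SZ - 1) & ~(PMD_SZ - 1));

		/* Fault in small pages first so there is something to collapse;
		 * with THP allowed, the first touch could instead go straight
		 * through __do_huge_pmd_anonymous_page() ->
		 * map_anon_folio_pmd(..., in_pf=true). */
		madvise(p, PMD_SZ, MADV_NOHUGEPAGE);
		memset(p, 1, PMD_SZ);

		/* Re-enable THP and collapse; this reaches collapse_huge_page(),
		 * which after this patch ends in map_anon_folio_pmd(...,
		 * in_pf=false). */
		madvise(p, PMD_SZ, MADV_HUGEPAGE);
		if (madvise(p, PMD_SZ, MADV_COLLAPSE))
			perror("MADV_COLLAPSE");
		return 0;
	}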


