From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-kernel@vger.kernel.org
Subject: [PATCH 50/75] mm/huge_memory: Convert __split_huge_pmd() to take a folio
Date: Fri,  4 Feb 2022 19:58:27 +0000
Message-ID: <20220204195852.1751729-51-willy@infradead.org>
In-Reply-To: <20220204195852.1751729-1-willy@infradead.org>

Convert split_huge_pmd_address() at the same time, since it only passes
the folio through and both of its callers already have a folio on hand.
This removes numerous calls to compound_head() and drops the assumption
that a page cannot be larger than a PMD.
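
For illustration, a minimal sketch of the caller-side pattern this
conversion enables (the function name example_unmap_one is hypothetical;
the pattern mirrors the try_to_unmap_one() hunk below):

	/*
	 * A caller holding a struct page derives the folio once with
	 * page_folio(), then passes the folio down.  page_folio() resolves
	 * any constituent page to its folio, so no compound_head() calls
	 * are needed along the way.
	 */
	static bool example_unmap_one(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
	{
		struct folio *folio = page_folio(page);

		/* New convention: pass the folio rather than the page. */
		split_huge_pmd_address(vma, address, false, folio);
		return true;
	}

The folio comparison in __split_huge_pmd() is what drops the PMD-size
assumption: pmd_page(*pmd) may be any page within a folio larger than a
PMD, but page_folio() maps every such page back to the same folio, so
"folio != page_folio(pmd_page(*pmd))" still matches correctly.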

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/huge_mm.h |  8 +++----
 mm/huge_memory.c        | 50 ++++++++++++++++++++---------------------
 mm/rmap.c               |  6 +++--
 3 files changed, 33 insertions(+), 31 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 71c073d411ac..4368b314d9c8 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -194,7 +194,7 @@ static inline int split_huge_page(struct page *page)
 void deferred_split_huge_page(struct page *page);
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long address, bool freeze, struct page *page);
+		unsigned long address, bool freeze, struct folio *folio);
 
 #define split_huge_pmd(__vma, __pmd, __address)				\
 	do {								\
@@ -207,7 +207,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
-		bool freeze, struct page *page);
+		bool freeze, struct folio *folio);
 
 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long address);
@@ -406,9 +406,9 @@ static inline void deferred_split_huge_page(struct page *page) {}
 	do { } while (0)
 
 static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long address, bool freeze, struct page *page) {}
+		unsigned long address, bool freeze, struct folio *folio) {}
 static inline void split_huge_pmd_address(struct vm_area_struct *vma,
-		unsigned long address, bool freeze, struct page *page) {}
+		unsigned long address, bool freeze, struct folio *folio) {}
 
 #define split_huge_pud(__vma, __pmd, __address)	\
 	do { } while (0)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 94e591d638eb..f934b93d08ca 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2143,11 +2143,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long address, bool freeze, struct page *page)
+		unsigned long address, bool freeze, struct folio *folio)
 {
 	spinlock_t *ptl;
 	struct mmu_notifier_range range;
-	bool do_unlock_page = false;
+	bool do_unlock_folio = false;
 	pmd_t _pmd;
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
@@ -2157,20 +2157,20 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	ptl = pmd_lock(vma->vm_mm, pmd);
 
 	/*
-	 * If caller asks to setup a migration entries, we need a page to check
-	 * pmd against. Otherwise we can end up replacing wrong page.
+	 * If caller asks to setup a migration entry, we need a folio to check
+	 * pmd against. Otherwise we can end up replacing wrong folio.
 	 */
-	VM_BUG_ON(freeze && !page);
-	if (page) {
-		VM_WARN_ON_ONCE(!PageLocked(page));
-		if (page != pmd_page(*pmd))
+	VM_BUG_ON(freeze && !folio);
+	if (folio) {
+		VM_WARN_ON_ONCE(!folio_test_locked(folio));
+		if (folio != page_folio(pmd_page(*pmd)))
 			goto out;
 	}
 
 repeat:
 	if (pmd_trans_huge(*pmd)) {
-		if (!page) {
-			page = pmd_page(*pmd);
+		if (!folio) {
+			folio = page_folio(pmd_page(*pmd));
 			/*
 			 * An anonymous page must be locked, to ensure that a
 			 * concurrent reuse_swap_page() sees stable mapcount;
@@ -2178,33 +2178,33 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 * and page lock must not be taken when zap_pmd_range()
 			 * calls __split_huge_pmd() while i_mmap_lock is held.
 			 */
-			if (PageAnon(page)) {
-				if (unlikely(!trylock_page(page))) {
-					get_page(page);
+			if (folio_test_anon(folio)) {
+				if (unlikely(!folio_trylock(folio))) {
+					folio_get(folio);
 					_pmd = *pmd;
 					spin_unlock(ptl);
-					lock_page(page);
+					folio_lock(folio);
 					spin_lock(ptl);
 					if (unlikely(!pmd_same(*pmd, _pmd))) {
-						unlock_page(page);
-						put_page(page);
-						page = NULL;
+						folio_unlock(folio);
+						folio_put(folio);
+						folio = NULL;
 						goto repeat;
 					}
-					put_page(page);
+					folio_put(folio);
 				}
-				do_unlock_page = true;
+				do_unlock_folio = true;
 			}
 		}
-		if (PageMlocked(page))
-			clear_page_mlock(page);
+		if (folio_test_mlocked(folio))
+			folio_end_mlock(folio);
 	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
 		goto out;
 	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
 out:
 	spin_unlock(ptl);
-	if (do_unlock_page)
-		unlock_page(page);
+	if (do_unlock_folio)
+		folio_unlock(folio);
 	/*
 	 * No need to double call mmu_notifier->invalidate_range() callback.
 	 * They are 3 cases to consider inside __split_huge_pmd_locked():
@@ -2222,7 +2222,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
-		bool freeze, struct page *page)
+		bool freeze, struct folio *folio)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
@@ -2243,7 +2243,7 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
 
 	pmd = pmd_offset(pud, address);
 
-	__split_huge_pmd(vma, pmd, address, freeze, page);
+	__split_huge_pmd(vma, pmd, address, freeze, folio);
 }
 
 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
diff --git a/mm/rmap.c b/mm/rmap.c
index a383e25fb196..42a147746ff8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1422,6 +1422,7 @@ void page_remove_rmap(struct page *page, bool compound)
 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
 {
+	struct folio *folio = page_folio(page);
 	struct mm_struct *mm = vma->vm_mm;
 	struct page_vma_mapped_walk pvmw = {
 		.vma = vma,
@@ -1444,7 +1445,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		pvmw.flags = PVMW_SYNC;
 
 	if (flags & TTU_SPLIT_HUGE_PMD)
-		split_huge_pmd_address(vma, address, false, page);
+		split_huge_pmd_address(vma, address, false, folio);
 
 	/*
 	 * For THP, we have to assume the worse case ie pmd for invalidation.
@@ -1721,6 +1722,7 @@ void try_to_unmap(struct page *page, enum ttu_flags flags)
 static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 		     unsigned long address, void *arg)
 {
+	struct folio *folio = page_folio(page);
 	struct mm_struct *mm = vma->vm_mm;
 	struct page_vma_mapped_walk pvmw = {
 		.vma = vma,
@@ -1747,7 +1749,7 @@ static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
 	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
 	 */
 	if (flags & TTU_SPLIT_HUGE_PMD)
-		split_huge_pmd_address(vma, address, true, page);
+		split_huge_pmd_address(vma, address, true, folio);
 
 	/*
 	 * For THP, we have to assume the worse case ie pmd for invalidation.
-- 
2.34.1


