From: Qi Zheng <zhengqi.arch@bytedance.com>
To: david@redhat.com, hughd@google.com, willy@infradead.org,
	mgorman@suse.de, muchun.song@linux.dev, vbabka@kernel.org,
	akpm@linux-foundation.org, zokeefe@google.com,
	rientjes@google.com, jannh@google.com, peterx@redhat.com
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, x86@kernel.org,
	Qi Zheng <zhengqi.arch@bytedance.com>
Subject: [PATCH v1 4/7] mm: zap_present_ptes: return whether the PTE page is unreclaimable
Date: Thu, 17 Oct 2024 17:47:23 +0800
Message-ID: <84a9fddde9993e4a5108f188193fd9c8ff1c5c31.1729157502.git.zhengqi.arch@bytedance.com>
In-Reply-To: <cover.1729157502.git.zhengqi.arch@bytedance.com>

In the following two cases, the PTE page cannot be empty and therefore
cannot be reclaimed:

1. an uffd-wp pte was re-installed
2. should_zap_folio() returned false

Let's expose this information to the caller through the new
is_pt_unreclaimable parameter, so that subsequent commits can use it in
zap_pte_range() to detect whether the PTE page can be reclaimed (see the
sketch below).
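
As an illustration (not part of this patch), a subsequent commit could
aggregate the flag in zap_pte_range() roughly as follows; note that
try_to_free_pte() is a hypothetical name standing in for the reclaim
hook that patch 5/7 introduces:

	/*
	 * Sketch only: a single unreclaimable PTE pins the whole PTE
	 * page, so one sticky flag per PMD range is enough.
	 */
	bool can_reclaim_pt = true;

	do {
		bool is_pt_unreclaimable = false;

		nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr,
				      addr, details, rss, &force_flush,
				      &force_break, &is_pt_unreclaimable);
		if (is_pt_unreclaimable)
			can_reclaim_pt = false;
	} while (pte += nr, addr += nr * PAGE_SIZE, addr != end);

	if (can_reclaim_pt)
		try_to_free_pte(mm, pmd, start);	/* hypothetical hook */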

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
---
 mm/memory.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 534d9d52b5ebe..cc89ede8ce2ab 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1501,7 +1501,7 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, struct folio *folio,
 		struct page *page, pte_t *pte, pte_t ptent, unsigned int nr,
 		unsigned long addr, struct zap_details *details, int *rss,
-		bool *force_flush, bool *force_break)
+		bool *force_flush, bool *force_break, bool *is_pt_unreclaimable)
 {
 	struct mm_struct *mm = tlb->mm;
 	bool delay_rmap = false;
@@ -1527,8 +1527,8 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
 	arch_check_zapped_pte(vma, ptent);
 	tlb_remove_tlb_entries(tlb, pte, nr, addr);
 	if (unlikely(userfaultfd_pte_wp(vma, ptent)))
-		zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details,
-					      ptent);
+		*is_pt_unreclaimable =
+			zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent);
 
 	if (!delay_rmap) {
 		folio_remove_rmap_ptes(folio, page, nr, vma);
@@ -1552,7 +1552,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
 		unsigned int max_nr, unsigned long addr,
 		struct zap_details *details, int *rss, bool *force_flush,
-		bool *force_break)
+		bool *force_break, bool *is_pt_unreclaimable)
 {
 	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
 	struct mm_struct *mm = tlb->mm;
@@ -1567,15 +1567,18 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
 		arch_check_zapped_pte(vma, ptent);
 		tlb_remove_tlb_entry(tlb, pte, addr);
 		if (userfaultfd_pte_wp(vma, ptent))
-			zap_install_uffd_wp_if_needed(vma, addr, pte, 1,
-						      details, ptent);
+			*is_pt_unreclaimable =
+				zap_install_uffd_wp_if_needed(vma, addr, pte, 1,
+							      details, ptent);
 		ksm_might_unmap_zero_page(mm, ptent);
 		return 1;
 	}
 
 	folio = page_folio(page);
-	if (unlikely(!should_zap_folio(details, folio)))
+	if (unlikely(!should_zap_folio(details, folio))) {
+		*is_pt_unreclaimable = true;
 		return 1;
+	}
 
 	/*
 	 * Make sure that the common "small folio" case is as fast as possible
@@ -1587,11 +1590,12 @@ static inline int zap_present_ptes(struct mmu_gather *tlb,
 
 		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
 				       addr, details, rss, force_flush,
-				       force_break);
+				       force_break, is_pt_unreclaimable);
 		return nr;
 	}
 	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
-			       details, rss, force_flush, force_break);
+			       details, rss, force_flush, force_break,
+			       is_pt_unreclaimable);
 	return 1;
 }
 
@@ -1622,6 +1626,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		pte_t ptent = ptep_get(pte);
 		struct folio *folio;
 		struct page *page;
+		bool is_pt_unreclaimable = false;
 		int max_nr;
 
 		nr = 1;
@@ -1635,7 +1640,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			max_nr = (end - addr) / PAGE_SIZE;
 			nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr,
 					      addr, details, rss, &force_flush,
-					      &force_break);
+					      &force_break, &is_pt_unreclaimable);
 			if (unlikely(force_break)) {
 				addr += nr * PAGE_SIZE;
 				break;
-- 
2.20.1
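
For reference, the bool return consumed above comes from patch 3/7 ("mm:
zap_install_uffd_wp_if_needed: return whether uffd-wp pte has been
re-installed"). A rough, paraphrased sketch of that interface (not the
actual diff; the bool return of pte_install_uffd_wp_if_needed() is an
assumption based on that patch's subject):

	static bool
	zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *pte, int nr,
				      struct zap_details *details, pte_t pteval)
	{
		bool was_installed = false;

		/* Zap on anonymous always means dropping everything. */
		if (vma_is_anonymous(vma))
			return false;

		if (zap_drop_markers(details))
			return false;

		for (;;) {
			/* report whether a pte_marker was left behind */
			if (pte_install_uffd_wp_if_needed(vma, addr, pte,
							  pteval))
				was_installed = true;
			if (--nr == 0)
				break;
			pte++;
			addr += PAGE_SIZE;
		}
		return was_installed;
	}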




Thread overview: 18+ messages
2024-10-17  9:47 [PATCH v1 0/7] synchronously scan and reclaim empty user PTE pages Qi Zheng
2024-10-17  9:47 ` [PATCH v1 1/7] mm: khugepaged: retract_page_tables() use pte_offset_map_lock() Qi Zheng
2024-10-17 18:00   ` Jann Horn
2024-10-18  2:15     ` Qi Zheng
2024-10-17  9:47 ` [PATCH v1 2/7] mm: make zap_pte_range() handle full within-PMD range Qi Zheng
2024-10-17 18:06   ` Jann Horn
2024-10-18  2:23     ` Qi Zheng
2024-10-17  9:47 ` [PATCH v1 3/7] mm: zap_install_uffd_wp_if_needed: return whether uffd-wp pte has been re-installed Qi Zheng
2024-10-17  9:47 ` Qi Zheng [this message]
2024-10-17  9:47 ` [PATCH v1 5/7] mm: pgtable: try to reclaim empty PTE page in madvise(MADV_DONTNEED) Qi Zheng
2024-10-17 18:43   ` Jann Horn
2024-10-18  2:53     ` Qi Zheng
2024-10-18  2:58       ` Qi Zheng
2024-10-24 13:21     ` Will Deacon
2024-10-25  2:43       ` Qi Zheng
2024-10-17  9:47 ` [PATCH v1 6/7] x86: mm: free page table pages by RCU instead of semi RCU Qi Zheng
2024-10-17  9:47 ` [PATCH v1 7/7] x86: select ARCH_SUPPORTS_PT_RECLAIM if X86_64 Qi Zheng
2024-10-23  6:54   ` kernel test robot
