From: Byungchul Park <byungchul@sk.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: kernel_team@skhynix.com, akpm@linux-foundation.org,
	vernhao@tencent.com, mgorman@techsingularity.net,
	hughd@google.com, willy@infradead.org, david@redhat.com,
	peterz@infradead.org, luto@kernel.org, tglx@linutronix.de,
	mingo@redhat.com, bp@alien8.de, rjgolo@gmail.com
Subject: [RFC PATCH v12 based on mm-unstable as of Feb 21, 2025 20/25] mm, fs: skip tlb flushes for luf'd filemap that already has been done
Date: Wed, 26 Feb 2025 21:01:27 +0900
Message-ID: <20250226120132.28469-20-byungchul@sk.com>
In-Reply-To: <20250226120132.28469-1-byungchul@sk.com>

For a luf'd filemap, a tlb shootdown is currently performed whenever the
page cache is updated, regardless of whether the required tlb flushes
have already been done.

By storing luf meta data in struct address_space and keeping it up to
date, those unnecessary tlb flushes can be skipped.
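
Each struct address_space now carries its own struct luf_batch, which
set_tlb_ubc_flush_pending() folds pending flushes into whenever a folio
of that mapping is luf'd.  Callers that used to call luf_flush(0) go
through a new helper, luf_flush_mapping(), which behaves roughly as
below (condensed from the mm/rmap.c hunk in this patch; the comments
are explanatory only):

	void luf_flush_mapping(struct address_space *mapping)
	{
		struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
		struct luf_batch *lb;
		unsigned long flags, lb_ugen;

		if (!mapping)
			return;

		/* Fold this mapping's pending flushes into the task's batch. */
		lb = &mapping->luf_batch;
		read_lock_irqsave(&lb->lock, flags);
		fold_batch(tlb_ubc, &lb->batch, false);
		lb_ugen = lb->ugen;
		read_unlock_irqrestore(&lb->lock, flags);

		/* Nothing to do if flushes up to lb_ugen have already been done. */
		if (arch_tlbbatch_diet(&tlb_ubc->arch, lb_ugen))
			return;

		try_to_unmap_flush();
	}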

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
 fs/inode.c               |  1 +
 include/linux/fs.h       |  4 ++-
 include/linux/mm_types.h |  2 ++
 mm/memory.c              |  4 +--
 mm/rmap.c                | 59 +++++++++++++++++++++++++---------------
 mm/truncate.c            | 14 +++++-----
 mm/vmscan.c              |  2 +-
 7 files changed, 53 insertions(+), 33 deletions(-)

diff --git a/fs/inode.c b/fs/inode.c
index 5587aabdaa5ee..752fb2df6f3b3 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -475,6 +475,7 @@ static void __address_space_init_once(struct address_space *mapping)
 	init_rwsem(&mapping->i_mmap_rwsem);
 	INIT_LIST_HEAD(&mapping->i_private_list);
 	spin_lock_init(&mapping->i_private_lock);
+	luf_batch_init(&mapping->luf_batch);
 	mapping->i_mmap = RB_ROOT_CACHED;
 }
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 78aaf769d32d1..a2f014b31028f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -498,6 +498,7 @@ extern const struct address_space_operations empty_aops;
  * @i_private_lock: For use by the owner of the address_space.
  * @i_private_list: For use by the owner of the address_space.
  * @i_private_data: For use by the owner of the address_space.
+ * @luf_batch: Data to track need of tlb flush by luf.
  */
 struct address_space {
 	struct inode		*host;
@@ -519,6 +520,7 @@ struct address_space {
 	struct list_head	i_private_list;
 	struct rw_semaphore	i_mmap_rwsem;
 	void *			i_private_data;
+	struct luf_batch	luf_batch;
 } __attribute__((aligned(sizeof(long)))) __randomize_layout;
 	/*
 	 * On most architectures that alignment is already the case; but
@@ -545,7 +547,7 @@ static inline int mapping_write_begin(struct file *file,
 	 * Ensure to clean stale tlb entries for this mapping.
 	 */
 	if (!ret)
-		luf_flush(0);
+		luf_flush_mapping(mapping);
 
 	return ret;
 }
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c32ef19a25056..d73a3eb0f7b21 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1362,10 +1362,12 @@ extern void tlb_finish_mmu(struct mmu_gather *tlb);
 void luf_flush(unsigned short luf_key);
 void luf_flush_mm(struct mm_struct *mm);
 void luf_flush_vma(struct vm_area_struct *vma);
+void luf_flush_mapping(struct address_space *mapping);
 #else
 static inline void luf_flush(unsigned short luf_key) {}
 static inline void luf_flush_mm(struct mm_struct *mm) {}
 static inline void luf_flush_vma(struct vm_area_struct *vma) {}
+static inline void luf_flush_mapping(struct address_space *mapping) {}
 #endif
 
 struct vm_fault;
diff --git a/mm/memory.c b/mm/memory.c
index 93e5879583b07..62137ab258d2c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6296,10 +6296,10 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 	if (flush) {
 		/*
 		 * If it has a VM_SHARED mapping, all the mms involved
-		 * should be luf_flush'ed.
+		 * in the struct address_space should be luf_flush'ed.
 		 */
 		if (mapping)
-			luf_flush(0);
+			luf_flush_mapping(mapping);
 		luf_flush_mm(mm);
 	}
 
diff --git a/mm/rmap.c b/mm/rmap.c
index fe9c4606ae542..f5c5190be24e0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -691,7 +691,7 @@ void fold_batch(struct tlbflush_unmap_batch *dst,
 #define NR_LUF_BATCH (1 << (sizeof(short) * 8))
 
 /*
- * Use 0th entry as accumulated batch.
+ * XXX: Reserve the 0th entry for later use.
  */
 struct luf_batch luf_batch[NR_LUF_BATCH];
 
@@ -936,7 +936,7 @@ void luf_flush_vma(struct vm_area_struct *vma)
 		mapping = vma->vm_file->f_mapping;
 
 	if (mapping)
-		luf_flush(0);
+		luf_flush_mapping(mapping);
 	luf_flush_mm(mm);
 }
 
@@ -962,6 +962,29 @@ void luf_flush_mm(struct mm_struct *mm)
 	try_to_unmap_flush();
 }
 
+void luf_flush_mapping(struct address_space *mapping)
+{
+	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+	struct luf_batch *lb;
+	unsigned long flags;
+	unsigned long lb_ugen;
+
+	if (!mapping)
+		return;
+
+	lb = &mapping->luf_batch;
+	read_lock_irqsave(&lb->lock, flags);
+	fold_batch(tlb_ubc, &lb->batch, false);
+	lb_ugen = lb->ugen;
+	read_unlock_irqrestore(&lb->lock, flags);
+
+	if (arch_tlbbatch_diet(&tlb_ubc->arch, lb_ugen))
+		return;
+
+	try_to_unmap_flush();
+}
+EXPORT_SYMBOL(luf_flush_mapping);
+
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -1010,7 +1033,8 @@ void try_to_unmap_flush_dirty(void)
 
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
 		unsigned long start, unsigned long end,
-		struct vm_area_struct *vma)
+		struct vm_area_struct *vma,
+		struct address_space *mapping)
 {
 	struct tlbflush_unmap_batch *tlb_ubc;
 	int batch;
@@ -1032,27 +1056,15 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
 		tlb_ubc = &current->tlb_ubc;
 	else {
 		tlb_ubc = &current->tlb_ubc_ro;
+
 		fold_luf_batch_mm(&mm->luf_batch, mm);
+		if (mapping)
+			fold_luf_batch_mm(&mapping->luf_batch, mm);
 	}
 
 	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end);
 	tlb_ubc->flush_required = true;
 
-	if (can_luf_test()) {
-		struct luf_batch *lb;
-		unsigned long flags;
-
-		/*
-		 * Accumulate to the 0th entry right away so that
-		 * luf_flush(0) can be uesed to properly perform pending
-		 * TLB flush once this unmapping is observed.
-		 */
-		lb = &luf_batch[0];
-		write_lock_irqsave(&lb->lock, flags);
-		__fold_luf_batch(lb, tlb_ubc, new_luf_ugen());
-		write_unlock_irqrestore(&lb->lock, flags);
-	}
-
 	/*
 	 * Ensure compiler does not re-order the setting of tlb_flush_batched
 	 * before the PTE is cleared.
@@ -1134,7 +1146,8 @@ void flush_tlb_batched_pending(struct mm_struct *mm)
 #else
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
 		unsigned long start, unsigned long end,
-		struct vm_area_struct *vma)
+		struct vm_area_struct *vma,
+		struct address_space *mapping)
 {
 }
 
@@ -1511,7 +1524,7 @@ int folio_mkclean(struct folio *folio)
 	/*
 	 * Ensure to clean stale tlb entries for this mapping.
 	 */
-	luf_flush(0);
+	luf_flush_mapping(mapping);
 
 	return cleaned;
 }
@@ -2198,6 +2211,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 	unsigned long nr_pages = 1, end_addr;
 	unsigned long pfn;
 	unsigned long hsz = 0;
+	struct address_space *mapping = folio_mapping(folio);
 
 	/*
 	 * When racing against e.g. zap_pte_range() on another cpu,
@@ -2359,7 +2373,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * and traps if the PTE is unmapped.
 			 */
 			if (should_defer_flush(mm, flags))
-				set_tlb_ubc_flush_pending(mm, pteval, address, end_addr, vma);
+				set_tlb_ubc_flush_pending(mm, pteval, address, end_addr, vma, mapping);
 			else
 				flush_tlb_range(vma, address, end_addr);
 			if (pte_dirty(pteval))
@@ -2611,6 +2625,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 	enum ttu_flags flags = (enum ttu_flags)(long)arg;
 	unsigned long pfn;
 	unsigned long hsz = 0;
+	struct address_space *mapping = folio_mapping(folio);
 
 	/*
 	 * When racing against e.g. zap_pte_range() on another cpu,
@@ -2758,7 +2773,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				 */
 				pteval = ptep_get_and_clear(mm, address, pvmw.pte);
 
-				set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE, vma);
+				set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE, vma, mapping);
 			} else {
 				pteval = ptep_clear_flush(vma, address, pvmw.pte);
 			}
diff --git a/mm/truncate.c b/mm/truncate.c
index 68c9ded2f789b..8c133b93cefe8 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -142,7 +142,7 @@ void folio_invalidate(struct folio *folio, size_t offset, size_t length)
 	/*
 	 * Ensure to clean stale tlb entries for this mapping.
 	 */
-	luf_flush(0);
+	luf_flush_mapping(folio->mapping);
 }
 EXPORT_SYMBOL_GPL(folio_invalidate);
 
@@ -183,7 +183,7 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 	/*
 	 * Ensure to clean stale tlb entries for this mapping.
 	 */
-	luf_flush(0);
+	luf_flush_mapping(mapping);
 	return 0;
 }
 
@@ -234,7 +234,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 	/*
 	 * Ensure to clean stale tlb entries for this mapping.
 	 */
-	luf_flush(0);
+	luf_flush_mapping(folio->mapping);
 
 	if (!folio_test_large(folio))
 		return true;
@@ -324,7 +324,7 @@ long mapping_evict_folio(struct address_space *mapping, struct folio *folio)
 	/*
 	 * Ensure to clean stale tlb entries for this mapping.
 	 */
-	luf_flush(0);
+	luf_flush_mapping(mapping);
 
 	return ret;
 }
@@ -459,7 +459,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	/*
 	 * Ensure to clean stale tlb entries for this mapping.
 	 */
-	luf_flush(0);
+	luf_flush_mapping(mapping);
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
 
@@ -579,7 +579,7 @@ unsigned long mapping_try_invalidate(struct address_space *mapping,
 	/*
 	 * Ensure to clean stale tlb entries for this mapping.
 	 */
-	luf_flush(0);
+	luf_flush_mapping(mapping);
 	return count;
 }
 
@@ -749,7 +749,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	/*
 	 * Ensure to clean stale tlb entries for this mapping.
 	 */
-	luf_flush(0);
+	luf_flush_mapping(mapping);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 422b9a03a6753..f145c09629b97 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -853,7 +853,7 @@ long remove_mapping(struct address_space *mapping, struct folio *folio)
 	/*
 	 * Ensure to clean stale tlb entries for this mapping.
 	 */
-	luf_flush(0);
+	luf_flush_mapping(mapping);
 
 	return ret;
 }
-- 
2.17.1


