From: Kairui Song <ryncsn@gmail.com>
To: linux-mm@kvack.org
Cc: Andrew Morton <akpm@linux-foundation.org>,
Baoquan He <bhe@redhat.com>, Barry Song <baohua@kernel.org>,
Chris Li <chrisl@kernel.org>, Nhat Pham <nphamcs@gmail.com>,
Yosry Ahmed <yosry.ahmed@linux.dev>,
David Hildenbrand <david@kernel.org>,
Johannes Weiner <hannes@cmpxchg.org>,
Youngjun Park <youngjun.park@lge.com>,
Hugh Dickins <hughd@google.com>,
Baolin Wang <baolin.wang@linux.alibaba.com>,
Ying Huang <ying.huang@linux.alibaba.com>,
Kemeng Shi <shikemeng@huaweicloud.com>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
"Matthew Wilcox (Oracle)" <willy@infradead.org>,
linux-kernel@vger.kernel.org, Kairui Song <kasong@tencent.com>
Subject: [PATCH v4 16/19] mm, swap: check swap table directly for checking cache
Date: Fri, 05 Dec 2025 03:29:24 +0800
Message-ID: <20251205-swap-table-p2-v4-16-cb7e28a26a40@tencent.com>
In-Reply-To: <20251205-swap-table-p2-v4-0-cb7e28a26a40@tencent.com>
From: Kairui Song <kasong@tencent.com>
Instead of looking at the swap map, check the swap table directly to
tell if a swap slot is cached. This prepares for the removal of
SWAP_HAS_CACHE.
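Roughly, the check changes shape like this (a minimal sketch of the
pattern, using the helpers as they appear in this series; fragments,
not a complete function):

    /* Before: infer the cache state from the swap map flag. */
    cached = READ_ONCE(si->swap_map[offset]) & SWAP_HAS_CACHE;

    /* After: read the slot's swap table entry and check whether
     * it points to a folio in the swap cache.
     */
    swp_tb = swap_table_get(__swap_entry_to_cluster(entry),
                            swp_cluster_offset(entry));
    cached = swp_tb_is_folio(swp_tb);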
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/swap.h | 11 ++++++++---
mm/swap_state.c | 16 ++++++++++++++++
mm/swapfile.c | 55 +++++++++++++++++++++++++++++--------------------------
mm/userfaultfd.c | 10 +++-------
4 files changed, 56 insertions(+), 36 deletions(-)
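Note for reviewers (not part of the commit message): with this change,
the "is this slot free or reclaimable" tests combine swap_count() with
a swap table lookup instead of switch-ing on the raw swap_map value.
A minimal sketch of the pattern used below in cluster_scan_range()
(fragment only; locking as in the surrounding functions):

    if (swap_count(map[offset]))
        return false;               /* slot still has references */
    swp_tb = __swap_table_get(ci, offset % SWAPFILE_CLUSTER);
    if (swp_tb_is_folio(swp_tb))
        *need_reclaim = true;       /* cache-only slot, reclaimable */
    /* otherwise the table entry must be null: a truly free slot */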
diff --git a/mm/swap.h b/mm/swap.h
index ec1ef7d0c35b..3692e143eeba 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -275,6 +275,7 @@ void __swapcache_clear_cached(struct swap_info_struct *si,
* swap entries in the page table, similar to locking swap cache folio.
* - See the comment of get_swap_device() for more complex usage.
*/
+bool swap_cache_has_folio(swp_entry_t entry);
struct folio *swap_cache_get_folio(swp_entry_t entry);
void *swap_cache_get_shadow(swp_entry_t entry);
void swap_cache_del_folio(struct folio *folio);
@@ -335,8 +336,6 @@ static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr,
static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
{
- struct swap_info_struct *si = __swap_entry_to_info(entry);
- pgoff_t offset = swp_offset(entry);
int i;
/*
@@ -345,8 +344,9 @@ static inline int non_swapcache_batch(swp_entry_t entry, int max_nr)
* be in conflict with the folio in swap cache.
*/
for (i = 0; i < max_nr; i++) {
- if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))
+ if (swap_cache_has_folio(entry))
return i;
+ entry.val++;
}
return i;
@@ -449,6 +449,11 @@ static inline int swap_writeout(struct folio *folio,
return 0;
}
+static inline bool swap_cache_has_folio(swp_entry_t entry)
+{
+ return false;
+}
+
static inline struct folio *swap_cache_get_folio(swp_entry_t entry)
{
return NULL;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f478a16f43e9..6bf7556ca408 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -103,6 +103,22 @@ struct folio *swap_cache_get_folio(swp_entry_t entry)
return NULL;
}
+/**
+ * swap_cache_has_folio - Checks if a swap slot is cached.
+ * @entry: swap entry indicating the slot.
+ *
+ * Context: Caller must ensure @entry is valid and protect the swap
+ * device with a reference count or locks.
+ */
+bool swap_cache_has_folio(swp_entry_t entry)
+{
+ unsigned long swp_tb;
+
+ swp_tb = swap_table_get(__swap_entry_to_cluster(entry),
+ swp_cluster_offset(entry));
+ return swp_tb_is_folio(swp_tb);
+}
+
/**
* swap_cache_get_shadow - Looks up a shadow in the swap cache.
* @entry: swap entry used for the lookup.
diff --git a/mm/swapfile.c b/mm/swapfile.c
index aaa8790241a8..2cb3bfef3234 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -792,23 +792,18 @@ static bool cluster_reclaim_range(struct swap_info_struct *si,
unsigned int nr_pages = 1 << order;
unsigned long offset = start, end = start + nr_pages;
unsigned char *map = si->swap_map;
- int nr_reclaim;
+ unsigned long swp_tb;
spin_unlock(&ci->lock);
do {
- switch (READ_ONCE(map[offset])) {
- case 0:
+ if (swap_count(READ_ONCE(map[offset])))
break;
- case SWAP_HAS_CACHE:
- nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
- if (nr_reclaim < 0)
- goto out;
- break;
- default:
- goto out;
+ swp_tb = swap_table_get(ci, offset % SWAPFILE_CLUSTER);
+ if (swp_tb_is_folio(swp_tb)) {
+ if (__try_to_reclaim_swap(si, offset, TTRS_ANYWAY) < 0)
+ break;
}
} while (++offset < end);
-out:
spin_lock(&ci->lock);
/*
@@ -829,37 +824,41 @@ static bool cluster_reclaim_range(struct swap_info_struct *si,
* Recheck the range no matter whether reclaim succeeded or not, the
* slot could have been freed while we are not holding the lock.
*/
- for (offset = start; offset < end; offset++)
- if (READ_ONCE(map[offset]))
+ for (offset = start; offset < end; offset++) {
+ swp_tb = __swap_table_get(ci, offset % SWAPFILE_CLUSTER);
+ if (swap_count(map[offset]) || !swp_tb_is_null(swp_tb))
return false;
+ }
return true;
}
static bool cluster_scan_range(struct swap_info_struct *si,
struct swap_cluster_info *ci,
- unsigned long start, unsigned int nr_pages,
+ unsigned long offset, unsigned int nr_pages,
bool *need_reclaim)
{
- unsigned long offset, end = start + nr_pages;
+ unsigned long end = offset + nr_pages;
unsigned char *map = si->swap_map;
+ unsigned long swp_tb;
if (cluster_is_empty(ci))
return true;
- for (offset = start; offset < end; offset++) {
- switch (READ_ONCE(map[offset])) {
- case 0:
- continue;
- case SWAP_HAS_CACHE:
+ do {
+ if (swap_count(map[offset]))
+ return false;
+ swp_tb = __swap_table_get(ci, offset % SWAPFILE_CLUSTER);
+ if (swp_tb_is_folio(swp_tb)) {
+ WARN_ON_ONCE(!(map[offset] & SWAP_HAS_CACHE));
if (!vm_swap_full())
return false;
*need_reclaim = true;
- continue;
- default:
- return false;
+ } else {
+ /* An entry with no count and no cache must be null */
+ VM_WARN_ON_ONCE(!swp_tb_is_null(swp_tb));
}
- }
+ } while (++offset < end);
return true;
}
@@ -1030,7 +1029,8 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
to_scan--;
while (offset < end) {
- if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
+ if (!swap_count(READ_ONCE(map[offset])) &&
+ swp_tb_is_folio(__swap_table_get(ci, offset % SWAPFILE_CLUSTER))) {
spin_unlock(&ci->lock);
nr_reclaim = __try_to_reclaim_swap(si, offset,
TTRS_ANYWAY);
@@ -1980,6 +1980,7 @@ void swap_put_entries_direct(swp_entry_t entry, int nr)
struct swap_info_struct *si;
bool any_only_cache = false;
unsigned long offset;
+ unsigned long swp_tb;
si = get_swap_device(entry);
if (WARN_ON_ONCE(!si))
@@ -2004,7 +2005,9 @@ void swap_put_entries_direct(swp_entry_t entry, int nr)
*/
for (offset = start_offset; offset < end_offset; offset += nr) {
nr = 1;
- if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
+ swp_tb = swap_table_get(__swap_offset_to_cluster(si, offset),
+ offset % SWAPFILE_CLUSTER);
+ if (!swap_count(READ_ONCE(si->swap_map[offset])) && swp_tb_is_folio(swp_tb)) {
/*
* Folios are always naturally aligned in swap so
* advance forward to the next boundary. Zero means no
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index e6dfd5f28acd..3f28aa319988 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1190,17 +1190,13 @@ static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma,
* Check if the swap entry is cached after acquiring the src_pte
* lock. Otherwise, we might miss a newly loaded swap cache folio.
*
- * Check swap_map directly to minimize overhead, READ_ONCE is sufficient.
* We are trying to catch newly added swap cache, the only possible case is
* when a folio is swapped in and out again staying in swap cache, using the
* same entry before the PTE check above. The PTL is acquired and released
- * twice, each time after updating the swap_map's flag. So holding
- * the PTL here ensures we see the updated value. False positive is possible,
- * e.g. SWP_SYNCHRONOUS_IO swapin may set the flag without touching the
- * cache, or during the tiny synchronization window between swap cache and
- * swap_map, but it will be gone very quickly, worst result is retry jitters.
+ * twice, each time after updating the swap table. So holding
+ * the PTL here ensures we see the updated value.
*/
- if (READ_ONCE(si->swap_map[swp_offset(entry)]) & SWAP_HAS_CACHE) {
+ if (swap_cache_has_folio(entry)) {
double_pt_unlock(dst_ptl, src_ptl);
return -EAGAIN;
}
--
2.52.0