From: Baoquan He <bhe@redhat.com>
To: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: akpm@linux-foundation.org, kasong@tencent.com,
tim.c.chen@linux.intel.com, linux-mm@kvack.org,
linux-kernel@vger.kernel.org
Subject: Re: [PATCH v3 6/8] mm: swap: free each cluster individually in swap_entries_put_map_nr()
Date: Mon, 24 Mar 2025 08:52:16 +0800 [thread overview]
Message-ID: <Z+CswNiG/QVPcYB1@MiWiFi-R3L-srv> (raw)
In-Reply-To: <20250320114829.25751-7-shikemeng@huaweicloud.com>
On 03/20/25 at 07:48pm, Kemeng Shi wrote:
> 1. Factor out general swap_entries_put_map() helper to drop entries belong
~~~~~
s/belong/belonging/
> to one cluster. If entries are last map, free entries in batch, otherwise
> put entries with cluster lock acquired and released only once.
> 2. Iterate and call swap_entries_put_map() for each cluster in
> swap_entries_put_nr() to leverage batch-remove for last map belong to one
~~~~~
ditto
> cluster and reduce lock acquire/release in fallback case.
> 3. As swap_entries_put_nr() won't handle SWAP_HAS_CACHE drop, rename it to
> swap_entries_put_map_nr().
> 4. As we won't drop each entry individually with swap_entry_put() now, do
> reclaim in free_swap_and_cache_nr() is because swap_entries_put_map_nr()
~~~ remove 'is' ?
> is general routine to drop reference and the reclaim work should only be
> done in free_swap_and_cache_nr(). Remove stale comment accordingly.
>
> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
> Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
> ---
> mm/swapfile.c | 70 +++++++++++++++++++++++----------------------------
> 1 file changed, 32 insertions(+), 38 deletions(-)
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 6f11619665e8..646efccdd2ec 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1455,25 +1455,10 @@ struct swap_info_struct *get_swap_device(swp_entry_t entry)
> return NULL;
> }
>
> -static unsigned char swap_entry_put(struct swap_info_struct *si,
> - swp_entry_t entry)
> -{
> - struct swap_cluster_info *ci;
> - unsigned long offset = swp_offset(entry);
> - unsigned char usage;
> -
> - ci = lock_cluster(si, offset);
> - usage = swap_entry_put_locked(si, ci, entry, 1);
> - unlock_cluster(ci);
> -
> - return usage;
> -}
> -
> -static bool swap_entries_put_nr(struct swap_info_struct *si,
> - swp_entry_t entry, int nr)
> +static bool swap_entries_put_map(struct swap_info_struct *si,
> + swp_entry_t entry, int nr)
> {
> unsigned long offset = swp_offset(entry);
> - unsigned int type = swp_type(entry);
> struct swap_cluster_info *ci;
> bool has_cache = false;
> unsigned char count;
> @@ -1484,14 +1469,10 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
> count = swap_count(data_race(si->swap_map[offset]));
> if (count != 1 && count != SWAP_MAP_SHMEM)
> goto fallback;
> - /* cross into another cluster */
> - if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
> - goto fallback;
>
> ci = lock_cluster(si, offset);
> if (!swap_is_last_map(si, offset, nr, &has_cache)) {
> - unlock_cluster(ci);
> - goto fallback;
> + goto locked_fallback;
> }
> if (!has_cache)
> swap_entries_free(si, ci, entry, nr);
> @@ -1503,15 +1484,34 @@ static bool swap_entries_put_nr(struct swap_info_struct *si,
> return has_cache;
>
> fallback:
> - for (i = 0; i < nr; i++) {
> - if (data_race(si->swap_map[offset + i])) {
> - count = swap_entry_put(si, swp_entry(type, offset + i));
> - if (count == SWAP_HAS_CACHE)
> - has_cache = true;
> - } else {
> - WARN_ON_ONCE(1);
> - }
> + ci = lock_cluster(si, offset);
> +locked_fallback:
> + for (i = 0; i < nr; i++, entry.val++) {
> + count = swap_entry_put_locked(si, ci, entry, 1);
> + if (count == SWAP_HAS_CACHE)
> + has_cache = true;
> + }
> + unlock_cluster(ci);
> + return has_cache;
> +
> +}
> +
> +static bool swap_entries_put_map_nr(struct swap_info_struct *si,
> + swp_entry_t entry, int nr)
> +{
> + int cluster_nr, cluster_rest;
> + unsigned long offset = swp_offset(entry);
> + bool has_cache = false;
> +
> + cluster_rest = SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER;
> + while (nr) {
> + cluster_nr = min(nr, cluster_rest);
> + has_cache |= swap_entries_put_map(si, entry, cluster_nr);
> + cluster_rest = SWAPFILE_CLUSTER;
> + nr -= cluster_nr;
> + entry.val += cluster_nr;
> }
> +
> return has_cache;
> }
>
> @@ -1806,7 +1806,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
> /*
> * First free all entries in the range.
> */
> - any_only_cache = swap_entries_put_nr(si, entry, nr);
> + any_only_cache = swap_entries_put_map_nr(si, entry, nr);
>
> /*
> * Short-circuit the below loop if none of the entries had their
> @@ -1816,13 +1816,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
> goto out;
>
> /*
> - * Now go back over the range trying to reclaim the swap cache. This is
> - * more efficient for large folios because we will only try to reclaim
> - * the swap once per folio in the common case. If we do
> - * swap_entry_put() and __try_to_reclaim_swap() in the same loop, the
> - * latter will get a reference and lock the folio for every individual
> - * page but will only succeed once the swap slot for every subpage is
> - * zero.
> + * Now go back over the range trying to reclaim the swap cache.
> */
> for (offset = start_offset; offset < end_offset; offset += nr) {
> nr = 1;
> --
> 2.30.0
>
>
next prev parent reply other threads:[~2025-03-24 0:52 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-03-20 11:48 [PATCH v3 0/8] Minor cleanups and improvements to swap freeing code Kemeng Shi
2025-03-20 11:48 ` [PATCH v3 1/8] mm: swap: rename __swap_[entry/entries]_free[_locked] to swap_[entry/entries]_put[_locked] Kemeng Shi
2025-03-20 11:48 ` [PATCH v3 2/8] mm: swap: enable swap_entry_range_free() to drop any kind of last ref Kemeng Shi
2025-03-20 11:48 ` [PATCH v3 3/8] mm: swap: use swap_entries_free() to free swap entry in swap_entry_put_locked() Kemeng Shi
2025-03-20 11:48 ` [PATCH v3 4/8] mm: swap: use swap_entries_free() drop last ref count in swap_entries_put_nr() Kemeng Shi
2025-03-20 11:48 ` [PATCH v3 5/8] mm: swap: drop last SWAP_MAP_SHMEM flag in batch " Kemeng Shi
2025-03-20 11:48 ` [PATCH v3 6/8] mm: swap: free each cluster individually in swap_entries_put_map_nr() Kemeng Shi
2025-03-24 0:52 ` Baoquan He [this message]
2025-03-20 11:48 ` [PATCH v3 7/8] mm: swap: factor out helper to drop cache of entries within a single cluster Kemeng Shi
2025-03-24 1:07 ` Baoquan He
2025-03-20 11:48 ` [PATCH v3 8/8] mm: swap: replace cluster_swap_free_nr() with swap_entries_put_[map/cache]() Kemeng Shi
2025-03-24 9:43 ` [PATCH v3 0/8] Minor cleanups and improvements to swap freeing code Baoquan He
2025-03-25 6:45 ` Kemeng Shi
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=Z+CswNiG/QVPcYB1@MiWiFi-R3L-srv \
--to=bhe@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=kasong@tencent.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=shikemeng@huaweicloud.com \
--cc=tim.c.chen@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox