From: Matt Fleming <matt@readmodwrite.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Jens Axboe <axboe@kernel.dk>, Minchan Kim <minchan@kernel.org>,
Sergey Senozhatsky <senozhatsky@chromium.org>,
Chris Li <chrisl@kernel.org>, Kairui Song <kasong@tencent.com>,
Kemeng Shi <shikemeng@huaweicloud.com>,
Nhat Pham <nphamcs@gmail.com>, Baoquan He <bhe@redhat.com>,
Barry Song <baohua@kernel.org>,
Vlastimil Babka <vbabka@kernel.org>,
Suren Baghdasaryan <surenb@google.com>,
Michal Hocko <mhocko@suse.com>,
Brendan Jackman <jackmanb@google.com>,
Johannes Weiner <hannes@cmpxchg.org>, Zi Yan <ziy@nvidia.com>,
linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-mm@kvack.org, kernel-team@cloudflare.com,
Matt Fleming <mfleming@cloudflare.com>
Subject: [RFC PATCH 1/1] mm: Reduce direct reclaim stalls with RAM-backed swap
Date: Tue, 3 Mar 2026 11:53:58 +0000 [thread overview]
Message-ID: <20260303115358.1323188-2-matt@readmodwrite.com> (raw)
In-Reply-To: <20260303115358.1323188-1-matt@readmodwrite.com>
From: Matt Fleming <mfleming@cloudflare.com>
The current should_reclaim_retry() code does not account for the fact
that the number of logical swap pages available for RAM-backed swap
(zram, brd) is dependent on having enough free physical pages, and
simply always assumes that enough pages are reclaimable to satisfy the
allocation.
For instance, given a system with a 200GiB zram device (10% used) and
100MiB of free physical pages, should_reclaim_retry() incorrectly
concludes that it can write 180GiB worth of anon pages to swap.
Because it appears to be always possible to write to swap, the OOM
killer is delayed and the system retries in direct reclaim for
prolonged periods (20-30 minutes observed in production).
Fix this by excluding anon pages from the reclaimable estimate when all
active swap devices are RAM-backed. Once file-backed pages are exhausted
the watermark check fails and the kernel falls through to OOM as
expected.
To identify RAM-backed swap devices at swapon time, introduce
BLK_FEAT_RAM_BACKED (set by zram and brd) and SWP_RAM_BACKED
(swapfile.c). A cached bool swap_all_ram_backed is maintained under
swap_lock by swap_update_all_ram_backed() during swapon/swapoff, which
is locklessly accessed in should_reclaim_retry().
Signed-off-by: Matt Fleming <mfleming@cloudflare.com>
---
drivers/block/brd.c | 3 ++-
drivers/block/zram/zram_drv.c | 3 ++-
include/linux/blkdev.h | 8 ++++++
include/linux/swap.h | 9 +++++++
mm/page_alloc.c | 23 ++++++++++++++++-
mm/swapfile.c | 47 ++++++++++++++++++++++++++++++++++-
6 files changed, 89 insertions(+), 4 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 00cc8122068f..c021dd51ff0a 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -310,7 +310,8 @@ static int brd_alloc(int i)
.max_discard_segments = 1,
.discard_granularity = PAGE_SIZE,
.features = BLK_FEAT_SYNCHRONOUS |
- BLK_FEAT_NOWAIT,
+ BLK_FEAT_NOWAIT |
+ BLK_FEAT_RAM_BACKED,
};
brd = brd_find_or_alloc_device(i);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index bca33403fc8b..8075bab39e62 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -3074,7 +3074,8 @@ static int zram_add(void)
.max_write_zeroes_sectors = UINT_MAX,
#endif
.features = BLK_FEAT_STABLE_WRITES |
- BLK_FEAT_SYNCHRONOUS,
+ BLK_FEAT_SYNCHRONOUS |
+ BLK_FEAT_RAM_BACKED,
};
struct zram *zram;
int ret, device_id;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d463b9b5a0a5..3666837e8774 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -334,6 +334,9 @@ typedef unsigned int __bitwise blk_features_t;
/* is a zoned device */
#define BLK_FEAT_ZONED ((__force blk_features_t)(1u << 10))
+/* storage is backed by system RAM (e.g. zram, brd) */
+#define BLK_FEAT_RAM_BACKED ((__force blk_features_t)(1u << 11))
+
/* supports PCI(e) p2p requests */
#define BLK_FEAT_PCI_P2PDMA ((__force blk_features_t)(1u << 12))
@@ -1477,6 +1480,11 @@ static inline bool bdev_synchronous(struct block_device *bdev)
return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}
+static inline bool bdev_ram_backed(struct block_device *bdev)
+{
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_RAM_BACKED;
+}
+
static inline bool bdev_stable_writes(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 62fc7499b408..844727fe929c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -216,6 +216,7 @@ enum {
SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
+ SWP_RAM_BACKED = (1 << 13), /* swap device uses main memory (e.g. zram) */
/* add others here before... */
};
@@ -451,6 +452,11 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
+extern bool swap_all_ram_backed;
+static inline bool swap_is_all_ram_backed(void)
+{
+ return READ_ONCE(swap_all_ram_backed);
+}
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
@@ -508,6 +514,9 @@ static inline void put_swap_device(struct swap_info_struct *si)
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
+
+static inline bool swap_is_all_ram_backed(void) { return false; }
+
#define free_folio_and_swap_cache(folio) \
folio_put(folio)
#define free_pages_and_swap_cache(pages, nr) \
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2d4b6f1a554e..c1a8f4620baa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -37,6 +37,7 @@
#include <linux/vmstat.h>
#include <linux/fault-inject.h>
#include <linux/compaction.h>
+#include <linux/swap.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
@@ -4604,6 +4605,7 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
struct zone *zone;
struct zoneref *z;
bool ret = false;
+ bool ram_backed_swap = swap_is_all_ram_backed();
/*
* Costly allocations might have made a progress but this doesn't mean
@@ -4637,7 +4639,26 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
!__cpuset_zone_allowed(zone, gfp_mask))
continue;
- available = reclaimable = zone_reclaimable_pages(zone);
+ if (ram_backed_swap) {
+ /*
+ * Exclude anon pages when all swap is RAM-backed.
+ * The reclaimable estimate assumes anon can be
+ * reclaimed using free swap slots, but those slots
+ * are only logical accounting for zram: storing the
+ * swapped data still consumes physical pages. Free
+ * RAM is the real limit, so counting anon inflates
+ * 'available', keeps the watermark check passing,
+ * and delays falling through to OOM.
+ */
+ reclaimable =
+ zone_page_state_snapshot(zone,
+ NR_ZONE_INACTIVE_FILE) +
+ zone_page_state_snapshot(zone,
+ NR_ZONE_ACTIVE_FILE);
+ } else {
+ reclaimable = zone_reclaimable_pages(zone);
+ }
+ available = reclaimable;
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 94af29d1de88..18713618f35c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -64,6 +64,7 @@ static bool folio_swapcache_freeable(struct folio *folio);
static void move_cluster(struct swap_info_struct *si,
struct swap_cluster_info *ci, struct list_head *list,
enum swap_cluster_flags new_flags);
+static void swap_update_all_ram_backed(void);
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
@@ -74,8 +75,15 @@ atomic_long_t nr_swap_pages;
* check to see if any swap space is available.
*/
EXPORT_SYMBOL_GPL(nr_swap_pages);
-/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
+
+/*
+ * Updates to these globals are serialized by swap_lock.
+ * Read locklessly in vm_swap_full() (total_swap_pages) and
+ * should_reclaim_retry() (swap_all_ram_backed).
+ */
long total_swap_pages;
+bool swap_all_ram_backed;
+
#define DEF_SWAP_PRIO -1
unsigned long swapfile_maximum_size;
#ifdef CONFIG_MIGRATION
@@ -2670,6 +2678,8 @@ static void _enable_swap_info(struct swap_info_struct *si)
plist_add(&si->list, &swap_active_head);
+ swap_update_all_ram_backed();
+
/* Add back to available list */
add_to_avail_list(si, true);
}
@@ -2813,6 +2823,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_lock(&p->lock);
del_from_avail_list(p, true);
plist_del(&p->list, &swap_active_head);
+ swap_update_all_ram_backed();
atomic_long_sub(p->pages, &nr_swap_pages);
total_swap_pages -= p->pages;
spin_unlock(&p->lock);
@@ -3460,6 +3471,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
if (si->bdev && bdev_synchronous(si->bdev))
si->flags |= SWP_SYNCHRONOUS_IO;
+ if (si->bdev && bdev_ram_backed(si->bdev))
+ si->flags |= SWP_RAM_BACKED;
+
if (si->bdev && bdev_nonrot(si->bdev)) {
si->flags |= SWP_SOLIDSTATE;
} else {
@@ -3587,6 +3601,37 @@ void si_swapinfo(struct sysinfo *val)
spin_unlock(&swap_lock);
}
+/*
+ * Recompute swap_all_ram_backed. Must be called with swap_lock held
+ * whenever a swap device is added to or removed from swap_active_head.
+ *
+ * swap_all_ram_backed is true when every active swap device is backed
+ * by main memory (e.g. zram, brd). False if there are no swap devices
+ * configured or at least one of them is backed by disk.
+ *
+ * With RAM-backed swap, swapping out an anonymous page does not yield
+ * net free pages because the driver must allocate physical RAM to
+ * store the compressed data.
+ *
+ * See should_reclaim_retry().
+ */
+static void swap_update_all_ram_backed(void)
+{
+ struct swap_info_struct *si;
+ bool all_ram = !plist_head_empty(&swap_active_head);
+
+ assert_spin_locked(&swap_lock);
+
+ plist_for_each_entry(si, &swap_active_head, list) {
+ if (!(si->flags & SWP_RAM_BACKED)) {
+ all_ram = false;
+ break;
+ }
+ }
+
+ WRITE_ONCE(swap_all_ram_backed, all_ram);
+}
+
/*
* Verify that nr swap entries are valid and increment their swap map counts.
*
--
2.43.0
next prev parent reply other threads:[~2026-03-03 11:54 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-03 11:53 [RFC PATCH 0/1] " Matt Fleming
2026-03-03 11:53 ` Matt Fleming [this message]
2026-03-03 14:10 ` [RFC PATCH 1/1] " Christoph Hellwig
2026-03-03 14:59 ` [RFC PATCH 0/1] " Shakeel Butt
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260303115358.1323188-2-matt@readmodwrite.com \
--to=matt@readmodwrite.com \
--cc=akpm@linux-foundation.org \
--cc=axboe@kernel.dk \
--cc=baohua@kernel.org \
--cc=bhe@redhat.com \
--cc=chrisl@kernel.org \
--cc=hannes@cmpxchg.org \
--cc=jackmanb@google.com \
--cc=kasong@tencent.com \
--cc=kernel-team@cloudflare.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mfleming@cloudflare.com \
--cc=mhocko@suse.com \
--cc=minchan@kernel.org \
--cc=nphamcs@gmail.com \
--cc=senozhatsky@chromium.org \
--cc=shikemeng@huaweicloud.com \
--cc=surenb@google.com \
--cc=vbabka@kernel.org \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox