From: Sergey Senozhatsky <senozhatsky@chromium.org>
To: Andrew Morton <akpm@linux-foundation.org>,
Minchan Kim <minchan@kernel.org>,
Yuwen Chen <ywen.chen@foxmail.com>,
Richard Chang <richardycc@google.com>
Cc: Brian Geffon <bgeffon@google.com>,
Fengyu Lian <licayy@outlook.com>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
linux-block@vger.kernel.org,
Sergey Senozhatsky <senozhatsky@chromium.org>
Subject: [PATCHv6 2/6] zram: add writeback batch size device attr
Date: Sat, 22 Nov 2025 16:40:25 +0900
Message-ID: <20251122074029.3948921-3-senozhatsky@chromium.org>
In-Reply-To: <20251122074029.3948921-1-senozhatsky@chromium.org>

Introduce a writeback_batch_size device attribute so that
the maximum number of in-flight writeback bio requests can
be configured per device at run-time, replacing the
hard-coded ZRAM_WB_REQ_CNT limit. This makes the depth of
batched bio writeback tunable.
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
drivers/block/zram/zram_drv.c | 46 ++++++++++++++++++++++++++++++-----
drivers/block/zram/zram_drv.h | 1 +
2 files changed, 41 insertions(+), 6 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 06ea56f0a00f..5906ba061165 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -590,6 +590,40 @@ static ssize_t writeback_limit_show(struct device *dev,
return sysfs_emit(buf, "%llu\n", val);
}
+static ssize_t writeback_batch_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u32 val;
+
+ if (kstrtouint(buf, 10, &val))
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ down_write(&zram->init_lock);
+ zram->wb_batch_size = val;
+ up_write(&zram->init_lock);
+
+ return len;
+}
+
+static ssize_t writeback_batch_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ val = zram->wb_batch_size;
+ up_read(&zram->init_lock);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
static void reset_bdev(struct zram *zram)
{
if (!zram->backing_dev)
@@ -781,10 +815,7 @@ static void release_wb_ctl(struct zram_wb_ctl *wb_ctl)
kfree(wb_ctl);
}
-/* XXX: should be a per-device sysfs attr */
-#define ZRAM_WB_REQ_CNT 32
-
-static struct zram_wb_ctl *init_wb_ctl(void)
+static struct zram_wb_ctl *init_wb_ctl(struct zram *zram)
{
struct zram_wb_ctl *wb_ctl;
int i;
@@ -799,7 +830,7 @@ static struct zram_wb_ctl *init_wb_ctl(void)
init_waitqueue_head(&wb_ctl->done_wait);
spin_lock_init(&wb_ctl->done_lock);
- for (i = 0; i < ZRAM_WB_REQ_CNT; i++) {
+ for (i = 0; i < zram->wb_batch_size; i++) {
struct zram_wb_req *req;
/*
@@ -1200,7 +1231,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- wb_ctl = init_wb_ctl();
+ wb_ctl = init_wb_ctl(zram);
if (!wb_ctl) {
ret = -ENOMEM;
goto release_init_lock;
@@ -2843,6 +2874,7 @@ static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
+static DEVICE_ATTR_RW(writeback_batch_size);
#endif
#ifdef CONFIG_ZRAM_MULTI_COMP
static DEVICE_ATTR_RW(recomp_algorithm);
@@ -2864,6 +2896,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_writeback.attr,
&dev_attr_writeback_limit.attr,
&dev_attr_writeback_limit_enable.attr,
+ &dev_attr_writeback_batch_size.attr,
#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
@@ -2925,6 +2958,7 @@ static int zram_add(void)
init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
+ zram->wb_batch_size = 32;
spin_lock_init(&zram->wb_limit_lock);
#endif
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 6cee93f9c0d0..1a647f42c1a4 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -129,6 +129,7 @@ struct zram {
struct file *backing_dev;
spinlock_t wb_limit_lock;
bool wb_limit_enable;
+ u32 wb_batch_size;
u64 bd_wb_limit;
struct block_device *bdev;
unsigned long *bitmap;
--
2.52.0.460.gd25c4c69ec-goog