From: Sergey Senozhatsky <senozhatsky@chromium.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Yosry Ahmed <yosry.ahmed@linux.dev>,
Hillf Danton <hdanton@sina.com>, Kairui Song <ryncsn@gmail.com>,
Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
Minchan Kim <minchan@kernel.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Sergey Senozhatsky <senozhatsky@chromium.org>
Subject: [PATCH v9 02/19] zram: permit preemption with active compression stream
Date: Thu, 27 Feb 2025 13:35:20 +0900
Message-ID: <20250227043618.88380-3-senozhatsky@chromium.org>
In-Reply-To: <20250227043618.88380-1-senozhatsky@chromium.org>
Currently, per-CPU stream access is done from a non-preemptible
(atomic) section, which imposes the same atomicity requirements on
compression backends as the per-entry spin-lock does, and makes it
impossible to use algorithms that can schedule/wait/sleep during
compression and decompression.
Switch to a preemptible per-CPU model, similar to the one used
in zswap. Instead of a per-CPU local lock, each stream carries
a mutex which is held for the entire time zram uses it for
compression or decompression, so that the cpu-dead event waits
for zram to stop using a particular per-CPU stream and release
it.
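
For illustration, a minimal caller-side sketch of the new API, using
the names from the hunks below (error handling omitted, so treat this
as a sketch rather than a verbatim excerpt from the driver):

    struct zcomp_strm *zstrm;

    /* Returns with zstrm->lock (a mutex) held; sleeping is allowed */
    zstrm = zcomp_stream_get(zram->comps[prio]);
    ret = zcomp_compress(zram->comps[prio], zstrm, src, &comp_len);
    /* put now takes the stream itself, not the zcomp backend */
    zcomp_stream_put(zstrm);

Since zcomp_stream_put() now takes the stream returned by
zcomp_stream_get() rather than the zcomp backend, every call site in
zram_drv.c is updated accordingly.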
Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Reviewed-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
drivers/block/zram/zcomp.c | 41 +++++++++++++++++++++++++----------
drivers/block/zram/zcomp.h | 6 ++---
drivers/block/zram/zram_drv.c | 20 ++++++++---------
3 files changed, 42 insertions(+), 25 deletions(-)
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index bb514403e305..53e4c37441be 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -6,7 +6,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
-#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>
@@ -109,13 +109,29 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
- local_lock(&comp->stream->lock);
- return this_cpu_ptr(comp->stream);
+ for (;;) {
+ struct zcomp_strm *zstrm = raw_cpu_ptr(comp->stream);
+
+ /*
+ * Inspired by zswap
+ *
+ * stream is returned with ->mutex locked which prevents
+ * cpu_dead() from releasing this stream under us, however
+ * there is still a race window between raw_cpu_ptr() and
+ * mutex_lock(), during which we could have been migrated
+ * from a CPU that has already destroyed its stream. If
+ * so then unlock and re-try on the current CPU.
+ */
+ mutex_lock(&zstrm->lock);
+ if (likely(zstrm->buffer))
+ return zstrm;
+ mutex_unlock(&zstrm->lock);
+ }
}
-void zcomp_stream_put(struct zcomp *comp)
+void zcomp_stream_put(struct zcomp_strm *zstrm)
{
- local_unlock(&comp->stream->lock);
+ mutex_unlock(&zstrm->lock);
}
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
@@ -151,12 +167,9 @@ int zcomp_decompress(struct zcomp *comp, struct zcomp_strm *zstrm,
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
- struct zcomp_strm *zstrm;
+ struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
int ret;
- zstrm = per_cpu_ptr(comp->stream, cpu);
- local_lock_init(&zstrm->lock);
-
ret = zcomp_strm_init(comp, zstrm);
if (ret)
pr_err("Can't allocate a compression stream\n");
@@ -166,16 +179,17 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
struct zcomp *comp = hlist_entry(node, struct zcomp, node);
- struct zcomp_strm *zstrm;
+ struct zcomp_strm *zstrm = per_cpu_ptr(comp->stream, cpu);
- zstrm = per_cpu_ptr(comp->stream, cpu);
+ mutex_lock(&zstrm->lock);
zcomp_strm_free(comp, zstrm);
+ mutex_unlock(&zstrm->lock);
return 0;
}
static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
{
- int ret;
+ int ret, cpu;
comp->stream = alloc_percpu(struct zcomp_strm);
if (!comp->stream)
@@ -186,6 +200,9 @@ static int zcomp_init(struct zcomp *comp, struct zcomp_params *params)
if (ret)
goto cleanup;
+ for_each_possible_cpu(cpu)
+ mutex_init(&per_cpu_ptr(comp->stream, cpu)->lock);
+
ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
if (ret < 0)
goto cleanup;
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index ad5762813842..23b8236b9090 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -3,7 +3,7 @@
#ifndef _ZCOMP_H_
#define _ZCOMP_H_
-#include <linux/local_lock.h>
+#include <linux/mutex.h>
#define ZCOMP_PARAM_NO_LEVEL INT_MIN
@@ -31,7 +31,7 @@ struct zcomp_ctx {
};
struct zcomp_strm {
- local_lock_t lock;
+ struct mutex lock;
/* compression buffer */
void *buffer;
struct zcomp_ctx ctx;
@@ -77,7 +77,7 @@ struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params);
void zcomp_destroy(struct zcomp *comp);
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp);
-void zcomp_stream_put(struct zcomp *comp);
+void zcomp_stream_put(struct zcomp_strm *zstrm);
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
const void *src, unsigned int *dst_len);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index ddf03f6cbeed..545e64ee6234 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1607,7 +1607,7 @@ static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst);
kunmap_local(dst);
zs_unmap_object(zram->mem_pool, handle);
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
return ret;
}
@@ -1768,14 +1768,14 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
kunmap_local(mem);
if (unlikely(ret)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ zcomp_stream_put(zstrm);
pr_err("Compression failed! err=%d\n", ret);
zs_free(zram->mem_pool, handle);
return ret;
}
if (comp_len >= huge_class_size) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ zcomp_stream_put(zstrm);
return write_incompressible_page(zram, page, index);
}
@@ -1799,7 +1799,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
__GFP_HIGHMEM |
__GFP_MOVABLE);
if (IS_ERR_VALUE(handle)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ zcomp_stream_put(zstrm);
atomic64_inc(&zram->stats.writestall);
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
@@ -1811,7 +1811,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
}
if (!zram_can_store_page(zram)) {
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ zcomp_stream_put(zstrm);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
}
@@ -1819,7 +1819,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
memcpy(dst, zstrm->buffer, comp_len);
- zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+ zcomp_stream_put(zstrm);
zs_unmap_object(zram->mem_pool, handle);
zram_slot_lock(zram, index);
@@ -1978,7 +1978,7 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
kunmap_local(src);
if (ret) {
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
return ret;
}
@@ -1988,7 +1988,7 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
/* Continue until we make progress */
if (class_index_new >= class_index_old ||
(threshold && comp_len_new >= threshold)) {
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
continue;
}
@@ -2046,13 +2046,13 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
__GFP_HIGHMEM |
__GFP_MOVABLE);
if (IS_ERR_VALUE(handle_new)) {
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
return PTR_ERR((void *)handle_new);
}
dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
memcpy(dst, zstrm->buffer, comp_len_new);
- zcomp_stream_put(zram->comps[prio]);
+ zcomp_stream_put(zstrm);
zs_unmap_object(zram->mem_pool, handle_new);
--
2.48.1.658.g4767266eb4-goog