From: Sergey Senozhatsky <senozhatsky@chromium.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Yosry Ahmed <yosry.ahmed@linux.dev>,
Hillf Danton <hdanton@sina.com>, Kairui Song <ryncsn@gmail.com>,
Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
Minchan Kim <minchan@kernel.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Sergey Senozhatsky <senozhatsky@chromium.org>
Subject: [PATCH v7 10/17] zsmalloc: rename pool lock
Date: Fri, 21 Feb 2025 18:38:03 +0900
Message-ID: <20250221093832.1949691-11-senozhatsky@chromium.org>
In-Reply-To: <20250221093832.1949691-1-senozhatsky@chromium.org>
The old name dates back to the days when the pool did not support
compaction (defragmentation). Rename it to ->lock because these days
it synchronizes not only migration, but compaction as well.
Reviewed-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
mm/zsmalloc.c | 38 +++++++++++++++++++-------------------
1 file changed, 19 insertions(+), 19 deletions(-)
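[Not part of the patch itself, just review context: a minimal, self-contained
userspace sketch (pthreads; every name other than pool->lock and class->lock
is hypothetical) of the nesting the renamed lock has in zs_free() above:
pool->lock is taken as a reader to pin the zspage against migration,
class->lock nests inside it, and pool->lock is dropped early once
class->lock is held.]

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct size_class {
	pthread_mutex_t lock;		/* stands in for class->lock */
	unsigned long objs_inuse;
};

struct zs_pool {
	pthread_rwlock_t lock;		/* stands in for pool->lock */
	struct size_class class;
};

static void sketch_zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct size_class *class = &pool->class;	/* handle lookup elided */

	/* Reader side; migration/compaction take pool->lock as writers. */
	pthread_rwlock_rdlock(&pool->lock);
	pthread_mutex_lock(&class->lock);
	/* class->lock now pins the zspage, pool->lock can be released early. */
	pthread_rwlock_unlock(&pool->lock);

	class->objs_inuse--;			/* obj_free() stand-in */
	pthread_mutex_unlock(&class->lock);
	(void)handle;
}

int main(void)
{
	struct zs_pool pool = {
		.lock  = PTHREAD_RWLOCK_INITIALIZER,
		.class = { .lock = PTHREAD_MUTEX_INITIALIZER, .objs_inuse = 1 },
	};

	sketch_zs_free(&pool, 0x1234);
	printf("objects still in use: %lu\n", pool.class.objs_inuse);
	return 0;
}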
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 817626a351f8..1424ee73cbb5 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -18,7 +18,7 @@
/*
* lock ordering:
* page_lock
- * pool->migrate_lock
+ * pool->lock
* class->lock
* zspage->lock
*/
@@ -223,8 +223,8 @@ struct zs_pool {
#ifdef CONFIG_COMPACTION
struct work_struct free_work;
#endif
- /* protect page/zspage migration */
- rwlock_t migrate_lock;
+ /* protect zspage migration/compaction */
+ rwlock_t lock;
atomic_t compaction_in_progress;
};
@@ -1206,7 +1206,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
BUG_ON(in_interrupt());
/* It guarantees it can get zspage from handle safely */
- read_lock(&pool->migrate_lock);
+ read_lock(&pool->lock);
obj = handle_to_obj(handle);
obj_to_location(obj, &zpdesc, &obj_idx);
zspage = get_zspage(zpdesc);
@@ -1218,7 +1218,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
* which is smaller granularity.
*/
migrate_read_lock(zspage);
- read_unlock(&pool->migrate_lock);
+ read_unlock(&pool->lock);
class = zspage_class(pool, zspage);
off = offset_in_page(class->size * obj_idx);
@@ -1450,16 +1450,16 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
return;
/*
- * The pool->migrate_lock protects the race with zpage's migration
+ * The pool->lock protects the race with zpage's migration
* so it's safe to get the page from handle.
*/
- read_lock(&pool->migrate_lock);
+ read_lock(&pool->lock);
obj = handle_to_obj(handle);
obj_to_zpdesc(obj, &f_zpdesc);
zspage = get_zspage(f_zpdesc);
class = zspage_class(pool, zspage);
spin_lock(&class->lock);
- read_unlock(&pool->migrate_lock);
+ read_unlock(&pool->lock);
class_stat_sub(class, ZS_OBJS_INUSE, 1);
obj_free(class->size, obj);
@@ -1796,7 +1796,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
* The pool migrate_lock protects the race between zpage migration
* and zs_free.
*/
- write_lock(&pool->migrate_lock);
+ write_lock(&pool->lock);
class = zspage_class(pool, zspage);
/*
@@ -1833,7 +1833,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
* Since we complete the data copy and set up new zspage structure,
* it's okay to release migration_lock.
*/
- write_unlock(&pool->migrate_lock);
+ write_unlock(&pool->lock);
spin_unlock(&class->lock);
migrate_write_unlock(zspage);
@@ -1956,7 +1956,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
* protect the race between zpage migration and zs_free
* as well as zpage allocation/free
*/
- write_lock(&pool->migrate_lock);
+ write_lock(&pool->lock);
spin_lock(&class->lock);
while (zs_can_compact(class)) {
int fg;
@@ -1983,14 +1983,14 @@ static unsigned long __zs_compact(struct zs_pool *pool,
src_zspage = NULL;
if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
- || rwlock_is_contended(&pool->migrate_lock)) {
+ || rwlock_is_contended(&pool->lock)) {
putback_zspage(class, dst_zspage);
dst_zspage = NULL;
spin_unlock(&class->lock);
- write_unlock(&pool->migrate_lock);
+ write_unlock(&pool->lock);
cond_resched();
- write_lock(&pool->migrate_lock);
+ write_lock(&pool->lock);
spin_lock(&class->lock);
}
}
@@ -2002,7 +2002,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
putback_zspage(class, dst_zspage);
spin_unlock(&class->lock);
- write_unlock(&pool->migrate_lock);
+ write_unlock(&pool->lock);
return pages_freed;
}
@@ -2014,10 +2014,10 @@ unsigned long zs_compact(struct zs_pool *pool)
unsigned long pages_freed = 0;
/*
- * Pool compaction is performed under pool->migrate_lock so it is basically
+ * Pool compaction is performed under pool->lock so it is basically
* single-threaded. Having more than one thread in __zs_compact()
- * will increase pool->migrate_lock contention, which will impact other
- * zsmalloc operations that need pool->migrate_lock.
+ * will increase pool->lock contention, which will impact other
+ * zsmalloc operations that need pool->lock.
*/
if (atomic_xchg(&pool->compaction_in_progress, 1))
return 0;
@@ -2139,7 +2139,7 @@ struct zs_pool *zs_create_pool(const char *name)
return NULL;
init_deferred_free(pool);
- rwlock_init(&pool->migrate_lock);
+ rwlock_init(&pool->lock);
atomic_set(&pool->compaction_in_progress, 0);
pool->name = kstrdup(name, GFP_KERNEL);
--
2.48.1.601.g30ceb7b040-goog