From: Sergey Senozhatsky <senozhatsky@chromium.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Yosry Ahmed <yosry.ahmed@linux.dev>,
Hillf Danton <hdanton@sina.com>, Kairui Song <ryncsn@gmail.com>,
Minchan Kim <minchan@kernel.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Sergey Senozhatsky <senozhatsky@chromium.org>
Subject: [PATCH v6 11/17] zsmalloc: make zspage lock preemptible
Date: Fri, 14 Feb 2025 13:50:23 +0900
Message-ID: <20250214045208.1388854-12-senozhatsky@chromium.org>
In-Reply-To: <20250214045208.1388854-1-senozhatsky@chromium.org>
In order to implement preemptible object mapping we need a zspage lock
that satisfies several preconditions:
- it should be a reader-writer type of lock
- it should be possible to hold it from any context, but also be
  preemptible if the context allows it
- we never sleep while acquiring it, but can sleep while holding it in
  read mode
An rwsemaphore doesn't suffice due to the atomicity requirement, and an
rwlock doesn't satisfy the reader-preemptibility requirement. It's also
worth mentioning that a per-zspage rwsem is a little too memory heavy
(we can easily end up with double-digit megabytes spent on rwsemaphores
alone).

Switch over from rwlock_t to a reader-writer lock built on a spinlock
and a reader counter, which satisfies all of the preconditions.

The spinlock-based zspage_lock was suggested by Hillf Danton.
Suggested-by: Hillf Danton <hdanton@sina.com>
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
mm/zsmalloc.c | 246 +++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 192 insertions(+), 54 deletions(-)
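
For reference, below is a minimal userspace sketch of the counter-based
scheme the patch implements (illustration only, not part of the patch;
the pthread spinlock stands in for the kernel spinlock_t and the
zsl_sketch/zsl_* names are made up for the example):

#include <pthread.h>
#include <stdbool.h>

#define ZSL_UNLOCKED	 0
#define ZSL_WRLOCKED	-1

struct zsl_sketch {
	pthread_spinlock_t lock;	/* protects @cnt */
	int cnt;			/* > 0 readers, -1 writer, 0 free */
};

static void zsl_init(struct zsl_sketch *zsl)
{
	pthread_spin_init(&zsl->lock, PTHREAD_PROCESS_PRIVATE);
	zsl->cnt = ZSL_UNLOCKED;
}

/*
 * Readers never sleep to acquire: a writer holds the inner spinlock
 * only for its (atomic) critical section, so spinning here is bounded.
 * Once the count is taken, a reader is free to sleep.
 */
static void zsl_read_lock(struct zsl_sketch *zsl)
{
	pthread_spin_lock(&zsl->lock);
	zsl->cnt++;
	pthread_spin_unlock(&zsl->lock);
}

static void zsl_read_unlock(struct zsl_sketch *zsl)
{
	pthread_spin_lock(&zsl->lock);
	zsl->cnt--;
	pthread_spin_unlock(&zsl->lock);
}

/*
 * Writers only ever trylock (readers may be asleep while holding the
 * lock) and keep the inner spinlock held for the whole write section,
 * which is what keeps the write side atomic.
 */
static bool zsl_write_trylock(struct zsl_sketch *zsl)
{
	pthread_spin_lock(&zsl->lock);
	if (zsl->cnt == ZSL_UNLOCKED) {
		zsl->cnt = ZSL_WRLOCKED;
		return true;		/* inner spinlock stays held */
	}
	pthread_spin_unlock(&zsl->lock);
	return false;
}

static void zsl_write_unlock(struct zsl_sketch *zsl)
{
	zsl->cnt = ZSL_UNLOCKED;
	pthread_spin_unlock(&zsl->lock);
}
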
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 2e338cde0d21..bc679a3e1718 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -226,6 +226,9 @@ struct zs_pool {
/* protect zspage migration/compaction */
rwlock_t lock;
atomic_t compaction_in_progress;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lock_class;
+#endif
};
static inline void zpdesc_set_first(struct zpdesc *zpdesc)
@@ -257,6 +260,18 @@ static inline void free_zpdesc(struct zpdesc *zpdesc)
__free_page(page);
}
+#define ZS_PAGE_UNLOCKED 0
+#define ZS_PAGE_WRLOCKED -1
+
+struct zspage_lock {
+ spinlock_t lock;
+ int cnt;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
struct zspage {
struct {
unsigned int huge:HUGE_BITS;
@@ -269,7 +284,7 @@ struct zspage {
struct zpdesc *first_zpdesc;
struct list_head list; /* fullness list */
struct zs_pool *pool;
- rwlock_t lock;
+ struct zspage_lock zsl;
};
struct mapping_area {
@@ -279,6 +294,148 @@ struct mapping_area {
enum zs_mapmode vm_mm; /* mapping mode */
};
+static void zspage_lock_init(struct zspage *zspage)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ lockdep_init_map(&zspage->zsl.dep_map, "zspage->lock",
+ &zspage->pool->lock_class, 0);
+#endif
+
+ spin_lock_init(&zspage->zsl.lock);
+ zspage->zsl.cnt = ZS_PAGE_UNLOCKED;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void __read_lock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ rwsem_acquire_read(&zsl->dep_map, 0, 0, _RET_IP_);
+
+ spin_lock(&zsl->lock);
+ zsl->cnt++;
+ spin_unlock(&zsl->lock);
+
+ lock_acquired(&zsl->dep_map, _RET_IP_);
+}
+
+static inline void __read_unlock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ rwsem_release(&zsl->dep_map, _RET_IP_);
+
+ spin_lock(&zsl->lock);
+ zsl->cnt--;
+ spin_unlock(&zsl->lock);
+}
+
+static inline bool __write_trylock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ spin_lock(&zsl->lock);
+ if (zsl->cnt == ZS_PAGE_UNLOCKED) {
+ zsl->cnt = ZS_PAGE_WRLOCKED;
+ rwsem_acquire(&zsl->dep_map, 0, 1, _RET_IP_);
+ lock_acquired(&zsl->dep_map, _RET_IP_);
+ return true;
+ }
+
+ lock_contended(&zsl->dep_map, _RET_IP_);
+ spin_unlock(&zsl->lock);
+ return false;
+}
+
+static inline void __write_unlock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ rwsem_release(&zsl->dep_map, _RET_IP_);
+
+ zsl->cnt = ZS_PAGE_UNLOCKED;
+ spin_unlock(&zsl->lock);
+}
+#else
+static inline void __read_lock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ spin_lock(&zsl->lock);
+ zsl->cnt++;
+ spin_unlock(&zsl->lock);
+}
+
+static inline void __read_unlock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ spin_lock(&zsl->lock);
+ zsl->cnt--;
+ spin_unlock(&zsl->lock);
+}
+
+static inline bool __write_trylock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ spin_lock(&zsl->lock);
+ if (zsl->cnt == ZS_PAGE_UNLOCKED) {
+ zsl->cnt = ZS_PAGE_WRLOCKED;
+ return true;
+ }
+
+ spin_unlock(&zsl->lock);
+ return false;
+}
+
+static inline void __write_unlock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ zsl->cnt = ZS_PAGE_UNLOCKED;
+ spin_unlock(&zsl->lock);
+}
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+
+/*
+ * The zspage lock can be held from atomic contexts, but it needs to remain
+ * preemptible when held for reading because it remains held outside of those
+ * atomic contexts, otherwise we unnecessarily lose preemptibility.
+ *
+ * To achieve this, the following rules are enforced on readers and writers:
+ *
+ * - Writers are blocked by both writers and readers, while readers are only
+ * blocked by writers (i.e. normal rwlock semantics).
+ *
+ * - Writers are always atomic (to allow readers to spin waiting for them).
+ *
+ * - Writers always use trylock (as the lock may be held by sleeping readers).
+ *
+ * - Readers may spin on the lock (as they can only wait for atomic writers).
+ *
+ * - Readers may sleep while holding the lock (as writes only use trylock).
+ */
+static void zspage_read_lock(struct zspage *zspage)
+{
+ return __read_lock(zspage);
+}
+
+static void zspage_read_unlock(struct zspage *zspage)
+{
+ return __read_unlock(zspage);
+}
+
+static __must_check bool zspage_write_trylock(struct zspage *zspage)
+{
+ return __write_trylock(zspage);
+}
+
+static void zspage_write_unlock(struct zspage *zspage)
+{
+ return __write_unlock(zspage);
+}
+
/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetZsHugePage(struct zspage *zspage)
{
@@ -290,12 +447,6 @@ static bool ZsHugePage(struct zspage *zspage)
return zspage->huge;
}
-static void migrate_lock_init(struct zspage *zspage);
-static void migrate_read_lock(struct zspage *zspage);
-static void migrate_read_unlock(struct zspage *zspage);
-static void migrate_write_lock(struct zspage *zspage);
-static void migrate_write_unlock(struct zspage *zspage);
-
#ifdef CONFIG_COMPACTION
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
@@ -992,7 +1143,9 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
return NULL;
zspage->magic = ZSPAGE_MAGIC;
- migrate_lock_init(zspage);
+ zspage->pool = pool;
+ zspage->class = class->index;
+ zspage_lock_init(zspage);
for (i = 0; i < class->pages_per_zspage; i++) {
struct zpdesc *zpdesc;
@@ -1015,8 +1168,6 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
create_page_chain(class, zspage, zpdescs);
init_zspage(class, zspage);
- zspage->pool = pool;
- zspage->class = class->index;
return zspage;
}
@@ -1217,7 +1368,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
* zs_unmap_object API so delegate the locking from class to zspage
* which is smaller granularity.
*/
- migrate_read_lock(zspage);
+ zspage_read_lock(zspage);
read_unlock(&pool->lock);
class = zspage_class(pool, zspage);
@@ -1277,7 +1428,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
}
local_unlock(&zs_map_area.lock);
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
@@ -1671,18 +1822,18 @@ static void lock_zspage(struct zspage *zspage)
/*
* Pages we haven't locked yet can be migrated off the list while we're
* trying to lock them, so we need to be careful and only attempt to
- * lock each page under migrate_read_lock(). Otherwise, the page we lock
+ * lock each page under zspage_read_lock(). Otherwise, the page we lock
* may no longer belong to the zspage. This means that we may wait for
* the wrong page to unlock, so we must take a reference to the page
- * prior to waiting for it to unlock outside migrate_read_lock().
+ * prior to waiting for it to unlock outside zspage_read_lock().
*/
while (1) {
- migrate_read_lock(zspage);
+ zspage_read_lock(zspage);
zpdesc = get_first_zpdesc(zspage);
if (zpdesc_trylock(zpdesc))
break;
zpdesc_get(zpdesc);
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
zpdesc_wait_locked(zpdesc);
zpdesc_put(zpdesc);
}
@@ -1693,41 +1844,16 @@ static void lock_zspage(struct zspage *zspage)
curr_zpdesc = zpdesc;
} else {
zpdesc_get(zpdesc);
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
zpdesc_wait_locked(zpdesc);
zpdesc_put(zpdesc);
- migrate_read_lock(zspage);
+ zspage_read_lock(zspage);
}
}
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
}
#endif /* CONFIG_COMPACTION */
-static void migrate_lock_init(struct zspage *zspage)
-{
- rwlock_init(&zspage->lock);
-}
-
-static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
-{
- read_lock(&zspage->lock);
-}
-
-static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
-{
- read_unlock(&zspage->lock);
-}
-
-static void migrate_write_lock(struct zspage *zspage)
-{
- write_lock(&zspage->lock);
-}
-
-static void migrate_write_unlock(struct zspage *zspage)
-{
- write_unlock(&zspage->lock);
-}
-
#ifdef CONFIG_COMPACTION
static const struct movable_operations zsmalloc_mops;
@@ -1769,7 +1895,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
}
static int zs_page_migrate(struct page *newpage, struct page *page,
- enum migrate_mode mode)
+ enum migrate_mode mode)
{
struct zs_pool *pool;
struct size_class *class;
@@ -1785,9 +1911,6 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc));
- /* We're committed, tell the world that this is a Zsmalloc page. */
- __zpdesc_set_zsmalloc(newzpdesc);
-
/* The page is locked, so this pointer must remain valid */
zspage = get_zspage(zpdesc);
pool = zspage->pool;
@@ -1803,8 +1926,15 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
* the class lock protects zpage alloc/free in the zspage.
*/
spin_lock(&class->lock);
- /* the migrate_write_lock protects zpage access via zs_map_object */
- migrate_write_lock(zspage);
+ /* the zspage write_lock protects zpage access via zs_map_object */
+ if (!zspage_write_trylock(zspage)) {
+ spin_unlock(&class->lock);
+ write_unlock(&pool->lock);
+ return -EINVAL;
+ }
+
+ /* We're committed, tell the world that this is a Zsmalloc page. */
+ __zpdesc_set_zsmalloc(newzpdesc);
offset = get_first_obj_offset(zpdesc);
s_addr = kmap_local_zpdesc(zpdesc);
@@ -1835,7 +1965,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
*/
write_unlock(&pool->lock);
spin_unlock(&class->lock);
- migrate_write_unlock(zspage);
+ zspage_write_unlock(zspage);
zpdesc_get(newzpdesc);
if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) {
@@ -1971,9 +2101,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
if (!src_zspage)
break;
- migrate_write_lock(src_zspage);
+ if (!zspage_write_trylock(src_zspage))
+ break;
+
migrate_zspage(pool, src_zspage, dst_zspage);
- migrate_write_unlock(src_zspage);
+ zspage_write_unlock(src_zspage);
fg = putback_zspage(class, src_zspage);
if (fg == ZS_INUSE_RATIO_0) {
@@ -2233,7 +2365,9 @@ struct zs_pool *zs_create_pool(const char *name)
* trigger compaction manually. Thus, ignore return code.
*/
zs_register_shrinker(pool);
-
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ lockdep_register_key(&pool->lock_class);
+#endif
return pool;
err:
@@ -2270,6 +2404,10 @@ void zs_destroy_pool(struct zs_pool *pool)
kfree(class);
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ lockdep_unregister_key(&pool->lock_class);
+#endif
+
destroy_cache(pool);
kfree(pool->name);
kfree(pool);
--
2.48.1.601.g30ceb7b040-goog