From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
To: Minchan Kim <minchan@kernel.org>,
Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Andrew Morton <akpm@linux-foundation.org>,
linux-mm@kvack.org, Matthew Wilcox <willy@infradead.org>,
Vishal Moola <vishal.moola@gmail.com>,
Alex Shi <seakeel@gmail.com>, Hyeonggon Yoo <42.hyeyoo@gmail.com>,
Alex Shi <alexs@kernel.org>
Subject: [PATCH v9 mm-unstable 02/18] mm/zsmalloc: use zpdesc in trylock_zspage()/lock_zspage()
Date: Tue, 17 Dec 2024 00:04:33 +0900
Message-ID: <20241216150450.1228021-3-42.hyeyoo@gmail.com>
In-Reply-To: <20241216150450.1228021-1-42.hyeyoo@gmail.com>
From: Alex Shi <alexs@kernel.org>
Convert trylock_zspage() and lock_zspage() to use zpdesc. To achieve
that, introduce the following helper functions:
- zpdesc_lock()
- zpdesc_unlock()
- zpdesc_trylock()
- zpdesc_wait_locked()
- zpdesc_get()
- zpdesc_put()
Here we use the folio versions of these functions for two reasons.
First, zswap.zpool currently only uses order-0 pages, so using folios
can save some compound_head() checks. Second, folio_put() can bypass
the devmap check that we don't need.

Thanks to Intel LKP for reporting a build warning on this patch.
Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
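Note for reviewers, not part of the commit message: a rough sketch of the
reasoning above, simplified from include/linux/mm.h and assuming
zpdesc_folio() is the cast-style conversion added earlier in this series,
so treat it as an illustration rather than the exact upstream code:

	/*
	 * Simplified: the page-based reference helpers first have to
	 * resolve a possible tail page to its folio via page_folio()
	 * (a compound_head() lookup), and put_page() additionally carries
	 * a devmap check that zsmalloc never needs.
	 */
	static inline void get_page(struct page *page)
	{
		folio_get(page_folio(page));
	}

	/*
	 * zswap.zpool only uses order-0 pages, so a zpdesc already maps to
	 * its (single-page) folio and the new helpers can call the folio
	 * API directly:
	 */
	static inline void zpdesc_get(struct zpdesc *zpdesc)
	{
		folio_get(zpdesc_folio(zpdesc));
	}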
mm/zpdesc.h | 30 ++++++++++++++++++++++++
mm/zsmalloc.c | 64 ++++++++++++++++++++++++++++++++++-----------------
2 files changed, 73 insertions(+), 21 deletions(-)
diff --git a/mm/zpdesc.h b/mm/zpdesc.h
index e0852498aecf..c866758feec3 100644
--- a/mm/zpdesc.h
+++ b/mm/zpdesc.h
@@ -104,4 +104,34 @@ static_assert(sizeof(struct zpdesc) <= sizeof(struct page));
const struct page *: (const struct zpdesc *)(p), \
struct page *: (struct zpdesc *)(p)))
+static inline void zpdesc_lock(struct zpdesc *zpdesc)
+{
+ folio_lock(zpdesc_folio(zpdesc));
+}
+
+static inline bool zpdesc_trylock(struct zpdesc *zpdesc)
+{
+ return folio_trylock(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_unlock(struct zpdesc *zpdesc)
+{
+ folio_unlock(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_wait_locked(struct zpdesc *zpdesc)
+{
+ folio_wait_locked(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_get(struct zpdesc *zpdesc)
+{
+ folio_get(zpdesc_folio(zpdesc));
+}
+
+static inline void zpdesc_put(struct zpdesc *zpdesc)
+{
+ folio_put(zpdesc_folio(zpdesc));
+}
+
#endif
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 00d111f011be..51f4a9b78023 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -428,13 +428,17 @@ static __maybe_unused int is_first_page(struct page *page)
return PagePrivate(page);
}
+static inline bool is_first_zpdesc(struct zpdesc *zpdesc)
+{
+ return PagePrivate(zpdesc_page(zpdesc));
+}
+
/* Protected by class->lock */
static inline int get_zspage_inuse(struct zspage *zspage)
{
return zspage->inuse;
}
-
static inline void mod_zspage_inuse(struct zspage *zspage, int val)
{
zspage->inuse += val;
@@ -448,6 +452,14 @@ static inline struct page *get_first_page(struct zspage *zspage)
return first_page;
}
+static struct zpdesc *get_first_zpdesc(struct zspage *zspage)
+{
+ struct zpdesc *first_zpdesc = zspage->first_zpdesc;
+
+ VM_BUG_ON_PAGE(!is_first_zpdesc(first_zpdesc), zpdesc_page(first_zpdesc));
+ return first_zpdesc;
+}
+
#define FIRST_OBJ_PAGE_TYPE_MASK 0xffffff
static inline unsigned int get_first_obj_offset(struct page *page)
@@ -734,6 +746,16 @@ static struct page *get_next_page(struct page *page)
return (struct page *)page->index;
}
+static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc)
+{
+ struct zspage *zspage = get_zspage(zpdesc_page(zpdesc));
+
+ if (unlikely(ZsHugePage(zspage)))
+ return NULL;
+
+ return zpdesc->next;
+}
+
/**
* obj_to_location - get (<page>, <obj_idx>) from encoded object value
* @obj: the encoded object value
@@ -803,11 +825,11 @@ static void reset_page(struct page *page)
static int trylock_zspage(struct zspage *zspage)
{
- struct page *cursor, *fail;
+ struct zpdesc *cursor, *fail;
- for (cursor = get_first_page(zspage); cursor != NULL; cursor =
- get_next_page(cursor)) {
- if (!trylock_page(cursor)) {
+ for (cursor = get_first_zpdesc(zspage); cursor != NULL; cursor =
+ get_next_zpdesc(cursor)) {
+ if (!zpdesc_trylock(cursor)) {
fail = cursor;
goto unlock;
}
@@ -815,9 +837,9 @@ static int trylock_zspage(struct zspage *zspage)
return 1;
unlock:
- for (cursor = get_first_page(zspage); cursor != fail; cursor =
- get_next_page(cursor))
- unlock_page(cursor);
+ for (cursor = get_first_zpdesc(zspage); cursor != fail; cursor =
+ get_next_zpdesc(cursor))
+ zpdesc_unlock(cursor);
return 0;
}
@@ -1635,7 +1657,7 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
*/
static void lock_zspage(struct zspage *zspage)
{
- struct page *curr_page, *page;
+ struct zpdesc *curr_zpdesc, *zpdesc;
/*
* Pages we haven't locked yet can be migrated off the list while we're
@@ -1647,24 +1669,24 @@ static void lock_zspage(struct zspage *zspage)
*/
while (1) {
migrate_read_lock(zspage);
- page = get_first_page(zspage);
- if (trylock_page(page))
+ zpdesc = get_first_zpdesc(zspage);
+ if (zpdesc_trylock(zpdesc))
break;
- get_page(page);
+ zpdesc_get(zpdesc);
migrate_read_unlock(zspage);
- wait_on_page_locked(page);
- put_page(page);
+ zpdesc_wait_locked(zpdesc);
+ zpdesc_put(zpdesc);
}
- curr_page = page;
- while ((page = get_next_page(curr_page))) {
- if (trylock_page(page)) {
- curr_page = page;
+ curr_zpdesc = zpdesc;
+ while ((zpdesc = get_next_zpdesc(curr_zpdesc))) {
+ if (zpdesc_trylock(zpdesc)) {
+ curr_zpdesc = zpdesc;
} else {
- get_page(page);
+ zpdesc_get(zpdesc);
migrate_read_unlock(zspage);
- wait_on_page_locked(page);
- put_page(page);
+ zpdesc_wait_locked(zpdesc);
+ zpdesc_put(zpdesc);
migrate_read_lock(zspage);
}
}
--
2.43.5