* [PATCH RESEND 1/3] mm/zsmalloc: remove set_zspage_mapping()
2024-02-20 11:44 [PATCH RESEND 0/3] mm/zsmalloc: some cleanup for get/set_zspage_mapping() Chengming Zhou
@ 2024-02-20 11:44 ` Chengming Zhou
2024-02-23 5:46 ` Sergey Senozhatsky
2024-02-20 11:44 ` [PATCH RESEND 2/3] mm/zsmalloc: remove_zspage() don't need fullness parameter Chengming Zhou
2024-02-20 11:44 ` [PATCH RESEND 3/3] mm/zsmalloc: remove get_zspage_mapping() Chengming Zhou
2 siblings, 1 reply; 7+ messages in thread
From: Chengming Zhou @ 2024-02-20 11:44 UTC (permalink / raw)
To: hannes, Andrew Morton, Sergey Senozhatsky, nphamcs, yosryahmed,
Minchan Kim
Cc: linux-mm, Chengming Zhou, linux-kernel
From: Chengming Zhou <zhouchengming@bytedance.com>
We only need to update zspage->fullness in insert_zspage(), since
zspage->class is never changed after the zspage is allocated.
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
mm/zsmalloc.c | 13 ++-----------
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index a48f4651d143..a6653915bf17 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -486,14 +486,6 @@ static struct size_class *zspage_class(struct zs_pool *pool,
return pool->size_class[zspage->class];
}
-static void set_zspage_mapping(struct zspage *zspage,
- unsigned int class_idx,
- int fullness)
-{
- zspage->class = class_idx;
- zspage->fullness = fullness;
-}
-
/*
* zsmalloc divides the pool into various size classes where each
* class maintains a list of zspages where each zspage is divided
@@ -688,6 +680,7 @@ static void insert_zspage(struct size_class *class,
{
class_stat_inc(class, fullness, 1);
list_add(&zspage->list, &class->fullness_list[fullness]);
+ zspage->fullness = fullness;
}
/*
@@ -725,7 +718,6 @@ static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
remove_zspage(class, zspage, currfg);
insert_zspage(class, zspage, newfg);
- set_zspage_mapping(zspage, class_idx, newfg);
out:
return newfg;
}
@@ -1005,6 +997,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
create_page_chain(class, zspage, pages);
init_zspage(class, zspage);
zspage->pool = pool;
+ zspage->class = class->index;
return zspage;
}
@@ -1397,7 +1390,6 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
obj = obj_malloc(pool, zspage, handle);
newfg = get_fullness_group(class, zspage);
insert_zspage(class, zspage, newfg);
- set_zspage_mapping(zspage, class->index, newfg);
record_obj(handle, obj);
atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
@@ -1655,7 +1647,6 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
fullness = get_fullness_group(class, zspage);
insert_zspage(class, zspage, fullness);
- set_zspage_mapping(zspage, class->index, fullness);
return fullness;
}
--
b4 0.10.1
^ permalink raw reply [flat|nested] 7+ messages in thread* Re: [PATCH RESEND 1/3] mm/zsmalloc: remove set_zspage_mapping()
2024-02-20 11:44 ` [PATCH RESEND 1/3] mm/zsmalloc: remove set_zspage_mapping() Chengming Zhou
@ 2024-02-23 5:46 ` Sergey Senozhatsky
0 siblings, 0 replies; 7+ messages in thread
From: Sergey Senozhatsky @ 2024-02-23 5:46 UTC (permalink / raw)
To: Chengming Zhou
Cc: hannes, Andrew Morton, Sergey Senozhatsky, nphamcs, yosryahmed,
Minchan Kim, linux-mm, Chengming Zhou, linux-kernel
On (24/02/20 11:44), Chengming Zhou wrote:
> From: Chengming Zhou <zhouchengming@bytedance.com>
>
> We only need to update zspage->fullness in insert_zspage(), since
> zspage->class is never changed after the zspage is allocated.
>
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH RESEND 2/3] mm/zsmalloc: remove_zspage() don't need fullness parameter
2024-02-20 11:44 [PATCH RESEND 0/3] mm/zsmalloc: some cleanup for get/set_zspage_mapping() Chengming Zhou
2024-02-20 11:44 ` [PATCH RESEND 1/3] mm/zsmalloc: remove set_zspage_mapping() Chengming Zhou
@ 2024-02-20 11:44 ` Chengming Zhou
2024-02-23 5:47 ` Sergey Senozhatsky
2024-02-20 11:44 ` [PATCH RESEND 3/3] mm/zsmalloc: remove get_zspage_mapping() Chengming Zhou
2 siblings, 1 reply; 7+ messages in thread
From: Chengming Zhou @ 2024-02-20 11:44 UTC (permalink / raw)
To: hannes, Andrew Morton, Sergey Senozhatsky, nphamcs, yosryahmed,
Minchan Kim
Cc: linux-mm, Chengming Zhou, linux-kernel
From: Chengming Zhou <zhouchengming@bytedance.com>
We must remove_zspage() from its current fullness list, then use
insert_zspage() to update its fullness and insert it into the new
fullness list. Obviously, remove_zspage() doesn't need the fullness
parameter.
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
mm/zsmalloc.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index a6653915bf17..c39fac9361d7 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -687,10 +687,10 @@ static void insert_zspage(struct size_class *class,
* This function removes the given zspage from the freelist identified
* by <class, fullness_group>.
*/
-static void remove_zspage(struct size_class *class,
- struct zspage *zspage,
- int fullness)
+static void remove_zspage(struct size_class *class, struct zspage *zspage)
{
+ int fullness = zspage->fullness;
+
VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
list_del_init(&zspage->list);
@@ -716,7 +716,7 @@ static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
if (newfg == currfg)
goto out;
- remove_zspage(class, zspage, currfg);
+ remove_zspage(class, zspage);
insert_zspage(class, zspage, newfg);
out:
return newfg;
@@ -878,7 +878,7 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class,
return;
}
- remove_zspage(class, zspage, ZS_INUSE_RATIO_0);
+ remove_zspage(class, zspage);
__free_zspage(pool, class, zspage);
}
@@ -1609,7 +1609,7 @@ static struct zspage *isolate_src_zspage(struct size_class *class)
zspage = list_first_entry_or_null(&class->fullness_list[fg],
struct zspage, list);
if (zspage) {
- remove_zspage(class, zspage, fg);
+ remove_zspage(class, zspage);
return zspage;
}
}
@@ -1626,7 +1626,7 @@ static struct zspage *isolate_dst_zspage(struct size_class *class)
zspage = list_first_entry_or_null(&class->fullness_list[fg],
struct zspage, list);
if (zspage) {
- remove_zspage(class, zspage, fg);
+ remove_zspage(class, zspage);
return zspage;
}
}
--
b4 0.10.1
^ permalink raw reply [flat|nested] 7+ messages in thread* Re: [PATCH RESEND 2/3] mm/zsmalloc: remove_zspage() don't need fullness parameter
2024-02-20 11:44 ` [PATCH RESEND 2/3] mm/zsmalloc: remove_zspage() don't need fullness parameter Chengming Zhou
@ 2024-02-23 5:47 ` Sergey Senozhatsky
0 siblings, 0 replies; 7+ messages in thread
From: Sergey Senozhatsky @ 2024-02-23 5:47 UTC (permalink / raw)
To: Chengming Zhou
Cc: hannes, Andrew Morton, Sergey Senozhatsky, nphamcs, yosryahmed,
Minchan Kim, linux-mm, Chengming Zhou, linux-kernel
On (24/02/20 11:44), Chengming Zhou wrote:
> From: Chengming Zhou <zhouchengming@bytedance.com>
>
> We must remove_zspage() from its current fullness list, then use
> insert_zspage() to update its fullness and insert it into the new
> fullness list. Obviously, remove_zspage() doesn't need the fullness
> parameter.
>
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH RESEND 3/3] mm/zsmalloc: remove get_zspage_mapping()
2024-02-20 11:44 [PATCH RESEND 0/3] mm/zsmalloc: some cleanup for get/set_zspage_mapping() Chengming Zhou
2024-02-20 11:44 ` [PATCH RESEND 1/3] mm/zsmalloc: remove set_zspage_mapping() Chengming Zhou
2024-02-20 11:44 ` [PATCH RESEND 2/3] mm/zsmalloc: remove_zspage() don't need fullness parameter Chengming Zhou
@ 2024-02-20 11:44 ` Chengming Zhou
2024-02-23 5:48 ` Sergey Senozhatsky
2 siblings, 1 reply; 7+ messages in thread
From: Chengming Zhou @ 2024-02-20 11:44 UTC (permalink / raw)
To: hannes, Andrew Morton, Sergey Senozhatsky, nphamcs, yosryahmed,
Minchan Kim
Cc: linux-mm, Chengming Zhou, linux-kernel
From: Chengming Zhou <zhouchengming@bytedance.com>
Actually we seldom use the class_idx returned from get_zspage_mapping();
only zspage->fullness is useful, so just use zspage->fullness directly
and remove this helper.
Note zspage->fullness is not stable outside pool->lock, so remove the
redundant "VM_BUG_ON(fullness != ZS_INUSE_RATIO_0)" in async_free_zspage();
we already have the same VM_BUG_ON() in __free_zspage(), where it is safe
to access zspage->fullness with pool->lock held.
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
mm/zsmalloc.c | 28 ++++------------------------
1 file changed, 4 insertions(+), 24 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c39fac9361d7..63ec385cd670 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -470,16 +470,6 @@ static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
zspage->freeobj = obj;
}
-static void get_zspage_mapping(struct zspage *zspage,
- unsigned int *class_idx,
- int *fullness)
-{
- BUG_ON(zspage->magic != ZSPAGE_MAGIC);
-
- *fullness = zspage->fullness;
- *class_idx = zspage->class;
-}
-
static struct size_class *zspage_class(struct zs_pool *pool,
struct zspage *zspage)
{
@@ -708,12 +698,10 @@ static void remove_zspage(struct size_class *class, struct zspage *zspage)
*/
static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
{
- int class_idx;
- int currfg, newfg;
+ int newfg;
- get_zspage_mapping(zspage, &class_idx, &currfg);
newfg = get_fullness_group(class, zspage);
- if (newfg == currfg)
+ if (newfg == zspage->fullness)
goto out;
remove_zspage(class, zspage);
@@ -835,15 +823,11 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
struct zspage *zspage)
{
struct page *page, *next;
- int fg;
- unsigned int class_idx;
-
- get_zspage_mapping(zspage, &class_idx, &fg);
assert_spin_locked(&pool->lock);
VM_BUG_ON(get_zspage_inuse(zspage));
- VM_BUG_ON(fg != ZS_INUSE_RATIO_0);
+ VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0);
next = page = get_first_page(zspage);
do {
@@ -1857,8 +1841,6 @@ static void async_free_zspage(struct work_struct *work)
{
int i;
struct size_class *class;
- unsigned int class_idx;
- int fullness;
struct zspage *zspage, *tmp;
LIST_HEAD(free_pages);
struct zs_pool *pool = container_of(work, struct zs_pool,
@@ -1879,10 +1861,8 @@ static void async_free_zspage(struct work_struct *work)
list_del(&zspage->list);
lock_zspage(zspage);
- get_zspage_mapping(zspage, &class_idx, &fullness);
- VM_BUG_ON(fullness != ZS_INUSE_RATIO_0);
- class = pool->size_class[class_idx];
spin_lock(&pool->lock);
+ class = zspage_class(pool, zspage);
__free_zspage(pool, class, zspage);
spin_unlock(&pool->lock);
}
--
b4 0.10.1
^ permalink raw reply [flat|nested] 7+ messages in thread* Re: [PATCH RESEND 3/3] mm/zsmalloc: remove get_zspage_mapping()
2024-02-20 11:44 ` [PATCH RESEND 3/3] mm/zsmalloc: remove get_zspage_mapping() Chengming Zhou
@ 2024-02-23 5:48 ` Sergey Senozhatsky
0 siblings, 0 replies; 7+ messages in thread
From: Sergey Senozhatsky @ 2024-02-23 5:48 UTC (permalink / raw)
To: Chengming Zhou
Cc: hannes, Andrew Morton, Sergey Senozhatsky, nphamcs, yosryahmed,
Minchan Kim, linux-mm, Chengming Zhou, linux-kernel
On (24/02/20 11:44), Chengming Zhou wrote:
> From: Chengming Zhou <zhouchengming@bytedance.com>
>
> Actually we seldom use the class_idx returned from get_zspage_mapping();
> only zspage->fullness is useful, so just use zspage->fullness directly
> and remove this helper.
>
> Note zspage->fullness is not stable outside pool->lock, so remove the
> redundant "VM_BUG_ON(fullness != ZS_INUSE_RATIO_0)" in async_free_zspage();
> we already have the same VM_BUG_ON() in __free_zspage(), where it is safe
> to access zspage->fullness with pool->lock held.
>
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
^ permalink raw reply [flat|nested] 7+ messages in thread