* [PATCH 3/3] zsmalloc: replace kmap_atomic with kmap_local_page
From: Pintu Kumar @ 2024-10-01 17:53 UTC
To: minchan, senozhatsky, akpm, linux-mm, linux-kernel
Cc: joe, skhan, pintu.ping, Pintu Kumar
The use of kmap_atomic/kunmap_atomic is deprecated.
Replace them with kmap_local_page/kunmap_local all over the place.
Also fix the missing SPDX license header.
WARNING: Missing or malformed SPDX-License-Identifier tag in line 1
WARNING: Deprecated use of 'kmap_atomic', prefer 'kmap_local_page' instead
+ vaddr = kmap_atomic(page);
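The conversion is mechanical; a minimal sketch of the before/after
pattern (kmap_local_page() creates a thread-local mapping and, unlike
kmap_atomic(), does not implicitly disable preemption or page faults;
nested local mappings must be released in reverse order):

    /* old: implicitly disables preemption and page faults */
    vaddr = kmap_atomic(page);
    memcpy(buf, vaddr, PAGE_SIZE);
    kunmap_atomic(vaddr);

    /* new: thread-local mapping, no implicit preempt/pagefault disable */
    vaddr = kmap_local_page(page);
    memcpy(buf, vaddr, PAGE_SIZE);
    kunmap_local(vaddr);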
Signed-off-by: Pintu Kumar <quic_pintu@quicinc.com>
---
mm/zsmalloc.c | 66 ++++++++++++++++++++++++++-------------------------
1 file changed, 34 insertions(+), 32 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 16a07def09c9..d3ff10160a5f 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
/*
* zsmalloc memory allocator
*
@@ -898,7 +900,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
set_first_obj_offset(page, off);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
link = (struct link_free *)vaddr + off / sizeof(*link);
while ((off += class->size) < PAGE_SIZE) {
@@ -921,7 +923,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
*/
link->next = -1UL << OBJ_TAG_BITS;
}
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
page = next_page;
off %= PAGE_SIZE;
}
@@ -1059,12 +1061,12 @@ static void *__zs_map_object(struct mapping_area *area,
sizes[1] = size - sizes[0];
/* copy object to per-cpu buffer */
- addr = kmap_atomic(pages[0]);
+ addr = kmap_local_page(pages[0]);
memcpy(buf, addr + off, sizes[0]);
- kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
+ kunmap_local(addr);
+ addr = kmap_local_page(pages[1]);
memcpy(buf + sizes[0], addr, sizes[1]);
- kunmap_atomic(addr);
+ kunmap_local(addr);
out:
return area->vm_buf;
}
@@ -1089,12 +1091,12 @@ static void __zs_unmap_object(struct mapping_area *area,
sizes[1] = size - sizes[0];
/* copy per-cpu buffer to object */
- addr = kmap_atomic(pages[0]);
+ addr = kmap_local_page(pages[0]);
memcpy(addr + off, buf, sizes[0]);
- kunmap_atomic(addr);
- addr = kmap_atomic(pages[1]);
+ kunmap_local(addr);
+ addr = kmap_local_page(pages[1]);
memcpy(addr, buf + sizes[0], sizes[1]);
- kunmap_atomic(addr);
+ kunmap_local(addr);
out:
/* enable page faults to match kunmap_atomic() return conditions */
@@ -1223,7 +1225,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
- area->vm_addr = kmap_atomic(page);
+ area->vm_addr = kmap_local_page(page);
ret = area->vm_addr + off;
goto out;
}
@@ -1260,7 +1262,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
area = this_cpu_ptr(&zs_map_area);
if (off + class->size <= PAGE_SIZE)
- kunmap_atomic(area->vm_addr);
+ kunmap_local(area->vm_addr);
else {
struct page *pages[2];
@@ -1318,7 +1320,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
for (i = 0; i < nr_page; i++)
m_page = get_next_page(m_page);
- vaddr = kmap_atomic(m_page);
+ vaddr = kmap_local_page(m_page);
link = (struct link_free *)vaddr + m_offset / sizeof(*link);
set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
if (likely(!ZsHugePage(zspage)))
@@ -1328,7 +1330,7 @@ static unsigned long obj_malloc(struct zs_pool *pool,
/* record handle to page->index */
zspage->first_page->index = handle | OBJ_ALLOCATED_TAG;
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
mod_zspage_inuse(zspage, 1);
obj = location_to_obj(m_page, obj);
@@ -1419,7 +1421,7 @@ static void obj_free(int class_size, unsigned long obj)
f_offset = offset_in_page(class_size * f_objidx);
zspage = get_zspage(f_page);
- vaddr = kmap_atomic(f_page);
+ vaddr = kmap_local_page(f_page);
link = (struct link_free *)(vaddr + f_offset);
/* Insert this object in containing zspage's freelist */
@@ -1429,7 +1431,7 @@ static void obj_free(int class_size, unsigned long obj)
f_page->index = 0;
set_freeobj(zspage, f_objidx);
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
mod_zspage_inuse(zspage, -1);
}
@@ -1492,8 +1494,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
if (d_off + class->size > PAGE_SIZE)
d_size = PAGE_SIZE - d_off;
- s_addr = kmap_atomic(s_page);
- d_addr = kmap_atomic(d_page);
+ s_addr = kmap_local_page(s_page);
+ d_addr = kmap_local_page(d_page);
while (1) {
size = min(s_size, d_size);
@@ -1516,26 +1518,26 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
* Documentation/mm/highmem.rst.
*/
if (s_off >= PAGE_SIZE) {
- kunmap_atomic(d_addr);
- kunmap_atomic(s_addr);
+ kunmap_local(d_addr);
+ kunmap_local(s_addr);
s_page = get_next_page(s_page);
- s_addr = kmap_atomic(s_page);
- d_addr = kmap_atomic(d_page);
+ s_addr = kmap_local_page(s_page);
+ d_addr = kmap_local_page(d_page);
s_size = class->size - written;
s_off = 0;
}
if (d_off >= PAGE_SIZE) {
- kunmap_atomic(d_addr);
+ kunmap_local(d_addr);
d_page = get_next_page(d_page);
- d_addr = kmap_atomic(d_page);
+ d_addr = kmap_local_page(d_page);
d_size = class->size - written;
d_off = 0;
}
}
- kunmap_atomic(d_addr);
- kunmap_atomic(s_addr);
+ kunmap_local(d_addr);
+ kunmap_local(s_addr);
}
/*
@@ -1548,7 +1550,7 @@ static unsigned long find_alloced_obj(struct size_class *class,
unsigned int offset;
int index = *obj_idx;
unsigned long handle = 0;
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
offset = get_first_obj_offset(page);
offset += class->size * index;
@@ -1561,7 +1563,7 @@ static unsigned long find_alloced_obj(struct size_class *class,
index++;
}
- kunmap_atomic(addr);
+ kunmap_local(addr);
*obj_idx = index;
@@ -1798,14 +1800,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
migrate_write_lock(zspage);
offset = get_first_obj_offset(page);
- s_addr = kmap_atomic(page);
+ s_addr = kmap_local_page(page);
/*
* Here, any user cannot access all objects in the zspage so let's move.
*/
- d_addr = kmap_atomic(newpage);
+ d_addr = kmap_local_page(newpage);
copy_page(d_addr, s_addr);
- kunmap_atomic(d_addr);
+ kunmap_local(d_addr);
for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE;
addr += class->size) {
@@ -1818,7 +1820,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
record_obj(handle, new_obj);
}
}
- kunmap_atomic(s_addr);
+ kunmap_local(s_addr);
replace_sub_page(class, zspage, newpage, page);
/*
--
2.17.1
* Re: [PATCH 3/3] zsmalloc: replace kmap_atomic with kmap_local_page
From: Sergey Senozhatsky @ 2024-10-02 3:18 UTC
To: Pintu Kumar
Cc: minchan, senozhatsky, akpm, linux-mm, linux-kernel, joe, skhan,
pintu.ping
On (24/10/01 23:23), Pintu Kumar wrote:
> The use of kmap_atomic/kunmap_atomic is deprecated.
> Replace them with kmap_local_page/kunmap_local all over the place.
> Also fix the missing SPDX license header.
>
> WARNING: Missing or malformed SPDX-License-Identifier tag in line 1
>
> WARNING: Deprecated use of 'kmap_atomic', prefer 'kmap_local_page' instead
> + vaddr = kmap_atomic(page);
>
Can you also update the comments (that mention kmap/kunmap atomic)?
---
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index d3ff10160a5f..5c3bb8a737b0 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -263,7 +263,7 @@ struct zspage {
struct mapping_area {
local_lock_t lock;
char *vm_buf; /* copy buffer for objects that span pages */
- char *vm_addr; /* address of kmap_atomic()'ed pages */
+ char *vm_addr; /* address of kmap_local_page()'ed pages */
enum zs_mapmode vm_mm; /* mapping mode */
};
@@ -1050,7 +1050,7 @@ static void *__zs_map_object(struct mapping_area *area,
void *addr;
char *buf = area->vm_buf;
- /* disable page faults to match kmap_atomic() return conditions */
+ /* disable page faults to match kmap_local_page() return conditions */
pagefault_disable();
/* no read fastpath */
@@ -1099,7 +1099,7 @@ static void __zs_unmap_object(struct mapping_area *area,
kunmap_local(addr);
out:
- /* enable page faults to match kunmap_atomic() return conditions */
+ /* enable page faults to match kunmap_local() return conditions */
pagefault_enable();
}
@@ -1510,13 +1510,6 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
d_off += size;
d_size -= size;
- /*
- * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic()
- * calls must occurs in reverse order of calls to kmap_atomic().
- * So, to call kunmap_atomic(s_addr) we should first call
- * kunmap_atomic(d_addr). For more details see
- * Documentation/mm/highmem.rst.
- */
if (s_off >= PAGE_SIZE) {
kunmap_local(d_addr);
kunmap_local(s_addr);
* Re: [PATCH 3/3] zsmalloc: replace kmap_atomic with kmap_local_page
From: Matthew Wilcox @ 2024-10-02 3:38 UTC
To: Pintu Kumar
Cc: minchan, senozhatsky, akpm, linux-mm, linux-kernel, joe, skhan,
pintu.ping
On Tue, Oct 01, 2024 at 11:23:58PM +0530, Pintu Kumar wrote:
> @@ -1059,12 +1061,12 @@ static void *__zs_map_object(struct mapping_area *area,
> sizes[1] = size - sizes[0];
>
> /* copy object to per-cpu buffer */
> - addr = kmap_atomic(pages[0]);
> + addr = kmap_local_page(pages[0]);
> memcpy(buf, addr + off, sizes[0]);
> - kunmap_atomic(addr);
> - addr = kmap_atomic(pages[1]);
> + kunmap_local(addr);
> + addr = kmap_local_page(pages[1]);
> memcpy(buf + sizes[0], addr, sizes[1]);
> - kunmap_atomic(addr);
> + kunmap_local(addr);
This looks like memcpy_from_page().
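For reference, memcpy_from_page() in include/linux/highmem.h looks
roughly like this; it is exactly the map/copy/unmap pattern being
open-coded above:

    static inline void memcpy_from_page(char *to, struct page *page,
                                        size_t offset, size_t len)
    {
            char *from = kmap_local_page(page);

            /* the copy must not cross the page boundary */
            VM_BUG_ON(offset + len > PAGE_SIZE);
            memcpy(to, from + offset, len);
            kunmap_local(from);
    }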
> /* copy per-cpu buffer to object */
> - addr = kmap_atomic(pages[0]);
> + addr = kmap_local_page(pages[0]);
> memcpy(addr + off, buf, sizes[0]);
> - kunmap_atomic(addr);
> - addr = kmap_atomic(pages[1]);
> + kunmap_local(addr);
> + addr = kmap_local_page(pages[1]);
> memcpy(addr, buf + sizes[0], sizes[1]);
> - kunmap_atomic(addr);
> + kunmap_local(addr);
memcpy_from_page()?
> @@ -1798,14 +1800,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
> migrate_write_lock(zspage);
>
> offset = get_first_obj_offset(page);
> - s_addr = kmap_atomic(page);
> + s_addr = kmap_local_page(page);
>
> /*
> * Here, any user cannot access all objects in the zspage so let's move.
> */
> - d_addr = kmap_atomic(newpage);
> + d_addr = kmap_local_page(newpage);
> copy_page(d_addr, s_addr);
> - kunmap_atomic(d_addr);
> + kunmap_local(d_addr);
copy_highpage()?
Maybe check the other uses, see if there are appropriate helpers for
them too.
Also, what testing have you done of this patch?
* Re: [PATCH 3/3] zsmalloc: replace kmap_atomic with kmap_local_page
From: Pintu Agarwal @ 2024-10-03 16:55 UTC
To: Sergey Senozhatsky
Cc: Pintu Kumar, minchan, akpm, linux-mm, linux-kernel, joe, skhan
Hi Sergey,
On Wed, 2 Oct 2024 at 08:48, Sergey Senozhatsky
<senozhatsky@chromium.org> wrote:
>
> On (24/10/01 23:23), Pintu Kumar wrote:
> > The use of kmap_atomic/kunmap_atomic is deprecated.
> > Replace them with kmap_local_page/kunmap_local all over the place.
> > Also fix the missing SPDX license header.
> >
> > WARNING: Missing or malformed SPDX-License-Identifier tag in line 1
> >
> > WARNING: Deprecated use of 'kmap_atomic', prefer 'kmap_local_page' instead
> > + vaddr = kmap_atomic(page);
> >
>
> Can you also update the comments (that mention kmap/kunmap atomic)?
>
Oh yes, sorry I missed it in the patch.
Thanks for pointing this out.
I will also check Matthew's comments and fix everything together.
Thanks
* Re: [PATCH 3/3] zsmalloc: replace kmap_atomic with kmap_local_page
From: Pintu Agarwal @ 2024-10-03 18:07 UTC
To: Matthew Wilcox
Cc: Pintu Kumar, minchan, senozhatsky, akpm, linux-mm, linux-kernel,
joe, skhan
Hi Matthew,
Thank you so much for your review and comments.
On Wed, 2 Oct 2024 at 09:08, Matthew Wilcox <willy@infradead.org> wrote:
>
> On Tue, Oct 01, 2024 at 11:23:58PM +0530, Pintu Kumar wrote:
> > @@ -1059,12 +1061,12 @@ static void *__zs_map_object(struct mapping_area *area,
> > sizes[1] = size - sizes[0];
> >
> > /* copy object to per-cpu buffer */
> > - addr = kmap_atomic(pages[0]);
> > + addr = kmap_local_page(pages[0]);
> > memcpy(buf, addr + off, sizes[0]);
> > - kunmap_atomic(addr);
> > - addr = kmap_atomic(pages[1]);
> > + kunmap_local(addr);
> > + addr = kmap_local_page(pages[1]);
> > memcpy(buf + sizes[0], addr, sizes[1]);
> > - kunmap_atomic(addr);
> > + kunmap_local(addr);
>
> This looks like memcpy_from_page().
>
Yes, I checked, and both of the above memcpy calls can be replaced like this:
memcpy_from_page(buf, pages[0], off, sizes[0]);
memcpy_from_page(buf + sizes[0], pages[1], 0, sizes[1]);
> > /* copy per-cpu buffer to object */
> > - addr = kmap_atomic(pages[0]);
> > + addr = kmap_local_page(pages[0]);
> > memcpy(addr + off, buf, sizes[0]);
> > - kunmap_atomic(addr);
> > - addr = kmap_atomic(pages[1]);
> > + kunmap_local(addr);
> > + addr = kmap_local_page(pages[1]);
> > memcpy(addr, buf + sizes[0], sizes[1]);
> > - kunmap_atomic(addr);
> > + kunmap_local(addr);
>
> memcpy_from_page()?
>
Same here, but I think this one is memcpy_to_page().
I replaced it like this:
memcpy_to_page(pages[0], off, buf, sizes[0]);
memcpy_to_page(pages[1], 0, buf + sizes[0], sizes[1]);
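For reference, memcpy_to_page() in include/linux/highmem.h looks roughly
like this; note that it also does the flush_dcache_page() that the
open-coded copy omits:

    static inline void memcpy_to_page(struct page *page, size_t offset,
                                      const char *from, size_t len)
    {
            char *to = kmap_local_page(page);

            /* the copy must not cross the page boundary */
            VM_BUG_ON(offset + len > PAGE_SIZE);
            memcpy(to + offset, from, len);
            flush_dcache_page(page);
            kunmap_local(to);
    }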
> > @@ -1798,14 +1800,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
> > migrate_write_lock(zspage);
> >
> > offset = get_first_obj_offset(page);
> > - s_addr = kmap_atomic(page);
> > + s_addr = kmap_local_page(page);
> >
> > /*
> > * Here, any user cannot access all objects in the zspage so let's move.
> > */
> > - d_addr = kmap_atomic(newpage);
> > + d_addr = kmap_local_page(newpage);
> > copy_page(d_addr, s_addr);
> > - kunmap_atomic(d_addr);
> > + kunmap_local(d_addr);
>
> copy_highpage()?
>
This one looks tricky; it does not seem to be a straightforward
replacement with copy_highpage().
There is a fixup loop after the copy that I don't think can be replaced.
I am checking further, but I need some help on this.
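For reference, the generic copy_highpage() in include/linux/highmem.h is
roughly the sketch below. The copy_page() call itself maps onto it
cleanly, but in zs_page_migrate() s_addr has to stay mapped for the loop
that follows the copy, so using the helper would map the source page a
second time (which kmap_local nesting permits, but is redundant):

    static inline void copy_highpage(struct page *to, struct page *from)
    {
            char *vfrom, *vto;

            vfrom = kmap_local_page(from);
            vto = kmap_local_page(to);
            copy_page(vto, vfrom);
            /* local mappings are released in reverse order of creation */
            kunmap_local(vto);
            kunmap_local(vfrom);
    }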
>
> Maybe check the other uses, see if there are appropriate helpers for
> them too.
>
Yes, sure, I am checking the other uses as well.
Will share the changes in v2 of the patchset.
> Also, what testing have you done of this patch?
My test setup is as follows:
Enabled ZRAM and HIGHMEM in the kernel and compiled for arm32.
Booted the device under QEMU (arm32, 1 GB RAM, 128 MB ZRAM).
Using a test program, filled the zram area, then ran the stress-ng
utility to simulate memory pressure.
OOM occurred, freed some space, and allocations were triggered again.
-------------
              total        used        free      shared  buff/cache   available
Mem:           1001         988           5           0           7           4
Swap:           127         127           0
Total:         1129        1116           5

Node 0, zone   Normal    2   1   9  15  11   2   1   0   1   1   0
Node 0, zone  HighMem    0   2   5   6   0   0   0   0   0   0   0
Thanks