From: Yafang Shao <laoar.shao@gmail.com>
To: ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org,
kafai@fb.com, songliubraving@fb.com, yhs@fb.com,
john.fastabend@gmail.com, kpsingh@kernel.org,
quentin@isovalent.com, hannes@cmpxchg.org, mhocko@kernel.org,
roman.gushchin@linux.dev, shakeelb@google.com,
songmuchun@bytedance.com, akpm@linux-foundation.org,
cl@linux.com, penberg@kernel.org, rientjes@google.com,
iamjoonsoo.kim@lge.com, vbabka@suse.cz
Cc: linux-mm@kvack.org, bpf@vger.kernel.org,
Yafang Shao <laoar.shao@gmail.com>
Subject: [RFC PATCH bpf-next 06/10] mm: Add helper to recharge vmalloc'ed address
Date: Sun, 19 Jun 2022 15:50:28 +0000
Message-ID: <20220619155032.32515-7-laoar.shao@gmail.com>
In-Reply-To: <20220619155032.32515-1-laoar.shao@gmail.com>

This patch introduces a helper to recharge the pages backing a given
vmalloc'ed address to the memcg of the current task. It works the same
way as the recharge of a kmalloc'ed address in the previous patch.
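
For illustration, below is a minimal sketch of how a caller might drive
the helper through the recharge steps used in this series. The wrapper
function name is hypothetical and not part of this patch:

	/*
	 * Hypothetical caller: move the charge for a kmalloc'ed or
	 * vmalloc'ed buffer from its old memcg to the current task's
	 * memcg.
	 */
	static bool recharge_to_current_memcg(const void *buf)
	{
		/* Reserve the charge against the current (new) memcg. */
		if (!kvrecharge(buf, MEMCG_KMEM_PRE_CHARGE))
			return false;

		/* Drop the charge held by the old memcg. */
		kvrecharge(buf, MEMCG_KMEM_UNCHARGE);

		/* Bind the pages to the current memcg's objcg. */
		kvrecharge(buf, MEMCG_KMEM_POST_CHARGE);

		return true;
	}

A failed pre-charge leaves the buffer charged to its original memcg; if
a caller has to abort after a successful pre-charge, the
MEMCG_KMEM_CHARGE_ERR step undoes it.
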
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
include/linux/slab.h | 1 +
include/linux/vmalloc.h | 2 +
mm/util.c | 9 +++++
mm/vmalloc.c | 87 +++++++++++++++++++++++++++++++++++++++++
4 files changed, 99 insertions(+)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 18ab30aa8fe8..e8fb0f6a3660 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -794,6 +794,7 @@ extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flag
__alloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);
+bool kvrecharge(const void *addr, int step);
unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 096d48aa3437..37c6d0e7b8d5 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -162,6 +162,8 @@ extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
+bool vrecharge(const void *addr, int step);
+void vuncharge(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
diff --git a/mm/util.c b/mm/util.c
index 0837570c9225..312c05e83132 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -656,6 +656,15 @@ void kvfree(const void *addr)
}
EXPORT_SYMBOL(kvfree);
+bool kvrecharge(const void *addr, int step)
+{
+ if (is_vmalloc_addr(addr))
+ return vrecharge(addr, step);
+
+ return krecharge(addr, step);
+}
+EXPORT_SYMBOL(kvrecharge);
+
/**
* kvfree_sensitive - Free a data object containing sensitive information.
* @addr: address of the data object to be freed.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index effd1ff6a4b4..7da6e429a45f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2745,6 +2745,93 @@ void vfree(const void *addr)
}
EXPORT_SYMBOL(vfree);
+bool vrecharge(const void *addr, int step)
+{
+ struct obj_cgroup *objcg_new;
+ unsigned int page_order;
+ struct vm_struct *area;
+ struct folio *folio;
+ int i;
+
+ WARN_ON(!in_task());
+
+ if (!addr)
+ return true;
+
+ area = find_vm_area(addr);
+ if (unlikely(!area))
+ return true;
+
+ page_order = vm_area_page_order(area);
+
+ switch (step) {
+ case MEMCG_KMEM_PRE_CHARGE:
+ for (i = 0; i < area->nr_pages; i += 1U << page_order) {
+ struct page *page = area->pages[i];
+
+ WARN_ON(!page);
+ objcg_new = get_obj_cgroup_from_current();
+ WARN_ON(!objcg_new);
+ if (obj_cgroup_charge_pages(objcg_new, GFP_KERNEL,
+ 1 << page_order))
+ goto out_pre;
+ cond_resched();
+ }
+ break;
+ case MEMCG_KMEM_UNCHARGE:
+ for (i = 0; i < area->nr_pages; i += 1U << page_order) {
+ struct page *page = area->pages[i];
+ struct obj_cgroup *objcg_old;
+
+ WARN_ON(!page);
+ folio = page_folio(page);
+ WARN_ON(!folio_memcg_kmem(folio));
+ objcg_old = __folio_objcg(folio);
+
+ obj_cgroup_uncharge_pages(objcg_old, 1 << page_order);
+ /* mod memcg from page */
+ mod_memcg_state(page_memcg(page), MEMCG_VMALLOC,
+ -(1U << page_order));
+ page->memcg_data = 0;
+ obj_cgroup_put(objcg_old);
+ cond_resched();
+ }
+ break;
+ case MEMCG_KMEM_POST_CHARGE:
+ objcg_new = obj_cgroup_from_current();
+ for (i = 0; i < area->nr_pages; i += 1U << page_order) {
+ struct page *page = area->pages[i];
+
+ page->memcg_data = (unsigned long)objcg_new | MEMCG_DATA_KMEM;
+ /* mod memcg from current */
+ mod_memcg_state(page_memcg(page), MEMCG_VMALLOC,
+ 1U << page_order);
+
+ }
+ break;
+ case MEMCG_KMEM_CHARGE_ERR:
+ objcg_new = obj_cgroup_from_current();
+ for (i = 0; i < area->nr_pages; i += 1U << page_order) {
+ obj_cgroup_uncharge_pages(objcg_new, 1 << page_order);
+ obj_cgroup_put(objcg_new);
+ cond_resched();
+ }
+ break;
+ }
+
+ return true;
+
+out_pre:
+ for (; i > 0; i -= 1U << page_order) {
+ obj_cgroup_uncharge_pages(objcg_new, 1 << page_order);
+ obj_cgroup_put(objcg_new);
+ cond_resched();
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(vrecharge);
+
/**
* vunmap - release virtual mapping obtained by vmap()
* @addr: memory base address
--
2.17.1