From: Vladimir Davydov <vdavydov@parallels.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>,
Michal Hocko <mhocko@suse.cz>, Christoph Lameter <cl@linux.com>,
Pekka Enberg <penberg@kernel.org>,
David Rientjes <rientjes@google.com>,
Joonsoo Kim <iamjoonsoo.kim@lge.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH -mm 2/8] slab: charge slab pages to the current memory cgroup
Date: Mon, 3 Nov 2014 23:59:40 +0300
Message-ID: <16d8b42a986bd5931459d11490f959bd9a2c5b7e.1415046910.git.vdavydov@parallels.com>
In-Reply-To: <cover.1415046910.git.vdavydov@parallels.com>
Currently, new slab pages are charged to the memory cgroup that owns the
cache (kmem_cache->memcg_params->memcg). Since I am going to decouple kmem
caches from memory cgroups, charge them to the current task's memory
cgroup instead.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
---
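Note: the new alloc_slab_page() helpers reuse the charging contract that
bare kmem pages (e.g. kmalloc_large()) already follow. Roughly, it looks
like this (a sketch for illustration only, not part of the patch):

	struct mem_cgroup *memcg = NULL;
	struct page *page;

	/* Charge the current task's memcg; returns false on failure. */
	if (!memcg_kmem_newpage_charge(flags, &memcg, order))
		return NULL;

	page = alloc_pages(flags, order);

	/*
	 * Bind the charge to the page. If the allocation failed,
	 * memcg_kmem_commit_charge() reverts the charge instead, so it
	 * is safe to call it unconditionally with a NULL page.
	 */
	memcg_kmem_commit_charge(page, memcg, order);
	return page;

Since the page now carries its own memcg charge, it must be freed with
__free_kmem_pages(), which uncharges it, rather than with plain
__free_pages() + memcg_uncharge_slab(). That is why kmem_freepages() and
__free_slab() are switched over below.
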
include/linux/memcontrol.h | 5 -----
mm/memcontrol.c | 14 --------------
mm/slab.c | 22 +++++++++++++++-------
mm/slab.h | 28 ----------------------------
mm/slub.c | 18 ++++++++----------
5 files changed, 23 insertions(+), 64 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e789551d4db0..31b495ff5f3a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -416,9 +416,6 @@ void memcg_update_array_size(int num_groups);
struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
-int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
-void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
-
int __memcg_cleanup_cache_params(struct kmem_cache *s);
/**
@@ -490,8 +487,6 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
* memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
* @cachep: the original global kmem cache
* @gfp: allocation flags.
- *
- * All memory allocated from a per-memcg cache is charged to the owner memcg.
*/
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 370a27509e45..8c60d7a30f4f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2778,20 +2778,6 @@ static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
memcg_resume_kmem_account();
}
-int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
-{
- unsigned int nr_pages = 1 << order;
-
- return memcg_charge_kmem(cachep->memcg_params->memcg, gfp, nr_pages);
-}
-
-void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
-{
- unsigned int nr_pages = 1 << order;
-
- memcg_uncharge_kmem(cachep->memcg_params->memcg, nr_pages);
-}
-
/*
* Return the kmem_cache we're supposed to use for a slab allocation.
* We try to use the current memcg's version of the cache.
diff --git a/mm/slab.c b/mm/slab.c
index 458613d75533..a9eb49f40c0a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1559,6 +1559,19 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
#endif
}
+static inline struct page *alloc_slab_page(gfp_t flags, int nodeid, int order)
+{
+ struct mem_cgroup *memcg = NULL;
+ struct page *page;
+
+ flags |= __GFP_NOTRACK;
+ if (!memcg_kmem_newpage_charge(flags, &memcg, order))
+ return NULL;
+ page = alloc_pages_exact_node(nodeid, flags, order);
+ memcg_kmem_commit_charge(page, memcg, order);
+ return page;
+}
+
/*
* Interface to system's page allocator. No need to hold the
* kmem_cache_node ->list_lock.
@@ -1577,12 +1590,8 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
flags |= __GFP_RECLAIMABLE;
- if (memcg_charge_slab(cachep, flags, cachep->gfporder))
- return NULL;
-
- page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
+ page = alloc_slab_page(flags, nodeid, cachep->gfporder);
if (!page) {
- memcg_uncharge_slab(cachep, cachep->gfporder);
slab_out_of_memory(cachep, flags, nodeid);
return NULL;
}
@@ -1638,8 +1647,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
- __free_pages(page, cachep->gfporder);
- memcg_uncharge_slab(cachep, cachep->gfporder);
+ __free_kmem_pages(page, cachep->gfporder);
}
static void kmem_rcu_free(struct rcu_head *head)
diff --git a/mm/slab.h b/mm/slab.h
index 3347fd77f7be..1ba7ad07dce4 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -227,25 +227,6 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
return s;
return s->memcg_params->root_cache;
}
-
-static __always_inline int memcg_charge_slab(struct kmem_cache *s,
- gfp_t gfp, int order)
-{
- if (!memcg_kmem_enabled())
- return 0;
- if (is_root_cache(s))
- return 0;
- return __memcg_charge_slab(s, gfp, order);
-}
-
-static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
-{
- if (!memcg_kmem_enabled())
- return;
- if (is_root_cache(s))
- return;
- __memcg_uncharge_slab(s, order);
-}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
@@ -273,15 +254,6 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
return s;
}
-
-static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
-{
- return 0;
-}
-
-static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
-{
-}
#endif
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
diff --git a/mm/slub.c b/mm/slub.c
index 80c170e92ffc..205eaca18b7b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1276,15 +1276,16 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
/*
* Slab allocation and freeing
*/
-static inline struct page *alloc_slab_page(struct kmem_cache *s,
- gfp_t flags, int node, struct kmem_cache_order_objects oo)
+static inline struct page *alloc_slab_page(gfp_t flags, int node,
+ struct kmem_cache_order_objects oo)
{
+ struct mem_cgroup *memcg = NULL;
struct page *page;
int order = oo_order(oo);
flags |= __GFP_NOTRACK;
- if (memcg_charge_slab(s, flags, order))
+ if (!memcg_kmem_newpage_charge(flags, &memcg, order))
return NULL;
if (node == NUMA_NO_NODE)
@@ -1292,9 +1293,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
else
page = alloc_pages_exact_node(node, flags, order);
- if (!page)
- memcg_uncharge_slab(s, order);
-
+ memcg_kmem_commit_charge(page, memcg, order);
return page;
}
@@ -1317,7 +1316,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
*/
alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
- page = alloc_slab_page(s, alloc_gfp, node, oo);
+ page = alloc_slab_page(alloc_gfp, node, oo);
if (unlikely(!page)) {
oo = s->min;
alloc_gfp = flags;
@@ -1325,7 +1324,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
* Allocation may have failed due to fragmentation.
* Try a lower order alloc if possible
*/
- page = alloc_slab_page(s, alloc_gfp, node, oo);
+ page = alloc_slab_page(alloc_gfp, node, oo);
if (page)
stat(s, ORDER_FALLBACK);
@@ -1438,8 +1437,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
page_mapcount_reset(page);
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += pages;
- __free_pages(page, order);
- memcg_uncharge_slab(s, order);
+ __free_kmem_pages(page, order);
}
#define need_reserve_slab_rcu \
--
1.7.10.4