From: js1304@gmail.com
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>,
Pekka Enberg <penberg@kernel.org>,
David Rientjes <rientjes@google.com>,
Jesper Dangaard Brouer <brouer@redhat.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Joonsoo Kim <iamjoonsoo.kim@lge.com>
Subject: [PATCH v2 08/11] mm/slab: make cache_grow() handle the page allocated on arbitrary node
Date: Tue, 12 Apr 2016 13:51:03 +0900 [thread overview]
Message-ID: <1460436666-20462-9-git-send-email-iamjoonsoo.kim@lge.com> (raw)
In-Reply-To: <1460436666-20462-1-git-send-email-iamjoonsoo.kim@lge.com>
From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Currently, cache_grow() assumes that the allocated page's nodeid is the same
as the nodeid parameter that was used for the allocation request. If we
discard this assumption, we can handle the fallback_alloc() case gracefully.
So, this patch makes cache_grow() handle a page allocated on an arbitrary
node, and cleans up the relevant code.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
mm/slab.c | 60 +++++++++++++++++++++---------------------------------------
1 file changed, 21 insertions(+), 39 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a3422bc..1910589 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2543,13 +2543,14 @@ static void slab_map_pages(struct kmem_cache *cache, struct page *page,
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
-static int cache_grow(struct kmem_cache *cachep,
- gfp_t flags, int nodeid, struct page *page)
+static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *freelist;
size_t offset;
gfp_t local_flags;
+ int page_node;
struct kmem_cache_node *n;
+ struct page *page;
/*
* Be lazy and only check for valid flags here, keeping it out of the
@@ -2577,12 +2578,12 @@ static int cache_grow(struct kmem_cache *cachep,
* Get mem for the objs. Attempt to allocate a physical page from
* 'nodeid'.
*/
- if (!page)
- page = kmem_getpages(cachep, local_flags, nodeid);
+ page = kmem_getpages(cachep, local_flags, nodeid);
if (!page)
goto failed;
- n = get_node(cachep, nodeid);
+ page_node = page_to_nid(page);
+ n = get_node(cachep, page_node);
/* Get colour for the slab, and cal the next value. */
n->colour_next++;
@@ -2597,7 +2598,7 @@ static int cache_grow(struct kmem_cache *cachep,
/* Get slab management. */
freelist = alloc_slabmgmt(cachep, page, offset,
- local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
+ local_flags & ~GFP_CONSTRAINT_MASK, page_node);
if (OFF_SLAB(cachep) && !freelist)
goto opps1;
@@ -2616,13 +2617,13 @@ static int cache_grow(struct kmem_cache *cachep,
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num;
spin_unlock(&n->list_lock);
- return 1;
+ return page_node;
opps1:
kmem_freepages(cachep, page);
failed:
if (gfpflags_allow_blocking(local_flags))
local_irq_disable();
- return 0;
+ return -1;
}
#if DEBUG
@@ -2903,14 +2904,14 @@ alloc_done:
return obj;
}
- x = cache_grow(cachep, gfp_exact_node(flags), node, NULL);
+ x = cache_grow(cachep, gfp_exact_node(flags), node);
/* cache_grow can reenable interrupts, then ac could change. */
ac = cpu_cache_get(cachep);
node = numa_mem_id();
/* no objects in sight? abort */
- if (!x && ac->avail == 0)
+ if (x < 0 && ac->avail == 0)
return NULL;
if (!ac->avail) /* objects refilled by interrupt? */
@@ -3039,7 +3040,6 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
{
struct zonelist *zonelist;
- gfp_t local_flags;
struct zoneref *z;
struct zone *zone;
enum zone_type high_zoneidx = gfp_zone(flags);
@@ -3050,8 +3050,6 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
if (flags & __GFP_THISNODE)
return NULL;
- local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
-
retry_cpuset:
cpuset_mems_cookie = read_mems_allowed_begin();
zonelist = node_zonelist(mempolicy_slab_node(), flags);
@@ -3081,33 +3079,17 @@ retry:
* We may trigger various forms of reclaim on the allowed
* set and go into memory reserves if necessary.
*/
- struct page *page;
+ nid = cache_grow(cache, flags, numa_mem_id());
+ if (nid >= 0) {
+ obj = ____cache_alloc_node(cache,
+ gfp_exact_node(flags), nid);
- if (gfpflags_allow_blocking(local_flags))
- local_irq_enable();
- kmem_flagcheck(cache, flags);
- page = kmem_getpages(cache, local_flags, numa_mem_id());
- if (gfpflags_allow_blocking(local_flags))
- local_irq_disable();
- if (page) {
/*
- * Insert into the appropriate per node queues
+ * Another processor may allocate the objects in
+ * the slab since we are not holding any locks.
*/
- nid = page_to_nid(page);
- if (cache_grow(cache, flags, nid, page)) {
- obj = ____cache_alloc_node(cache,
- gfp_exact_node(flags), nid);
- if (!obj)
- /*
- * Another processor may allocate the
- * objects in the slab since we are
- * not holding any locks.
- */
- goto retry;
- } else {
- /* cache_grow already freed obj */
- obj = NULL;
- }
+ if (!obj)
+ goto retry;
}
}
@@ -3158,8 +3140,8 @@ retry:
must_grow:
spin_unlock(&n->list_lock);
- x = cache_grow(cachep, gfp_exact_node(flags), nodeid, NULL);
- if (x)
+ x = cache_grow(cachep, gfp_exact_node(flags), nodeid);
+ if (x >= 0)
goto retry;
return fallback_alloc(cachep, flags);
--
1.9.1
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
next prev parent reply other threads:[~2016-04-12 4:51 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-04-12 4:50 [PATCH v2 00/11] mm/slab: reduce lock contention in alloc path js1304
2016-04-12 4:50 ` [PATCH v2 01/11] mm/slab: fix the theoretical race by holding proper lock js1304
2016-04-12 16:38 ` Christoph Lameter
2016-04-14 1:56 ` Joonsoo Kim
2016-04-12 4:50 ` [PATCH v2 02/11] mm/slab: remove BAD_ALIEN_MAGIC again js1304
2016-04-12 16:41 ` Christoph Lameter
2016-04-12 4:50 ` [PATCH v2 03/11] mm/slab: drain the free slab as much as possible js1304
2016-04-12 4:50 ` [PATCH v2 04/11] mm/slab: factor out kmem_cache_node initialization code js1304
2016-04-12 16:53 ` Christoph Lameter
2016-04-26 0:47 ` Joonsoo Kim
2016-04-12 4:51 ` [PATCH v2 05/11] mm/slab: clean-up kmem_cache_node setup js1304
2016-04-12 16:55 ` Christoph Lameter
2016-04-12 4:51 ` [PATCH v2 06/11] mm/slab: don't keep free slabs if free_objects exceeds free_limit js1304
2016-07-22 11:51 ` Tetsuo Handa
2016-07-26 7:18 ` Joonsoo Kim
2016-04-12 4:51 ` [PATCH v2 07/11] mm/slab: racy access/modify the slab color js1304
2016-04-12 4:51 ` js1304 [this message]
2016-04-12 4:51 ` [PATCH v2 09/11] mm/slab: separate cache_grow() to two parts js1304
2016-04-12 4:51 ` [PATCH v2 10/11] mm/slab: refill cpu cache through a new slab without holding a node lock js1304
2016-04-12 4:51 ` [PATCH v2 11/11] mm/slab: lockless decision to grow cache js1304
2016-04-12 7:24 ` Jesper Dangaard Brouer
2016-04-12 8:16 ` Joonsoo Kim
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1460436666-20462-9-git-send-email-iamjoonsoo.kim@lge.com \
--to=js1304@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=brouer@redhat.com \
--cc=cl@linux.com \
--cc=iamjoonsoo.kim@lge.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=penberg@kernel.org \
--cc=rientjes@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox