From: Vladimir Davydov <vdavydov@parallels.com>
To: akpm@linux-foundation.org
Cc: cl@linux.com, hannes@cmpxchg.org, mhocko@suse.cz,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH -mm 6/8] slub: do not use cmpxchg for adding cpu partials when irqs disabled
Date: Fri, 30 May 2014 17:51:09 +0400
Message-ID: <620d4218dd5ea0d12e77396209c5108de6fd4634.1401457502.git.vdavydov@parallels.com>
In-Reply-To: <cover.1401457502.git.vdavydov@parallels.com>

We add slabs to per-cpu partial lists on both object allocation (see
get_partial_node) and object free (see __slab_free), using the same
function, put_cpu_partial, in both cases.

Since __slab_free can be executed with preempt/irqs enabled, we have to
use cmpxchg for adding a new element to a partial list in order to avoid
races in case we are migrated to another cpu or an irq fires while we
are in the middle of put_cpu_partial.

However, get_partial_node is always called with irqs disabled, which
grants us exclusive access to the current cpu's partial list, so there
is no need for any synchronization there and cmpxchg is redundant.

Let's get rid of this redundancy and access/update the per-cpu partial
list from get_partial_node without cmpxchg-based synchronization.
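
With irqs off, the same update degenerates to two plain stores. A
sketch of the new get_partial_node path, using the prepare_cpu_partial
helper introduced by the patch below:

	/*
	 * irqs are disabled here, so nothing else can touch this cpu's
	 * partial list: plain loads and stores are sufficient.
	 */
	prepare_cpu_partial(page, c->partial);	/* sets pages/pobjects/next */
	c->partial = page;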

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
---
 mm/slub.c |   46 +++++++++++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 19 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 2fc84853bffb..ac39cc9b6849 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1603,7 +1603,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	return freelist;
 }
 
-static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static void prepare_cpu_partial(struct page *page, struct page *oldpage);
 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
 
 /*
@@ -1643,7 +1643,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
 		} else {
-			put_cpu_partial(s, page, 0);
+			prepare_cpu_partial(page, c->partial);
+			c->partial = page;
 			stat(s, CPU_PARTIAL_NODE);
 		}
 		if (!kmem_cache_has_cpu_partial(s)
@@ -2015,6 +2016,26 @@ static void unfreeze_partials(struct kmem_cache *s,
 #endif
 }
 
+static void prepare_cpu_partial(struct page *page, struct page *oldpage)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+	int pages = 0;
+	int pobjects = 0;
+
+	if (oldpage) {
+		pages = oldpage->pages;
+		pobjects = oldpage->pobjects;
+	}
+
+	pages++;
+	pobjects += page->objects - page->inuse;
+
+	page->pages = pages;
+	page->pobjects = pobjects;
+	page->next = oldpage;
+#endif
+}
+
 /*
  * Put a page that was just frozen (in __slab_free) into a partial page
  * slot if available. This is done without interrupts disabled and without
@@ -2024,22 +2045,16 @@ static void unfreeze_partials(struct kmem_cache *s,
  * If we did not find a slot then simply move all the partials to the
  * per node partial list.
  */
-static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static void put_cpu_partial(struct kmem_cache *s, struct page *page)
 {
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct page *oldpage;
-	int pages;
-	int pobjects;
 
 	do {
-		pages = 0;
-		pobjects = 0;
 		oldpage = this_cpu_read(s->cpu_slab->partial);
 
 		if (oldpage) {
-			pobjects = oldpage->pobjects;
-			pages = oldpage->pages;
-			if (drain && pobjects > s->cpu_partial) {
+			if (oldpage->pobjects > s->cpu_partial) {
 				unsigned long flags;
 				/*
 				 * partial array is full. Move the existing
@@ -2049,18 +2064,11 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
 				local_irq_restore(flags);
 				oldpage = NULL;
-				pobjects = 0;
-				pages = 0;
 				stat(s, CPU_PARTIAL_DRAIN);
 			}
 		}
 
-		pages++;
-		pobjects += page->objects - page->inuse;
-
-		page->pages = pages;
-		page->pobjects = pobjects;
-		page->next = oldpage;
+		prepare_cpu_partial(page, oldpage);
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
 								!= oldpage);
@@ -2608,7 +2616,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 * per cpu partial list.
 		 */
 		if (new.frozen && !was_frozen) {
-			put_cpu_partial(s, page, 1);
+			put_cpu_partial(s, page);
 			stat(s, CPU_PARTIAL_FREE);
 		}
 		/*
-- 
1.7.10.4


Thread overview: 38+ messages
2014-05-30 13:51 [PATCH -mm 0/8] memcg/slab: reintroduce dead cache self-destruction Vladimir Davydov
2014-05-30 13:51 ` [PATCH -mm 1/8] memcg: cleanup memcg_cache_params refcnt usage Vladimir Davydov
2014-05-30 14:31   ` Christoph Lameter
2014-05-30 13:51 ` [PATCH -mm 2/8] memcg: destroy kmem caches when last slab is freed Vladimir Davydov
2014-05-30 14:32   ` Christoph Lameter
2014-05-30 13:51 ` [PATCH -mm 3/8] memcg: mark caches that belong to offline memcgs as dead Vladimir Davydov
2014-05-30 14:33   ` Christoph Lameter
2014-05-30 13:51 ` [PATCH -mm 4/8] slub: never fail kmem_cache_shrink Vladimir Davydov
2014-05-30 14:46   ` Christoph Lameter
2014-05-31 10:18     ` Vladimir Davydov
2014-06-02 15:13       ` Christoph Lameter
2014-05-30 13:51 ` [PATCH -mm 5/8] slab: remove kmem_cache_shrink retval Vladimir Davydov
2014-05-30 14:49   ` Christoph Lameter
2014-05-31 10:27     ` Vladimir Davydov
2014-06-02 15:16       ` Christoph Lameter
2014-06-03  9:06         ` Vladimir Davydov
2014-06-03 14:48           ` Christoph Lameter
2014-06-03 19:00             ` Vladimir Davydov
2014-05-30 13:51 ` Vladimir Davydov [this message]
2014-05-30 13:51 ` [PATCH -mm 7/8] slub: make dead caches discard free slabs immediately Vladimir Davydov
2014-05-30 14:57   ` Christoph Lameter
2014-05-31 11:04     ` Vladimir Davydov
2014-06-02  4:24       ` Joonsoo Kim
2014-06-02 11:47         ` Vladimir Davydov
2014-06-02 14:03           ` Joonsoo Kim
2014-06-02 15:17             ` Christoph Lameter
2014-06-03  8:16             ` Vladimir Davydov
2014-06-04  8:53               ` Joonsoo Kim
2014-06-04  9:47                 ` Vladimir Davydov
2014-05-30 13:51 ` [PATCH -mm 8/8] slab: reap dead memcg caches aggressively Vladimir Davydov
2014-05-30 15:01   ` Christoph Lameter
2014-05-31 11:19     ` Vladimir Davydov
2014-06-02 15:24       ` Christoph Lameter
2014-06-03 20:18         ` Vladimir Davydov
2014-06-02  4:41   ` Joonsoo Kim
2014-06-02 12:10     ` Vladimir Davydov
2014-06-02 14:01       ` Joonsoo Kim
2014-06-03  8:21         ` Vladimir Davydov
