From: clameter@sgi.com
To: akpm@linux-foundation.org
Cc: linux-mm@kvack.org
Subject: [patch 05/10] SLUB: Add MIN_PARTIAL
Date: Thu, 26 Apr 2007 21:27:00 -0700
Message-ID: <20070427042908.476546262@sgi.com>
In-Reply-To: <20070427042655.019305162@sgi.com>
We leave a minimum number of partial slabs on a node when we search for
partial slabs on other nodes. Define a constant, MIN_PARTIAL, for that
value, and modify SLUB to keep MIN_PARTIAL slabs around on each node's
partial list.
This avoids the bad situation where freeing the last object in a slab
returns the page to the page allocator, only for the next allocation to
request a page right back because the partial list is empty. Keeping a
couple of empty slabs on the partial list avoids that round trip.
Empty slabs are added to the end of the partial list to ensure that
partially allocated slabs are consumed first (defragmentation).
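
To illustrate (example only, not part of the patch; foo_cache and p are
hypothetical), consider a caller that frees the last remaining object in
a slab and then allocates again right away:

	kmem_cache_free(foo_cache, p);			/* frees the slab's last object */
	p = kmem_cache_alloc(foo_cache, GFP_KERNEL);	/* needs a slab again */

Without MIN_PARTIAL, the free hands the now-empty page back to the page
allocator and the subsequent allocation must request a fresh page. With
this patch, the empty slab stays cached at the tail of the node's partial
list (as long as fewer than MIN_PARTIAL slabs are queued there), so the
allocation can reuse it directly.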
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Index: linux-2.6.21-rc7-mm2/mm/slub.c
===================================================================
--- linux-2.6.21-rc7-mm2.orig/mm/slub.c 2007-04-26 11:41:43.000000000 -0700
+++ linux-2.6.21-rc7-mm2/mm/slub.c 2007-04-26 11:41:54.000000000 -0700
@@ -109,6 +109,9 @@
/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST
+/* Minimum number of partial slabs */
+#define MIN_PARTIAL 2
+
#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_STORE_USER)
/*
@@ -635,16 +638,8 @@ static int on_freelist(struct kmem_cache
/*
* Tracking of fully allocated slabs for debugging
*/
-static void add_full(struct kmem_cache *s, struct page *page)
+static void add_full(struct kmem_cache_node *n, struct page *page)
{
- struct kmem_cache_node *n;
-
- VM_BUG_ON(!irqs_disabled());
-
- if (!(s->flags & SLAB_STORE_USER))
- return;
-
- n = get_node(s, page_to_nid(page));
spin_lock(&n->list_lock);
list_add(&page->lru, &n->full);
spin_unlock(&n->list_lock);
@@ -923,10 +918,16 @@ static __always_inline int slab_trylock(
/*
* Management of partially allocated slabs
*/
-static void add_partial(struct kmem_cache *s, struct page *page)
+static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
{
- struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+ spin_lock(&n->list_lock);
+ n->nr_partial++;
+ list_add_tail(&page->lru, &n->partial);
+ spin_unlock(&n->list_lock);
+}
+static void add_partial(struct kmem_cache_node *n, struct page *page)
+{
spin_lock(&n->list_lock);
n->nr_partial++;
list_add(&page->lru, &n->partial);
@@ -1026,7 +1027,7 @@ static struct page *get_any_partial(stru
n = get_node(s, zone_to_nid(*z));
if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
- n->nr_partial > 2) {
+ n->nr_partial > MIN_PARTIAL) {
page = get_partial_node(n);
if (page)
return page;
@@ -1060,15 +1061,31 @@ static struct page *get_partial(struct k
*/
static void putback_slab(struct kmem_cache *s, struct page *page)
{
+ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
if (page->inuse) {
+
if (page->freelist)
- add_partial(s, page);
- else if (PageError(page))
- add_full(s, page);
+ add_partial(n, page);
+ else if (PageError(page) && (s->flags & SLAB_STORE_USER))
+ add_full(n, page);
slab_unlock(page);
+
} else {
- slab_unlock(page);
- discard_slab(s, page);
+ if (n->nr_partial < MIN_PARTIAL) {
+ /*
+ * Adding an empty page to the partial slabs in order
+ * to avoid page allocator overhead. This page needs to
+ * come after all the others that are not fully empty
+ * in order to make sure that we do maximum
+ * defragmentation.
+ */
+ add_partial_tail(n, page);
+ slab_unlock(page);
+ } else {
+ slab_unlock(page);
+ discard_slab(s, page);
+ }
}
}
@@ -1326,7 +1343,7 @@ checks_ok:
* then add it.
*/
if (unlikely(!prior))
- add_partial(s, page);
+ add_partial(get_node(s, page_to_nid(page)), page);
out_unlock:
slab_unlock(page);
@@ -1542,7 +1559,7 @@ static struct kmem_cache_node * __init e
kmalloc_caches->node[node] = n;
init_kmem_cache_node(n);
atomic_long_inc(&n->nr_slabs);
- add_partial(kmalloc_caches, page);
+ add_partial(n, page);
return n;
}