From: Christoph Lameter <clameter@sgi.com>
To: akpm@linux-foundation.org
Cc: linux-mm@kvack.org
Subject: [patch 11/14] SLUB: Consolidate add_partial() and add_partial_tail() to one function
Date: Tue, 25 Sep 2007 16:25:54 -0700 [thread overview]
Message-ID: <20070925233008.017150472@sgi.com> (raw)
In-Reply-To: <20070925232543.036615409@sgi.com>
[-- Attachment #1: 0008-slab_defrag_add_partial_tail.patch --]
[-- Type: text/plain, Size: 4346 bytes --]
Add a parameter to add_partial() instead of having two separate functions.
The parameter allows finer control over where a slab page is placed on
the partial list.
If we put slabs back at the front of the list then they are likely to be
used immediately for allocations. If they are put at the end then we
maximize the time that partial slabs spend without being subject to
allocations.
When deactivating a slab we can put slabs that had objects freed to them
from remote cpus (visible because those objects were put on the regular
freelist, which requires taking locks) at the end of the list, so that the
cachelines touched by remote processors can cool down. Slabs that had
objects freed to them by the local cpu (objects present on the lockless
freelist) are put at the front of the list so that they are reused as soon
as possible, exploiting the cache-hot state of the local cpu.
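As an illustration of the placement policy, here is a minimal userspace
sketch (not the kernel code: page_stub, node_stub and list_insert() are
simplified stand-ins, with list_insert() mirroring the kernel's
__list_add(); the authoritative implementation is the diff below):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Insert new between prev and next, as the kernel's __list_add() does. */
static void list_insert(struct list_head *new,
			struct list_head *prev, struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

struct page_stub { struct list_head lru; const char *name; };
struct node_stub { unsigned long nr_partial; struct list_head partial; };

/* Consolidated helper: tail selects head vs tail of the partial list. */
static void add_partial(struct node_stub *n, struct page_stub *page, int tail)
{
	n->nr_partial++;
	if (tail)	/* remote frees only: let cachelines cool down */
		list_insert(&page->lru, n->partial.prev, &n->partial);
	else		/* local frees: cache-hot, reuse ASAP */
		list_insert(&page->lru, &n->partial, n->partial.next);
}

int main(void)
{
	struct node_stub n = { 0, { &n.partial, &n.partial } };
	struct page_stub cold = { { 0, 0 }, "cold" };
	struct page_stub hot  = { { 0, 0 }, "hot" };

	add_partial(&n, &cold, 1);	/* had only remote frees */
	add_partial(&n, &hot, 0);	/* lockless freelist held objects */

	for (struct list_head *p = n.partial.next; p != &n.partial; p = p->next)
		printf("%s\n", ((struct page_stub *)(void *)p)->name);
	return 0;	/* prints "hot" then "cold" */
}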
Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
mm/slub.c | 31 +++++++++++++++----------------
1 file changed, 15 insertions(+), 16 deletions(-)
Index: linux-2.6.23-rc8-mm1/mm/slub.c
===================================================================
--- linux-2.6.23-rc8-mm1.orig/mm/slub.c 2007-09-25 14:54:43.000000000 -0700
+++ linux-2.6.23-rc8-mm1/mm/slub.c 2007-09-25 14:55:49.000000000 -0700
@@ -1203,19 +1203,15 @@ static __always_inline int slab_trylock(
/*
* Management of partially allocated slabs
*/
-static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
+static void add_partial(struct kmem_cache_node *n,
+ struct page *page, int tail)
{
spin_lock(&n->list_lock);
n->nr_partial++;
- list_add_tail(&page->lru, &n->partial);
- spin_unlock(&n->list_lock);
-}
-
-static void add_partial(struct kmem_cache_node *n, struct page *page)
-{
- spin_lock(&n->list_lock);
- n->nr_partial++;
- list_add(&page->lru, &n->partial);
+ if (tail)
+ list_add_tail(&page->lru, &n->partial);
+ else
+ list_add(&page->lru, &n->partial);
spin_unlock(&n->list_lock);
}
@@ -1344,7 +1340,7 @@ static struct page *get_partial(struct k
*
* On exit the slab lock will have been dropped.
*/
-static void unfreeze_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -1352,7 +1348,7 @@ static void unfreeze_slab(struct kmem_ca
if (page->inuse) {
if (page->freelist)
- add_partial(n, page);
+ add_partial(n, page, tail);
else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
add_full(n, page);
slab_unlock(page);
@@ -1367,7 +1363,7 @@ static void unfreeze_slab(struct kmem_ca
* partial list stays small. kmem_cache_shrink can
* reclaim empty slabs from the partial list.
*/
- add_partial_tail(n, page);
+ add_partial(n, page, 1);
slab_unlock(page);
} else {
slab_unlock(page);
@@ -1382,6 +1378,7 @@ static void unfreeze_slab(struct kmem_ca
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
struct page *page = c->page;
+ int tail = 1;
/*
* Merge cpu freelist into freelist. Typically we get here
* because both freelists are empty. So this is unlikely
@@ -1390,6 +1387,8 @@ static void deactivate_slab(struct kmem_
while (unlikely(c->freelist)) {
void **object;
+ tail = 0; /* Hot objects. Put the slab first */
+
/* Retrieve object from cpu_freelist */
object = c->freelist;
c->freelist = c->freelist[c->offset];
@@ -1400,7 +1399,7 @@ static void deactivate_slab(struct kmem_
page->inuse--;
}
c->page = NULL;
- unfreeze_slab(s, page);
+ unfreeze_slab(s, page, tail);
}
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -1640,7 +1639,7 @@ checks_ok:
* then add it.
*/
if (unlikely(!prior))
- add_partial(get_node(s, page_to_nid(page)), page);
+ add_partial(get_node(s, page_to_nid(page)), page, 0);
out_unlock:
slab_unlock(page);
@@ -2047,7 +2046,7 @@ static struct kmem_cache_node *early_kme
#endif
init_kmem_cache_node(n);
atomic_long_inc(&n->nr_slabs);
- add_partial(n, page);
+ add_partial(n, page, 0);
return n;
}
--
Thread overview: 23+ messages
2007-09-25 23:25 [patch 00/14] Misc cleanups / fixes Christoph Lameter
2007-09-25 23:25 ` [patch 01/14] Pagecache zeroing: zero_user_segment, zero_user_segments and zero_user Christoph Lameter
2007-09-25 23:25 ` [patch 02/14] Reiser4 portion of zero_user cleanup patch Christoph Lameter
2007-09-25 23:25 ` [patch 03/14] Move vmalloc_to_page() to mm/vmalloc Christoph Lameter
2007-09-25 23:25 ` [patch 04/14] vmalloc: add const to void * parameters Christoph Lameter
2007-09-25 23:25 ` [patch 05/14] i386: Resolve dependency of asm-i386/pgtable.h on highmem.h Christoph Lameter
2007-09-25 23:25 ` [patch 06/14] is_vmalloc_addr(): Check if an address is within the vmalloc boundaries Christoph Lameter
2007-09-25 23:25 ` [patch 07/14] vmalloc: Clean up page array indexing Christoph Lameter
2007-09-25 23:25 ` [patch 08/14] vunmap: return page array passed on vmap() Christoph Lameter
2007-09-25 23:25 ` [patch 09/14] SLUB: Move count_partial() Christoph Lameter
2007-09-25 23:25 ` [patch 10/14] SLUB: Rename NUMA defrag_ratio to remote_node_defrag_ratio Christoph Lameter
2007-09-25 23:25 ` Christoph Lameter [this message]
2007-09-25 23:25 ` [patch 12/14] VM: Allow get_page_unless_zero on compound pages Christoph Lameter
2007-09-25 23:25 ` [patch 13/14] dentries: Extract common code to remove dentry from lru Christoph Lameter
2007-10-22 21:29 ` Andrew Morton
2007-10-25 2:23 ` Christoph Lameter
2007-10-25 2:34 ` Andrew Morton
2007-10-25 2:50 ` Christoph Lameter
2007-10-25 3:03 ` Andrew Morton
2007-09-25 23:25 ` [patch 14/14] bufferhead: Revert constructor removal Christoph Lameter
2007-10-22 21:31 ` Andrew Morton
2007-10-25 2:25 ` Christoph Lameter
2007-09-27 20:25 ` [patch 00/14] Misc cleanups / fixes Andrew Morton