From: Christoph Lameter <clameter@sgi.com>
To: Nick Piggin <npiggin@suse.de>
Cc: Matt Mackall <mpm@selenic.com>,
Andrew Morton <akpm@linux-foundation.org>,
Linux Memory Management List <linux-mm@kvack.org>
Subject: Re: [patch 1/3] slob: rework freelist handling
Date: Wed, 23 May 2007 19:49:50 -0700 (PDT) [thread overview]
Message-ID: <Pine.LNX.4.64.0705231945450.23981@schroedinger.engr.sgi.com> (raw)
In-Reply-To: <20070524020530.GA13694@wotan.suse.de>
Here is what I got trying to trim down SLUB on x86_64 (UP config)
Full:
text data bss dec hex filename
25928 11351 256 37535 929f mm/slub.o
!CONFIG_SLUB_DEBUG + patch below
text data bss dec hex filename
8639 4735 224 13598 351e mm/slub.o
SLOB
text data bss dec hex filename
4206 96 0 4302 10ce mm/slob.o
So we can get down to about double the text size. Data is of course an
issue. Other 64 bit platforms bloat the code significantly.
Interesting that inlining some functions actually saves memory.
SLUB embedded: Reduce memory use II
Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
include/linux/slub_def.h | 4 +++
mm/slub.c | 49 ++++++++++++++++++++++++++++++++---------------
2 files changed, 38 insertions(+), 15 deletions(-)
Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h 2007-05-23 19:34:50.000000000 -0700
+++ slub/include/linux/slub_def.h 2007-05-23 19:35:12.000000000 -0700
@@ -17,7 +17,9 @@ struct kmem_cache_node {
unsigned long nr_partial;
atomic_long_t nr_slabs;
struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
struct list_head full;
+#endif
};
/*
@@ -45,7 +47,9 @@ struct kmem_cache {
int align; /* Alignment */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
+#ifdef CONFIG_SLUB_DEBUG
struct kobject kobj; /* For sysfs */
+#endif
#ifdef CONFIG_NUMA
int defrag_ratio;
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c 2007-05-23 19:34:50.000000000 -0700
+++ slub/mm/slub.c 2007-05-23 19:35:12.000000000 -0700
@@ -183,7 +183,11 @@ static inline void ClearSlabDebug(struct
* Mininum number of partial slabs. These will be left on the partial
* lists even if they are empty. kmem_cache_shrink may reclaim them.
*/
+#ifdef CONFIG_SLUB_DEBUG
#define MIN_PARTIAL 2
+#else
+#define MIN_PARTIAL 0
+#endif
/*
* Maximum number of desirable partial slabs.
@@ -254,9 +258,9 @@ static int sysfs_slab_add(struct kmem_ca
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
#else
-static int sysfs_slab_add(struct kmem_cache *s) { return 0; }
-static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
-static void sysfs_slab_remove(struct kmem_cache *s) {}
+static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
+static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
+static inline void sysfs_slab_remove(struct kmem_cache *s) {}
#endif
/********************************************************************
@@ -1011,7 +1015,7 @@ static struct page *allocate_slab(struct
return page;
}
-static void setup_object(struct kmem_cache *s, struct page *page,
+static inline void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
@@ -1346,7 +1350,7 @@ static void deactivate_slab(struct kmem_
unfreeze_slab(s, page);
}
-static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
+static inline void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
{
slab_lock(page);
deactivate_slab(s, page, cpu);
@@ -1356,7 +1360,7 @@ static void flush_slab(struct kmem_cache
* Flush cpu slab.
* Called from IPI handler with interrupts disabled.
*/
-static void __flush_cpu_slab(struct kmem_cache *s, int cpu)
+static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
struct page *page = s->cpu_slab[cpu];
@@ -1490,7 +1494,7 @@ debug:
*
* Otherwise we can simply pick the next object from the lockless free list.
*/
-static void __always_inline *slab_alloc(struct kmem_cache *s,
+static void *slab_alloc(struct kmem_cache *s,
gfp_t gfpflags, int node, void *addr)
{
struct page *page;
@@ -1595,7 +1599,7 @@ debug:
* If fastpath is not possible then fall back to __slab_free where we deal
* with all sorts of special processing.
*/
-static void __always_inline slab_free(struct kmem_cache *s,
+static void slab_free(struct kmem_cache *s,
struct page *page, void *x, void *addr)
{
void **object = (void *)x;
@@ -1764,7 +1768,7 @@ static inline int calculate_order(int si
/*
* Figure out what the alignment of the objects will be.
*/
-static unsigned long calculate_alignment(unsigned long flags,
+static inline unsigned long calculate_alignment(unsigned long flags,
unsigned long align, unsigned long size)
{
/*
@@ -1786,13 +1790,15 @@ static unsigned long calculate_alignment
return ALIGN(align, sizeof(void *));
}
-static void init_kmem_cache_node(struct kmem_cache_node *n)
+static inline void init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
atomic_long_set(&n->nr_slabs, 0);
spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
+#ifdef CONFIG_SLUB_DEBUG
INIT_LIST_HEAD(&n->full);
+#endif
}
#ifdef CONFIG_NUMA
@@ -1877,11 +1883,11 @@ static int init_kmem_cache_nodes(struct
return 1;
}
#else
-static void free_kmem_cache_nodes(struct kmem_cache *s)
+static inline void free_kmem_cache_nodes(struct kmem_cache *s)
{
}
-static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
+static inline int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
{
init_kmem_cache_node(&s->local_node);
return 1;
@@ -2278,8 +2284,9 @@ size_t ksize(const void *object)
BUG_ON(!page);
s = page->slab;
- BUG_ON(!s);
+#ifdef CONFIG_SLUB_DEBUG
+ BUG_ON(!s);
/*
* Debugging requires use of the padding between object
* and whatever may come after it.
@@ -2295,6 +2302,8 @@ size_t ksize(const void *object)
if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
return s->inuse;
+#endif
+
/*
* Else we can use all the padding etc for the allocation
*/
@@ -2329,6 +2338,7 @@ EXPORT_SYMBOL(kfree);
*/
int kmem_cache_shrink(struct kmem_cache *s)
{
+#ifdef CONFIG_SLUB_DEBUG
int node;
int i;
struct kmem_cache_node *n;
@@ -2392,6 +2402,9 @@ int kmem_cache_shrink(struct kmem_cache
}
kfree(slabs_by_inuse);
+#else
+ flush_all(s);
+#endif
return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);
@@ -2475,10 +2488,12 @@ void __init kmem_cache_init(void)
slab_state = UP;
+#ifdef CONFIG_SLUB_DEBUG
/* Provide the correct kmalloc names now that the caches are up */
for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
kmalloc_caches[i]. name =
kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+#endif
#ifdef CONFIG_SMP
register_cpu_notifier(&slab_notifier);
@@ -3659,17 +3674,20 @@ static int sysfs_slab_alias(struct kmem_
return 0;
}
+#endif
static int __init slab_sysfs_init(void)
{
struct list_head *h;
int err;
+#ifdef CONFIG_SLUB_DEBUG
err = subsystem_register(&slab_subsys);
if (err) {
printk(KERN_ERR "Cannot register slab subsystem.\n");
return -ENOSYS;
}
+#endif
slab_state = SYSFS;
list_for_each(h, &slab_caches) {
@@ -3678,8 +3696,10 @@ static int __init slab_sysfs_init(void)
err = sysfs_slab_add(s);
BUG_ON(err);
+ kmem_cache_shrink(s);
}
+#ifdef CONFIG_SLUB_DEBUG
while (alias_list) {
struct saved_alias *al = alias_list;
@@ -3690,8 +3710,7 @@ static int __init slab_sysfs_init(void)
}
resiliency_test();
+#endif
return 0;
}
-
__initcall(slab_sysfs_init);
-#endif
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2007-05-24 2:49 UTC|newest]
Thread overview: 66+ messages / expand[flat|nested] mbox.gz Atom feed top
2007-05-22 7:39 Nick Piggin
2007-05-22 7:39 ` [patch 2/3] slob: remove bigblock tracking Nick Piggin
2007-05-22 7:41 ` [patch 3/3] slob: improved alignment handling Nick Piggin
2007-05-22 14:53 ` [patch 1/3] slob: rework freelist handling Matt Mackall
2007-05-22 19:18 ` Christoph Lameter
2007-05-23 3:06 ` Nick Piggin
2007-05-23 4:55 ` Christoph Lameter
2007-05-23 4:59 ` Nick Piggin
2007-05-23 5:01 ` Christoph Lameter
2007-05-23 5:03 ` Nick Piggin
2007-05-23 5:06 ` Christoph Lameter
2007-05-23 5:11 ` Nick Piggin
2007-05-23 5:14 ` Christoph Lameter
2007-05-23 5:22 ` Nick Piggin
2007-05-23 5:28 ` Christoph Lameter
2007-05-23 6:17 ` Nick Piggin
2007-05-23 6:28 ` Christoph Lameter
2007-05-23 7:12 ` Nick Piggin
2007-05-23 17:03 ` Christoph Lameter
2007-05-23 18:32 ` Matt Mackall
2007-05-23 19:15 ` Christoph Lameter
2007-05-23 19:58 ` Matt Mackall
2007-05-23 20:02 ` Christoph Lameter
2007-05-23 20:16 ` Christoph Lameter
2007-05-23 21:14 ` Matt Mackall
2007-05-23 21:06 ` Matt Mackall
2007-05-23 22:26 ` Christoph Lameter
2007-05-23 22:42 ` Matt Mackall
2007-05-23 22:48 ` Christoph Lameter
2007-05-24 2:05 ` Nick Piggin
2007-05-24 2:45 ` Christoph Lameter
2007-05-24 2:47 ` Nick Piggin
2007-05-24 2:55 ` Christoph Lameter
2007-05-24 3:17 ` Nick Piggin
2007-05-24 2:49 ` Christoph Lameter [this message]
2007-05-24 3:15 ` Nick Piggin
2007-05-24 3:51 ` Christoph Lameter
2007-05-24 6:11 ` Matt Mackall
2007-05-24 16:36 ` Christoph Lameter
2007-05-24 17:22 ` Matt Mackall
2007-05-24 17:27 ` Christoph Lameter
2007-05-24 17:44 ` Matt Mackall
2007-05-23 6:38 ` Christoph Lameter
2007-05-23 7:18 ` Nick Piggin
2007-05-23 17:06 ` Christoph Lameter
2007-05-23 7:46 ` Nick Piggin
2007-05-23 17:07 ` Christoph Lameter
2007-05-23 19:35 ` Matt Mackall
2007-05-23 19:59 ` Christoph Lameter
2007-05-23 20:51 ` Matt Mackall
2007-05-24 3:39 ` Nick Piggin
2007-05-24 3:55 ` Christoph Lameter
2007-05-24 4:13 ` Nick Piggin
2007-05-24 4:23 ` Christoph Lameter
2007-05-24 4:31 ` Nick Piggin
2007-05-24 4:35 ` Christoph Lameter
2007-05-24 4:39 ` Nick Piggin
2007-05-24 4:46 ` Christoph Lameter
2007-05-24 4:49 ` Nick Piggin
2007-05-24 5:07 ` Christoph Lameter
2007-05-24 3:24 ` Nick Piggin
2007-05-24 3:49 ` Christoph Lameter
2007-05-24 4:01 ` Nick Piggin
2007-05-24 4:05 ` Christoph Lameter
2007-05-24 4:24 ` Nick Piggin
2007-05-23 18:04 ` Christoph Lameter
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=Pine.LNX.4.64.0705231945450.23981@schroedinger.engr.sgi.com \
--to=clameter@sgi.com \
--cc=akpm@linux-foundation.org \
--cc=linux-mm@kvack.org \
--cc=mpm@selenic.com \
--cc=npiggin@suse.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox