From: clameter@sgi.com
To: akpm@linux-foundation.org
Cc: linux-mm@kvack.org
Subject: [patch 10/17] SLUB: Add macros for scanning objects in a slab
Date: Mon, 07 May 2007 14:22:50 -0700
Message-ID: <20070507212409.659872065@sgi.com>
In-Reply-To: <20070507212240.254911542@sgi.com>
[-- Attachment #1: for_each_object --]
[-- Type: text/plain, Size: 4853 bytes --]
Scanning over the objects in a slab is open-coded in a number of functions. Consolidate that code into for_each_object() and for_each_free_object() macros, plus a slab_index() helper that maps an object's address back to its index.
Use DECLARE_BITMAP instead of open-coding the bitmap declarations in validate_slab() and process_slab().
Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
mm/slub.c | 75 ++++++++++++++++++++++++++++++++++++--------------------------
1 file changed, 44 insertions(+), 31 deletions(-)
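
As an aside, the loop shape the new helpers capture can be sketched in a few lines of plain userspace C. The struct kmem_cache below is a stand-in that carries only the three fields the macros touch, char * arithmetic stands in for the kernel's void * arithmetic, and the freelist setup only roughly mirrors new_slab(); it is an illustration of the pattern, not kernel code.

/*
 * Minimal userspace model of the iterators added by this patch.
 * Build with something like:  cc -std=c11 -o slab_iter slab_iter.c
 */
#include <stdio.h>

struct kmem_cache {		/* stand-in: only the fields the macros use */
	int size;		/* bytes per object slot */
	int objects;		/* object slots per slab */
	int offset;		/* where the free pointer lives in an object */
};

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)((char *)object + s->offset);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)((char *)object + s->offset) = fp;
}

/* Loop over all object slots in a slab, allocated or free */
#define for_each_object(__p, __s, __addr)				\
	for (__p = (__addr);						\
	     (char *)(__p) < (char *)(__addr) +				\
			     (__s)->objects * (__s)->size;		\
	     __p = (char *)(__p) + (__s)->size)

/* Loop over the freelist only */
#define for_each_free_object(__p, __s, __free)				\
	for (__p = (__free); __p; __p = get_freepointer((__s), __p))

/* Map an object's address back to its index in the slab */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (int)(((char *)p - (char *)addr) / s->size);
}

int main(void)
{
	struct kmem_cache s = { .size = 32, .objects = 4, .offset = 0 };
	_Alignas(void *) char slab[4 * 32];	/* pretend slab memory */
	void *p, *last = NULL;

	/* Thread a freelist through every slot, roughly as new_slab() does */
	for_each_object(p, &s, slab) {
		if (last)
			set_freepointer(&s, last, p);
		last = p;
	}
	set_freepointer(&s, last, NULL);

	for_each_free_object(p, &s, slab)
		printf("free slot %d at %p\n", slab_index(p, &s, slab), p);
	return 0;
}

The real definitions in the hunks below have the same shape; only the struct kmem_cache fields and the void * arithmetic differ.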
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c 2007-05-07 13:54:26.000000000 -0700
+++ slub/mm/slub.c 2007-05-07 13:54:31.000000000 -0700
@@ -211,6 +211,38 @@ static inline struct kmem_cache_node *ge
}
/*
+ * Slow version of get and set free pointer.
+ *
+ * This version requires touching the cache lines of kmem_cache which
+ * we avoid to do in the fast alloc free paths. There we obtain the offset
+ * from the page struct.
+ */
+static inline void *get_freepointer(struct kmem_cache *s, void *object)
+{
+ return *(void **)(object + s->offset);
+}
+
+static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
+{
+ *(void **)(object + s->offset) = fp;
+}
+
+/* Loop over all objects in a slab */
+#define for_each_object(__p, __s, __addr) \
+ for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
+ __p += (__s)->size)
+
+/* Scan freelist */
+#define for_each_free_object(__p, __s, __free) \
+ for (__p = (__free); __p; __p = get_freepointer((__s), __p))
+
+/* Determine object index from a given position */
+static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
+{
+ return (p - addr) / s->size;
+}
+
+/*
* Object debugging
*/
static void print_section(char *text, u8 *addr, unsigned int length)
@@ -246,23 +278,6 @@ static void print_section(char *text, u8
}
/*
- * Slow version of get and set free pointer.
- *
- * This version requires touching the cache lines of kmem_cache which
- * we avoid to do in the fast alloc free paths. There we obtain the offset
- * from the page struct.
- */
-static void *get_freepointer(struct kmem_cache *s, void *object)
-{
- return *(void **)(object + s->offset);
-}
-
-static void set_freepointer(struct kmem_cache *s, void *object, void *fp)
-{
- *(void **)(object + s->offset) = fp;
-}
-
-/*
* Tracking user of a slab.
*/
struct track {
@@ -854,7 +869,7 @@ static struct page *new_slab(struct kmem
memset(start, POISON_INUSE, PAGE_SIZE << s->order);
last = start;
- for (p = start + s->size; p < end; p += s->size) {
+ for_each_object(p, s, start) {
setup_object(s, page, last);
set_freepointer(s, last, p);
last = p;
@@ -875,12 +890,10 @@ static void __free_slab(struct kmem_cach
int pages = 1 << s->order;
if (unlikely(PageError(page) || s->dtor)) {
- void *start = page_address(page);
- void *end = start + (pages << PAGE_SHIFT);
void *p;
slab_pad_check(s, page);
- for (p = start; p <= end - s->size; p += s->size) {
+ for_each_object(p, s, page_address(page)) {
if (s->dtor)
s->dtor(p, s, 0);
check_object(s, page, p, 0);
@@ -2516,7 +2529,7 @@ static int validate_slab(struct kmem_cac
{
void *p;
void *addr = page_address(page);
- unsigned long map[BITS_TO_LONGS(s->objects)];
+ DECLARE_BITMAP(map, s->objects);
if (!check_slab(s, page) ||
!on_freelist(s, page, NULL))
@@ -2525,14 +2538,14 @@ static int validate_slab(struct kmem_cac
/* Now we know that a valid freelist exists */
bitmap_zero(map, s->objects);
- for(p = page->freelist; p; p = get_freepointer(s, p)) {
- set_bit((p - addr) / s->size, map);
+ for_each_free_object(p, s, page->freelist) {
+ set_bit(slab_index(p, s, addr), map);
if (!check_object(s, page, p, 0))
return 0;
}
- for(p = addr; p < addr + s->objects * s->size; p += s->size)
- if (!test_bit((p - addr) / s->size, map))
+ for_each_object(p, s, addr)
+ if (!test_bit(slab_index(p, s, addr), map))
if (!check_object(s, page, p, 1))
return 0;
return 1;
@@ -2704,15 +2717,15 @@ static void process_slab(struct loc_trac
struct page *page, enum track_item alloc)
{
void *addr = page_address(page);
- unsigned long map[BITS_TO_LONGS(s->objects)];
+ DECLARE_BITMAP(map, s->objects);
void *p;
bitmap_zero(map, s->objects);
- for (p = page->freelist; p; p = get_freepointer(s, p))
- set_bit((p - addr) / s->size, map);
+ for_each_free_object(p, s, page->freelist)
+ set_bit(slab_index(p, s, addr), map);
- for (p = addr; p < addr + s->objects * s->size; p += s->size)
- if (!test_bit((p - addr) / s->size, map)) {
+ for_each_object(p, s, addr)
+ if (!test_bit(slab_index(p, s, addr), map)) {
void *addr = get_track(s, p, alloc)->addr;
add_location(t, s, addr);
--
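
One more note on the DECLARE_BITMAP conversion in validate_slab() and process_slab(): DECLARE_BITMAP(name, bits) is shorthand for the open-coded "unsigned long name[BITS_TO_LONGS(bits)]" it replaces, so with a runtime bound such as s->objects the result is still a variable-length array on the stack; the change is about readability, not behaviour. A rough userspace rendering follows (the BITS_PER_LONG and macro bodies here are simplified approximations of the kernel definitions, shown only for illustration):

#include <stdio.h>

/* Userspace approximations; the kernel derives BITS_PER_LONG from config */
#define BITS_PER_LONG		((int)(8 * sizeof(long)))
#define BITS_TO_LONGS(bits)	(((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) \
	unsigned long name[BITS_TO_LONGS(bits)]

int main(void)
{
	int objects = 70;		/* stands in for s->objects */
	DECLARE_BITMAP(map, objects);	/* still a VLA: the bound is not a constant */

	printf("%d bits -> %zu longs, %zu bytes on the stack\n",
	       objects, sizeof(map) / sizeof(map[0]), sizeof(map));
	return 0;
}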