From: Matthew Dobson <colpatch@us.ibm.com>
To: linux-kernel@vger.kernel.org
Cc: Linux Memory Management <linux-mm@kvack.org>
Subject: [RFC][PATCH 7/8] __cache_grow()
Date: Fri, 18 Nov 2005 11:45:51 -0800 [thread overview]
Message-ID: <437E2F6F.4010509@us.ibm.com> (raw)
In-Reply-To: <437E2C69.4000708@us.ibm.com>
[-- Attachment #1: Type: text/plain, Size: 125 bytes --]
Create a helper for cache_grow() that handles the cache coloring and the
allocation & initialization of the struct slab.
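For illustration only, a rough userspace sketch of the same split (hypothetical
toy_* names; no kernel locking, per-node lists or GFP flags - it only mirrors
cache_grow() getting the backing memory and delegating the colouring and
slab-management setup to a helper):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for kmem_cache_t and struct slab. */
struct toy_cache {
	size_t obj_size;
	size_t num;			/* objects per "page" */
	size_t colour, colour_next, colour_off;
};

struct toy_slab {
	void *mem;
	size_t offset;			/* colour offset into the page */
};

/* Helper: pick the colour and set up slab management (role of __cache_grow()). */
static struct toy_slab *toy__cache_grow(struct toy_cache *c, void *page)
{
	struct toy_slab *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	/* choose this slab's colour offset and advance colour_next */
	s->offset = c->colour_next * c->colour_off;
	if (++c->colour_next >= c->colour)
		c->colour_next = 0;
	s->mem = page;
	/* stand-in for cache_init_objs(): "construct" the objects */
	memset((char *)page + s->offset, 0, c->obj_size * c->num);
	return s;
}

/* Caller: get the memory, then let the helper do the rest (role of cache_grow()). */
static int toy_cache_grow(struct toy_cache *c)
{
	void *page = malloc(4096);	/* stand-in for kmem_getpages() */
	struct toy_slab *s;

	if (!page)
		return 0;
	s = toy__cache_grow(c, page);
	if (!s) {
		free(page);		/* stand-in for kmem_freepages() */
		return 0;
	}
	printf("new slab at %p, colour offset %zu\n", s->mem, s->offset);
	free(s);
	free(page);
	return 1;
}

int main(void)
{
	struct toy_cache c = { .obj_size = 32, .num = 8,
			       .colour = 4, .colour_next = 0, .colour_off = 64 };

	return toy_cache_grow(&c) ? 0 : 1;
}

The actual kernel code is in the attached patch below.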
-Matt
[-- Attachment #2: slab_prep-__cache_grow.patch --]
[-- Type: text/x-patch, Size: 6001 bytes --]
Create a helper function, __cache_grow(), called by cache_grow(). This allows
us to move the cache coloring and the struct slab allocation & initialization
into a discrete function of their own.
Also, move both functions below some debugging function definitions, so that
the next patch can use those debugging functions here without needing forward
declarations.
Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>
Index: linux-2.6.15-rc1+critical_pool/mm/slab.c
===================================================================
--- linux-2.6.15-rc1+critical_pool.orig/mm/slab.c 2005-11-17 16:45:09.979876248 -0800
+++ linux-2.6.15-rc1+critical_pool/mm/slab.c 2005-11-17 16:49:45.118048888 -0800
@@ -2209,95 +2209,6 @@ static void set_slab_attr(kmem_cache_t *
}
}
-/*
- * Grow (by 1) the number of slabs within a cache. This is called by
- * kmem_cache_alloc() when there are no active objs left in a cache.
- */
-static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nid)
-{
- struct slab *slabp;
- void *objp;
- size_t offset;
- gfp_t local_flags;
- unsigned long ctor_flags;
- struct kmem_list3 *l3;
-
- /*
- * Be lazy and only check for valid flags here,
- * keeping it out of the critical path in kmem_cache_alloc().
- */
- if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
- BUG();
- if (flags & SLAB_NO_GROW)
- return 0;
-
- ctor_flags = SLAB_CTOR_CONSTRUCTOR;
- local_flags = (flags & SLAB_LEVEL_MASK);
- if (!(local_flags & __GFP_WAIT))
- /*
- * Not allowed to sleep. Need to tell a constructor about
- * this - it might need to know...
- */
- ctor_flags |= SLAB_CTOR_ATOMIC;
-
- /* About to mess with non-constant members - lock. */
- check_irq_off();
- spin_lock(&cachep->spinlock);
-
- /* Get colour for the slab, and cal the next value. */
- offset = cachep->colour_next;
- cachep->colour_next++;
- if (cachep->colour_next >= cachep->colour)
- cachep->colour_next = 0;
- offset *= cachep->colour_off;
-
- spin_unlock(&cachep->spinlock);
-
- check_irq_off();
- if (local_flags & __GFP_WAIT)
- local_irq_enable();
-
- /*
- * Ensure caller isn't asking for DMA memory if the slab wasn't created
- * with the SLAB_DMA flag.
- * Also ensure the caller *is* asking for DMA memory if the slab was
- * created with the SLAB_DMA flag.
- */
- kmem_flagcheck(cachep, flags);
-
- /* Get mem for the objects by allocating a physical page from 'nid' */
- if (!(objp = kmem_getpages(cachep, flags, nid)))
- goto out_nomem;
-
- /* Get slab management. */
- if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags)))
- goto out_freepages;
-
- slabp->nid = nid;
- set_slab_attr(cachep, slabp, objp);
-
- cache_init_objs(cachep, slabp, ctor_flags);
-
- if (local_flags & __GFP_WAIT)
- local_irq_disable();
- check_irq_off();
- l3 = cachep->nodelists[nid];
- spin_lock(&l3->list_lock);
-
- /* Make slab active. */
- list_add_tail(&slabp->list, &(l3->slabs_free));
- STATS_INC_GROWN(cachep);
- l3->free_objects += cachep->num;
- spin_unlock(&l3->list_lock);
- return 1;
-out_freepages:
- kmem_freepages(cachep, objp);
-out_nomem:
- if (local_flags & __GFP_WAIT)
- local_irq_disable();
- return 0;
-}
-
#if DEBUG
/*
* Perform extra freeing checks:
@@ -2430,6 +2341,105 @@ bad:
#define check_slabp(x,y) do { } while(0)
#endif
+/**
+ * Helper function for cache_grow(). Handle cache coloring, allocating a
+ * struct slab and initializing the slab.
+ */
+static struct slab *__cache_grow(kmem_cache_t *cachep, void *objp, gfp_t flags)
+{
+ struct slab *slabp;
+ size_t offset;
+ unsigned int local_flags;
+ unsigned long ctor_flags;
+
+ ctor_flags = SLAB_CTOR_CONSTRUCTOR;
+ local_flags = (flags & SLAB_LEVEL_MASK);
+ if (!(local_flags & __GFP_WAIT))
+ /*
+ * Not allowed to sleep. Need to tell a constructor about
+ * this - it might need to know...
+ */
+ ctor_flags |= SLAB_CTOR_ATOMIC;
+
+ /* About to mess with non-constant members - lock. */
+ check_irq_off();
+ spin_lock(&cachep->spinlock);
+
+ /* Get colour for the slab, and cal the next value. */
+ offset = cachep->colour_next;
+ cachep->colour_next++;
+ if (cachep->colour_next >= cachep->colour)
+ cachep->colour_next = 0;
+ offset *= cachep->colour_off;
+
+ spin_unlock(&cachep->spinlock);
+
+ check_irq_off();
+ if (local_flags & __GFP_WAIT)
+ local_irq_enable();
+
+ /* Get slab management. */
+ if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags)))
+ goto out;
+
+ set_slab_attr(cachep, slabp, objp);
+ cache_init_objs(cachep, slabp, ctor_flags);
+
+out:
+ if (local_flags & __GFP_WAIT)
+ local_irq_disable();
+ check_irq_off();
+ return slabp;
+}
+
+/**
+ * Grow (by 1) the number of slabs within a cache. This is called by
+ * kmem_cache_alloc() when there are no active objs left in a cache.
+ */
+static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nid)
+{
+ struct slab *slabp = NULL;
+ void *objp = NULL;
+
+ /*
+ * Be lazy and only check for valid flags here,
+ * keeping it out of the critical path in kmem_cache_alloc().
+ */
+ if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
+ BUG();
+ if (flags & SLAB_NO_GROW)
+ goto out;
+
+ /*
+ * Ensure caller isn't asking for DMA memory if the slab wasn't created
+ * with the SLAB_DMA flag.
+ * Also ensure the caller *is* asking for DMA memory if the slab was
+ * created with the SLAB_DMA flag.
+ */
+ kmem_flagcheck(cachep, flags);
+
+ /* Get mem for the objects by allocating a physical page from 'nid' */
+ if ((objp = kmem_getpages(cachep, flags, nid))) {
+ struct kmem_list3 *l3 = cachep->nodelists[nid];
+
+ if (!(slabp = __cache_grow(cachep, objp, flags))) {
+ kmem_freepages(cachep, objp);
+ objp = NULL;
+ goto out;
+ }
+ slabp->nid = nid;
+
+ STATS_INC_GROWN(cachep);
+ /* Make slab active. */
+ spin_lock(&l3->list_lock);
+ list_add_tail(&slabp->list, &l3->slabs_free);
+ l3->free_objects += cachep->num;
+ spin_unlock(&l3->list_lock);
+ }
+out:
+ return objp != NULL;
+}
+
static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
{
int batchcount;