From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
To: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux.com>,
Andrew Morton <akpm@linux-foundation.org>,
Joonsoo Kim <js1304@gmail.com>,
David Rientjes <rientjes@google.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
Joonsoo Kim <iamjoonsoo.kim@lge.com>
Subject: [PATCH 11/16] slab: remove kmem_bufctl_t
Date: Thu, 22 Aug 2013 17:44:20 +0900 [thread overview]
Message-ID: <1377161065-30552-12-git-send-email-iamjoonsoo.kim@lge.com> (raw)
In-Reply-To: <1377161065-30552-1-git-send-email-iamjoonsoo.kim@lge.com>
Now that we have changed the management method of free objects of the slab,
there is no need to use the special values BUFCTL_END, BUFCTL_FREE, and
BUFCTL_ACTIVE. So remove them.
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
diff --git a/mm/slab.c b/mm/slab.c
index 4551d57..7216ebe 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -163,27 +163,7 @@
*/
static bool pfmemalloc_active __read_mostly;
-/*
- * kmem_bufctl_t:
- *
- * Bufctl's are used for linking objs within a slab
- * linked offsets.
- *
- * This implementation relies on "struct page" for locating the cache &
- * slab an object belongs to.
- * This allows the bufctl structure to be small (one int), but limits
- * the number of objects a slab (not a cache) can contain when off-slab
- * bufctls are used. The limit is the size of the largest general cache
- * that does not use off-slab slabs.
- * For 32bit archs with 4 kB pages, is this 56.
- * This is not serious, as it is only for large objects, when it is unwise
- * to have too many per slab.
- * Note: This limit can be raised by introducing a general cache whose size
- * is less than 512 (PAGE_SIZE<<3), but greater than 256.
- */
-
-typedef unsigned int kmem_bufctl_t;
-#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
+#define SLAB_LIMIT (((unsigned int)(~0U))-1)
/*
* struct slab
@@ -197,7 +177,7 @@ struct slab {
struct list_head list;
void *s_mem; /* including colour offset */
unsigned int inuse; /* num of objs active in slab */
- kmem_bufctl_t free;
+ unsigned int free;
};
};
@@ -613,7 +593,7 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
- return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+ return ALIGN(sizeof(struct slab)+nr_objs*sizeof(unsigned int), align);
}
/*
@@ -633,7 +613,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
* slab is used for:
*
* - The struct slab
- * - One kmem_bufctl_t for each object
+ * - One unsigned int for each object
* - Padding to respect alignment of @align
* - @buffer_size bytes for each object
*
@@ -658,7 +638,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
* into account.
*/
nr_objs = (slab_size - sizeof(struct slab)) /
- (buffer_size + sizeof(kmem_bufctl_t));
+ (buffer_size + sizeof(unsigned int));
/*
* This calculated number will be either the right
@@ -2056,7 +2036,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
* looping condition in cache_grow().
*/
offslab_limit = size - sizeof(struct slab);
- offslab_limit /= sizeof(kmem_bufctl_t);
+ offslab_limit /= sizeof(unsigned int);
if (num > offslab_limit)
break;
@@ -2297,7 +2277,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (!cachep->num)
return -E2BIG;
- slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+ slab_size = ALIGN(cachep->num * sizeof(unsigned int)
+ sizeof(struct slab), cachep->align);
/*
@@ -2312,7 +2292,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (flags & CFLGS_OFF_SLAB) {
/* really off slab. No need for manual alignment */
slab_size =
- cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
+ cachep->num * sizeof(unsigned int) + sizeof(struct slab);
#ifdef CONFIG_PAGE_POISONING
/* If we're going to use the generic kernel_map_pages()
@@ -2591,9 +2571,9 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep,
return slabp;
}
-static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
+static inline unsigned int *slab_bufctl(struct slab *slabp)
{
- return (kmem_bufctl_t *) (slabp + 1);
+ return (unsigned int *) (slabp + 1);
}
static void cache_init_objs(struct kmem_cache *cachep,
@@ -2672,7 +2652,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
{
unsigned int objnr = obj_to_index(cachep, slabp, objp);
#if DEBUG
- kmem_bufctl_t i;
+ unsigned int i;
/* Verify that the slab belongs to the intended node */
WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
--
1.7.9.5
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
next prev parent reply other threads:[~2013-08-22 8:44 UTC|newest]
Thread overview: 59+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-08-22 8:44 [PATCH 00/16] slab: overload struct slab over struct page to reduce memory usage Joonsoo Kim
2013-08-22 8:44 ` [PATCH 01/16] slab: correct pfmemalloc check Joonsoo Kim
2013-09-11 14:30 ` Christoph Lameter
2013-09-12 6:51 ` Joonsoo Kim
2013-08-22 8:44 ` [PATCH 02/16] slab: change return type of kmem_getpages() to struct page Joonsoo Kim
2013-08-22 17:49 ` Christoph Lameter
2013-08-23 6:40 ` Joonsoo Kim
2013-09-11 14:31 ` Christoph Lameter
2013-08-22 8:44 ` [PATCH 03/16] slab: remove colouroff in struct slab Joonsoo Kim
2013-09-11 14:32 ` Christoph Lameter
2013-08-22 8:44 ` [PATCH 04/16] slab: remove nodeid " Joonsoo Kim
2013-08-22 17:51 ` Christoph Lameter
2013-08-23 6:49 ` Joonsoo Kim
2013-08-22 8:44 ` [PATCH 05/16] slab: remove cachep in struct slab_rcu Joonsoo Kim
2013-08-22 17:53 ` Christoph Lameter
2013-08-23 6:53 ` Joonsoo Kim
2013-08-23 13:42 ` Christoph Lameter
2013-08-23 14:24 ` JoonSoo Kim
2013-08-23 15:41 ` Christoph Lameter
2013-08-23 16:12 ` JoonSoo Kim
2013-09-02 8:38 ` [PATCH 0/4] slab: implement byte sized indexes for the freelist of a slab Joonsoo Kim
2013-09-02 8:38 ` [PATCH 1/4] slab: factor out calculate nr objects in cache_estimate Joonsoo Kim
2013-09-02 8:38 ` [PATCH 2/4] slab: introduce helper functions to get/set free object Joonsoo Kim
2013-09-02 8:38 ` [PATCH 3/4] slab: introduce byte sized index for the freelist of a slab Joonsoo Kim
2013-09-02 8:38 ` [PATCH 4/4] slab: make more slab management structure off the slab Joonsoo Kim
2013-09-03 14:15 ` [PATCH 0/4] slab: implement byte sized indexes for the freelist of a slab Christoph Lameter
2013-09-04 8:33 ` Joonsoo Kim
2013-09-05 6:55 ` Joonsoo Kim
2013-09-05 14:33 ` Christoph Lameter
2013-09-06 5:58 ` Joonsoo Kim
2013-09-04 2:17 ` Wanpeng Li
2013-09-04 2:17 ` Wanpeng Li
[not found] ` <5226985f.4475320a.1c61.2623SMTPIN_ADDED_BROKEN@mx.google.com>
2013-09-04 8:28 ` Joonsoo Kim
2013-09-11 14:33 ` [PATCH 05/16] slab: remove cachep in struct slab_rcu Christoph Lameter
2013-08-22 8:44 ` [PATCH 06/16] slab: put forward freeing slab management object Joonsoo Kim
2013-09-11 14:35 ` Christoph Lameter
2013-08-22 8:44 ` [PATCH 07/16] slab: overloading the RCU head over the LRU for RCU free Joonsoo Kim
2013-08-27 22:06 ` Jonathan Corbet
2013-08-28 6:36 ` Joonsoo Kim
2013-09-11 14:39 ` Christoph Lameter
2013-09-12 6:55 ` Joonsoo Kim
2013-09-12 14:21 ` Christoph Lameter
2013-08-22 8:44 ` [PATCH 08/16] slab: use well-defined macro, virt_to_slab() Joonsoo Kim
2013-09-11 14:40 ` Christoph Lameter
2013-08-22 8:44 ` [PATCH 09/16] slab: use __GFP_COMP flag for allocating slab pages Joonsoo Kim
2013-08-22 18:00 ` Christoph Lameter
2013-08-23 6:55 ` Joonsoo Kim
2013-08-22 8:44 ` [PATCH 10/16] slab: change the management method of free objects of the slab Joonsoo Kim
2013-08-22 8:44 ` Joonsoo Kim [this message]
2013-08-22 8:44 ` [PATCH 12/16] slab: remove SLAB_LIMIT Joonsoo Kim
2013-08-22 8:44 ` [PATCH 13/16] slab: replace free and inuse in struct slab with newly introduced active Joonsoo Kim
2013-08-22 8:44 ` [PATCH 14/16] slab: use struct page for slab management Joonsoo Kim
2013-08-22 8:44 ` [PATCH 15/16] slab: remove useless statement for checking pfmemalloc Joonsoo Kim
2013-08-22 8:44 ` [PATCH 16/16] slab: rename slab_bufctl to slab_freelist Joonsoo Kim
2013-08-22 16:47 ` [PATCH 00/16] slab: overload struct slab over struct page to reduce memory usage Christoph Lameter
2013-08-23 6:35 ` Joonsoo Kim
2013-09-04 3:38 ` Wanpeng Li
2013-09-04 3:38 ` Wanpeng Li
[not found] ` <5226ab2c.02092b0a.5eed.ffffd7e4SMTPIN_ADDED_BROKEN@mx.google.com>
2013-09-04 8:25 ` Joonsoo Kim
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1377161065-30552-12-git-send-email-iamjoonsoo.kim@lge.com \
--to=iamjoonsoo.kim@lge.com \
--cc=akpm@linux-foundation.org \
--cc=cl@linux.com \
--cc=js1304@gmail.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=penberg@kernel.org \
--cc=rientjes@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox