From: clameter@sgi.com
To: akpm@linux-foundation.org
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
Pekka Enberg <penberg@cs.helsinki.fi>,
suresh.b.siddha@intel.com
Subject: [patch 06/26] Slab allocators: Replace explicit zeroing with __GFP_ZERO
Date: Mon, 18 Jun 2007 02:58:44 -0700 [thread overview]
Message-ID: <20070618095914.862238426@sgi.com> (raw)
In-Reply-To: <20070618095838.238615343@sgi.com>
[-- Attachment #1: slab_use_gfpzero_for_kmalloc_node --]
[-- Type: text/plain, Size: 11213 bytes --]
kmalloc_node() and kmem_cache_alloc_node() were not available in
zeroing variants in the past. With __GFP_ZERO it is now possible
to zero the memory while allocating.
Use __GFP_ZERO to remove the explicit clearing of memory via memset
wherever we can.
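Every conversion follows the same pattern; the hunk in block/as-iosched.c
below is a representative, illustrative example:

	/* Before: allocate, then clear the object by hand */
	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
	if (!ad)
		return NULL;
	memset(ad, 0, sizeof(*ad));

	/* After: let the allocator zero the object */
	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!ad)
		return NULL;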
Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
block/as-iosched.c | 3 +--
block/cfq-iosched.c | 18 +++++++++---------
block/deadline-iosched.c | 3 +--
block/elevator.c | 3 +--
block/genhd.c | 8 ++++----
block/ll_rw_blk.c | 4 ++--
drivers/ide/ide-probe.c | 4 ++--
kernel/timer.c | 4 ++--
lib/genalloc.c | 3 +--
mm/allocpercpu.c | 9 +++------
mm/mempool.c | 3 +--
mm/vmalloc.c | 6 +++---
12 files changed, 30 insertions(+), 38 deletions(-)
Index: linux-2.6.22-rc4-mm2/block/as-iosched.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/block/as-iosched.c 2007-06-17 15:46:35.000000000 -0700
+++ linux-2.6.22-rc4-mm2/block/as-iosched.c 2007-06-17 15:46:59.000000000 -0700
@@ -1322,10 +1322,9 @@ static void *as_init_queue(request_queue
{
struct as_data *ad;
- ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
+ ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
if (!ad)
return NULL;
- memset(ad, 0, sizeof(*ad));
ad->q = q; /* Identify what queue the data belongs to */
Index: linux-2.6.22-rc4-mm2/block/cfq-iosched.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/block/cfq-iosched.c 2007-06-17 15:42:50.000000000 -0700
+++ linux-2.6.22-rc4-mm2/block/cfq-iosched.c 2007-06-17 15:47:21.000000000 -0700
@@ -1249,9 +1249,9 @@ cfq_alloc_io_context(struct cfq_data *cf
{
struct cfq_io_context *cic;
- cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
+ cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
+ cfqd->queue->node);
if (cic) {
- memset(cic, 0, sizeof(*cic));
cic->last_end_request = jiffies;
INIT_LIST_HEAD(&cic->queue_list);
cic->dtor = cfq_free_io_context;
@@ -1374,17 +1374,19 @@ retry:
* free memory.
*/
spin_unlock_irq(cfqd->queue->queue_lock);
- new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
+ new_cfqq = kmem_cache_alloc_node(cfq_pool,
+ gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+ cfqd->queue->node);
spin_lock_irq(cfqd->queue->queue_lock);
goto retry;
} else {
- cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
+ cfqq = kmem_cache_alloc_node(cfq_pool,
+ gfp_mask | __GFP_ZERO,
+ cfqd->queue->node);
if (!cfqq)
goto out;
}
- memset(cfqq, 0, sizeof(*cfqq));
-
RB_CLEAR_NODE(&cfqq->rb_node);
INIT_LIST_HEAD(&cfqq->fifo);
@@ -2046,12 +2048,10 @@ static void *cfq_init_queue(request_queu
{
struct cfq_data *cfqd;
- cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
+ cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!cfqd)
return NULL;
- memset(cfqd, 0, sizeof(*cfqd));
-
cfqd->service_tree = CFQ_RB_ROOT;
INIT_LIST_HEAD(&cfqd->cic_list);
Index: linux-2.6.22-rc4-mm2/block/deadline-iosched.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/block/deadline-iosched.c 2007-06-17 15:47:37.000000000 -0700
+++ linux-2.6.22-rc4-mm2/block/deadline-iosched.c 2007-06-17 15:47:47.000000000 -0700
@@ -360,10 +360,9 @@ static void *deadline_init_queue(request
{
struct deadline_data *dd;
- dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+ dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
if (!dd)
return NULL;
- memset(dd, 0, sizeof(*dd));
INIT_LIST_HEAD(&dd->fifo_list[READ]);
INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
Index: linux-2.6.22-rc4-mm2/block/elevator.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/block/elevator.c 2007-06-17 15:47:57.000000000 -0700
+++ linux-2.6.22-rc4-mm2/block/elevator.c 2007-06-17 15:48:07.000000000 -0700
@@ -177,11 +177,10 @@ static elevator_t *elevator_alloc(reques
elevator_t *eq;
int i;
- eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
+ eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
if (unlikely(!eq))
goto err;
- memset(eq, 0, sizeof(*eq));
eq->ops = &e->ops;
eq->elevator_type = e;
kobject_init(&eq->kobj);
Index: linux-2.6.22-rc4-mm2/block/genhd.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/block/genhd.c 2007-06-17 15:48:27.000000000 -0700
+++ linux-2.6.22-rc4-mm2/block/genhd.c 2007-06-17 15:49:03.000000000 -0700
@@ -726,21 +726,21 @@ struct gendisk *alloc_disk_node(int mino
{
struct gendisk *disk;
- disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
+ disk = kmalloc_node(sizeof(struct gendisk),
+ GFP_KERNEL | __GFP_ZERO, node_id);
if (disk) {
- memset(disk, 0, sizeof(struct gendisk));
if (!init_disk_stats(disk)) {
kfree(disk);
return NULL;
}
if (minors > 1) {
int size = (minors - 1) * sizeof(struct hd_struct *);
- disk->part = kmalloc_node(size, GFP_KERNEL, node_id);
+ disk->part = kmalloc_node(size,
+ GFP_KERNEL | __GFP_ZERO, node_id);
if (!disk->part) {
kfree(disk);
return NULL;
}
- memset(disk->part, 0, size);
}
disk->minors = minors;
kobj_set_kset_s(disk,block_subsys);
Index: linux-2.6.22-rc4-mm2/block/ll_rw_blk.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/block/ll_rw_blk.c 2007-06-17 15:44:27.000000000 -0700
+++ linux-2.6.22-rc4-mm2/block/ll_rw_blk.c 2007-06-17 15:45:03.000000000 -0700
@@ -1828,11 +1828,11 @@ request_queue_t *blk_alloc_queue_node(gf
{
request_queue_t *q;
- q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
+ q = kmem_cache_alloc_node(requestq_cachep,
+ gfp_mask | __GFP_ZERO, node_id);
if (!q)
return NULL;
- memset(q, 0, sizeof(*q));
init_timer(&q->unplug_timer);
snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
Index: linux-2.6.22-rc4-mm2/drivers/ide/ide-probe.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/drivers/ide/ide-probe.c 2007-06-17 15:49:57.000000000 -0700
+++ linux-2.6.22-rc4-mm2/drivers/ide/ide-probe.c 2007-06-17 15:50:13.000000000 -0700
@@ -1073,14 +1073,14 @@ static int init_irq (ide_hwif_t *hwif)
hwgroup->hwif->next = hwif;
spin_unlock_irq(&ide_lock);
} else {
- hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL,
+ hwgroup = kmalloc_node(sizeof(ide_hwgroup_t),
+ GFP_KERNEL | __GFP_ZERO,
hwif_to_node(hwif->drives[0].hwif));
if (!hwgroup)
goto out_up;
hwif->hwgroup = hwgroup;
- memset(hwgroup, 0, sizeof(ide_hwgroup_t));
hwgroup->hwif = hwif->next = hwif;
hwgroup->rq = NULL;
hwgroup->handler = NULL;
Index: linux-2.6.22-rc4-mm2/kernel/timer.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/kernel/timer.c 2007-06-17 15:50:50.000000000 -0700
+++ linux-2.6.22-rc4-mm2/kernel/timer.c 2007-06-17 15:51:16.000000000 -0700
@@ -1221,7 +1221,8 @@ static int __devinit init_timers_cpu(int
/*
* The APs use this path later in boot
*/
- base = kmalloc_node(sizeof(*base), GFP_KERNEL,
+ base = kmalloc_node(sizeof(*base),
+ GFP_KERNEL | __GFP_ZERO,
cpu_to_node(cpu));
if (!base)
return -ENOMEM;
@@ -1232,7 +1233,6 @@ static int __devinit init_timers_cpu(int
kfree(base);
return -ENOMEM;
}
- memset(base, 0, sizeof(*base));
per_cpu(tvec_bases, cpu) = base;
} else {
/*
Index: linux-2.6.22-rc4-mm2/lib/genalloc.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/lib/genalloc.c 2007-06-17 15:51:38.000000000 -0700
+++ linux-2.6.22-rc4-mm2/lib/genalloc.c 2007-06-17 15:51:56.000000000 -0700
@@ -54,11 +54,10 @@ int gen_pool_add(struct gen_pool *pool,
int nbytes = sizeof(struct gen_pool_chunk) +
(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
- chunk = kmalloc_node(nbytes, GFP_KERNEL, nid);
+ chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
if (unlikely(chunk == NULL))
return -1;
- memset(chunk, 0, nbytes);
spin_lock_init(&chunk->lock);
chunk->start_addr = addr;
chunk->end_addr = addr + size;
Index: linux-2.6.22-rc4-mm2/mm/allocpercpu.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/mm/allocpercpu.c 2007-06-17 15:52:19.000000000 -0700
+++ linux-2.6.22-rc4-mm2/mm/allocpercpu.c 2007-06-17 15:52:38.000000000 -0700
@@ -53,12 +53,9 @@ void *percpu_populate(void *__pdata, siz
int node = cpu_to_node(cpu);
BUG_ON(pdata->ptrs[cpu]);
- if (node_online(node)) {
- /* FIXME: kzalloc_node(size, gfp, node) */
- pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
- if (pdata->ptrs[cpu])
- memset(pdata->ptrs[cpu], 0, size);
- } else
+ if (node_online(node))
+ pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
+ else
pdata->ptrs[cpu] = kzalloc(size, gfp);
return pdata->ptrs[cpu];
}
Index: linux-2.6.22-rc4-mm2/mm/mempool.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/mm/mempool.c 2007-06-17 15:52:52.000000000 -0700
+++ linux-2.6.22-rc4-mm2/mm/mempool.c 2007-06-17 15:53:19.000000000 -0700
@@ -62,10 +62,9 @@ mempool_t *mempool_create_node(int min_n
mempool_free_t *free_fn, void *pool_data, int node_id)
{
mempool_t *pool;
- pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
+ pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
if (!pool)
return NULL;
- memset(pool, 0, sizeof(*pool));
pool->elements = kmalloc_node(min_nr * sizeof(void *),
GFP_KERNEL, node_id);
if (!pool->elements) {
Index: linux-2.6.22-rc4-mm2/mm/vmalloc.c
===================================================================
--- linux-2.6.22-rc4-mm2.orig/mm/vmalloc.c 2007-06-17 15:57:18.000000000 -0700
+++ linux-2.6.22-rc4-mm2/mm/vmalloc.c 2007-06-17 16:03:38.000000000 -0700
@@ -434,11 +434,12 @@ void *__vmalloc_area_node(struct vm_stru
area->nr_pages = nr_pages;
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
- pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
+ pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
+ PAGE_KERNEL, node);
area->flags |= VM_VPAGES;
} else {
pages = kmalloc_node(array_size,
- (gfp_mask & GFP_LEVEL_MASK),
+ (gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
node);
}
area->pages = pages;
@@ -447,7 +448,6 @@ void *__vmalloc_area_node(struct vm_stru
kfree(area);
return NULL;
}
- memset(area->pages, 0, array_size);
for (i = 0; i < area->nr_pages; i++) {
if (node < 0)
--