linux-mm.kvack.org archive mirror
From: Matthew Dobson <colpatch@us.ibm.com>
To: linux-kernel@vger.kernel.org
Cc: sri@us.ibm.com, andrea@suse.de, pavel@suse.cz, linux-mm@kvack.org
Subject: [patch 7/9] mempool - Update other mempool users
Date: Wed, 25 Jan 2006 15:51:39 -0800	[thread overview]
Message-ID: <1138233099.27293.2.camel@localhost.localdomain> (raw)
In-Reply-To: <20060125161321.647368000@localhost.localdomain>

plain text document attachment (critical_mempools)
Fixup existing mempool users to use the new mempool API, part 4.

This patch papers over the three remaining mempool users that are non-trivial
to convert to the new API.  We simply add an UNUSED_NID argument to their
mempool allocator callbacks to match the new signature.  None of these
mempools appears to support NUMA in any way, so this change should not cause
any problems.
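
For reference, here is the shape of the signature change these shims adapt
to, as a sketch based on this series (patch 3/9 threads a NUMA node id
through the mempool API; the parameter name "nid" and the example function
below are illustrative, not taken verbatim from the patches):

	/* Old allocator callback signature (as in 2.6.16-rc1): */
	typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);

	/* New signature, with a node id threaded through from
	 * mempool_create()/mempool_alloc() to the callback: */
	typedef void * (mempool_alloc_t)(gfp_t gfp_mask, int nid,
					 void *pool_data);

	/* A user that cannot honor the node id simply ignores it: */
	static void *example_pool_alloc(gfp_t gfp_flags, int UNUSED_NID,
					void *data)
	{
		/* node id intentionally unused: allocation is not
		 * NUMA-aware, matching the three users patched below */
		return kmalloc(128, gfp_flags);
	}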

If anyone familiar with the code involved would like to send proper fixes,
that would be greatly appreciated.

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>

 md/raid1.c                  |    8 ++++----
 md/raid10.c                 |    7 ++++---
 scsi/scsi_transport_iscsi.c |    4 ++--
 3 files changed, 10 insertions(+), 9 deletions(-)

Index: linux-2.6.16-rc1+critical_mempools/drivers/md/raid10.c
===================================================================
--- linux-2.6.16-rc1+critical_mempools.orig/drivers/md/raid10.c
+++ linux-2.6.16-rc1+critical_mempools/drivers/md/raid10.c
@@ -84,7 +84,7 @@ static void r10bio_pool_free(void *r10_b
  * one for write (we recover only one drive per r10buf)
  *
  */
-static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
+static void *r10buf_pool_alloc(gfp_t gfp_flags, int UNUSED_NID, void *data)
 {
 	conf_t *conf = data;
 	struct page *page;
@@ -93,7 +93,7 @@ static void * r10buf_pool_alloc(gfp_t gf
 	int i, j;
 	int nalloc;
 
-	r10_bio = r10bio_pool_alloc(gfp_flags, -1, conf);
+	r10_bio = r10bio_pool_alloc(gfp_flags, UNUSED_NID, conf);
 	if (!r10_bio) {
 		unplug_slaves(conf->mddev);
 		return NULL;
@@ -1518,7 +1518,8 @@ static int init_resync(conf_t *conf)
 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
 	if (conf->r10buf_pool)
 		BUG();
-	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
+	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc,
+					   r10buf_pool_free, conf);
 	if (!conf->r10buf_pool)
 		return -ENOMEM;
 	conf->next_resync = 0;
Index: linux-2.6.16-rc1+critical_mempools/drivers/md/raid1.c
===================================================================
--- linux-2.6.16-rc1+critical_mempools.orig/drivers/md/raid1.c
+++ linux-2.6.16-rc1+critical_mempools/drivers/md/raid1.c
@@ -78,7 +78,7 @@ static void r1bio_pool_free(void *r1_bio
 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
 #define RESYNC_WINDOW (2048*1024)
 
-static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
+static void *r1buf_pool_alloc(gfp_t gfp_flags, int UNUSED_NID, void *data)
 {
 	struct pool_info *pi = data;
 	struct page *page;
@@ -86,7 +86,7 @@ static void * r1buf_pool_alloc(gfp_t gfp
 	struct bio *bio;
 	int i, j;
 
-	r1_bio = r1bio_pool_alloc(gfp_flags, -1, pi);
+	r1_bio = r1bio_pool_alloc(gfp_flags, UNUSED_NID, pi);
 	if (!r1_bio) {
 		unplug_slaves(pi->mddev);
 		return NULL;
@@ -1543,8 +1543,8 @@ static int init_resync(conf_t *conf)
 	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
 	if (conf->r1buf_pool)
 		BUG();
-	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
-					  conf->poolinfo);
+	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc,
+					  r1buf_pool_free, conf->poolinfo);
 	if (!conf->r1buf_pool)
 		return -ENOMEM;
 	conf->next_resync = 0;
Index: linux-2.6.16-rc1+critical_mempools/drivers/scsi/scsi_transport_iscsi.c
===================================================================
--- linux-2.6.16-rc1+critical_mempools.orig/drivers/scsi/scsi_transport_iscsi.c
+++ linux-2.6.16-rc1+critical_mempools/drivers/scsi/scsi_transport_iscsi.c
@@ -462,8 +462,8 @@ static inline struct list_head *skb_to_l
 	return (struct list_head *)&skb->cb;
 }
 
-static void*
-mempool_zone_alloc_skb(unsigned int gfp_mask, void *pool_data)
+static void *
+mempool_zone_alloc_skb(unsigned int gfp_mask, int UNUSED_NID, void *pool_data)
 {
 	struct mempool_zone *zone = pool_data;
 

--


Thread overview: 44+ messages
     [not found] <20060125161321.647368000@localhost.localdomain>
2006-01-25 19:39 ` [patch 1/9] mempool - Add page allocator Matthew Dobson
2006-01-25 19:39 ` [patch 2/9] mempool - Use common mempool " Matthew Dobson
2006-01-25 19:40 ` [patch 4/9] mempool - Update mempool page allocator user Matthew Dobson
2006-01-25 19:40 ` [patch 5/9] mempool - Update kmalloc mempool users Matthew Dobson
2006-01-25 19:40 ` [patch 6/9] mempool - Update kzalloc " Matthew Dobson
2006-01-26  7:30   ` Pekka Enberg
2006-01-26 22:03     ` Matthew Dobson
2006-01-25 19:40 ` [patch 8/9] slab - Add *_mempool slab variants Matthew Dobson
2006-01-26  7:41   ` Pekka Enberg
2006-01-26 22:40     ` Matthew Dobson
2006-01-27  7:09       ` Pekka J Enberg
2006-01-27  7:10       ` Pekka J Enberg
2006-01-25 19:40 ` [patch 9/9] slab - Implement single mempool backing for slab allocator Matthew Dobson
2006-01-26  8:11   ` Pekka Enberg
2006-01-26 22:48     ` Matthew Dobson
2006-01-27  7:22       ` Pekka J Enberg
2006-01-25 23:51 ` [patch 1/9] mempool - Add page allocator Matthew Dobson
2006-01-25 23:51 ` [patch 3/9] mempool - Make mempools NUMA aware Matthew Dobson
2006-01-26 17:54   ` Christoph Lameter
2006-01-26 22:57     ` Matthew Dobson
2006-01-26 23:15       ` Christoph Lameter
2006-01-26 23:24         ` Matthew Dobson
2006-01-26 23:29           ` Christoph Lameter
2006-01-27  0:15             ` Matthew Dobson
2006-01-27  0:21               ` Christoph Lameter
2006-01-27  0:34                 ` Matthew Dobson
2006-01-27  0:39                   ` Christoph Lameter
2006-01-27  0:44                     ` Matthew Dobson
2006-01-27  0:57                       ` Christoph Lameter
2006-01-27  1:07                         ` Andi Kleen
2006-01-27 10:51                   ` Paul Jackson
2006-01-28  1:00                     ` Matthew Dobson
2006-01-28  5:08                       ` Paul Jackson
2006-01-28  8:16                       ` Pavel Machek
2006-01-28 16:14                         ` Sridhar Samudrala
2006-01-28 16:41                           ` Pavel Machek
2006-01-28 16:53                             ` Sridhar Samudrala
2006-01-28 22:59                               ` Pavel Machek
2006-01-28 23:10                                 ` Let the flames begin... [was Re: [patch 3/9] mempool - Make mempools NUMA aware] Pavel Machek
2006-01-27  0:23   ` [patch 3/9] mempool - Make mempools NUMA aware Benjamin LaHaise
2006-01-27  0:35     ` Matthew Dobson
2006-01-27  3:23       ` Benjamin LaHaise
2006-01-28  1:08         ` Matthew Dobson
2006-01-25 23:51 ` Matthew Dobson [this message]
