* [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
@ 2007-04-03 3:44 Rusty Russell
2007-04-03 3:47 ` Rusty Russell
` (2 more replies)
0 siblings, 3 replies; 16+ messages in thread
From: Rusty Russell @ 2007-04-03 3:44 UTC (permalink / raw)
To: Andrew Morton
Cc: lkml - Kernel Mailing List, linux-mm, xfs-masters, reiserfs-dev
I can never remember what the function to register to receive VM pressure
is called. I have to trace down from __alloc_pages() to find it.
It's called "set_shrinker()", and it needs Your Help.
New version:
1) Don't hide struct shrinker. It contains no magic.
2) Don't allocate "struct shrinker". It's not helpful.
3) Call them "register_shrinker" and "unregister_shrinker".
4) Call the function "shrink" not "shrinker".
5) Rename "nr_to_scan" argument to "nr_to_free".
6) Reduce the 17 lines of waffly comments to 10, and document the -1 return.
Comments:
1) The comment in reiserfs4 makes me a little queasy.
2) The wrapper code in xfs might no longer be needed.
3) The placing in the x86-64 "hot function list" seems a little
unlikely. Clearly, Andi was testing if anyone was paying attention.
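For a typical user the end result looks like this (lifted straight from the
dcache conversion in the diff below):

static struct shrinker dcache_shrinker = {
        .shrink = shrink_dcache_memory,
        .seeks = DEFAULT_SEEKS,
};
...
        register_shrinker(&dcache_shrinker);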
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
diff -r a6c8dede237c arch/x86_64/kernel/functionlist
--- a/arch/x86_64/kernel/functionlist Tue Apr 03 12:53:59 2007 +1000
+++ b/arch/x86_64/kernel/functionlist Tue Apr 03 13:15:11 2007 +1000
@@ -1118,7 +1118,6 @@
*(.text.simple_strtoll)
*(.text.set_termios)
*(.text.set_task_comm)
-*(.text.set_shrinker)
*(.text.set_normalized_timespec)
*(.text.set_brk)
*(.text.serial_in)
diff -r a6c8dede237c fs/dcache.c
--- a/fs/dcache.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/dcache.c Tue Apr 03 13:09:55 2007 +1000
@@ -884,6 +884,11 @@ static int shrink_dcache_memory(int nr,
}
return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
+
+static struct shrinker dcache_shrinker = {
+ .shrink = shrink_dcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
/**
* d_alloc - allocate a dcache entry
@@ -2144,8 +2149,8 @@ static void __init dcache_init(unsigned
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
SLAB_MEM_SPREAD),
NULL, NULL);
-
- set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
+
+ register_shrinker(&dcache_shrinker);
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
diff -r a6c8dede237c fs/dquot.c
--- a/fs/dquot.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/dquot.c Tue Apr 03 13:10:31 2007 +1000
@@ -538,6 +538,11 @@ static int shrink_dqcache_memory(int nr,
}
return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
}
+
+static struct shrinker dqcache_shrinker = {
+ .shrink = shrink_dqcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
/*
* Put reference to dquot
@@ -1871,7 +1876,7 @@ static int __init dquot_init(void)
printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
nr_hash, order, (PAGE_SIZE << order));
- set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);
+ register_shrinker(&dqcache_shrinker);
return 0;
}
diff -r a6c8dede237c fs/inode.c
--- a/fs/inode.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/inode.c Tue Apr 03 13:11:05 2007 +1000
@@ -474,6 +474,11 @@ static int shrink_icache_memory(int nr,
return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
+static struct shrinker icache_shrinker = {
+ .shrink = shrink_icache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
+
static void __wait_on_freeing_inode(struct inode *inode);
/*
* Called with the inode lock held.
@@ -1393,7 +1398,7 @@ void __init inode_init(unsigned long mem
SLAB_MEM_SPREAD),
init_once,
NULL);
- set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+ register_shrinker(&icache_shrinker);
/* Hash may have been set up in inode_init_early */
if (!hashdist)
diff -r a6c8dede237c fs/mbcache.c
--- a/fs/mbcache.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/mbcache.c Tue Apr 03 13:12:37 2007 +1000
@@ -100,7 +100,6 @@ static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
-static struct shrinker *mb_shrinker;
static inline int
mb_cache_indexes(struct mb_cache *cache)
@@ -118,6 +117,10 @@ mb_cache_indexes(struct mb_cache *cache)
static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static struct shrinker mb_cache_shrinker = {
+ .shrink = mb_cache_shrink_fn,
+ .seeks = DEFAULT_SEEKS,
+};
static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
@@ -662,13 +665,13 @@ mb_cache_entry_find_next(struct mb_cache
static int __init init_mbcache(void)
{
- mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
+ register_shrinker(&mb_cache_shrinker);
return 0;
}
static void __exit exit_mbcache(void)
{
- remove_shrinker(mb_shrinker);
+ unregister_shrinker(&mb_cache_shrinker);
}
module_init(init_mbcache)
diff -r a6c8dede237c fs/nfs/super.c
--- a/fs/nfs/super.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/nfs/super.c Tue Apr 03 13:06:14 2007 +1000
@@ -138,7 +138,10 @@ static const struct super_operations nfs
};
#endif
-static struct shrinker *acl_shrinker;
+static struct shrinker acl_shrinker = {
+ .shrink = nfs_access_cache_shrinker,
+ .seeks = DEFAULT_SEEKS,
+};
/*
* Register the NFS filesystems
@@ -159,7 +162,7 @@ int __init register_nfs_fs(void)
if (ret < 0)
goto error_2;
#endif
- acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
+ register_shrinker(&acl_shrinker);
return 0;
#ifdef CONFIG_NFS_V4
@@ -177,8 +180,7 @@ error_0:
*/
void __exit unregister_nfs_fs(void)
{
- if (acl_shrinker != NULL)
- remove_shrinker(acl_shrinker);
+ unregister_shrinker(&acl_shrinker);
#ifdef CONFIG_NFS_V4
unregister_filesystem(&nfs4_fs_type);
nfs_unregister_sysctl();
diff -r a6c8dede237c fs/reiser4/fsdata.c
--- a/fs/reiser4/fsdata.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/reiser4/fsdata.c Tue Apr 03 13:14:52 2007 +1000
@@ -7,7 +7,6 @@
/* cache or dir_cursors */
static struct kmem_cache *d_cursor_cache;
-static struct shrinker *d_cursor_shrinker;
/* list of unused cursors */
static LIST_HEAD(cursor_cache);
@@ -53,6 +52,18 @@ static int d_cursor_shrink(int nr, gfp_t
return d_cursor_unused;
}
+/*
+ * actually, d_cursors are "priceless", because there is no way to
+ * recover information stored in them. On the other hand, we don't
+ * want to consume all kernel memory by them. As a compromise, just
+ * assign higher "seeks" value to d_cursor cache, so that it will be
+ * shrunk only if system is really tight on memory.
+ */
+static struct shrinker d_cursor_shrinker = {
+ .shrink = d_cursor_shrink,
+ .seeks = DEFAULT_SEEKS << 3,
+};
+
/**
* reiser4_init_d_cursor - create d_cursor cache
*
@@ -66,20 +77,7 @@ int reiser4_init_d_cursor(void)
if (d_cursor_cache == NULL)
return RETERR(-ENOMEM);
- /*
- * actually, d_cursors are "priceless", because there is no way to
- * recover information stored in them. On the other hand, we don't
- * want to consume all kernel memory by them. As a compromise, just
- * assign higher "seeks" value to d_cursor cache, so that it will be
- * shrunk only if system is really tight on memory.
- */
- d_cursor_shrinker = set_shrinker(DEFAULT_SEEKS << 3,
- d_cursor_shrink);
- if (d_cursor_shrinker == NULL) {
- destroy_reiser4_cache(&d_cursor_cache);
- d_cursor_cache = NULL;
- return RETERR(-ENOMEM);
- }
+ register_shrinker(&d_cursor_shrinker);
return 0;
}
diff -r a6c8dede237c fs/xfs/linux-2.6/kmem.h
--- a/fs/xfs/linux-2.6/kmem.h Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/xfs/linux-2.6/kmem.h Tue Apr 03 13:08:40 2007 +1000
@@ -110,13 +110,23 @@ static inline kmem_shaker_t
static inline kmem_shaker_t
kmem_shake_register(kmem_shake_func_t sfunc)
{
- return set_shrinker(DEFAULT_SEEKS, sfunc);
+ /* FIXME: Perhaps caller should setup & hand in the shrinker? */
+ struct shrinker *shrinker = kmalloc(sizeof *shrinker, GFP_ATOMIC);
+ if (shrinker) {
+ shrinker->shrink = sfunc;
+ shrinker->seeks = DEFAULT_SEEKS;
+ register_shrinker(shrinker);
+ }
+ return shrinker;
}
static inline void
kmem_shake_deregister(kmem_shaker_t shrinker)
{
- remove_shrinker(shrinker);
+ if (shrinker) {
+ unregister_shrinker(shrinker);
+ kfree(shrinker);
+ }
}
static inline int
diff -r a6c8dede237c include/linux/mm.h
--- a/include/linux/mm.h Tue Apr 03 12:53:59 2007 +1000
+++ b/include/linux/mm.h Tue Apr 03 13:19:45 2007 +1000
@@ -813,27 +813,27 @@ extern unsigned long do_mremap(unsigned
unsigned long flags, unsigned long new_addr);
/*
- * Prototype to add a shrinker callback for ageable caches.
- *
- * These functions are passed a count `nr_to_scan' and a gfpmask. They should
- * scan `nr_to_scan' objects, attempting to free them.
- *
- * The callback must return the number of objects which remain in the cache.
- *
- * The callback will be passed nr_to_scan == 0 when the VM is querying the
+ * A callback you can register to apply pressure to ageable caches.
+ *
+ * 'shrink' is passed a count 'nr_to_free' and a gfpmask. It should
+ * attempt to free up to 'nr_to_free' objects, and return the number
+ * of objects which remain in the cache. If it returns -1, it means
+ * it cannot do any scanning at this time.
+ *
+ * 'shrink' will be passed nr_to_free == 0 when the VM is querying the
* cache size, so a fastpath for that case is appropriate.
*/
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
-
-/*
- * Add an aging callback. The int is the number of 'seeks' it takes
- * to recreate one of the objects that these functions age.
- */
-
-#define DEFAULT_SEEKS 2
-struct shrinker;
-extern struct shrinker *set_shrinker(int, shrinker_t);
-extern void remove_shrinker(struct shrinker *shrinker);
+struct shrinker {
+ int (*shrink)(int nr_to_free, gfp_t gfp_mask);
+ int seeks; /* seeks to recreate an obj */
+
+ /* These are for internal use */
+ struct list_head list;
+ long nr; /* objs pending delete */
+};
+#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+extern void register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
/*
* Some shared mappigns will want the pages marked read-only
diff -r a6c8dede237c mm/vmscan.c
--- a/mm/vmscan.c Tue Apr 03 12:53:59 2007 +1000
+++ b/mm/vmscan.c Tue Apr 03 13:17:17 2007 +1000
@@ -72,17 +72,6 @@ struct scan_control {
int order;
};
-/*
- * The list of shrinker callbacks used by to apply pressure to
- * ageable caches.
- */
-struct shrinker {
- shrinker_t shrinker;
- struct list_head list;
- int seeks; /* seeks to recreate an obj */
- long nr; /* objs pending delete */
-};
-
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
#ifdef ARCH_HAS_PREFETCH
@@ -125,34 +114,25 @@ static DECLARE_RWSEM(shrinker_rwsem);
/*
* Add a shrinker callback to be called from the vm
*/
-struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
-{
- struct shrinker *shrinker;
-
- shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
- if (shrinker) {
- shrinker->shrinker = theshrinker;
- shrinker->seeks = seeks;
- shrinker->nr = 0;
- down_write(&shrinker_rwsem);
- list_add_tail(&shrinker->list, &shrinker_list);
- up_write(&shrinker_rwsem);
- }
- return shrinker;
-}
-EXPORT_SYMBOL(set_shrinker);
+void register_shrinker(struct shrinker *shrinker)
+{
+ shrinker->nr = 0;
+ down_write(&shrinker_rwsem);
+ list_add_tail(&shrinker->list, &shrinker_list);
+ up_write(&shrinker_rwsem);
+}
+EXPORT_SYMBOL(register_shrinker);
/*
* Remove one
*/
-void remove_shrinker(struct shrinker *shrinker)
+void unregister_shrinker(struct shrinker *shrinker)
{
down_write(&shrinker_rwsem);
list_del(&shrinker->list);
up_write(&shrinker_rwsem);
- kfree(shrinker);
-}
-EXPORT_SYMBOL(remove_shrinker);
+}
+EXPORT_SYMBOL(unregister_shrinker);
#define SHRINK_BATCH 128
/*
@@ -189,11 +169,11 @@ unsigned long shrink_slab(unsigned long
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
unsigned long total_scan;
- unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
+ unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
if (!shrinker->seeks) {
print_symbol("shrinker %s has zero seeks\n",
- (unsigned long)shrinker->shrinker);
+ (unsigned long)shrinker->shrink);
delta = (4 * scanned) / DEFAULT_SEEKS;
} else {
delta = (4 * scanned) / shrinker->seeks;
@@ -223,8 +203,8 @@ unsigned long shrink_slab(unsigned long
int shrink_ret;
int nr_before;
- nr_before = (*shrinker->shrinker)(0, gfp_mask);
- shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
+ nr_before = (*shrinker->shrink)(0, gfp_mask);
+ shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
if (shrink_ret == -1)
break;
if (shrink_ret < nr_before)
--
* Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 3:44 [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2) Rusty Russell
@ 2007-04-03 3:47 ` Rusty Russell
2007-04-03 3:58 ` Andrew Morton
2007-04-03 9:57 ` Andi Kleen
2 siblings, 0 replies; 16+ messages in thread
From: Rusty Russell @ 2007-04-03 3:47 UTC (permalink / raw)
To: Andrew Morton
Cc: lkml - Kernel Mailing List, linux-mm, xfs-masters, reiserfs-dev
On Tue, 2007-04-03 at 13:45 +1000, Rusty Russell wrote:
> It's called "set_shrinker()", and it needs Your Help.
Wrong copy. This is the one which actually compiles reiser4.
==
I can never remember what the function to register to receive VM pressure
is called. I have to trace down from __alloc_pages() to find it.
It's called "set_shrinker()", and it needs Your Help.
New version:
1) Don't hide struct shrinker. It contains no magic.
2) Don't allocate "struct shrinker". It's not helpful.
3) Call them "register_shrinker" and "unregister_shrinker".
4) Call the function "shrink" not "shrinker".
5) Rename "nr_to_scan" argument to "nr_to_free".
6) Reduce the 17 lines of waffly comments to 10, and document the -1 return.
Comments:
1) The comment in reiserfs4 makes me a little queasy.
2) The wrapper code in xfs might no longer be needed.
3) The placing in the x86-64 "hot function list" seems a little
unlikely. Clearly, Andi was testing if anyone was paying attention.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
diff -r a6c8dede237c arch/x86_64/kernel/functionlist
--- a/arch/x86_64/kernel/functionlist Tue Apr 03 12:53:59 2007 +1000
+++ b/arch/x86_64/kernel/functionlist Tue Apr 03 13:15:11 2007 +1000
@@ -1118,7 +1118,6 @@
*(.text.simple_strtoll)
*(.text.set_termios)
*(.text.set_task_comm)
-*(.text.set_shrinker)
*(.text.set_normalized_timespec)
*(.text.set_brk)
*(.text.serial_in)
diff -r a6c8dede237c fs/dcache.c
--- a/fs/dcache.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/dcache.c Tue Apr 03 13:09:55 2007 +1000
@@ -884,6 +884,11 @@ static int shrink_dcache_memory(int nr,
}
return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
+
+static struct shrinker dcache_shrinker = {
+ .shrink = shrink_dcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
/**
* d_alloc - allocate a dcache entry
@@ -2144,8 +2149,8 @@ static void __init dcache_init(unsigned
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
SLAB_MEM_SPREAD),
NULL, NULL);
-
- set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
+
+ register_shrinker(&dcache_shrinker);
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
diff -r a6c8dede237c fs/dquot.c
--- a/fs/dquot.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/dquot.c Tue Apr 03 13:10:31 2007 +1000
@@ -538,6 +538,11 @@ static int shrink_dqcache_memory(int nr,
}
return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
}
+
+static struct shrinker dqcache_shrinker = {
+ .shrink = shrink_dqcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
/*
* Put reference to dquot
@@ -1871,7 +1876,7 @@ static int __init dquot_init(void)
printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
nr_hash, order, (PAGE_SIZE << order));
- set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);
+ register_shrinker(&dqcache_shrinker);
return 0;
}
diff -r a6c8dede237c fs/inode.c
--- a/fs/inode.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/inode.c Tue Apr 03 13:11:05 2007 +1000
@@ -474,6 +474,11 @@ static int shrink_icache_memory(int nr,
return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
+static struct shrinker icache_shrinker = {
+ .shrink = shrink_icache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
+
static void __wait_on_freeing_inode(struct inode *inode);
/*
* Called with the inode lock held.
@@ -1393,7 +1398,7 @@ void __init inode_init(unsigned long mem
SLAB_MEM_SPREAD),
init_once,
NULL);
- set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+ register_shrinker(&icache_shrinker);
/* Hash may have been set up in inode_init_early */
if (!hashdist)
diff -r a6c8dede237c fs/mbcache.c
--- a/fs/mbcache.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/mbcache.c Tue Apr 03 13:12:37 2007 +1000
@@ -100,7 +100,6 @@ static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
-static struct shrinker *mb_shrinker;
static inline int
mb_cache_indexes(struct mb_cache *cache)
@@ -118,6 +117,10 @@ mb_cache_indexes(struct mb_cache *cache)
static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static struct shrinker mb_cache_shrinker = {
+ .shrink = mb_cache_shrink_fn,
+ .seeks = DEFAULT_SEEKS,
+};
static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
@@ -662,13 +665,13 @@ mb_cache_entry_find_next(struct mb_cache
static int __init init_mbcache(void)
{
- mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
+ register_shrinker(&mb_cache_shrinker);
return 0;
}
static void __exit exit_mbcache(void)
{
- remove_shrinker(mb_shrinker);
+ unregister_shrinker(&mb_cache_shrinker);
}
module_init(init_mbcache)
diff -r a6c8dede237c fs/nfs/super.c
--- a/fs/nfs/super.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/nfs/super.c Tue Apr 03 13:06:14 2007 +1000
@@ -138,7 +138,10 @@ static const struct super_operations nfs
};
#endif
-static struct shrinker *acl_shrinker;
+static struct shrinker acl_shrinker = {
+ .shrink = nfs_access_cache_shrinker,
+ .seeks = DEFAULT_SEEKS,
+};
/*
* Register the NFS filesystems
@@ -159,7 +162,7 @@ int __init register_nfs_fs(void)
if (ret < 0)
goto error_2;
#endif
- acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
+ register_shrinker(&acl_shrinker);
return 0;
#ifdef CONFIG_NFS_V4
@@ -177,8 +180,7 @@ error_0:
*/
void __exit unregister_nfs_fs(void)
{
- if (acl_shrinker != NULL)
- remove_shrinker(acl_shrinker);
+ unregister_shrinker(&acl_shrinker);
#ifdef CONFIG_NFS_V4
unregister_filesystem(&nfs4_fs_type);
nfs_unregister_sysctl();
diff -r a6c8dede237c fs/reiser4/fsdata.c
--- a/fs/reiser4/fsdata.c Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/reiser4/fsdata.c Tue Apr 03 13:34:48 2007 +1000
@@ -7,7 +7,6 @@
/* cache or dir_cursors */
static struct kmem_cache *d_cursor_cache;
-static struct shrinker *d_cursor_shrinker;
/* list of unused cursors */
static LIST_HEAD(cursor_cache);
@@ -53,6 +52,18 @@ static int d_cursor_shrink(int nr, gfp_t
return d_cursor_unused;
}
+/*
+ * actually, d_cursors are "priceless", because there is no way to
+ * recover information stored in them. On the other hand, we don't
+ * want to consume all kernel memory by them. As a compromise, just
+ * assign higher "seeks" value to d_cursor cache, so that it will be
+ * shrunk only if system is really tight on memory.
+ */
+static struct shrinker d_cursor_shrinker = {
+ .shrink = d_cursor_shrink,
+ .seeks = DEFAULT_SEEKS << 3,
+};
+
/**
* reiser4_init_d_cursor - create d_cursor cache
*
@@ -66,20 +77,7 @@ int reiser4_init_d_cursor(void)
if (d_cursor_cache == NULL)
return RETERR(-ENOMEM);
- /*
- * actually, d_cursors are "priceless", because there is no way to
- * recover information stored in them. On the other hand, we don't
- * want to consume all kernel memory by them. As a compromise, just
- * assign higher "seeks" value to d_cursor cache, so that it will be
- * shrunk only if system is really tight on memory.
- */
- d_cursor_shrinker = set_shrinker(DEFAULT_SEEKS << 3,
- d_cursor_shrink);
- if (d_cursor_shrinker == NULL) {
- destroy_reiser4_cache(&d_cursor_cache);
- d_cursor_cache = NULL;
- return RETERR(-ENOMEM);
- }
+ register_shrinker(&d_cursor_shrinker);
return 0;
}
@@ -90,9 +88,7 @@ int reiser4_init_d_cursor(void)
*/
void reiser4_done_d_cursor(void)
{
- BUG_ON(d_cursor_shrinker == NULL);
- remove_shrinker(d_cursor_shrinker);
- d_cursor_shrinker = NULL;
+ unregister_shrinker(&d_cursor_shrinker);
destroy_reiser4_cache(&d_cursor_cache);
}
diff -r a6c8dede237c fs/xfs/linux-2.6/kmem.h
--- a/fs/xfs/linux-2.6/kmem.h Tue Apr 03 12:53:59 2007 +1000
+++ b/fs/xfs/linux-2.6/kmem.h Tue Apr 03 13:08:40 2007 +1000
@@ -110,13 +110,23 @@ static inline kmem_shaker_t
static inline kmem_shaker_t
kmem_shake_register(kmem_shake_func_t sfunc)
{
- return set_shrinker(DEFAULT_SEEKS, sfunc);
+ /* FIXME: Perhaps caller should setup & hand in the shrinker? */
+ struct shrinker *shrinker = kmalloc(sizeof *shrinker, GFP_ATOMIC);
+ if (shrinker) {
+ shrinker->shrink = sfunc;
+ shrinker->seeks = DEFAULT_SEEKS;
+ register_shrinker(shrinker);
+ }
+ return shrinker;
}
static inline void
kmem_shake_deregister(kmem_shaker_t shrinker)
{
- remove_shrinker(shrinker);
+ if (shrinker) {
+ unregister_shrinker(shrinker);
+ kfree(shrinker);
+ }
}
static inline int
diff -r a6c8dede237c include/linux/mm.h
--- a/include/linux/mm.h Tue Apr 03 12:53:59 2007 +1000
+++ b/include/linux/mm.h Tue Apr 03 13:19:45 2007 +1000
@@ -813,27 +813,27 @@ extern unsigned long do_mremap(unsigned
unsigned long flags, unsigned long new_addr);
/*
- * Prototype to add a shrinker callback for ageable caches.
- *
- * These functions are passed a count `nr_to_scan' and a gfpmask. They should
- * scan `nr_to_scan' objects, attempting to free them.
- *
- * The callback must return the number of objects which remain in the cache.
- *
- * The callback will be passed nr_to_scan == 0 when the VM is querying the
+ * A callback you can register to apply pressure to ageable caches.
+ *
+ * 'shrink' is passed a count 'nr_to_free' and a gfpmask. It should
+ * attempt to free up to 'nr_to_free' objects, and return the number
+ * of objects which remain in the cache. If it returns -1, it means
+ * it cannot do any scanning at this time.
+ *
+ * 'shrink' will be passed nr_to_free == 0 when the VM is querying the
* cache size, so a fastpath for that case is appropriate.
*/
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
-
-/*
- * Add an aging callback. The int is the number of 'seeks' it takes
- * to recreate one of the objects that these functions age.
- */
-
-#define DEFAULT_SEEKS 2
-struct shrinker;
-extern struct shrinker *set_shrinker(int, shrinker_t);
-extern void remove_shrinker(struct shrinker *shrinker);
+struct shrinker {
+ int (*shrink)(int nr_to_free, gfp_t gfp_mask);
+ int seeks; /* seeks to recreate an obj */
+
+ /* These are for internal use */
+ struct list_head list;
+ long nr; /* objs pending delete */
+};
+#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+extern void register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
/*
* Some shared mappigns will want the pages marked read-only
diff -r a6c8dede237c mm/vmscan.c
--- a/mm/vmscan.c Tue Apr 03 12:53:59 2007 +1000
+++ b/mm/vmscan.c Tue Apr 03 13:17:17 2007 +1000
@@ -72,17 +72,6 @@ struct scan_control {
int order;
};
-/*
- * The list of shrinker callbacks used by to apply pressure to
- * ageable caches.
- */
-struct shrinker {
- shrinker_t shrinker;
- struct list_head list;
- int seeks; /* seeks to recreate an obj */
- long nr; /* objs pending delete */
-};
-
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
#ifdef ARCH_HAS_PREFETCH
@@ -125,34 +114,25 @@ static DECLARE_RWSEM(shrinker_rwsem);
/*
* Add a shrinker callback to be called from the vm
*/
-struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
-{
- struct shrinker *shrinker;
-
- shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
- if (shrinker) {
- shrinker->shrinker = theshrinker;
- shrinker->seeks = seeks;
- shrinker->nr = 0;
- down_write(&shrinker_rwsem);
- list_add_tail(&shrinker->list, &shrinker_list);
- up_write(&shrinker_rwsem);
- }
- return shrinker;
-}
-EXPORT_SYMBOL(set_shrinker);
+void register_shrinker(struct shrinker *shrinker)
+{
+ shrinker->nr = 0;
+ down_write(&shrinker_rwsem);
+ list_add_tail(&shrinker->list, &shrinker_list);
+ up_write(&shrinker_rwsem);
+}
+EXPORT_SYMBOL(register_shrinker);
/*
* Remove one
*/
-void remove_shrinker(struct shrinker *shrinker)
+void unregister_shrinker(struct shrinker *shrinker)
{
down_write(&shrinker_rwsem);
list_del(&shrinker->list);
up_write(&shrinker_rwsem);
- kfree(shrinker);
-}
-EXPORT_SYMBOL(remove_shrinker);
+}
+EXPORT_SYMBOL(unregister_shrinker);
#define SHRINK_BATCH 128
/*
@@ -189,11 +169,11 @@ unsigned long shrink_slab(unsigned long
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
unsigned long total_scan;
- unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
+ unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
if (!shrinker->seeks) {
print_symbol("shrinker %s has zero seeks\n",
- (unsigned long)shrinker->shrinker);
+ (unsigned long)shrinker->shrink);
delta = (4 * scanned) / DEFAULT_SEEKS;
} else {
delta = (4 * scanned) / shrinker->seeks;
@@ -223,8 +203,8 @@ unsigned long shrink_slab(unsigned long
int shrink_ret;
int nr_before;
- nr_before = (*shrinker->shrinker)(0, gfp_mask);
- shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
+ nr_before = (*shrinker->shrink)(0, gfp_mask);
+ shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
if (shrink_ret == -1)
break;
if (shrink_ret < nr_before)
--
* Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 3:44 [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2) Rusty Russell
2007-04-03 3:47 ` Rusty Russell
@ 2007-04-03 3:58 ` Andrew Morton
2007-04-03 4:45 ` Rusty Russell
2007-04-03 9:57 ` Andi Kleen
2 siblings, 1 reply; 16+ messages in thread
From: Andrew Morton @ 2007-04-03 3:58 UTC (permalink / raw)
To: Rusty Russell
Cc: lkml - Kernel Mailing List, linux-mm, xfs-masters, reiserfs-dev
On Tue, 03 Apr 2007 13:44:45 +1000 Rusty Russell <rusty@rustcorp.com.au> wrote:
>
> I can never remember what the function to register to receive VM pressure
> is called. I have to trace down from __alloc_pages() to find it.
>
> It's called "set_shrinker()", and it needs Your Help.
>
> New version:
> 1) Don't hide struct shrinker. It contains no magic.
> 2) Don't allocate "struct shrinker". It's not helpful.
> 3) Call them "register_shrinker" and "unregister_shrinker".
> 4) Call the function "shrink" not "shrinker".
> 5) Rename "nr_to_scan" argument to "nr_to_free".
No, it is actually the number to scan. This is >= the number of freed
objects.
This is because, for better or for worse, the VM tries to balance the
scanning rate of the various caches, not the reclaiming rate.
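Roughly, the contract looks like this (a sketch only, with an invented "foo"
cache: the callback scans up to nr entries of its LRU, frees whichever of
those it can, and reports how many objects remain):

/* sketch, not real code: all "foo" names are invented */
static int shrink_foo_memory(int nr, gfp_t gfp_mask)
{
        struct foo *f, *next;

        spin_lock(&foo_lock);
        list_for_each_entry_safe(f, next, &foo_lru, lru) {
                if (nr-- <= 0)
                        break;
                if (!foo_freeable(f))   /* scanned, but stays in the cache */
                        continue;
                list_del(&f->lru);
                foo_nr_cached--;
                kfree(f);
        }
        spin_unlock(&foo_lock);

        return foo_nr_cached;   /* objects remaining, not objects freed */
}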
> 6) Reduce the 17 lines of waffly comments to 10, and document the -1 return.
>
> Comments:
> 1) The comment in reiserfs4 makes me a little queasy.
I'm going to have to split this patch up into mainline-bit and reiser4-bit.
And that's OK (it's a regular occurrence). But never miss a chance to whine.
> 2) The wrapper code in xfs might no longer be needed.
> 3) The placing in the x86-64 "hot function list" seems a little
> unlikely. Clearly, Andi was testing if anyone was paying attention.
--
* Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 3:58 ` Andrew Morton
@ 2007-04-03 4:45 ` Rusty Russell
2007-04-03 4:57 ` Andrew Morton
0 siblings, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-04-03 4:45 UTC (permalink / raw)
To: Andrew Morton
Cc: lkml - Kernel Mailing List, linux-mm, xfs-masters, reiserfs-dev
On Mon, 2007-04-02 at 20:58 -0700, Andrew Morton wrote:
> On Tue, 03 Apr 2007 13:44:45 +1000 Rusty Russell <rusty@rustcorp.com.au> wrote:
>
> >
> > I can never remember what the function to register to receive VM pressure
> > is called. I have to trace down from __alloc_pages() to find it.
> >
> > It's called "set_shrinker()", and it needs Your Help.
> >
> > New version:
> > 1) Don't hide struct shrinker. It contains no magic.
> > 2) Don't allocate "struct shrinker". It's not helpful.
> > 3) Call them "register_shrinker" and "unregister_shrinker".
> > 4) Call the function "shrink" not "shrinker".
> > 5) Rename "nr_to_scan" argument to "nr_to_free".
>
> No, it is actually the number to scan. This is >= the number of freed
> objects.
>
> This is because, for better or for worse, the VM tries to balance the
> scanning rate of the various caches, not the reclaiming rate.
Err, ok, I completely missed that distinction.
Does that mean that, to function correctly, every user needs some internal
cursor so it doesn't end up scanning the first N entries over and over?
Rusty.
--
* Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 4:45 ` Rusty Russell
@ 2007-04-03 4:57 ` Andrew Morton
2007-04-03 5:44 ` [xfs-masters] " David Chinner
2007-04-03 5:47 ` Rusty Russell
0 siblings, 2 replies; 16+ messages in thread
From: Andrew Morton @ 2007-04-03 4:57 UTC (permalink / raw)
To: Rusty Russell
Cc: lkml - Kernel Mailing List, linux-mm, xfs-masters, reiserfs-dev
On Tue, 03 Apr 2007 14:45:02 +1000 Rusty Russell <rusty@rustcorp.com.au> wrote:
> On Mon, 2007-04-02 at 20:58 -0700, Andrew Morton wrote:
> > On Tue, 03 Apr 2007 13:44:45 +1000 Rusty Russell <rusty@rustcorp.com.au> wrote:
> >
> > >
> > > I can never remember what the function to register to receive VM pressure
> > > is called. I have to trace down from __alloc_pages() to find it.
> > >
> > > It's called "set_shrinker()", and it needs Your Help.
> > >
> > > New version:
> > > 1) Don't hide struct shrinker. It contains no magic.
> > > 2) Don't allocate "struct shrinker". It's not helpful.
> > > 3) Call them "register_shrinker" and "unregister_shrinker".
> > > 4) Call the function "shrink" not "shrinker".
> > > 5) Rename "nr_to_scan" argument to "nr_to_free".
> >
> > No, it is actually the number to scan. This is >= the number of freed
> > objects.
> >
> > This is because, for better or for worse, the VM tries to balance the
> > scanning rate of the various caches, not the reclaiming rate.
>
> Err, ok, I completely missed that distinction.
>
> Does that mean that, to function correctly, every user needs some internal
> cursor so it doesn't end up scanning the first N entries over and over?
>
If it wants to be well-behaved, and to behave as the VM expects, yes.
There's an expectation that the callback will be performing some scan-based
aging operation and of course to do LRU (or whatever) aging, the callback
will need to remember where it was up to last time it was called.
But it's just a guideline - callbacks could do something different but
in-the-spirit, I guess.
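(To make that concrete, here is a sketch with invented "bar" names: one way
to get the aging effect without storing an explicit cursor is to rotate
entries that were scanned but couldn't be freed to the tail of the LRU, so
the next call starts on different entries.)

static int shrink_bar_memory(int nr, gfp_t gfp_mask)
{
        spin_lock(&bar_lock);
        while (nr-- > 0 && !list_empty(&bar_lru)) {
                struct bar *b = list_entry(bar_lru.next, struct bar, lru);

                if (bar_in_use(b)) {
                        /* scanned but not freeable: age it to the tail so
                         * repeated calls don't keep hitting the same entries */
                        list_move_tail(&b->lru, &bar_lru);
                        continue;
                }
                list_del(&b->lru);
                bar_nr_unused--;
                kfree(b);
        }
        spin_unlock(&bar_lock);

        return bar_nr_unused;
}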
--
* Re: [xfs-masters] Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 4:57 ` Andrew Morton
@ 2007-04-03 5:44 ` David Chinner
2007-04-03 6:01 ` Andrew Morton
2007-04-03 5:47 ` Rusty Russell
1 sibling, 1 reply; 16+ messages in thread
From: David Chinner @ 2007-04-03 5:44 UTC (permalink / raw)
To: xfs-masters
Cc: Rusty Russell, lkml - Kernel Mailing List, linux-mm, reiserfs-dev
On Mon, Apr 02, 2007 at 09:57:02PM -0700, Andrew Morton wrote:
> On Tue, 03 Apr 2007 14:45:02 +1000 Rusty Russell <rusty@rustcorp.com.au> wrote:
>
> > On Mon, 2007-04-02 at 20:58 -0700, Andrew Morton wrote:
> > > On Tue, 03 Apr 2007 13:44:45 +1000 Rusty Russell <rusty@rustcorp.com.au> wrote:
> > >
> > > >
> > > > I can never remember what the function to register to receive VM pressure
> > > > is called. I have to trace down from __alloc_pages() to find it.
> > > >
> > > > It's called "set_shrinker()", and it needs Your Help.
> > > >
> > > > New version:
> > > > 1) Don't hide struct shrinker. It contains no magic.
> > > > 2) Don't allocate "struct shrinker". It's not helpful.
> > > > 3) Call them "register_shrinker" and "unregister_shrinker".
> > > > 4) Call the function "shrink" not "shrinker".
> > > > 5) Rename "nr_to_scan" argument to "nr_to_free".
> > >
> > > No, it is actually the number to scan. This is >= the number of freed
> > > objects.
> > >
> > > This is because, for better or for worse, the VM tries to balance the
> > > scanning rate of the various caches, not the reclaiming rate.
> >
> > Err, ok, I completely missed that distinction.
> >
> > Does that mean that, to function correctly, every user needs some internal
> > cursor so it doesn't end up scanning the first N entries over and over?
> >
>
> If it wants to be well-behaved, and to behave as the VM expects, yes.
>
> There's an expectation that the callback will be performing some scan-based
> aging operation and of course to do LRU (or whatever) aging, the callback
> will need to remember where it was up to last time it was called.
>
> But it's just a guideline - callbacks could do something different but
> in-the-spirit, I guess.
In XFS, one of the shrinker callbacks that gets registered causes all
the xfsbufd's in the system to run and write back delayed write
metadata - this can't be freed up until it is clean, and this is the
only hook we have that can be used to trigger writeback on memory
pressure. We need this because we can potentially have hundreds of
megabytes of dirty metadata per XFS filesystem.
IOW, the way the VM expects the shrinkers to work can be far, far
away from what subsystems need the shrinker callbacks for....
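(Schematically it is something like the following; the names are made up
and this is not the actual XFS code, the point is that the shrinker is a
pure pressure notification which frees nothing itself:)

static int metadata_shake(int nr_to_scan, gfp_t gfp_mask)
{
        if (nr_to_scan)
                wake_up_process(metadata_flush_task);   /* start writeback */
        return 0;       /* nothing is directly freeable from this hook */
}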
Cheers,
Dave.
--
Dave Chinner
Principal Engineer
SGI Australian Software Group
--
* Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 4:57 ` Andrew Morton
2007-04-03 5:44 ` [xfs-masters] " David Chinner
@ 2007-04-03 5:47 ` Rusty Russell
2007-04-03 6:09 ` Andrew Morton
1 sibling, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-04-03 5:47 UTC (permalink / raw)
To: Andrew Morton
Cc: lkml - Kernel Mailing List, linux-mm, xfs-masters, reiserfs-dev
On Mon, 2007-04-02 at 21:57 -0700, Andrew Morton wrote:
> On Tue, 03 Apr 2007 14:45:02 +1000 Rusty Russell <rusty@rustcorp.com.au> wrote:
> > Does that mean that, to function correctly, every user needs some internal
> > cursor so it doesn't end up scanning the first N entries over and over?
> >
>
> If it wants to be well-behaved, and to behave as the VM expects, yes.
>
> There's an expectation that the callback will be performing some scan-based
> aging operation and of course to do LRU (or whatever) aging, the callback
> will need to remember where it was up to last time it was called.
>
> But it's just a guideline - callbacks could do something different but
> in-the-spirit, I guess.
Hmm, actually the callers I looked at (nfs, dcache, mbcache) seem to use
an LRU list and just walk the first "nr_to_scan" entries, and nr_to_scan
is always 128.
Someone who keeps a cursor will be disadvantaged: the other shrinkers
could well get less effective on repeated calls, but we won't. Someone
who picks entries at random might have the same issue.
I think it is clearest to describe how we expect everyone to work, and
let whoever is getting creative worry about it themselves.
How's this:
==
Cleanup and kernelify shrinker registration.
I can never remember what the function to register to receive VM pressure
is called. I have to trace down from __alloc_pages() to find it.
It's called "set_shrinker()", and it needs Your Help.
New version:
1) Don't hide struct shrinker. It contains no magic.
2) Don't allocate "struct shrinker". It's not helpful.
3) Call them "register_shrinker" and "unregister_shrinker".
4) Call the function "shrink" not "shrinker".
5) Reduce the 17 lines of waffly comments to 13, but document it properly.
Comments:
1) The comment in reiserfs4 makes me a little queasy.
2) The wrapper code in xfs might no longer be needed.
3) The placing in the x86-64 "hot function list" seems a little
unlikely. Clearly, Andi was testing if anyone was paying attention.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
diff -r 0b43dab739aa arch/x86_64/kernel/functionlist
--- a/arch/x86_64/kernel/functionlist Tue Apr 03 15:37:49 2007 +1000
+++ b/arch/x86_64/kernel/functionlist Tue Apr 03 15:37:53 2007 +1000
@@ -1118,7 +1118,6 @@
*(.text.simple_strtoll)
*(.text.set_termios)
*(.text.set_task_comm)
-*(.text.set_shrinker)
*(.text.set_normalized_timespec)
*(.text.set_brk)
*(.text.serial_in)
diff -r 0b43dab739aa fs/dcache.c
--- a/fs/dcache.c Tue Apr 03 15:37:49 2007 +1000
+++ b/fs/dcache.c Tue Apr 03 15:37:53 2007 +1000
@@ -884,6 +884,11 @@ static int shrink_dcache_memory(int nr,
}
return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
+
+static struct shrinker dcache_shrinker = {
+ .shrink = shrink_dcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
/**
* d_alloc - allocate a dcache entry
@@ -2144,8 +2149,8 @@ static void __init dcache_init(unsigned
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
SLAB_MEM_SPREAD),
NULL, NULL);
-
- set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
+
+ register_shrinker(&dcache_shrinker);
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
diff -r 0b43dab739aa fs/dquot.c
--- a/fs/dquot.c Tue Apr 03 15:37:49 2007 +1000
+++ b/fs/dquot.c Tue Apr 03 15:37:53 2007 +1000
@@ -538,6 +538,11 @@ static int shrink_dqcache_memory(int nr,
}
return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
}
+
+static struct shrinker dqcache_shrinker = {
+ .shrink = shrink_dqcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
/*
* Put reference to dquot
@@ -1871,7 +1876,7 @@ static int __init dquot_init(void)
printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
nr_hash, order, (PAGE_SIZE << order));
- set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);
+ register_shrinker(&dqcache_shrinker);
return 0;
}
diff -r 0b43dab739aa fs/inode.c
--- a/fs/inode.c Tue Apr 03 15:37:49 2007 +1000
+++ b/fs/inode.c Tue Apr 03 15:37:53 2007 +1000
@@ -474,6 +474,11 @@ static int shrink_icache_memory(int nr,
return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
+static struct shrinker icache_shrinker = {
+ .shrink = shrink_icache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
+
static void __wait_on_freeing_inode(struct inode *inode);
/*
* Called with the inode lock held.
@@ -1393,7 +1398,7 @@ void __init inode_init(unsigned long mem
SLAB_MEM_SPREAD),
init_once,
NULL);
- set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+ register_shrinker(&icache_shrinker);
/* Hash may have been set up in inode_init_early */
if (!hashdist)
diff -r 0b43dab739aa fs/mbcache.c
--- a/fs/mbcache.c Tue Apr 03 15:37:49 2007 +1000
+++ b/fs/mbcache.c Tue Apr 03 15:37:53 2007 +1000
@@ -100,7 +100,6 @@ static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
-static struct shrinker *mb_shrinker;
static inline int
mb_cache_indexes(struct mb_cache *cache)
@@ -118,6 +117,10 @@ mb_cache_indexes(struct mb_cache *cache)
static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static struct shrinker mb_cache_shrinker = {
+ .shrink = mb_cache_shrink_fn,
+ .seeks = DEFAULT_SEEKS,
+};
static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
@@ -662,13 +665,13 @@ mb_cache_entry_find_next(struct mb_cache
static int __init init_mbcache(void)
{
- mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
+ register_shrinker(&mb_cache_shrinker);
return 0;
}
static void __exit exit_mbcache(void)
{
- remove_shrinker(mb_shrinker);
+ unregister_shrinker(&mb_cache_shrinker);
}
module_init(init_mbcache)
diff -r 0b43dab739aa fs/nfs/super.c
--- a/fs/nfs/super.c Tue Apr 03 15:37:49 2007 +1000
+++ b/fs/nfs/super.c Tue Apr 03 15:37:53 2007 +1000
@@ -138,7 +138,10 @@ static const struct super_operations nfs
};
#endif
-static struct shrinker *acl_shrinker;
+static struct shrinker acl_shrinker = {
+ .shrink = nfs_access_cache_shrinker,
+ .seeks = DEFAULT_SEEKS,
+};
/*
* Register the NFS filesystems
@@ -159,7 +162,7 @@ int __init register_nfs_fs(void)
if (ret < 0)
goto error_2;
#endif
- acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
+ register_shrinker(&acl_shrinker);
return 0;
#ifdef CONFIG_NFS_V4
@@ -177,8 +180,7 @@ error_0:
*/
void __exit unregister_nfs_fs(void)
{
- if (acl_shrinker != NULL)
- remove_shrinker(acl_shrinker);
+ unregister_shrinker(&acl_shrinker);
#ifdef CONFIG_NFS_V4
unregister_filesystem(&nfs4_fs_type);
nfs_unregister_sysctl();
diff -r 0b43dab739aa fs/reiser4/fsdata.c
--- a/fs/reiser4/fsdata.c Tue Apr 03 15:37:49 2007 +1000
+++ b/fs/reiser4/fsdata.c Tue Apr 03 15:37:53 2007 +1000
@@ -7,7 +7,6 @@
/* cache or dir_cursors */
static struct kmem_cache *d_cursor_cache;
-static struct shrinker *d_cursor_shrinker;
/* list of unused cursors */
static LIST_HEAD(cursor_cache);
@@ -53,6 +52,18 @@ static int d_cursor_shrink(int nr, gfp_t
return d_cursor_unused;
}
+/*
+ * actually, d_cursors are "priceless", because there is no way to
+ * recover information stored in them. On the other hand, we don't
+ * want to consume all kernel memory by them. As a compromise, just
+ * assign higher "seeks" value to d_cursor cache, so that it will be
+ * shrunk only if system is really tight on memory.
+ */
+static struct shrinker d_cursor_shrinker = {
+ .shrink = d_cursor_shrink,
+ .seeks = DEFAULT_SEEKS << 3,
+};
+
/**
* reiser4_init_d_cursor - create d_cursor cache
*
@@ -66,20 +77,7 @@ int reiser4_init_d_cursor(void)
if (d_cursor_cache == NULL)
return RETERR(-ENOMEM);
- /*
- * actually, d_cursors are "priceless", because there is no way to
- * recover information stored in them. On the other hand, we don't
- * want to consume all kernel memory by them. As a compromise, just
- * assign higher "seeks" value to d_cursor cache, so that it will be
- * shrunk only if system is really tight on memory.
- */
- d_cursor_shrinker = set_shrinker(DEFAULT_SEEKS << 3,
- d_cursor_shrink);
- if (d_cursor_shrinker == NULL) {
- destroy_reiser4_cache(&d_cursor_cache);
- d_cursor_cache = NULL;
- return RETERR(-ENOMEM);
- }
+ register_shrinker(&d_cursor_shrinker);
return 0;
}
@@ -90,9 +88,7 @@ int reiser4_init_d_cursor(void)
*/
void reiser4_done_d_cursor(void)
{
- BUG_ON(d_cursor_shrinker == NULL);
- remove_shrinker(d_cursor_shrinker);
- d_cursor_shrinker = NULL;
+ unregister_shrinker(&d_cursor_shrinker);
destroy_reiser4_cache(&d_cursor_cache);
}
diff -r 0b43dab739aa fs/xfs/linux-2.6/kmem.h
--- a/fs/xfs/linux-2.6/kmem.h Tue Apr 03 15:37:49 2007 +1000
+++ b/fs/xfs/linux-2.6/kmem.h Tue Apr 03 15:37:53 2007 +1000
@@ -110,13 +110,23 @@ static inline kmem_shaker_t
static inline kmem_shaker_t
kmem_shake_register(kmem_shake_func_t sfunc)
{
- return set_shrinker(DEFAULT_SEEKS, sfunc);
+ /* FIXME: Perhaps caller should setup & hand in the shrinker? */
+ struct shrinker *shrinker = kmalloc(sizeof *shrinker, GFP_ATOMIC);
+ if (shrinker) {
+ shrinker->shrink = sfunc;
+ shrinker->seeks = DEFAULT_SEEKS;
+ register_shrinker(shrinker);
+ }
+ return shrinker;
}
static inline void
kmem_shake_deregister(kmem_shaker_t shrinker)
{
- remove_shrinker(shrinker);
+ if (shrinker) {
+ unregister_shrinker(shrinker);
+ kfree(shrinker);
+ }
}
static inline int
diff -r 0b43dab739aa include/linux/mm.h
--- a/include/linux/mm.h Tue Apr 03 15:37:49 2007 +1000
+++ b/include/linux/mm.h Tue Apr 03 15:42:36 2007 +1000
@@ -813,27 +813,31 @@ extern unsigned long do_mremap(unsigned
unsigned long flags, unsigned long new_addr);
/*
- * Prototype to add a shrinker callback for ageable caches.
- *
- * These functions are passed a count `nr_to_scan' and a gfpmask. They should
- * scan `nr_to_scan' objects, attempting to free them.
- *
- * The callback must return the number of objects which remain in the cache.
- *
- * The callback will be passed nr_to_scan == 0 when the VM is querying the
- * cache size, so a fastpath for that case is appropriate.
- */
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
-
-/*
- * Add an aging callback. The int is the number of 'seeks' it takes
- * to recreate one of the objects that these functions age.
- */
-
-#define DEFAULT_SEEKS 2
-struct shrinker;
-extern struct shrinker *set_shrinker(int, shrinker_t);
-extern void remove_shrinker(struct shrinker *shrinker);
+ * A callback you can register to apply pressure to ageable caches.
+ *
+ * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
+ * look through the least-recently-used 'nr_to_scan' entries and
+ * attempt to free them up. It should return the number of objects
+ * which remain in the cache. If it returns -1, it means it cannot do
+ * any scanning at this time (eg. there is a risk of deadlock).
+ *
+ * The 'gfpmask' refers to the allocation we are currently trying to
+ * fulfil.
+ *
+ * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
+ * querying the cache size, so a fastpath for that case is appropriate.
+ */
+struct shrinker {
+ int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+ int seeks; /* seeks to recreate an obj */
+
+ /* These are for internal use */
+ struct list_head list;
+ long nr; /* objs pending delete */
+};
+#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+extern void register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
/*
* Some shared mappigns will want the pages marked read-only
diff -r 0b43dab739aa mm/vmscan.c
--- a/mm/vmscan.c Tue Apr 03 15:37:49 2007 +1000
+++ b/mm/vmscan.c Tue Apr 03 15:37:53 2007 +1000
@@ -72,17 +72,6 @@ struct scan_control {
int order;
};
-/*
- * The list of shrinker callbacks used by to apply pressure to
- * ageable caches.
- */
-struct shrinker {
- shrinker_t shrinker;
- struct list_head list;
- int seeks; /* seeks to recreate an obj */
- long nr; /* objs pending delete */
-};
-
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
#ifdef ARCH_HAS_PREFETCH
@@ -125,34 +114,25 @@ static DECLARE_RWSEM(shrinker_rwsem);
/*
* Add a shrinker callback to be called from the vm
*/
-struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
-{
- struct shrinker *shrinker;
-
- shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
- if (shrinker) {
- shrinker->shrinker = theshrinker;
- shrinker->seeks = seeks;
- shrinker->nr = 0;
- down_write(&shrinker_rwsem);
- list_add_tail(&shrinker->list, &shrinker_list);
- up_write(&shrinker_rwsem);
- }
- return shrinker;
-}
-EXPORT_SYMBOL(set_shrinker);
+void register_shrinker(struct shrinker *shrinker)
+{
+ shrinker->nr = 0;
+ down_write(&shrinker_rwsem);
+ list_add_tail(&shrinker->list, &shrinker_list);
+ up_write(&shrinker_rwsem);
+}
+EXPORT_SYMBOL(register_shrinker);
/*
* Remove one
*/
-void remove_shrinker(struct shrinker *shrinker)
+void unregister_shrinker(struct shrinker *shrinker)
{
down_write(&shrinker_rwsem);
list_del(&shrinker->list);
up_write(&shrinker_rwsem);
- kfree(shrinker);
-}
-EXPORT_SYMBOL(remove_shrinker);
+}
+EXPORT_SYMBOL(unregister_shrinker);
#define SHRINK_BATCH 128
/*
@@ -189,11 +169,11 @@ unsigned long shrink_slab(unsigned long
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
unsigned long total_scan;
- unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
+ unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
if (!shrinker->seeks) {
print_symbol("shrinker %s has zero seeks\n",
- (unsigned long)shrinker->shrinker);
+ (unsigned long)shrinker->shrink);
delta = (4 * scanned) / DEFAULT_SEEKS;
} else {
delta = (4 * scanned) / shrinker->seeks;
@@ -223,8 +203,8 @@ unsigned long shrink_slab(unsigned long
int shrink_ret;
int nr_before;
- nr_before = (*shrinker->shrinker)(0, gfp_mask);
- shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
+ nr_before = (*shrinker->shrink)(0, gfp_mask);
+ shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
if (shrink_ret == -1)
break;
if (shrink_ret < nr_before)
--
* Re: [xfs-masters] Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 5:44 ` [xfs-masters] " David Chinner
@ 2007-04-03 6:01 ` Andrew Morton
2007-04-03 6:19 ` David Chinner
0 siblings, 1 reply; 16+ messages in thread
From: Andrew Morton @ 2007-04-03 6:01 UTC (permalink / raw)
To: David Chinner
Cc: xfs-masters, Rusty Russell, lkml - Kernel Mailing List, linux-mm,
reiserfs-dev
On Tue, 3 Apr 2007 15:44:19 +1000 David Chinner <dgc@sgi.com> wrote:
> > In XFS, one of the shrinker callbacks that gets registered causes all
> the xfsbufd's in the system to run and write back delayed write
> metadata - this can't be freed up until it is clean, and this is the
> only hook we have that can be used to trigger writeback on memory
> pressure. We need this because we can potentially have hundreds of
> megabytes of dirty metadata per XFS filesystem.
>
<looks>
Gad, someone went mad in there. Can we do this (please)?
From: Andrew Morton <akpm@linux-foundation.org>
Strip away lots of needless wrapping and type obfuscation in XFS's handling of
cache shrinker registration.
Cc: David Chinner <dgc@sgi.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
fs/xfs/linux-2.6/kmem.h | 19 -------------------
fs/xfs/linux-2.6/xfs_buf.c | 6 +++---
fs/xfs/quota/xfs_qm.c | 6 +++---
3 files changed, 6 insertions(+), 25 deletions(-)
diff -puN fs/xfs/linux-2.6/kmem.h~xfs-clean-up-shrinker-games fs/xfs/linux-2.6/kmem.h
--- a/fs/xfs/linux-2.6/kmem.h~xfs-clean-up-shrinker-games
+++ a/fs/xfs/linux-2.6/kmem.h
@@ -100,25 +100,6 @@ kmem_zone_destroy(kmem_zone_t *zone)
extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
-/*
- * Low memory cache shrinkers
- */
-
-typedef struct shrinker *kmem_shaker_t;
-typedef int (*kmem_shake_func_t)(int, gfp_t);
-
-static inline kmem_shaker_t
-kmem_shake_register(kmem_shake_func_t sfunc)
-{
- return set_shrinker(DEFAULT_SEEKS, sfunc);
-}
-
-static inline void
-kmem_shake_deregister(kmem_shaker_t shrinker)
-{
- remove_shrinker(shrinker);
-}
-
static inline int
kmem_shake_allow(gfp_t gfp_mask)
{
diff -puN fs/xfs/linux-2.6/xfs_buf.c~xfs-clean-up-shrinker-games fs/xfs/linux-2.6/xfs_buf.c
--- a/fs/xfs/linux-2.6/xfs_buf.c~xfs-clean-up-shrinker-games
+++ a/fs/xfs/linux-2.6/xfs_buf.c
@@ -35,7 +35,7 @@
#include <linux/freezer.h>
static kmem_zone_t *xfs_buf_zone;
-static kmem_shaker_t xfs_buf_shake;
+static struct shrinker *xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
@@ -1837,7 +1837,7 @@ xfs_buf_init(void)
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;
- xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
+ xfs_buf_shake = set_shrinker(DEFAULT_SEEKS, xfsbufd_wakeup);
if (!xfs_buf_shake)
goto out_destroy_xfsdatad_workqueue;
@@ -1859,7 +1859,7 @@ xfs_buf_init(void)
void
xfs_buf_terminate(void)
{
- kmem_shake_deregister(xfs_buf_shake);
+ remove_shrinker(xfs_buf_shake);
destroy_workqueue(xfsdatad_workqueue);
destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(xfs_buf_zone);
diff -puN fs/xfs/quota/xfs_qm.c~xfs-clean-up-shrinker-games fs/xfs/quota/xfs_qm.c
--- a/fs/xfs/quota/xfs_qm.c~xfs-clean-up-shrinker-games
+++ a/fs/xfs/quota/xfs_qm.c
@@ -62,7 +62,7 @@ uint ndquot;
kmem_zone_t *qm_dqzone;
kmem_zone_t *qm_dqtrxzone;
-static kmem_shaker_t xfs_qm_shaker;
+static struct shrinker *xfs_qm_shaker;
static cred_t xfs_zerocr;
static xfs_inode_t xfs_zeroino;
@@ -150,7 +150,7 @@ xfs_Gqm_init(void)
} else
xqm->qm_dqzone = qm_dqzone;
- xfs_qm_shaker = kmem_shake_register(xfs_qm_shake);
+ xfs_qm_shaker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);
/*
* The t_dqinfo portion of transactions.
@@ -182,7 +182,7 @@ xfs_qm_destroy(
ASSERT(xqm != NULL);
ASSERT(xqm->qm_nrefs == 0);
- kmem_shake_deregister(xfs_qm_shaker);
+ remove_shrinker(xfs_qm_shaker);
hsize = xqm->qm_dqhashmask + 1;
for (i = 0; i < hsize; i++) {
xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
_
--
* Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 5:47 ` Rusty Russell
@ 2007-04-03 6:09 ` Andrew Morton
2007-04-03 7:18 ` Rusty Russell
0 siblings, 1 reply; 16+ messages in thread
From: Andrew Morton @ 2007-04-03 6:09 UTC (permalink / raw)
To: Rusty Russell
Cc: lkml - Kernel Mailing List, linux-mm, xfs-masters, reiserfs-dev
On Tue, 03 Apr 2007 15:47:05 +1000 Rusty Russell <rusty@rustcorp.com.au> wrote:
> On Mon, 2007-04-02 at 21:57 -0700, Andrew Morton wrote:
> > On Tue, 03 Apr 2007 14:45:02 +1000 Rusty Russell <rusty@rustcorp.com.au> wrote:
> > > Does that mean that, to function correctly, every user needs some internal
> > > cursor so it doesn't end up scanning the first N entries over and over?
> > >
> >
> > If it wants to be well-behaved, and to behave as the VM expects, yes.
> >
> > There's an expectation that the callback will be performing some scan-based
> > aging operation and of course to do LRU (or whatever) aging, the callback
> > will need to remember where it was up to last time it was called.
> >
> > But it's just a guideline - callbacks could do something different but
> > in-the-spirit, I guess.
>
> Hmm, actually the callers I looked at (nfs, dcache, mbcache) seem to use
> an LRU list and just walk the first "nr_to_scan" entries, and nr_to_scan
> is always 128.
That's just because of the batching logic up in shrink_slab(). And iirc we
only break the scanning into lumps of 128 items so we can add a
cond_resched() into it.
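(Paraphrasing the relevant bit of shrink_slab(), not a verbatim quote, the
callback is driven in SHRINK_BATCH-sized chunks purely so we can reschedule
between them:)

        while (total_scan >= SHRINK_BATCH) {
                long this_scan = SHRINK_BATCH;
                int nr_before, shrink_ret;

                nr_before = (*shrinker->shrink)(0, gfp_mask);
                shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
                if (shrink_ret == -1)
                        break;
                if (shrink_ret < nr_before)
                        ret += nr_before - shrink_ret;
                total_scan -= this_scan;

                cond_resched();
        }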
> Someone who keeps a cursor will be disadvantaged: the other shrinkers
> could well get less effective on repeated calls, but we won't. Someone
> who picks entries at random might have the same issue.
To examine the balancing one would need to examine the value of total_scan
in shrink_slab(), rather than looking at the value which shrink_slab()
passes into the callback.
> I think it is clearest to describe how we expect everyone to work, and
> let whoever is getting creative worry about it themselves.
>
> How's this:
> ==
> Cleanup and kernelify shrinker registration.
hm, well, six-of-one, VI of the other. We save maybe four kmallocs across
the entire uptime at the cost of exposing stuff kernel-side which doesn't
need to be exposed.
But I think we need to weed that crappiness out of XFS first.
--
* Re: [xfs-masters] Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 6:01 ` Andrew Morton
@ 2007-04-03 6:19 ` David Chinner
2007-04-04 0:30 ` Rusty Russell
0 siblings, 1 reply; 16+ messages in thread
From: David Chinner @ 2007-04-03 6:19 UTC (permalink / raw)
To: Andrew Morton
Cc: David Chinner, xfs-masters, Rusty Russell,
lkml - Kernel Mailing List, linux-mm, reiserfs-dev
On Mon, Apr 02, 2007 at 11:01:58PM -0700, Andrew Morton wrote:
> On Tue, 3 Apr 2007 15:44:19 +1000 David Chinner <dgc@sgi.com> wrote:
>
> > In XFS, one of the shrinker callbacks that gets registered causes all
> > the xfsbufd's in the system to run and write back delayed write
> > metadata - this can't be freed up until it is clean, and this is the
> > only hook we have that can be used to trigger writeback on memory
> > pressure. We need this because we can potentially have hundreds of
> > megabytes of dirty metadata per XFS filesystem.
> >
>
> <looks>
>
> Gad, someone went mad in there. Can we do this (please)?
Yup, added to my QA tree.
Rusty, can you redo your patch on top of this one? I'll
add it to my QA tree as well...
Cheers,
Dave.
--
Dave Chinner
Principal Engineer
SGI Australian Software Group
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 6:09 ` Andrew Morton
@ 2007-04-03 7:18 ` Rusty Russell
2007-04-03 12:37 ` [xfs-masters] " David Chinner
0 siblings, 1 reply; 16+ messages in thread
From: Rusty Russell @ 2007-04-03 7:18 UTC (permalink / raw)
To: Andrew Morton
Cc: lkml - Kernel Mailing List, linux-mm, xfs-masters, reiserfs-dev
On Mon, 2007-04-02 at 23:09 -0700, Andrew Morton wrote:
> hm, well, six-of-one, VI of the other. We save maybe four kmallocs across
> the entire uptime at the cost of exposing stuff kernel-side which doesn't
> need to be exposed.
This is not about efficiency. When have I *ever* posted optimization
patches?
This is about clarity. We have a standard convention for
register/unregister. And they can't fail. Either of these would be
sufficient to justify a change.
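For comparison, a caller under the proposed convention ends up with
something like this (the names here are made up; the pattern is the one
the patch applies to dcache, inode and friends):

        static int shrink_mycache(int nr_to_scan, gfp_t gfp_mask);

        static struct shrinker mycache_shrinker = {
                .shrink = shrink_mycache,
                .seeks  = DEFAULT_SEEKS,
        };

        /* at init time: cannot fail */
        register_shrinker(&mycache_shrinker);

        /* at teardown */
        unregister_shrinker(&mycache_shrinker);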
Too many people doing cool new things in the kernel, not enough
polishing of the crap that's already there 8(
> But I think we need to weed that crappiness out of XFS first.
Sure, I'll apply on top of that patch.
Thanks!
Rusty.
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 3:44 [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2) Rusty Russell
2007-04-03 3:47 ` Rusty Russell
2007-04-03 3:58 ` Andrew Morton
@ 2007-04-03 9:57 ` Andi Kleen
2 siblings, 0 replies; 16+ messages in thread
From: Andi Kleen @ 2007-04-03 9:57 UTC (permalink / raw)
To: Rusty Russell
Cc: Andrew Morton, lkml - Kernel Mailing List, linux-mm, xfs-masters,
reiserfs-dev
Rusty Russell <rusty@rustcorp.com.au> writes:
> 2) The wrapper code in xfs might no longer be needed.
> 3) The placing in the x86-64 "hot function list" seems a little
> unlikely. Clearly, Andi was testing if anyone was paying attention.
That came from Arjan. The list is likely quite out of date now
because it hasn't been refreshed for some time. Perhaps we should
just remove it again -- I was never sure it was worth it.
-Andi
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [xfs-masters] Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 7:18 ` Rusty Russell
@ 2007-04-03 12:37 ` David Chinner
2007-04-03 17:36 ` Andrew Morton
0 siblings, 1 reply; 16+ messages in thread
From: David Chinner @ 2007-04-03 12:37 UTC (permalink / raw)
To: xfs-masters
Cc: Andrew Morton, lkml - Kernel Mailing List, linux-mm, reiserfs-dev
On Tue, Apr 03, 2007 at 05:18:25PM +1000, Rusty Russell wrote:
> On Mon, 2007-04-02 at 23:09 -0700, Andrew Morton wrote:
> This is not about efficiency. When have I *ever* posted optimization
> patches?
>
> This is about clarity. We have a standard convention for
> register/unregister. And they can't fail. Either of these would be
> sufficient to justify a change.
>
> Too many people doing cool new things in the kernel, not enough
> polishing of the crap that's already there 8(
>
> > But I think we need to weed that crappiness out of XFS first.
Can anyone else see the contradiction in these statements?
XFS's "crappiness" is a register/unregister interface. The only
reason it's being removed is because it's getting replaced with a
nearly identical register/unregister interface.
Just thought I'd point that out.... ;)
Cheers,
Dave.
--
Dave Chinner
Principal Engineer
SGI Australian Software Group
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [xfs-masters] Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 12:37 ` [xfs-masters] " David Chinner
@ 2007-04-03 17:36 ` Andrew Morton
2007-04-04 0:03 ` David Chinner
0 siblings, 1 reply; 16+ messages in thread
From: Andrew Morton @ 2007-04-03 17:36 UTC (permalink / raw)
To: David Chinner
Cc: xfs-masters, lkml - Kernel Mailing List, linux-mm, reiserfs-dev
On Tue, 3 Apr 2007 22:37:06 +1000 David Chinner <dgc@sgi.com> wrote:
> On Tue, Apr 03, 2007 at 05:18:25PM +1000, Rusty Russell wrote:
> > On Mon, 2007-04-02 at 23:09 -0700, Andrew Morton wrote:
> > This is not about efficiency. When have I *ever* posted optimization
> > patches?
> >
> > This is about clarity. We have a standard convention for
> > register/unregister. And they can't fail. Either of these would be
> > sufficient to justify a change.
> >
> > Too many people doing cool new things in the kernel, not enough
> > polishing of the crap that's already there 8(
> >
> > > But I think we need to weed that crappiness out of XFS first.
>
> Can anyone else see the contradiction in these statements?
>
> XFS's "crappiness" is a register/unregister interface. The only
> reason it's being removed is because it's getting replaced with a
> nearly identical register/unregister interface.
Nope. XFS is introducing two new typedefs, one of which is identical to
one we already have, and it has wrapper functions which do little more
than add new names for existing stuff.
What Rusty is doing is changing the API so that the caller registers a
caller-owned struct rather than registering a caller-provided function.
For some reason.
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [xfs-masters] Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 17:36 ` Andrew Morton
@ 2007-04-04 0:03 ` David Chinner
0 siblings, 0 replies; 16+ messages in thread
From: David Chinner @ 2007-04-04 0:03 UTC (permalink / raw)
To: Andrew Morton
Cc: David Chinner, xfs-masters, lkml - Kernel Mailing List, linux-mm,
reiserfs-dev
On Tue, Apr 03, 2007 at 10:36:27AM -0700, Andrew Morton wrote:
> On Tue, 3 Apr 2007 22:37:06 +1000 David Chinner <dgc@sgi.com> wrote:
>
> > On Tue, Apr 03, 2007 at 05:18:25PM +1000, Rusty Russell wrote:
> > > On Mon, 2007-04-02 at 23:09 -0700, Andrew Morton wrote:
> > > This is not about efficiency. When have I *ever* posted optimization
> > > patches?
> > >
> > > This is about clarity. We have a standard convention for
> > > register/unregister. And they can't fail. Either of these would be
> > > sufficient to justify a change.
> > >
> > > Too many people doing cool new things in the kernel, not enough
> > > polishing of the crap that's already there 8(
> > >
> > > > But I think we need to weed that crappiness out of XFS first.
> >
> > Can anyone else see the contradiction in these statements?
> >
> > XFS's "crappiness" is a register/unregister interface. The only
> > reason it's being removed is because it's getting replaced with a
> > nearly identical register/unregister interface.
>
> Nope. XFS is introducing two new typedefs, one of which is identical to
> one which we already have and it has wrapper functions which do little more
> than add new names for existing stuff.
And the problem with that is? You haven't noticed this in the five
years it's been there providing XFS with a consistent shrinker
interface.....
FWIW, digging back into history, Rusty's first patch basically
brings us back to the same interface we had in 2.4. Here's
the 2.4 version of that function:
kmem_shaker_t
kmem_shake_register(kmem_shake_func_t sfunc)
{
        kmem_shaker_t shaker = kmalloc(sizeof(*shaker), GFP_KERNEL);

        if (!shaker)
                return NULL;
        memset(shaker, 0, sizeof(*shaker));
        shaker->shrink = sfunc;
        register_cache(shaker);
        return shaker;
}
Cheers,
Dave.
--
Dave Chinner
Principal Engineer
SGI Australian Software Group
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [xfs-masters] Re: [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2)
2007-04-03 6:19 ` David Chinner
@ 2007-04-04 0:30 ` Rusty Russell
0 siblings, 0 replies; 16+ messages in thread
From: Rusty Russell @ 2007-04-04 0:30 UTC (permalink / raw)
To: David Chinner
Cc: Andrew Morton, xfs-masters, lkml - Kernel Mailing List, linux-mm,
reiserfs-dev
On Tue, 2007-04-03 at 16:19 +1000, David Chinner wrote:
> Rusty, can you redo your patch on top of this one? I'll
> add it to my QA tree as well...
Done.
Cleanup and kernelify shrinker registration.
I can never remember what the function to register to receive VM pressure
is called. I have to trace down from __alloc_pages() to find it.
It's called "set_shrinker()", and it needs Your Help.
New version:
1) Don't hide struct shrinker. It contains no magic.
2) Don't allocate "struct shrinker". It's not helpful.
3) Call them "register_shrinker" and "unregister_shrinker".
4) Call the function "shrink" not "shrinker".
5) Reduce the 17 lines of waffly comments to 13, but document it properly.
The comment in reiser4 makes me a little queasy.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
diff -r f29f422360a1 arch/x86_64/kernel/functionlist
--- a/arch/x86_64/kernel/functionlist Wed Apr 04 10:23:00 2007 +1000
+++ b/arch/x86_64/kernel/functionlist Wed Apr 04 10:23:00 2007 +1000
@@ -1118,7 +1118,6 @@
*(.text.simple_strtoll)
*(.text.set_termios)
*(.text.set_task_comm)
-*(.text.set_shrinker)
*(.text.set_normalized_timespec)
*(.text.set_brk)
*(.text.serial_in)
diff -r f29f422360a1 fs/dcache.c
--- a/fs/dcache.c Wed Apr 04 10:23:00 2007 +1000
+++ b/fs/dcache.c Wed Apr 04 10:23:00 2007 +1000
@@ -884,6 +884,11 @@ static int shrink_dcache_memory(int nr,
}
return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
+
+static struct shrinker dcache_shrinker = {
+ .shrink = shrink_dcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
/**
* d_alloc - allocate a dcache entry
@@ -2144,8 +2149,8 @@ static void __init dcache_init(unsigned
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
SLAB_MEM_SPREAD),
NULL, NULL);
-
- set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
+
+ register_shrinker(&dcache_shrinker);
/* Hash may have been set up in dcache_init_early */
if (!hashdist)
diff -r f29f422360a1 fs/dquot.c
--- a/fs/dquot.c Wed Apr 04 10:23:00 2007 +1000
+++ b/fs/dquot.c Wed Apr 04 10:23:00 2007 +1000
@@ -538,6 +538,11 @@ static int shrink_dqcache_memory(int nr,
}
return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
}
+
+static struct shrinker dqcache_shrinker = {
+ .shrink = shrink_dqcache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
/*
* Put reference to dquot
@@ -1871,7 +1876,7 @@ static int __init dquot_init(void)
printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
nr_hash, order, (PAGE_SIZE << order));
- set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory);
+ register_shrinker(&dqcache_shrinker);
return 0;
}
diff -r f29f422360a1 fs/inode.c
--- a/fs/inode.c Wed Apr 04 10:23:00 2007 +1000
+++ b/fs/inode.c Wed Apr 04 10:23:01 2007 +1000
@@ -474,6 +474,11 @@ static int shrink_icache_memory(int nr,
return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}
+static struct shrinker icache_shrinker = {
+ .shrink = shrink_icache_memory,
+ .seeks = DEFAULT_SEEKS,
+};
+
static void __wait_on_freeing_inode(struct inode *inode);
/*
* Called with the inode lock held.
@@ -1393,7 +1398,7 @@ void __init inode_init(unsigned long mem
SLAB_MEM_SPREAD),
init_once,
NULL);
- set_shrinker(DEFAULT_SEEKS, shrink_icache_memory);
+ register_shrinker(&icache_shrinker);
/* Hash may have been set up in inode_init_early */
if (!hashdist)
diff -r f29f422360a1 fs/mbcache.c
--- a/fs/mbcache.c Wed Apr 04 10:23:00 2007 +1000
+++ b/fs/mbcache.c Wed Apr 04 10:23:01 2007 +1000
@@ -100,7 +100,6 @@ static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);
-static struct shrinker *mb_shrinker;
static inline int
mb_cache_indexes(struct mb_cache *cache)
@@ -118,6 +117,10 @@ mb_cache_indexes(struct mb_cache *cache)
static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
+static struct shrinker mb_cache_shrinker = {
+ .shrink = mb_cache_shrink_fn,
+ .seeks = DEFAULT_SEEKS,
+};
static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
@@ -662,13 +665,13 @@ mb_cache_entry_find_next(struct mb_cache
static int __init init_mbcache(void)
{
- mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
+ register_shrinker(&mb_cache_shrinker);
return 0;
}
static void __exit exit_mbcache(void)
{
- remove_shrinker(mb_shrinker);
+ unregister_shrinker(&mb_cache_shrinker);
}
module_init(init_mbcache)
diff -r f29f422360a1 fs/nfs/super.c
--- a/fs/nfs/super.c Wed Apr 04 10:23:00 2007 +1000
+++ b/fs/nfs/super.c Wed Apr 04 10:23:01 2007 +1000
@@ -138,7 +138,10 @@ static const struct super_operations nfs
};
#endif
-static struct shrinker *acl_shrinker;
+static struct shrinker acl_shrinker = {
+ .shrink = nfs_access_cache_shrinker,
+ .seeks = DEFAULT_SEEKS,
+};
/*
* Register the NFS filesystems
@@ -159,7 +162,7 @@ int __init register_nfs_fs(void)
if (ret < 0)
goto error_2;
#endif
- acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker);
+ register_shrinker(&acl_shrinker);
return 0;
#ifdef CONFIG_NFS_V4
@@ -177,8 +180,7 @@ error_0:
*/
void __exit unregister_nfs_fs(void)
{
- if (acl_shrinker != NULL)
- remove_shrinker(acl_shrinker);
+ unregister_shrinker(&acl_shrinker);
#ifdef CONFIG_NFS_V4
unregister_filesystem(&nfs4_fs_type);
nfs_unregister_sysctl();
diff -r f29f422360a1 fs/reiser4/fsdata.c
--- a/fs/reiser4/fsdata.c Wed Apr 04 10:23:00 2007 +1000
+++ b/fs/reiser4/fsdata.c Wed Apr 04 10:23:01 2007 +1000
@@ -7,7 +7,6 @@
/* cache or dir_cursors */
static struct kmem_cache *d_cursor_cache;
-static struct shrinker *d_cursor_shrinker;
/* list of unused cursors */
static LIST_HEAD(cursor_cache);
@@ -53,6 +52,18 @@ static int d_cursor_shrink(int nr, gfp_t
return d_cursor_unused;
}
+/*
+ * actually, d_cursors are "priceless", because there is no way to
+ * recover information stored in them. On the other hand, we don't
+ * want to consume all kernel memory by them. As a compromise, just
+ * assign higher "seeks" value to d_cursor cache, so that it will be
+ * shrunk only if system is really tight on memory.
+ */
+static struct shrinker d_cursor_shrinker = {
+ .shrink = d_cursor_shrink,
+ .seeks = DEFAULT_SEEKS << 3,
+};
+
/**
* reiser4_init_d_cursor - create d_cursor cache
*
@@ -66,20 +77,7 @@ int reiser4_init_d_cursor(void)
if (d_cursor_cache == NULL)
return RETERR(-ENOMEM);
- /*
- * actually, d_cursors are "priceless", because there is no way to
- * recover information stored in them. On the other hand, we don't
- * want to consume all kernel memory by them. As a compromise, just
- * assign higher "seeks" value to d_cursor cache, so that it will be
- * shrunk only if system is really tight on memory.
- */
- d_cursor_shrinker = set_shrinker(DEFAULT_SEEKS << 3,
- d_cursor_shrink);
- if (d_cursor_shrinker == NULL) {
- destroy_reiser4_cache(&d_cursor_cache);
- d_cursor_cache = NULL;
- return RETERR(-ENOMEM);
- }
+ register_shrinker(&d_cursor_shrinker);
return 0;
}
@@ -90,9 +88,7 @@ int reiser4_init_d_cursor(void)
*/
void reiser4_done_d_cursor(void)
{
- BUG_ON(d_cursor_shrinker == NULL);
- remove_shrinker(d_cursor_shrinker);
- d_cursor_shrinker = NULL;
+ unregister_shrinker(&d_cursor_shrinker);
destroy_reiser4_cache(&d_cursor_cache);
}
diff -r f29f422360a1 fs/xfs/linux-2.6/xfs_buf.c
--- a/fs/xfs/linux-2.6/xfs_buf.c Wed Apr 04 10:23:00 2007 +1000
+++ b/fs/xfs/linux-2.6/xfs_buf.c Wed Apr 04 10:23:01 2007 +1000
@@ -35,10 +35,13 @@
#include <linux/freezer.h>
static kmem_zone_t *xfs_buf_zone;
-static struct shrinker *xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
+static struct shrinker xfs_buf_shake = {
+ .shrink = xfsbufd_wakeup,
+ .seeks = DEFAULT_SEEKS,
+};
static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
@@ -1837,14 +1840,9 @@ xfs_buf_init(void)
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;
- xfs_buf_shake = set_shrinker(DEFAULT_SEEKS, xfsbufd_wakeup);
- if (!xfs_buf_shake)
- goto out_destroy_xfsdatad_workqueue;
-
+ register_shrinker(&xfs_buf_shake);
return 0;
- out_destroy_xfsdatad_workqueue:
- destroy_workqueue(xfsdatad_workqueue);
out_destroy_xfslogd_workqueue:
destroy_workqueue(xfslogd_workqueue);
out_free_buf_zone:
@@ -1859,7 +1857,7 @@ void
void
xfs_buf_terminate(void)
{
- remove_shrinker(xfs_buf_shake);
+ unregister_shrinker(&xfs_buf_shake);
destroy_workqueue(xfsdatad_workqueue);
destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(xfs_buf_zone);
diff -r f29f422360a1 fs/xfs/quota/xfs_qm.c
--- a/fs/xfs/quota/xfs_qm.c Wed Apr 04 10:23:00 2007 +1000
+++ b/fs/xfs/quota/xfs_qm.c Wed Apr 04 10:23:13 2007 +1000
@@ -62,7 +62,6 @@ uint ndquot;
kmem_zone_t *qm_dqzone;
kmem_zone_t *qm_dqtrxzone;
-static struct shrinker *xfs_qm_shaker;
static cred_t xfs_zerocr;
static xfs_inode_t xfs_zeroino;
@@ -78,6 +77,11 @@ STATIC int xfs_qm_init_quotainos(xfs_mou
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int xfs_qm_shake(int, gfp_t);
+
+static struct shrinker xfs_qm_shaker = {
+ .shrink = xfs_qm_shake,
+ .seeks = DEFAULT_SEEKS,
+};
#ifdef DEBUG
extern mutex_t qcheck_lock;
@@ -150,7 +154,7 @@ xfs_Gqm_init(void)
} else
xqm->qm_dqzone = qm_dqzone;
- xfs_qm_shaker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);
+ register_shrinker(&xfs_qm_shaker);
/*
* The t_dqinfo portion of transactions.
@@ -182,7 +186,7 @@ xfs_qm_destroy(
ASSERT(xqm != NULL);
ASSERT(xqm->qm_nrefs == 0);
- remove_shrinker(xfs_qm_shaker);
+ unregister_shrinker(&xfs_qm_shaker);
hsize = xqm->qm_dqhashmask + 1;
for (i = 0; i < hsize; i++) {
xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
diff -r f29f422360a1 include/linux/mm.h
--- a/include/linux/mm.h Wed Apr 04 10:23:00 2007 +1000
+++ b/include/linux/mm.h Wed Apr 04 10:23:01 2007 +1000
@@ -813,27 +813,31 @@ extern unsigned long do_mremap(unsigned
unsigned long flags, unsigned long new_addr);
/*
- * Prototype to add a shrinker callback for ageable caches.
- *
- * These functions are passed a count `nr_to_scan' and a gfpmask. They should
- * scan `nr_to_scan' objects, attempting to free them.
- *
- * The callback must return the number of objects which remain in the cache.
- *
- * The callback will be passed nr_to_scan == 0 when the VM is querying the
- * cache size, so a fastpath for that case is appropriate.
- */
-typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
-
-/*
- * Add an aging callback. The int is the number of 'seeks' it takes
- * to recreate one of the objects that these functions age.
- */
-
-#define DEFAULT_SEEKS 2
-struct shrinker;
-extern struct shrinker *set_shrinker(int, shrinker_t);
-extern void remove_shrinker(struct shrinker *shrinker);
+ * A callback you can register to apply pressure to ageable caches.
+ *
+ * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
+ * look through the least-recently-used 'nr_to_scan' entries and
+ * attempt to free them up. It should return the number of objects
+ * which remain in the cache. If it returns -1, it means it cannot do
+ * any scanning at this time (eg. there is a risk of deadlock).
+ *
+ * The 'gfpmask' refers to the allocation we are currently trying to
+ * fulfil.
+ *
+ * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
+ * querying the cache size, so a fastpath for that case is appropriate.
+ */
+struct shrinker {
+ int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+ int seeks; /* seeks to recreate an obj */
+
+ /* These are for internal use */
+ struct list_head list;
+ long nr; /* objs pending delete */
+};
+#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+extern void register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
/*
* Some shared mappigns will want the pages marked read-only
diff -r f29f422360a1 mm/vmscan.c
--- a/mm/vmscan.c Wed Apr 04 10:23:00 2007 +1000
+++ b/mm/vmscan.c Wed Apr 04 10:23:01 2007 +1000
@@ -72,17 +72,6 @@ struct scan_control {
int order;
};
-/*
- * The list of shrinker callbacks used by to apply pressure to
- * ageable caches.
- */
-struct shrinker {
- shrinker_t shrinker;
- struct list_head list;
- int seeks; /* seeks to recreate an obj */
- long nr; /* objs pending delete */
-};
-
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
#ifdef ARCH_HAS_PREFETCH
@@ -125,34 +114,25 @@ static DECLARE_RWSEM(shrinker_rwsem);
/*
* Add a shrinker callback to be called from the vm
*/
-struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
-{
- struct shrinker *shrinker;
-
- shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
- if (shrinker) {
- shrinker->shrinker = theshrinker;
- shrinker->seeks = seeks;
- shrinker->nr = 0;
- down_write(&shrinker_rwsem);
- list_add_tail(&shrinker->list, &shrinker_list);
- up_write(&shrinker_rwsem);
- }
- return shrinker;
-}
-EXPORT_SYMBOL(set_shrinker);
+void register_shrinker(struct shrinker *shrinker)
+{
+ shrinker->nr = 0;
+ down_write(&shrinker_rwsem);
+ list_add_tail(&shrinker->list, &shrinker_list);
+ up_write(&shrinker_rwsem);
+}
+EXPORT_SYMBOL(register_shrinker);
/*
* Remove one
*/
-void remove_shrinker(struct shrinker *shrinker)
+void unregister_shrinker(struct shrinker *shrinker)
{
down_write(&shrinker_rwsem);
list_del(&shrinker->list);
up_write(&shrinker_rwsem);
- kfree(shrinker);
-}
-EXPORT_SYMBOL(remove_shrinker);
+}
+EXPORT_SYMBOL(unregister_shrinker);
#define SHRINK_BATCH 128
/*
@@ -189,11 +169,11 @@ unsigned long shrink_slab(unsigned long
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
unsigned long total_scan;
- unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
+ unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
if (!shrinker->seeks) {
print_symbol("shrinker %s has zero seeks\n",
- (unsigned long)shrinker->shrinker);
+ (unsigned long)shrinker->shrink);
delta = (4 * scanned) / DEFAULT_SEEKS;
} else {
delta = (4 * scanned) / shrinker->seeks;
@@ -223,8 +203,8 @@ unsigned long shrink_slab(unsigned long
int shrink_ret;
int nr_before;
- nr_before = (*shrinker->shrinker)(0, gfp_mask);
- shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
+ nr_before = (*shrinker->shrink)(0, gfp_mask);
+ shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
if (shrink_ret == -1)
break;
if (shrink_ret < nr_before)
^ permalink raw reply [flat|nested] 16+ messages in thread
end of thread, other threads:[~2007-04-04 0:30 UTC | newest]
Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2007-04-03 3:44 [PATCH] Cleanup and kernelify shrinker registration (rc5-mm2) Rusty Russell
2007-04-03 3:47 ` Rusty Russell
2007-04-03 3:58 ` Andrew Morton
2007-04-03 4:45 ` Rusty Russell
2007-04-03 4:57 ` Andrew Morton
2007-04-03 5:44 ` [xfs-masters] " David Chinner
2007-04-03 6:01 ` Andrew Morton
2007-04-03 6:19 ` David Chinner
2007-04-04 0:30 ` Rusty Russell
2007-04-03 5:47 ` Rusty Russell
2007-04-03 6:09 ` Andrew Morton
2007-04-03 7:18 ` Rusty Russell
2007-04-03 12:37 ` [xfs-masters] " David Chinner
2007-04-03 17:36 ` Andrew Morton
2007-04-04 0:03 ` David Chinner
2007-04-03 9:57 ` Andi Kleen