From: Glauber Costa <glommer@parallels.com>
To: linux-mm@kvack.org
Cc: linux-fsdevel@vger.kernel.org,
containers@lists.linux-foundation.org,
Michal Hocko <mhocko@suse.cz>,
Johannes Weiner <hannes@cmpxchg.org>,
kamezawa.hiroyu@jp.fujitsu.com,
Andrew Morton <akpm@linux-foundation.org>,
Dave Chinner <david@fromorbit.com>,
Greg Thelen <gthelen@google.com>,
hughd@google.com, yinghan@google.com,
Dave Chinner <dchinner@redhat.com>,
Glauber Costa <glommer@parallels.com>
Subject: [PATCH v2 07/28] shrinker: convert superblock shrinkers to new API
Date: Fri, 29 Mar 2013 13:13:49 +0400 [thread overview]
Message-ID: <1364548450-28254-8-git-send-email-glommer@parallels.com> (raw)
In-Reply-To: <1364548450-28254-1-git-send-email-glommer@parallels.com>
From: Dave Chinner <dchinner@redhat.com>
Convert superblock shrinker to use the new count/scan API, and
propagate the API changes through to the filesystem callouts. The
filesystem callouts already use a count/scan API, so it's just
changing counters to longs to match the VM API.
This requires the dentry and inode shrinker callouts to be converted
to the count/scan API. This is mainly a mechanical change.
[ glommer: use mult_frac for fractional proportions, build fixes ]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@parallels.com>
---
fs/dcache.c | 10 +++++---
fs/inode.c | 7 +++--
fs/internal.h | 2 ++
fs/super.c | 74 ++++++++++++++++++++++++++++++++---------------------
fs/xfs/xfs_icache.c | 4 +--
fs/xfs/xfs_icache.h | 2 +-
fs/xfs/xfs_super.c | 8 +++---
include/linux/fs.h | 8 ++----
8 files changed, 67 insertions(+), 48 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index d15420b..2c9fcd6 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -866,11 +866,12 @@ static void shrink_dentry_list(struct list_head *list)
* This function may fail to free any resources if all the dentries are in
* use.
*/
-void prune_dcache_sb(struct super_block *sb, int count)
+long prune_dcache_sb(struct super_block *sb, long nr_to_scan)
{
struct dentry *dentry;
LIST_HEAD(referenced);
LIST_HEAD(tmp);
+ long freed = 0;
relock:
spin_lock(&sb->s_dentry_lru_lock);
@@ -895,7 +896,8 @@ relock:
this_cpu_dec(nr_dentry_unused);
sb->s_nr_dentry_unused--;
spin_unlock(&dentry->d_lock);
- if (!--count)
+ freed++;
+ if (!--nr_to_scan)
break;
}
cond_resched_lock(&sb->s_dentry_lru_lock);
@@ -905,6 +907,7 @@ relock:
spin_unlock(&sb->s_dentry_lru_lock);
shrink_dentry_list(&tmp);
+ return freed;
}
/*
@@ -1291,9 +1294,8 @@ rename_retry:
void shrink_dcache_parent(struct dentry * parent)
{
LIST_HEAD(dispose);
- int found;
- while ((found = select_parent(parent, &dispose)) != 0)
+ while (select_parent(parent, &dispose))
shrink_dentry_list(&dispose);
}
EXPORT_SYMBOL(shrink_dcache_parent);
diff --git a/fs/inode.c b/fs/inode.c
index f5f7c06..1dd8908 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -704,10 +704,11 @@ static int can_unuse(struct inode *inode)
* LRU does not have strict ordering. Hence we don't want to reclaim inodes
* with this flag set because they are the inodes that are out of order.
*/
-void prune_icache_sb(struct super_block *sb, int nr_to_scan)
+long prune_icache_sb(struct super_block *sb, long nr_to_scan)
{
LIST_HEAD(freeable);
- int nr_scanned;
+ long nr_scanned;
+ long freed = 0;
unsigned long reap = 0;
spin_lock(&sb->s_inode_lru_lock);
@@ -777,6 +778,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
list_move(&inode->i_lru, &freeable);
sb->s_nr_inodes_unused--;
this_cpu_dec(nr_unused);
+ freed++;
}
if (current_is_kswapd())
__count_vm_events(KSWAPD_INODESTEAL, reap);
@@ -787,6 +789,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
current->reclaim_state->reclaimed_slab += reap;
dispose_list(&freeable);
+ return freed;
}
static void __wait_on_freeing_inode(struct inode *inode);
diff --git a/fs/internal.h b/fs/internal.h
index 507141f..5099f87 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -110,6 +110,7 @@ extern int open_check_o_direct(struct file *f);
* inode.c
*/
extern spinlock_t inode_sb_list_lock;
+extern long prune_icache_sb(struct super_block *sb, long nr_to_scan);
extern void inode_add_lru(struct inode *inode);
/*
@@ -125,3 +126,4 @@ extern int invalidate_inodes(struct super_block *, bool);
* dcache.c
*/
extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
+extern long prune_dcache_sb(struct super_block *sb, long nr_to_scan);
diff --git a/fs/super.c b/fs/super.c
index 0be75fb..9d2f2e9 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -53,11 +53,14 @@ static char *sb_writers_name[SB_FREEZE_LEVELS] = {
* shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
* take a passive reference to the superblock to avoid this from occurring.
*/
-static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
+static long super_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct super_block *sb;
- int fs_objects = 0;
- int total_objects;
+ long fs_objects = 0;
+ long total_objects;
+ long freed = 0;
+ long dentries;
+ long inodes;
sb = container_of(shrink, struct super_block, s_shrink);
@@ -65,7 +68,7 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
* Deadlock avoidance. We may hold various FS locks, and we don't want
* to recurse into the FS that called us in clear_inode() and friends..
*/
- if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
+ if (!(sc->gfp_mask & __GFP_FS))
return -1;
if (!grab_super_passive(sb))
@@ -77,33 +80,45 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
total_objects = sb->s_nr_dentry_unused +
sb->s_nr_inodes_unused + fs_objects + 1;
- if (sc->nr_to_scan) {
- int dentries;
- int inodes;
-
- /* proportion the scan between the caches */
- dentries = mult_frac(sc->nr_to_scan, sb->s_nr_dentry_unused,
- total_objects);
- inodes = mult_frac(sc->nr_to_scan, sb->s_nr_inodes_unused,
- total_objects);
- if (fs_objects)
- fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
- total_objects);
- /*
- * prune the dcache first as the icache is pinned by it, then
- * prune the icache, followed by the filesystem specific caches
- */
- prune_dcache_sb(sb, dentries);
- prune_icache_sb(sb, inodes);
+ /* proportion the scan between the caches */
+ dentries = mult_frac(sc->nr_to_scan, sb->s_nr_dentry_unused,
+ total_objects);
+ inodes = mult_frac(sc->nr_to_scan, sb->s_nr_inodes_unused,
+ total_objects);
- if (fs_objects && sb->s_op->free_cached_objects) {
- sb->s_op->free_cached_objects(sb, fs_objects);
- fs_objects = sb->s_op->nr_cached_objects(sb);
- }
- total_objects = sb->s_nr_dentry_unused +
- sb->s_nr_inodes_unused + fs_objects;
+ /*
+ * prune the dcache first as the icache is pinned by it, then
+ * prune the icache, followed by the filesystem specific caches
+ */
+ freed = prune_dcache_sb(sb, dentries);
+ freed += prune_icache_sb(sb, inodes);
+
+ if (fs_objects) {
+ fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
+ total_objects);
+ freed += sb->s_op->free_cached_objects(sb, fs_objects);
}
+ drop_super(sb);
+ return freed;
+}
+
+static long super_cache_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct super_block *sb;
+ long total_objects = 0;
+
+ sb = container_of(shrink, struct super_block, s_shrink);
+
+ if (!grab_super_passive(sb))
+ return -1;
+
+ if (sb->s_op && sb->s_op->nr_cached_objects)
+ total_objects = sb->s_op->nr_cached_objects(sb);
+
+ total_objects += sb->s_nr_dentry_unused;
+ total_objects += sb->s_nr_inodes_unused;
+
total_objects = vfs_pressure_ratio(total_objects);
drop_super(sb);
return total_objects;
@@ -217,7 +232,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
s->cleancache_poolid = -1;
s->s_shrink.seeks = DEFAULT_SEEKS;
- s->s_shrink.shrink = prune_super;
+ s->s_shrink.scan_objects = super_cache_scan;
+ s->s_shrink.count_objects = super_cache_count;
s->s_shrink.batch = 1024;
}
out:
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 96e344e..b35c311 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1164,7 +1164,7 @@ xfs_reclaim_inodes(
* them to be cleaned, which we hope will not be very long due to the
* background walker having already kicked the IO off on those dirty inodes.
*/
-void
+long
xfs_reclaim_inodes_nr(
struct xfs_mount *mp,
int nr_to_scan)
@@ -1173,7 +1173,7 @@ xfs_reclaim_inodes_nr(
xfs_reclaim_work_queue(mp);
xfs_ail_push_all(mp->m_ail);
- xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
+ return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}
/*
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index e0f138c..2d6d2d3 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -31,7 +31,7 @@ void xfs_reclaim_worker(struct work_struct *work);
int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
int xfs_reclaim_inodes_count(struct xfs_mount *mp);
-void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
+long xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index ea341ce..1ff991b 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1523,19 +1523,19 @@ xfs_fs_mount(
return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}
-static int
+static long
xfs_fs_nr_cached_objects(
struct super_block *sb)
{
return xfs_reclaim_inodes_count(XFS_M(sb));
}
-static void
+static long
xfs_fs_free_cached_objects(
struct super_block *sb,
- int nr_to_scan)
+ long nr_to_scan)
{
- xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
+ return xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
}
static const struct super_operations xfs_super_operations = {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 02934f5..a49fe84 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1324,10 +1324,6 @@ struct super_block {
int s_readonly_remount;
};
-/* superblock cache pruning functions */
-extern void prune_icache_sb(struct super_block *sb, int nr_to_scan);
-extern void prune_dcache_sb(struct super_block *sb, int nr_to_scan);
-
extern struct timespec current_fs_time(struct super_block *sb);
/*
@@ -1614,8 +1610,8 @@ struct super_operations {
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
#endif
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
- int (*nr_cached_objects)(struct super_block *);
- void (*free_cached_objects)(struct super_block *, int);
+ long (*nr_cached_objects)(struct super_block *);
+ long (*free_cached_objects)(struct super_block *, long);
};
/*
--
1.8.1.4
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>
next prev parent reply other threads:[~2013-03-29 9:14 UTC|newest]
Thread overview: 97+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-03-29 9:13 [PATCH v2 00/28] memcg-aware slab shrinking Glauber Costa
2013-03-29 9:13 ` [PATCH v2 01/28] super: fix calculation of shrinkable objects for small numbers Glauber Costa
2013-04-01 7:16 ` Kamezawa Hiroyuki
2013-03-29 9:13 ` [PATCH v2 02/28] vmscan: take at least one pass with shrinkers Glauber Costa
2013-04-01 7:26 ` Kamezawa Hiroyuki
2013-04-01 8:10 ` Glauber Costa
2013-04-10 5:09 ` Ric Mason
2013-04-10 7:32 ` Glauber Costa
2013-04-10 9:19 ` Dave Chinner
2013-04-08 8:42 ` Joonsoo Kim
2013-04-08 8:47 ` Glauber Costa
2013-04-08 9:01 ` Joonsoo Kim
2013-04-08 9:05 ` Glauber Costa
2013-04-09 0:55 ` Joonsoo Kim
2013-04-09 1:29 ` Dave Chinner
2013-04-09 2:05 ` Joonsoo Kim
2013-04-09 7:43 ` Glauber Costa
2013-04-09 9:08 ` Joonsoo Kim
2013-04-09 12:30 ` Dave Chinner
2013-04-10 2:51 ` Joonsoo Kim
2013-04-10 7:30 ` Glauber Costa
2013-04-10 8:19 ` Joonsoo Kim
2013-04-10 8:46 ` Wanpeng Li
2013-04-10 8:46 ` Wanpeng Li
2013-04-10 10:07 ` Dave Chinner
2013-04-10 14:03 ` JoonSoo Kim
2013-04-11 0:41 ` Dave Chinner
2013-04-11 7:27 ` Wanpeng Li
2013-04-11 7:27 ` Wanpeng Li
2013-04-11 7:27 ` Wanpeng Li
2013-04-11 9:25 ` Dave Chinner
[not found] ` <20130410025115.GA5872-Hm3cg6mZ9cc@public.gmane.org>
2013-04-10 8:46 ` Wanpeng Li
2013-03-29 9:13 ` [PATCH v2 03/28] dcache: convert dentry_stat.nr_unused to per-cpu counters Glauber Costa
2013-04-05 1:09 ` Greg Thelen
2013-04-05 1:15 ` Dave Chinner
2013-04-08 9:14 ` Glauber Costa
2013-04-08 13:18 ` Glauber Costa
2013-04-08 23:26 ` Dave Chinner
2013-04-09 8:02 ` Glauber Costa
2013-04-09 12:47 ` Dave Chinner
2013-03-29 9:13 ` [PATCH v2 04/28] dentry: move to per-sb LRU locks Glauber Costa
2013-03-29 9:13 ` [PATCH v2 05/28] dcache: remove dentries from LRU before putting on dispose list Glauber Costa
2013-04-03 6:51 ` Sha Zhengju
2013-04-03 8:55 ` Glauber Costa
2013-04-04 6:19 ` Dave Chinner
2013-04-04 6:56 ` Glauber Costa
2013-03-29 9:13 ` [PATCH v2 06/28] mm: new shrinker API Glauber Costa
2013-04-05 1:09 ` Greg Thelen
2013-03-29 9:13 ` Glauber Costa [this message]
2013-03-29 9:13 ` [PATCH v2 08/28] list: add a new LRU list type Glauber Costa
2013-04-04 21:53 ` Greg Thelen
2013-04-05 1:20 ` Dave Chinner
2013-04-05 8:01 ` Glauber Costa
2013-04-06 0:04 ` Dave Chinner
2013-03-29 9:13 ` [PATCH v2 09/28] inode: convert inode lru list to generic lru list code Glauber Costa
2013-03-29 9:13 ` [PATCH v2 10/28] dcache: convert to use new lru list infrastructure Glauber Costa
2013-04-08 13:14 ` Glauber Costa
2013-04-08 23:28 ` Dave Chinner
2013-03-29 9:13 ` [PATCH v2 11/28] list_lru: per-node " Glauber Costa
2013-03-29 9:13 ` [PATCH v2 12/28] shrinker: add node awareness Glauber Costa
2013-03-29 9:13 ` [PATCH v2 13/28] fs: convert inode and dentry shrinking to be node aware Glauber Costa
2013-03-29 9:13 ` [PATCH v2 14/28] xfs: convert buftarg LRU to generic code Glauber Costa
2013-03-29 9:13 ` [PATCH v2 15/28] xfs: convert dquot cache lru to list_lru Glauber Costa
2013-03-29 9:13 ` [PATCH v2 16/28] fs: convert fs shrinkers to new scan/count API Glauber Costa
2013-03-29 9:13 ` [PATCH v2 17/28] drivers: convert shrinkers to new count/scan API Glauber Costa
2013-03-29 9:14 ` [PATCH v2 18/28] shrinker: convert remaining shrinkers to " Glauber Costa
2013-03-29 9:14 ` [PATCH v2 19/28] hugepage: convert huge zero page shrinker to new shrinker API Glauber Costa
2013-03-29 9:14 ` [PATCH v2 20/28] shrinker: Kill old ->shrink API Glauber Costa
2013-03-29 9:14 ` [PATCH v2 21/28] vmscan: also shrink slab in memcg pressure Glauber Costa
2013-04-01 7:46 ` Kamezawa Hiroyuki
2013-04-01 8:51 ` Glauber Costa
2013-04-03 10:11 ` Sha Zhengju
2013-04-03 10:43 ` Glauber Costa
2013-04-04 9:35 ` Sha Zhengju
2013-04-05 8:25 ` Glauber Costa
2013-03-29 9:14 ` [PATCH v2 22/28] memcg,list_lru: duplicate LRUs upon kmemcg creation Glauber Costa
2013-04-01 8:05 ` Kamezawa Hiroyuki
2013-04-01 8:22 ` Glauber Costa
2013-03-29 9:14 ` [PATCH v2 23/28] lru: add an element to a memcg list Glauber Costa
2013-04-01 8:18 ` Kamezawa Hiroyuki
2013-04-01 8:29 ` Glauber Costa
2013-03-29 9:14 ` [PATCH v2 24/28] list_lru: also include memcg lists in counts and scans Glauber Costa
2013-03-29 9:14 ` [PATCH v2 25/28] list_lru: per-memcg walks Glauber Costa
2013-03-29 9:14 ` [PATCH v2 26/28] memcg: per-memcg kmem shrinking Glauber Costa
2013-04-01 8:31 ` Kamezawa Hiroyuki
2013-04-01 8:48 ` Glauber Costa
2013-04-01 9:01 ` Kamezawa Hiroyuki
2013-04-01 9:14 ` Glauber Costa
2013-04-01 9:35 ` Kamezawa Hiroyuki
2013-03-29 9:14 ` [PATCH v2 27/28] list_lru: reclaim proportionaly between memcgs and nodes Glauber Costa
2013-03-29 9:14 ` [PATCH v2 28/28] super: targeted memcg reclaim Glauber Costa
2013-04-01 12:38 ` [PATCH v2 00/28] memcg-aware slab shrinking Serge Hallyn
2013-04-01 12:45 ` Glauber Costa
2013-04-01 14:12 ` Serge Hallyn
2013-04-08 8:11 ` Glauber Costa
2013-04-02 4:58 ` Dave Chinner
2013-04-02 7:55 ` Glauber Costa
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1364548450-28254-8-git-send-email-glommer@parallels.com \
--to=glommer@parallels.com \
--cc=akpm@linux-foundation.org \
--cc=containers@lists.linux-foundation.org \
--cc=david@fromorbit.com \
--cc=dchinner@redhat.com \
--cc=gthelen@google.com \
--cc=hannes@cmpxchg.org \
--cc=hughd@google.com \
--cc=kamezawa.hiroyu@jp.fujitsu.com \
--cc=linux-fsdevel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mhocko@suse.cz \
--cc=yinghan@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox