linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Glauber Costa <glommer@parallels.com>
To: linux-mm@kvack.org
Cc: cgroups@vger.kernel.org, Dave Shrinnker <david@fromorbit.com>,
	Serge Hallyn <serge.hallyn@canonical.com>,
	kamezawa.hiroyu@jp.fujitsu.com, Michal Hocko <mhocko@suse.cz>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	hughd@google.com, linux-fsdevel@vger.kernel.org,
	containers@lists.linux-foundation.org,
	Greg Thelen <gthelen@google.com>,
	Dave Chinner <dchinner@redhat.com>
Subject: [PATCH v3 04/32] dentry: move to per-sb LRU locks
Date: Mon,  8 Apr 2013 18:00:31 +0400	[thread overview]
Message-ID: <1365429659-22108-5-git-send-email-glommer@parallels.com> (raw)
In-Reply-To: <1365429659-22108-1-git-send-email-glommer@parallels.com>

From: Dave Chinner <dchinner@redhat.com>

With the dentry LRUs being per-sb structures, there is no real need
for a global dentry_lru_lock. The locking can be made more
fine-grained by moving to a per-sb LRU lock, isolating the LRU
operations of different filesystems completely from each other.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
---
 fs/dcache.c        | 37 ++++++++++++++++++-------------------
 fs/super.c         |  1 +
 include/linux/fs.h |  4 +++-
 3 files changed, 22 insertions(+), 20 deletions(-)

diff --git a/fs/dcache.c b/fs/dcache.c
index ffdd461..de09780 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -48,7 +48,7 @@
  *   - the dcache hash table
  * s_anon bl list spinlock protects:
  *   - the s_anon list (see __d_drop)
- * dcache_lru_lock protects:
+ * dentry->d_sb->s_dentry_lru_lock protects:
  *   - the dcache lru lists and counters
  * d_lock protects:
  *   - d_flags
@@ -63,7 +63,7 @@
  * Ordering:
  * dentry->d_inode->i_lock
  *   dentry->d_lock
- *     dcache_lru_lock
+ *     dentry->d_sb->s_dentry_lru_lock
  *     dcache_hash_bucket lock
  *     s_anon lock
  *
@@ -81,7 +81,6 @@
 int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
 EXPORT_SYMBOL(rename_lock);
@@ -327,11 +326,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
 static void dentry_lru_add(struct dentry *dentry)
 {
 	if (list_empty(&dentry->d_lru)) {
-		spin_lock(&dcache_lru_lock);
+		spin_lock(&dentry->d_sb->s_dentry_lru_lock);
 		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 		dentry->d_sb->s_nr_dentry_unused++;
 		this_cpu_inc(nr_dentry_unused);
-		spin_unlock(&dcache_lru_lock);
+		spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
 	}
 }
 
@@ -349,9 +348,9 @@ static void __dentry_lru_del(struct dentry *dentry)
 static void dentry_lru_del(struct dentry *dentry)
 {
 	if (!list_empty(&dentry->d_lru)) {
-		spin_lock(&dcache_lru_lock);
+		spin_lock(&dentry->d_sb->s_dentry_lru_lock);
 		__dentry_lru_del(dentry);
-		spin_unlock(&dcache_lru_lock);
+		spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
 	}
 }
 
@@ -366,15 +365,15 @@ static void dentry_lru_prune(struct dentry *dentry)
 		if (dentry->d_flags & DCACHE_OP_PRUNE)
 			dentry->d_op->d_prune(dentry);
 
-		spin_lock(&dcache_lru_lock);
+		spin_lock(&dentry->d_sb->s_dentry_lru_lock);
 		__dentry_lru_del(dentry);
-		spin_unlock(&dcache_lru_lock);
+		spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
 	}
 }
 
 static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
 {
-	spin_lock(&dcache_lru_lock);
+	spin_lock(&dentry->d_sb->s_dentry_lru_lock);
 	if (list_empty(&dentry->d_lru)) {
 		list_add_tail(&dentry->d_lru, list);
 		dentry->d_sb->s_nr_dentry_unused++;
@@ -382,7 +381,7 @@ static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
 	} else {
 		list_move_tail(&dentry->d_lru, list);
 	}
-	spin_unlock(&dcache_lru_lock);
+	spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
 }
 
 /**
@@ -860,14 +859,14 @@ void prune_dcache_sb(struct super_block *sb, int count)
 	LIST_HEAD(tmp);
 
 relock:
-	spin_lock(&dcache_lru_lock);
+	spin_lock(&sb->s_dentry_lru_lock);
 	while (!list_empty(&sb->s_dentry_lru)) {
 		dentry = list_entry(sb->s_dentry_lru.prev,
 				struct dentry, d_lru);
 		BUG_ON(dentry->d_sb != sb);
 
 		if (!spin_trylock(&dentry->d_lock)) {
-			spin_unlock(&dcache_lru_lock);
+			spin_unlock(&sb->s_dentry_lru_lock);
 			cpu_relax();
 			goto relock;
 		}
@@ -883,11 +882,11 @@ relock:
 			if (!--count)
 				break;
 		}
-		cond_resched_lock(&dcache_lru_lock);
+		cond_resched_lock(&sb->s_dentry_lru_lock);
 	}
 	if (!list_empty(&referenced))
 		list_splice(&referenced, &sb->s_dentry_lru);
-	spin_unlock(&dcache_lru_lock);
+	spin_unlock(&sb->s_dentry_lru_lock);
 
 	shrink_dentry_list(&tmp);
 }
@@ -903,14 +902,14 @@ void shrink_dcache_sb(struct super_block *sb)
 {
 	LIST_HEAD(tmp);
 
-	spin_lock(&dcache_lru_lock);
+	spin_lock(&sb->s_dentry_lru_lock);
 	while (!list_empty(&sb->s_dentry_lru)) {
 		list_splice_init(&sb->s_dentry_lru, &tmp);
-		spin_unlock(&dcache_lru_lock);
+		spin_unlock(&sb->s_dentry_lru_lock);
 		shrink_dentry_list(&tmp);
-		spin_lock(&dcache_lru_lock);
+		spin_lock(&sb->s_dentry_lru_lock);
 	}
-	spin_unlock(&dcache_lru_lock);
+	spin_unlock(&sb->s_dentry_lru_lock);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
 
diff --git a/fs/super.c b/fs/super.c
index 2a37fd6..0be75fb 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -182,6 +182,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 		INIT_HLIST_BL_HEAD(&s->s_anon);
 		INIT_LIST_HEAD(&s->s_inodes);
 		INIT_LIST_HEAD(&s->s_dentry_lru);
+		spin_lock_init(&s->s_dentry_lru_lock);
 		INIT_LIST_HEAD(&s->s_inode_lru);
 		spin_lock_init(&s->s_inode_lru_lock);
 		INIT_LIST_HEAD(&s->s_mounts);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2c28271..02934f5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1261,7 +1261,9 @@ struct super_block {
 	struct list_head	s_files;
 #endif
 	struct list_head	s_mounts;	/* list of mounts; _not_ for fs use */
-	/* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
+
+	/* s_dentry_lru_lock protects s_dentry_lru and s_nr_dentry_unused */
+	spinlock_t		s_dentry_lru_lock ____cacheline_aligned_in_smp;
 	struct list_head	s_dentry_lru;	/* unused dentry lru */
 	int			s_nr_dentry_unused;	/* # of dentry on lru */
 
-- 
1.8.1.4

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>

  parent reply	other threads:[~2013-04-08 14:01 UTC|newest]

Thread overview: 45+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-04-08 14:00 [PATCH v3 00/32] memcg-aware slab shrinking with lasers and numbers Glauber Costa
2013-04-08 14:00 ` [PATCH v3 01/32] super: fix calculation of shrinkable objects for small numbers Glauber Costa
2013-04-08 14:00 ` [PATCH v3 02/32] vmscan: take at least one pass with shrinkers Glauber Costa
2013-04-08 14:00 ` [PATCH v3 03/32] dcache: convert dentry_stat.nr_unused to per-cpu counters Glauber Costa
2013-04-08 14:00 ` Glauber Costa [this message]
2013-04-08 14:00 ` [PATCH v3 05/32] dcache: remove dentries from LRU before putting on dispose list Glauber Costa
2013-04-08 14:00 ` [PATCH v3 06/32] mm: new shrinker API Glauber Costa
2013-04-08 14:00 ` [PATCH v3 07/32] shrinker: convert superblock shrinkers to new API Glauber Costa
2013-04-08 14:00 ` [PATCH v3 08/32] list: add a new LRU list type Glauber Costa
2013-04-15  5:35   ` Greg Thelen
2013-04-15 17:56     ` Greg Thelen
2013-04-16 14:43       ` Glauber Costa
2013-04-08 14:00 ` [PATCH v3 09/32] inode: convert inode lru list to generic lru list code Glauber Costa
2013-04-08 14:00 ` [PATCH v3 10/32] dcache: convert to use new lru list infrastructure Glauber Costa
2013-04-08 14:00 ` [PATCH v3 11/32] list_lru: per-node " Glauber Costa
2013-04-15  5:37   ` Greg Thelen
2013-04-08 14:00 ` [PATCH v3 12/32] shrinker: add node awareness Glauber Costa
2013-04-15  5:38   ` Greg Thelen
2013-04-08 14:00 ` [PATCH v3 13/32] fs: convert inode and dentry shrinking to be node aware Glauber Costa
2013-04-08 14:00 ` [PATCH v3 14/32] xfs: convert buftarg LRU to generic code Glauber Costa
2013-04-15  5:38   ` Greg Thelen
2013-04-15 10:14     ` Glauber Costa
2013-04-08 14:00 ` [PATCH v3 15/32] xfs: convert dquot cache lru to list_lru Glauber Costa
2013-04-08 14:00 ` [PATCH v3 16/32] fs: convert fs shrinkers to new scan/count API Glauber Costa
2013-04-08 14:00 ` [PATCH v3 17/32] drivers: convert shrinkers to new count/scan API Glauber Costa
2013-04-08 14:00 ` [PATCH v3 18/32] shrinker: convert remaining shrinkers to " Glauber Costa
2013-04-08 14:00 ` [PATCH v3 19/32] hugepage: convert huge zero page shrinker to new shrinker API Glauber Costa
2013-04-15  5:38   ` Greg Thelen
2013-04-15  8:10   ` Kirill A. Shutemov
2013-04-08 14:00 ` [PATCH v3 20/32] shrinker: Kill old ->shrink API Glauber Costa
2013-04-15  5:38   ` Greg Thelen
2013-04-08 14:00 ` [PATCH v3 21/32] vmscan: also shrink slab in memcg pressure Glauber Costa
2013-04-08 14:00 ` [PATCH v3 22/32] memcg,list_lru: duplicate LRUs upon kmemcg creation Glauber Costa
2013-04-08 14:00 ` [PATCH v3 23/32] lru: add an element to a memcg list Glauber Costa
2013-04-08 14:00 ` [PATCH v3 24/32] list_lru: also include memcg lists in counts and scans Glauber Costa
2013-04-08 14:00 ` [PATCH v3 25/32] list_lru: per-memcg walks Glauber Costa
2013-04-08 14:00 ` [PATCH v3 26/32] memcg: per-memcg kmem shrinking Glauber Costa
2013-04-08 14:00 ` [PATCH v3 27/32] list_lru: reclaim proportionaly between memcgs and nodes Glauber Costa
2013-04-08 14:00 ` [PATCH v3 28/32] memcg: scan cache objects hierarchically Glauber Costa
2013-04-08 14:00 ` [PATCH v3 29/32] memcg: move initialization to memcg creation Glauber Costa
2013-04-08 14:00 ` [PATCH v3 30/32] memcg: shrink dead memcgs upon global memory pressure Glauber Costa
2013-04-08 14:00 ` [PATCH v3 31/32] super: targeted memcg reclaim Glauber Costa
2013-04-08 14:00 ` [PATCH v3 32/32] memcg: debugging facility to access dangling memcgs Glauber Costa
2013-04-08 20:51 ` [PATCH v3 00/32] memcg-aware slab shrinking with lasers and numbers Andrew Morton
2013-04-09  7:25   ` Glauber Costa

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1365429659-22108-5-git-send-email-glommer@parallels.com \
    --to=glommer@parallels.com \
    --cc=akpm@linux-foundation.org \
    --cc=cgroups@vger.kernel.org \
    --cc=containers@lists.linux-foundation.org \
    --cc=david@fromorbit.com \
    --cc=dchinner@redhat.com \
    --cc=gthelen@google.com \
    --cc=hannes@cmpxchg.org \
    --cc=hughd@google.com \
    --cc=kamezawa.hiroyu@jp.fujitsu.com \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mhocko@suse.cz \
    --cc=serge.hallyn@canonical.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox