From: Andrea Righi <arighi@develer.com>
To: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
Balbir Singh <balbir@linux.vnet.ibm.com>,
Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Vivek Goyal <vgoyal@redhat.com>,
Peter Zijlstra <peterz@infradead.org>,
Trond Myklebust <trond.myklebust@fys.uio.no>,
Suleiman Souhlal <suleiman@google.com>,
Greg Thelen <gthelen@google.com>,
"Kirill A. Shutemov" <kirill@shutemov.name>,
Andrew Morton <akpm@linux-foundation.org>,
containers@lists.linux-foundation.org,
linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH -mmotm 1/5] memcg: disable irq at page cgroup lock
Date: Wed, 10 Mar 2010 00:00:32 +0100
Message-ID: <1268175636-4673-2-git-send-email-arighi@develer.com>
In-Reply-To: <1268175636-4673-1-git-send-email-arighi@develer.com>
From: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
In the current implementation we never have to disable irqs at
lock_page_cgroup(), because the lock is never acquired in interrupt
context. But a later patch in this series will take the lock from
interrupt context, or with irqs already disabled, so this patch disables
irqs in lock_page_cgroup() and re-enables them in unlock_page_cgroup().
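
To illustrate the new calling convention (a usage sketch only, not taken
verbatim from the diff below; the caller and "page" are hypothetical):

	unsigned long flags;
	struct page_cgroup *pc = lookup_page_cgroup(page);

	lock_page_cgroup(pc, flags);	/* local_irq_save(), then take the bit spinlock */
	/* ... update pc->mem_cgroup or per-memcg statistics ... */
	unlock_page_cgroup(pc, flags);	/* release the lock, then local_irq_restore() */

Because the macros use local_irq_save()/local_irq_restore() rather than
local_irq_disable()/local_irq_enable(), the same sequence is safe whether
or not the caller already runs with irqs disabled.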
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
---
 include/linux/page_cgroup.h |   16 ++++++++++++++--
 mm/memcontrol.c             |   43 +++++++++++++++++++++++++------------------
 2 files changed, 39 insertions(+), 20 deletions(-)

diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 30b0813..0d2f92c 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -83,16 +83,28 @@ static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
 	return page_zonenum(pc->page);
 }
 
-static inline void lock_page_cgroup(struct page_cgroup *pc)
+static inline void __lock_page_cgroup(struct page_cgroup *pc)
 {
 	bit_spin_lock(PCG_LOCK, &pc->flags);
 }
 
-static inline void unlock_page_cgroup(struct page_cgroup *pc)
+static inline void __unlock_page_cgroup(struct page_cgroup *pc)
 {
 	bit_spin_unlock(PCG_LOCK, &pc->flags);
 }
 
+#define lock_page_cgroup(pc, flags)		\
+	do {					\
+		local_irq_save(flags);		\
+		__lock_page_cgroup(pc);		\
+	} while (0)
+
+#define unlock_page_cgroup(pc, flags)		\
+	do {					\
+		__unlock_page_cgroup(pc);	\
+		local_irq_restore(flags);	\
+	} while (0)
+
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 
 struct page_cgroup;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7fab84e..a9fd736 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1352,12 +1352,13 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc;
+	unsigned long flags;
 
 	pc = lookup_page_cgroup(page);
 	if (unlikely(!pc))
 		return;
 
-	lock_page_cgroup(pc);
+	lock_page_cgroup(pc, flags);
 	mem = pc->mem_cgroup;
 	if (!mem)
 		goto done;
@@ -1371,7 +1372,7 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
 	__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], val);
 
 done:
-	unlock_page_cgroup(pc);
+	unlock_page_cgroup(pc, flags);
 }
 
 /*
@@ -1705,11 +1706,12 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 	struct page_cgroup *pc;
 	unsigned short id;
 	swp_entry_t ent;
+	unsigned long flags;
 
 	VM_BUG_ON(!PageLocked(page));
 
 	pc = lookup_page_cgroup(page);
-	lock_page_cgroup(pc);
+	lock_page_cgroup(pc, flags);
 	if (PageCgroupUsed(pc)) {
 		mem = pc->mem_cgroup;
 		if (mem && !css_tryget(&mem->css))
@@ -1723,7 +1725,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 			mem = NULL;
 		rcu_read_unlock();
 	}
-	unlock_page_cgroup(pc);
+	unlock_page_cgroup(pc, flags);
 	return mem;
 }
 
@@ -1736,13 +1738,15 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 				     struct page_cgroup *pc,
 				     enum charge_type ctype)
 {
+	unsigned long flags;
+
 	/* try_charge() can return NULL to *memcg, taking care of it. */
 	if (!mem)
 		return;
 
-	lock_page_cgroup(pc);
+	lock_page_cgroup(pc, flags);
 	if (unlikely(PageCgroupUsed(pc))) {
-		unlock_page_cgroup(pc);
+		unlock_page_cgroup(pc, flags);
 		mem_cgroup_cancel_charge(mem);
 		return;
 	}
@@ -1772,7 +1776,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 	mem_cgroup_charge_statistics(mem, pc, true);
 
-	unlock_page_cgroup(pc);
+	unlock_page_cgroup(pc, flags);
 
 	/*
 	 * "charge_statistics" updated event counter. Then, check it.
 	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -1842,12 +1846,13 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
 	struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
 {
 	int ret = -EINVAL;
-	lock_page_cgroup(pc);
+	unsigned long flags;
+
+	lock_page_cgroup(pc, flags);
 	if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
 		__mem_cgroup_move_account(pc, from, to, uncharge);
 		ret = 0;
 	}
-	unlock_page_cgroup(pc);
+	unlock_page_cgroup(pc, flags);
 	/*
 	 * check events
 	 */
@@ -1974,17 +1979,17 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 	 */
 	if (!(gfp_mask & __GFP_WAIT)) {
 		struct page_cgroup *pc;
-
+		unsigned long flags;
 		pc = lookup_page_cgroup(page);
 		if (!pc)
 			return 0;
 
-		lock_page_cgroup(pc);
+		lock_page_cgroup(pc, flags);
 		if (PageCgroupUsed(pc)) {
-			unlock_page_cgroup(pc);
+			unlock_page_cgroup(pc, flags);
 			return 0;
 		}
-		unlock_page_cgroup(pc);
+		unlock_page_cgroup(pc, flags);
 	}
 
 	if (unlikely(!mm && !mem))
@@ -2166,6 +2171,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
 	struct mem_cgroup_per_zone *mz;
+	unsigned long flags;
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -2180,7 +2186,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	if (unlikely(!pc || !PageCgroupUsed(pc)))
 		return NULL;
 
-	lock_page_cgroup(pc);
+	lock_page_cgroup(pc, flags);
 
 	mem = pc->mem_cgroup;
 
@@ -2219,7 +2225,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	 */
 	mz = page_cgroup_zoneinfo(pc);
 
-	unlock_page_cgroup(pc);
+	unlock_page_cgroup(pc, flags);
 
 	memcg_check_events(mem, page);
 	/* at swapout, this memcg will be accessed to record to swap */
@@ -2229,7 +2235,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	return mem;
 
 unlock_out:
-	unlock_page_cgroup(pc);
+	unlock_page_cgroup(pc, flags);
 	return NULL;
 }
@@ -2417,17 +2423,18 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
 	int ret = 0;
+	unsigned long flags;
 
 	if (mem_cgroup_disabled())
 		return 0;
 
 	pc = lookup_page_cgroup(page);
-	lock_page_cgroup(pc);
+	lock_page_cgroup(pc, flags);
 	if (PageCgroupUsed(pc)) {
 		mem = pc->mem_cgroup;
 		css_get(&mem->css);
 	}
-	unlock_page_cgroup(pc);
+	unlock_page_cgroup(pc, flags);
 
 	if (mem) {
 		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
--
1.6.3.3