From: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
Li Zefan <lizf@cn.fujitsu.com>, Paul Menage <menage@google.com>,
Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>,
linux-mm <linux-mm@kvack.org>
Subject: [PATCH -mmotm 8/8] memcg: improve performance in moving swap charge
Date: Mon, 21 Dec 2009 14:40:06 +0900 [thread overview]
Message-ID: <20091221144006.65319085.nishimura@mxp.nes.nec.co.jp> (raw)
In-Reply-To: <20091221143106.6ff3ca15.nishimura@mxp.nes.nec.co.jp>
This patch tries to reduce overheads in moving swap charge by:
- Adds a new function(__mem_cgroup_put), which takes "count" as a arg and
decrement mem->refcnt by "count".
- Removed res_counter_uncharge, css_put, and mem_cgroup_put from the path
of moving swap account, and consolidate all of them into mem_cgroup_clear_mc.
We cannot do that about mc.to->refcnt.
These changes reduce the overhead from 1.35sec to 0.9sec to move charges of 1G
anonymous memory(including 500MB swap) in my test environment.
Changelog: 2009/12/21
- don't postpone calling mem_cgroup_get() against the new cgroup(bug fix).
Changelog: 2009/12/04
- new patch
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
---
mm/memcontrol.c | 73 ++++++++++++++++++++++++++++++++++++++++++++----------
1 files changed, 59 insertions(+), 14 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 006f4b6..ffca2ab 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -254,6 +254,7 @@ struct move_charge_struct {
struct mem_cgroup *to;
unsigned long precharge;
unsigned long moved_charge;
+ unsigned long moved_swap;
struct task_struct *moving_task; /* a task moving charges */
wait_queue_head_t waitq; /* a waitq for other context */
/* not to cause oom */
@@ -2279,6 +2280,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
* @entry: swap entry to be moved
* @from: mem_cgroup which the entry is moved from
* @to: mem_cgroup which the entry is moved to
+ * @need_fixup: whether we should fixup res_counters and refcounts.
*
* It succeeds only when the swap_cgroup's record for this entry is the same
* as the mem_cgroup's id of @from.
@@ -2289,7 +2291,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
* both res and memsw, and called css_get().
*/
static int mem_cgroup_move_swap_account(swp_entry_t entry,
- struct mem_cgroup *from, struct mem_cgroup *to)
+ struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
{
unsigned short old_id, new_id;
@@ -2297,20 +2299,29 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
new_id = css_id(&to->css);
if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
- if (!mem_cgroup_is_root(from))
- res_counter_uncharge(&from->memsw, PAGE_SIZE);
mem_cgroup_swap_statistics(from, false);
- mem_cgroup_put(from);
+ mem_cgroup_swap_statistics(to, true);
/*
- * we charged both to->res and to->memsw, so we should uncharge
- * to->res.
+ * This function is only called from task migration context now.
+ * It postpones res_counter and refcount handling till the end
+ * of task migration(mem_cgroup_clear_mc()) for performance
+ * improvement. But we cannot postpone mem_cgroup_get(to)
+ * because if the process that has been moved to @to does
+ * swap-in, the refcount of @to might be decreased to 0.
*/
- if (!mem_cgroup_is_root(to))
- res_counter_uncharge(&to->res, PAGE_SIZE);
- mem_cgroup_swap_statistics(to, true);
mem_cgroup_get(to);
- css_put(&to->css);
-
+ if (need_fixup) {
+ if (!mem_cgroup_is_root(from))
+ res_counter_uncharge(&from->memsw, PAGE_SIZE);
+ mem_cgroup_put(from);
+ /*
+ * we charged both to->res and to->memsw, so we should
+ * uncharge to->res.
+ */
+ if (!mem_cgroup_is_root(to))
+ res_counter_uncharge(&to->res, PAGE_SIZE);
+ css_put(&to->css);
+ }
return 0;
}
return -EINVAL;
@@ -3395,9 +3406,9 @@ static void mem_cgroup_get(struct mem_cgroup *mem)
atomic_inc(&mem->refcnt);
}
-static void mem_cgroup_put(struct mem_cgroup *mem)
+static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
{
- if (atomic_dec_and_test(&mem->refcnt)) {
+ if (atomic_sub_and_test(count, &mem->refcnt)) {
struct mem_cgroup *parent = parent_mem_cgroup(mem);
__mem_cgroup_free(mem);
if (parent)
@@ -3405,6 +3416,11 @@ static void mem_cgroup_put(struct mem_cgroup *mem)
}
}
+static void mem_cgroup_put(struct mem_cgroup *mem)
+{
+ __mem_cgroup_put(mem, 1);
+}
+
/*
* Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
*/
@@ -3763,6 +3779,29 @@ static void mem_cgroup_clear_mc(void)
__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
mc.moved_charge = 0;
}
+ /* we must fixup refcnts and charges */
+ if (mc.moved_swap) {
+ WARN_ON_ONCE(mc.moved_swap > INT_MAX);
+ /* uncharge swap account from the old cgroup */
+ if (!mem_cgroup_is_root(mc.from))
+ res_counter_uncharge(&mc.from->memsw,
+ PAGE_SIZE * mc.moved_swap);
+ __mem_cgroup_put(mc.from, mc.moved_swap);
+
+ if (!mem_cgroup_is_root(mc.to)) {
+ /*
+ * we charged both to->res and to->memsw, so we should
+ * uncharge to->res.
+ */
+ res_counter_uncharge(&mc.to->res,
+ PAGE_SIZE * mc.moved_swap);
+ VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags));
+ __css_put(&mc.to->css, mc.moved_swap);
+ }
+ /* we've already done mem_cgroup_get(mc.to) */
+
+ mc.moved_swap = 0;
+ }
mc.from = NULL;
mc.to = NULL;
mc.moving_task = NULL;
@@ -3792,11 +3831,13 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
VM_BUG_ON(mc.to);
VM_BUG_ON(mc.precharge);
VM_BUG_ON(mc.moved_charge);
+ VM_BUG_ON(mc.moved_swap);
VM_BUG_ON(mc.moving_task);
mc.from = from;
mc.to = mem;
mc.precharge = 0;
mc.moved_charge = 0;
+ mc.moved_swap = 0;
mc.moving_task = current;
ret = mem_cgroup_precharge_mc(mm);
@@ -3857,8 +3898,12 @@ put: /* is_target_pte_for_mc() gets the page */
break;
case MC_TARGET_SWAP:
ent = target.ent;
- if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to))
+ if (!mem_cgroup_move_swap_account(ent,
+ mc.from, mc.to, false)) {
mc.precharge--;
+ /* we fixup refcnts and charges later. */
+ mc.moved_swap++;
+ }
break;
default:
break;
--
1.5.6.1
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
next prev parent reply other threads:[~2009-12-21 5:50 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2009-12-21 5:31 [PATCH -mmotm 0/8] memcg: move charge at task migration (21/Dec) Daisuke Nishimura
2009-12-21 5:32 ` [PATCH -mmotm 1/8] cgroup: introduce cancel_attach() Daisuke Nishimura
2009-12-21 5:32 ` [PATCH -mmotm 2/8] cgroup: introduce coalesce css_get() and css_put() Daisuke Nishimura
2009-12-21 5:33 ` [PATCH -mmotm 3/8] memcg: add interface to move charge at task migration Daisuke Nishimura
2009-12-21 7:00 ` KAMEZAWA Hiroyuki
2009-12-21 5:35 ` [PATCH -mmotm 4/8] memcg: move charges of anonymous page Daisuke Nishimura
2009-12-21 7:01 ` KAMEZAWA Hiroyuki
2009-12-23 0:26 ` Andrew Morton
2009-12-21 5:36 ` [PATCH -mmotm 5/8] memcg: improve performance in moving charge Daisuke Nishimura
2009-12-21 7:02 ` KAMEZAWA Hiroyuki
2009-12-21 5:37 ` [PATCH -mmotm 6/8] memcg: avoid oom during " Daisuke Nishimura
2009-12-21 7:03 ` KAMEZAWA Hiroyuki
2009-12-21 5:38 ` [PATCH -mmotm 7/8] memcg: move charges of anonymous swap Daisuke Nishimura
2009-12-21 7:04 ` KAMEZAWA Hiroyuki
2010-02-04 3:31 ` Andrew Morton
2010-02-04 5:09 ` Daisuke Nishimura
2010-02-04 5:27 ` KAMEZAWA Hiroyuki
2010-02-04 7:18 ` Paul Mundt
2010-02-04 7:44 ` KAMEZAWA Hiroyuki
2010-02-04 15:32 ` Balbir Singh
2010-02-05 0:38 ` Daisuke Nishimura
2010-02-05 0:54 ` KAMEZAWA Hiroyuki
2010-02-05 1:16 ` Paul Mundt
2010-03-09 23:13 ` Andrew Morton
2010-03-10 2:50 ` Daisuke Nishimura
2009-12-21 5:40 ` Daisuke Nishimura [this message]
2009-12-21 7:05 ` [PATCH -mmotm 8/8] memcg: improve performance in moving swap charge KAMEZAWA Hiroyuki
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20091221144006.65319085.nishimura@mxp.nes.nec.co.jp \
--to=nishimura@mxp.nes.nec.co.jp \
--cc=akpm@linux-foundation.org \
--cc=balbir@linux.vnet.ibm.com \
--cc=kamezawa.hiroyu@jp.fujitsu.com \
--cc=linux-mm@kvack.org \
--cc=lizf@cn.fujitsu.com \
--cc=menage@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox