From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
To: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: LKML <linux-kernel@vger.kernel.org>,
	"linux-mm@kvack.org" <linux-mm@kvack.org>,
	"menage@google.com" <menage@google.com>,
	"balbir@linux.vnet.ibm.com" <balbir@linux.vnet.ibm.com>,
	"xemul@openvz.org" <xemul@openvz.org>,
	"lizf@cn.fujitsu.com" <lizf@cn.fujitsu.com>,
	"yamamoto@valinux.co.jp" <yamamoto@valinux.co.jp>
Subject: [RFC][PATCH 3/3] memcg: per node information
Date: Tue, 20 May 2008 18:09:55 +0900
Message-ID: <20080520180955.70aa5459.kamezawa.hiroyu@jp.fujitsu.com>
In-Reply-To: <20080520180552.601da567.kamezawa.hiroyu@jp.fujitsu.com>

Show per-node statistics in the following format (all values are in bytes):

 node-id total active inactive

[root@iridium bench]# cat /opt/cgroup/memory.numa_stat
0 417611776 99586048 318025728
1 655360000 0 655360000

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>

---
 mm/memcontrol.c |   66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)

Index: mm-2.6.26-rc2-mm1/mm/memcontrol.c
===================================================================
--- mm-2.6.26-rc2-mm1.orig/mm/memcontrol.c
+++ mm-2.6.26-rc2-mm1/mm/memcontrol.c
@@ -960,6 +960,66 @@ static int mem_control_stat_show(struct 
 	return 0;
 }
 
+#ifdef CONFIG_NUMA
+static void *memcg_numastat_start(struct seq_file *m, loff_t *pos)
+{
+	loff_t node = *pos;
+	struct pglist_data *pgdat = first_online_pgdat();
+
+	while (pgdat != NULL) {
+		if (!node)
+			break;
+		pgdat = next_online_pgdat(pgdat);
+		node--;
+	}
+	return pgdat;
+}
+
+static void *memcg_numastat_next(struct seq_file *m, void *arg, loff_t *pos)
+{
+	struct pglist_data *pgdat = (struct pglist_data *)arg;
+
+	(*pos)++;
+	return next_online_pgdat(pgdat);
+}
+
+static void memcg_numastat_stop(struct seq_file *m, void *arg)
+{
+}
+
+static int memcg_numastat_show(struct seq_file *m, void *arg)
+{
+	struct pglist_data *pgdat = (struct pglist_data *)arg;
+	int nid = pgdat->node_id;
+	struct cgroup *cgrp = cgroup_of_seqfile(m);
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+	struct mem_cgroup_per_zone *mz;
+	long active, inactive, total;
+	int zid;
+
+	active = 0;
+	inactive = 0;
+
+	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+		active += MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
+		inactive += MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
+	}
+	active *= PAGE_SIZE;
+	inactive *= PAGE_SIZE;
+	total = active + inactive;
+	/* Node Total Active Inactive (Total = Active + Inactive) */
+	return seq_printf(m, "%d %ld %ld %ld\n", nid, total, active, inactive);
+}
+
+static struct seq_operations memcg_numastat_op = {
+	.start = memcg_numastat_start,
+	.next  = memcg_numastat_next,
+	.stop  = memcg_numastat_stop,
+	.show  = memcg_numastat_show,
+};
+#endif
+
 static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "usage_in_bytes",
@@ -992,6 +1052,12 @@ static struct cftype mem_cgroup_files[] 
 		.name = "stat",
 		.read_map = mem_control_stat_show,
 	},
+#ifdef CONFIG_NUMA
+	{
+		.name = "numa_stat",
+		.seq_ops = &memcg_numastat_op,
+	},
+#endif
 };
 
 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
Index: mm-2.6.26-rc2-mm1/Documentation/controllers/memory_files.txt
===================================================================
--- mm-2.6.26-rc2-mm1.orig/Documentation/controllers/memory_files.txt
+++ mm-2.6.26-rc2-mm1/Documentation/controllers/memory_files.txt
@@ -74,3 +74,13 @@ Files under memory resource controller a
   (write)
   Reset to 0.
 
+* memory.numa_stat
+
+  This file appears only when the kernel is built with CONFIG_NUMA.
+
+  (read)
+  Shows per-node accounting of active/inactive pages, in bytes,
+  formatted as follows:
+  nodeid  total active inactive
+
+  total = active + inactive.

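For reference, a minimal userspace sketch (not part of the patch) that parses
this format. It assumes the memory controller is mounted at /opt/cgroup as in
the example above, and that all values are reported in bytes as computed in
memcg_numastat_show().

#include <stdio.h>

int main(void)
{
	/* Hypothetical reader for memory.numa_stat; the mount point is an assumption. */
	FILE *f = fopen("/opt/cgroup/memory.numa_stat", "r");
	int nid;
	long total, active, inactive;

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Each line: <node-id> <total> <active> <inactive>, all in bytes. */
	while (fscanf(f, "%d %ld %ld %ld",
		      &nid, &total, &active, &inactive) == 4)
		printf("node %d: %ld MiB total, %ld MiB active, %ld MiB inactive\n",
		       nid, total >> 20, active >> 20, inactive >> 20);
	fclose(f);
	return 0;
}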
