From: Hugh Dickins <hugh@veritas.com>
To: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
Hirokazu Takahashi <taka@valinux.co.jp>,
YAMAMOTO Takashi <yamamoto@valinux.co.jp>,
Adrian Bunk <bunk@kernel.org>,
linux-mm@kvack.org
Subject: [PATCH 10/15] memcg: memcontrol uninlined and static
Date: Mon, 25 Feb 2008 23:44:44 +0000 (GMT)
Message-ID: <Pine.LNX.4.64.0802252343230.27067@blonde.site>
In-Reply-To: <Pine.LNX.4.64.0802252327490.27067@blonde.site>

More cleanup to memcontrol.c, this time changing some of the code generated.
Let the compiler decide what to inline (except for page_cgroup_locked, which
is only used when CONFIG_DEBUG_VM is enabled): the __always_inline on
lock_page_cgroup etc. was quite a waste, since bit_spin_lock etc. are already
inlines in a header file.  Also made mem_cgroup_force_empty and
mem_cgroup_write_strategy static.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
---
mm/memcontrol.c | 28 +++++++++++-----------------
1 file changed, 11 insertions(+), 17 deletions(-)
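
(Not part of the patch, just an illustration of why the dropped hints were
redundant.  A toy example with made-up names: built with gcc -O2, the helper
below is inlined into its caller whether or not it carries an "inline"
keyword, because "static" already lets the compiler see every caller and
decide for itself.)

/* Illustrative sketch only; nothing here is from memcontrol.c. */
static int tiny_helper(int x)
{
	return x * 2;			/* small: an inlining candidate anyway */
}

static int caller(int x)
{
	return tiny_helper(x) + 1;	/* typically inlined at -O2 */
}

int main(void)
{
	return caller(20) == 41 ? 0 : 1;	/* exits 0 */
}
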
--- memcg09/mm/memcontrol.c 2008-02-25 14:06:09.000000000 +0000
+++ memcg10/mm/memcontrol.c 2008-02-25 14:06:12.000000000 +0000
@@ -168,12 +168,12 @@ struct page_cgroup {
 #define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
 #define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */
 
-static inline int page_cgroup_nid(struct page_cgroup *pc)
+static int page_cgroup_nid(struct page_cgroup *pc)
 {
 	return page_to_nid(pc->page);
 }
 
-static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
+static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
 {
 	return page_zonenum(pc->page);
 }
@@ -199,14 +199,13 @@ static void mem_cgroup_charge_statistics
 	__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
 }
 
-static inline struct mem_cgroup_per_zone *
+static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
 {
-	BUG_ON(!mem->info.nodeinfo[nid]);
 	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
 }
 
-static inline struct mem_cgroup_per_zone *
+static struct mem_cgroup_per_zone *
 page_cgroup_zoneinfo(struct page_cgroup *pc)
 {
 	struct mem_cgroup *mem = pc->mem_cgroup;
@@ -231,16 +230,14 @@ static unsigned long mem_cgroup_get_all_
 	return total;
 }
 
-static inline
-struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
+static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 {
 	return container_of(cgroup_subsys_state(cont,
 				mem_cgroup_subsys_id), struct mem_cgroup,
 				css);
 }
 
-static inline
-struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
+static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
 	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
 				struct mem_cgroup, css);
@@ -276,13 +273,12 @@ struct page_cgroup *page_get_page_cgroup
 	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
 }
 
-static void __always_inline lock_page_cgroup(struct page *page)
+static void lock_page_cgroup(struct page *page)
 {
 	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-	VM_BUG_ON(!page_cgroup_locked(page));
 }
 
-static void __always_inline unlock_page_cgroup(struct page *page)
+static void unlock_page_cgroup(struct page *page)
 {
 	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
@@ -741,16 +737,14 @@ void mem_cgroup_end_migration(struct pag
 void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 {
 	struct page_cgroup *pc;
-	struct mem_cgroup *mem;
-	unsigned long flags;
 	struct mem_cgroup_per_zone *mz;
+	unsigned long flags;
 
 retry:
 	pc = page_get_page_cgroup(page);
 	if (!pc)
 		return;
 
-	mem = pc->mem_cgroup;
 	mz = page_cgroup_zoneinfo(pc);
 	if (clear_page_cgroup(page, pc) != pc)
 		goto retry;
@@ -822,7 +816,7 @@ retry:
  * make mem_cgroup's charge to be 0 if there is no task.
  * This enables deleting this mem_cgroup.
  */
-int mem_cgroup_force_empty(struct mem_cgroup *mem)
+static int mem_cgroup_force_empty(struct mem_cgroup *mem)
 {
 	int ret = -EBUSY;
 	int node, zid;
@@ -852,7 +846,7 @@ out:
 	return ret;
 }
 
-int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
+static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
 {
 	*tmp = memparse(buf, &buf);
 	if (*buf != '\0')