>
>> If no non-root memcg comes to life, we do not need to acquire the
>> moving locks, so patch them out.
>>
>> cc: Michal Hocko <mhocko@suse.cz>
>> cc: Greg Thelen <gthelen@google.com>
>> cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
>> cc: Andrew Morton <akpm@linux-foundation.org>
>> cc: Fengguang Wu <fengguang.wu@intel.com>
>> cc: Mel Gorman <mgorman@suse.de>
>> ---
>> include/linux/memcontrol.h | 15 +++++++++++++++
>> mm/memcontrol.c | 23 ++++++++++++++++++++++-
>> 2 files changed, 37 insertions(+), 1 deletion(-)
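
For reference, a minimal sketch (not part of the patch, all names below are
illustrative) of the jump-label pattern the change relies on: a static_key
defaults to false, so the static_key_false() branch is patched out of the hot
path until someone calls static_key_slow_inc() on it.

#include <linux/jump_label.h>

/* illustrative key; zero-initialized, i.e. it starts out false */
static struct static_key example_inuse_key;

static inline void example_fast_path(void)
{
	if (static_key_false(&example_inuse_key)) {
		/* slow path: executed only after static_key_slow_inc() */
	}
	/* common fast path continues here */
}

/* arm when the first user appears, disarm when the last one goes away */
static void example_arm(void)
{
	static_key_slow_inc(&example_inuse_key);
}

static void example_disarm(void)
{
	static_key_slow_dec(&example_inuse_key);
}
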
>>
>> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
>> index ccd35d8..0483e1a 100644
>> --- a/include/linux/memcontrol.h
>> +++ b/include/linux/memcontrol.h
>> @@ -55,6 +55,13 @@ struct mem_cgroup_reclaim_cookie {
>> };
>>
>> #ifdef CONFIG_MEMCG
>> +
>> +extern struct static_key memcg_inuse_key;
>> +static inline bool mem_cgroup_in_use(void)
>> +{
>> + return static_key_false(&memcg_inuse_key);
>> +}
>> +
>> /*
>> * All "charge" functions with gfp_mask should use GFP_KERNEL or
>> * (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't
>> @@ -159,6 +166,8 @@ static inline void mem_cgroup_begin_update_page_stat(struct page *page,
>> {
>> if (mem_cgroup_disabled())
>> return;
>> + if (!mem_cgroup_in_use())
>> + return;
>> rcu_read_lock();
>> *locked = false;
>> if (atomic_read(&memcg_moving))
>> @@ -172,6 +181,8 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
>> {
>> if (mem_cgroup_disabled())
>> return;
>> + if (!mem_cgroup_in_use())
>> + return;
>> if (*locked)
>> __mem_cgroup_end_update_page_stat(page, flags);
>> rcu_read_unlock();
>> @@ -215,6 +226,10 @@ void mem_cgroup_print_bad_page(struct page *page);
>> #endif
>> #else /* CONFIG_MEMCG */
>> struct mem_cgroup;
>> +static inline bool mem_cgroup_in_use(void)
>> +{
>> + return false;
>> +}
>>
>> static inline int mem_cgroup_newpage_charge(struct page *page,
>> struct mm_struct *mm, gfp_t gfp_mask)
>> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
>> index 9126abc..a85f7c5 100644
>> --- a/mm/memcontrol.c
>> +++ b/mm/memcontrol.c
>> @@ -463,6 +463,13 @@ enum res_type {
>> #define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1
>> #define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
>>
>> +/* static_key marking whether any non-root memcg is in use.  We use this
>> + * jump label to patch the memcg page stat accounting code in or out.
>> + * The key is incremented when a non-root memcg is created and decremented
>> + * when that memcg is destroyed.
>> + */
>> +struct static_key memcg_inuse_key;
>> +
>> /*
>> * The memcg_create_mutex will be held whenever a new cgroup is created.
>> * As a consequence, any change that needs to protect against new child cgroups
>> @@ -630,10 +637,22 @@ static void disarm_kmem_keys(struct mem_cgroup *memcg)
>> }
>> #endif /* CONFIG_MEMCG_KMEM */
>>
>> +static void disarm_inuse_keys(struct mem_cgroup *memcg)
>> +{
>> + if (!mem_cgroup_is_root(memcg))
>> + static_key_slow_dec(&memcg_inuse_key);
>> +}
>> +
>> +static void arm_inuse_keys(void)
>> +{
>> + static_key_slow_inc(&memcg_inuse_key);
>> +}
>> +
>> static void disarm_static_keys(struct mem_cgroup *memcg)
>> {
>> disarm_sock_keys(memcg);
>> disarm_kmem_keys(memcg);
>> + disarm_inuse_keys(memcg);
>> }
>>
>> static void drain_all_stock_async(struct mem_cgroup *memcg);
>> @@ -2298,7 +2317,6 @@ void mem_cgroup_update_page_stat(struct page *page,
>> {
>> struct mem_cgroup *memcg;
>> struct page_cgroup *pc = lookup_page_cgroup(page);
>> - unsigned long uninitialized_var(flags);
>>
>> if (mem_cgroup_disabled())
>> return;
>> @@ -6293,6 +6311,9 @@ mem_cgroup_css_online(struct cgroup *cont)
>> }
>>
>> error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
>> + if (!error)
>> + arm_inuse_keys();
>> +
>> mutex_unlock(&memcg_create_mutex);
>> return error;
>> }
>> --
>> 1.7.9.5
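
To illustrate the effect on callers (not part of the patch; the function name
is made up, the signatures follow the hunks above): a page-stat updater
brackets its accounting with the begin/end pair, and while memcg_inuse_key is
unset both helpers now return before taking the RCU read lock or checking
memcg_moving.

/* hypothetical caller, for illustration only */
static void example_account_page(struct page *page)
{
	bool locked;
	unsigned long flags;

	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
	/* ... update the per-memcg page statistic for this page ... */
	mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
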
>>
> --
> Michal Hocko
> SUSE Labs
--
Thanks,
Sha