linux-mm.kvack.org archive mirror
* [RFC][BUGFIX] memcg: fix memsw uncharged twice in do_swap_page
@ 2011-12-13  7:55 Bob Liu
  2011-12-13  8:02 ` KAMEZAWA Hiroyuki
  2011-12-13  8:10 ` Daisuke Nishimura
  0 siblings, 2 replies; 4+ messages in thread
From: Bob Liu @ 2011-12-13  7:55 UTC (permalink / raw)
  To: linux-mm; +Cc: kamezawa.hiroyu, mhocko, hannes, bsingharora, Bob Liu

As the document memcg_test.txt says:
In do_swap_page(), the following events occur when the pte is unchanged.
	(1) the page (SwapCache) is looked up.
	(2) lock_page()
	(3) try_charge_swapin()
	(4) reuse_swap_page() (may call delete_swap_cache())
	(5) commit_charge_swapin()
	(6) swap_free().
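
For orientation, here is a simplified sketch of that path in do_swap_page()
(paraphrased from memory, not the exact kernel source; locking and error
handling are omitted):

	page = lookup_swap_cache(entry);                    /* (1) SwapCache looked up */
	/* ... swapin_readahead() when it is not cached ... */
	lock_page(page);                                    /* (2) */
	if (mem_cgroup_try_charge_swapin(mm, page,
					 GFP_KERNEL, &ptr)) /* (3) */
		goto out_page;
	/* ... pte re-checked under the page table lock ... */
	/* (4) reuse_swap_page() may call delete_from_swap_cache() */
	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page))
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
	mem_cgroup_commit_charge_swapin(page, ptr);         /* (5) */
	swap_free(entry);                                   /* (6) */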

And consider the following situation:
(C) The page has been charged before (2) and reuse_swap_page() doesn't
	call delete_from_swap_cache().

In this case, __mem_cgroup_commit_charge_swapin() may uncharge memsw twice.
See the two uncharge sites below:

__mem_cgroup_commit_charge_swapin {
	=> __mem_cgroup_commit_charge_lrucare
		=> __mem_cgroup_commit_charge()    <== PageCgroupUsed
			=> __mem_cgroup_cancel_charge()
						<== 1.uncharge memsw here

	if (do_swap_account && PageSwapCache(page)) {
		if (swap_memcg) {
			if (!mem_cgroup_is_root(swap_memcg))
				res_counter_uncharge(&swap_memcg->memsw,
						PAGE_SIZE);
						<== 2.uncharge memsw again here

			mem_cgroup_swap_statistics(swap_memcg, false);
			mem_cgroup_put(swap_memcg);
		}
	}
}

This patch adds a return value to __mem_cgroup_commit_charge(); if the charge
was cancelled, memsw is not uncharged again.

However, I have not yet found a definite test case that confirms this situation.
Maybe I missed something; comments are welcome.

Signed-off-by: Bob Liu <lliubbo@gmail.com>
---
 mm/memcontrol.c |   56 +++++++++++++++++++++++++++++++-----------------------
 1 files changed, 32 insertions(+), 24 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bc396e7..6ead0cd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2416,7 +2416,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 	return memcg;
 }
 
-static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
+/*
+ * Returns -1 if the charge was cancelled, 0 otherwise.
+ */
+static int __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
 				       struct page_cgroup *pc,
@@ -2426,7 +2429,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
 		__mem_cgroup_cancel_charge(memcg, nr_pages);
-		return;
+		return -1;
 	}
 	/*
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
@@ -2463,6 +2466,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * if they exceeds softlimit.
 	 */
 	memcg_check_events(memcg, page);
+	return 0;
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -2690,20 +2694,21 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 					enum charge_type ctype);
 
-static void
+static int
 __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
 	struct page_cgroup *pc = lookup_page_cgroup(page);
+	int ret;
 	/*
 	 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
 	 * is already on LRU. It means the page may on some other page_cgroup's
 	 * LRU. Take care of it.
 	 */
 	mem_cgroup_lru_del_before_commit(page);
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
+	ret = __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
 	mem_cgroup_lru_add_after_commit(page);
-	return;
+	return ret;
 }
 
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
@@ -2792,13 +2797,14 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
+	int ret;
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
 
-	__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+	ret = __mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -2807,25 +2813,27 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 	 * may call delete_from_swap_cache() before reach here.
 	 */
 	if (do_swap_account && PageSwapCache(page)) {
-		swp_entry_t ent = {.val = page_private(page)};
-		struct mem_cgroup *swap_memcg;
-		unsigned short id;
+		if (!ret) {
+			swp_entry_t ent = {.val = page_private(page)};
+			struct mem_cgroup *swap_memcg;
+			unsigned short id;
 
-		id = swap_cgroup_record(ent, 0);
-		rcu_read_lock();
-		swap_memcg = mem_cgroup_lookup(id);
-		if (swap_memcg) {
-			/*
-			 * This recorded memcg can be obsolete one. So, avoid
-			 * calling css_tryget
-			 */
-			if (!mem_cgroup_is_root(swap_memcg))
-				res_counter_uncharge(&swap_memcg->memsw,
-						     PAGE_SIZE);
-			mem_cgroup_swap_statistics(swap_memcg, false);
-			mem_cgroup_put(swap_memcg);
+			id = swap_cgroup_record(ent, 0);
+			rcu_read_lock();
+			swap_memcg = mem_cgroup_lookup(id);
+			if (swap_memcg) {
+				/*
+				 * This recorded memcg can be obsolete one. So, avoid
+				 * calling css_tryget
+				 */
+				if (!mem_cgroup_is_root(swap_memcg))
+					res_counter_uncharge(&swap_memcg->memsw,
+							PAGE_SIZE);
+				mem_cgroup_swap_statistics(swap_memcg, false);
+				mem_cgroup_put(swap_memcg);
+			}
+			rcu_read_unlock();
 		}
-		rcu_read_unlock();
 	}
 	/*
 	 * At swapin, we may charge account against cgroup which has no tasks.
-- 
1.7.0.4




* Re: [RFC][BUGFIX] memcg: fix memsw uncharged twice in do_swap_page
  2011-12-13  7:55 [RFC][BUGFIX] memcg: fix memsw uncharged twice in do_swap_page Bob Liu
@ 2011-12-13  8:02 ` KAMEZAWA Hiroyuki
  2011-12-13  8:10 ` Daisuke Nishimura
  1 sibling, 0 replies; 4+ messages in thread
From: KAMEZAWA Hiroyuki @ 2011-12-13  8:02 UTC (permalink / raw)
  To: Bob Liu; +Cc: linux-mm, mhocko, hannes, bsingharora

On Tue, 13 Dec 2011 15:55:25 +0800
Bob Liu <lliubbo@gmail.com> wrote:

> As the document memcg_test.txt said:
> In do_swap_page(), following events occur when pte is unchanged.
> 	(1) the page (SwapCache) is looked up.
> 	(2) lock_page()
> 	(3) try_charge_swapin()
> 	(4) reuse_swap_page() (may call delete_swap_cache())
> 	(5) commit_charge_swapin()
> 	(6) swap_free().
> 
> And below situation:
> (C) The page has been charged before (2) and reuse_swap_page() doesn't
> 	call delete_from_swap_cache().
> 
> In this case, __mem_cgroup_commit_charge_swapin() may uncharge memsw twice.
> See below two uncharge place:
> 
> __mem_cgroup_commit_charge_swapin {
> 	=> __mem_cgroup_commit_charge_lrucare
> 		=> __mem_cgroup_commit_charge()    <== PageCgroupUsed
> 			=> __mem_cgroup_cancel_charge()
> 						<== 1.uncharge memsw here
> 
> 	if (do_swap_account && PageSwapCache(page)) {
> 		if (swap_memcg) {
> 			if (!mem_cgroup_is_root(swap_memcg))
> 				res_counter_uncharge(&swap_memcg->memsw,
> 						PAGE_SIZE);
> 						<== 2.uncharged memsw again here
> 
> 			mem_cgroup_swap_statistics(swap_memcg, false);
> 			mem_cgroup_put(swap_memcg);
> 		}
> 	}
> }


How does this happen?

1. All swap-cache handling is serialized by lock_page().
2. If the page_cgroup is marked as PCG_USED, the record in swap_cgroup must already
   have been cleared, so swap_memcg will never be found.

There is no real bug. If you want to add a VM_BUG_ON() for the case where

  commit_charge() decreases the memsw count but a swap_cgroup record is still found,

then OK, please write a patch.
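
Something along these lines, perhaps (an untested sketch only; commit_cancelled
is a hypothetical flag that __mem_cgroup_commit_charge_swapin() would have to
get back from __mem_cgroup_commit_charge()):

	if (do_swap_account && PageSwapCache(page)) {
		swp_entry_t ent = {.val = page_private(page)};
		unsigned short id = swap_cgroup_record(ent, 0);

		/*
		 * If the commit was cancelled because the page was already
		 * PageCgroupUsed, the swap_cgroup record must already have
		 * been cleared; finding one here would mean a real double
		 * uncharge is possible.
		 */
		VM_BUG_ON(commit_cancelled && id);
		/* ... existing mem_cgroup_lookup()/uncharge path ... */
	}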

Nack to this patch. 

Thanks,
-Kame




* Re: [RFC][BUGFIX] memcg: fix memsw uncharged twice in do_swap_page
  2011-12-13  7:55 [RFC][BUGFIX] memcg: fix memsw uncharged twice in do_swap_page Bob Liu
  2011-12-13  8:02 ` KAMEZAWA Hiroyuki
@ 2011-12-13  8:10 ` Daisuke Nishimura
  2011-12-14  2:08   ` Bob Liu
  1 sibling, 1 reply; 4+ messages in thread
From: Daisuke Nishimura @ 2011-12-13  8:10 UTC (permalink / raw)
  To: Bob Liu
  Cc: linux-mm, kamezawa.hiroyu, mhocko, hannes, bsingharora,
	Daisuke Nishimura

Hi,

On Tue, 13 Dec 2011 15:55:25 +0800
Bob Liu <lliubbo@gmail.com> wrote:

> As the document memcg_test.txt said:
> In do_swap_page(), following events occur when pte is unchanged.
> 	(1) the page (SwapCache) is looked up.
> 	(2) lock_page()
> 	(3) try_charge_swapin()
> 	(4) reuse_swap_page() (may call delete_swap_cache())
> 	(5) commit_charge_swapin()
> 	(6) swap_free().
> 
> And below situation:
> (C) The page has been charged before (2) and reuse_swap_page() doesn't
> 	call delete_from_swap_cache().
> 
> In this case, __mem_cgroup_commit_charge_swapin() may uncharge memsw twice.
> See below two uncharge place:
> 
> __mem_cgroup_commit_charge_swapin {
> 	=> __mem_cgroup_commit_charge_lrucare
> 		=> __mem_cgroup_commit_charge()    <== PageCgroupUsed
> 			=> __mem_cgroup_cancel_charge()
> 						<== 1.uncharge memsw here
> 
> 	if (do_swap_account && PageSwapCache(page)) {
> 		if (swap_memcg) {
IIRC, if the page (swap cache) has already been charged as memory, swap_cgroup_record(ent, 0)
returns 0, so swap_memcg is NULL.
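
Roughly, as a simplified reading of the existing code (not new logic):

	id = swap_cgroup_record(ent, 0);     /* returns the old id -- already 0 in case (C) */
	swap_memcg = mem_cgroup_lookup(id);  /* mem_cgroup_lookup(0) is NULL */
	if (swap_memcg) {                    /* not taken, so no second uncharge */
		...
	}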

Thanks,
Daisuke Nishimura.

> 			if (!mem_cgroup_is_root(swap_memcg))
> 				res_counter_uncharge(&swap_memcg->memsw,
> 						PAGE_SIZE);
> 						<== 2.uncharged memsw again here
> 
> 			mem_cgroup_swap_statistics(swap_memcg, false);
> 			mem_cgroup_put(swap_memcg);
> 		}
> 	}
> }
> 
> This patch added a return val for __mem_cgroup_commit_charge(), if canceled then
> don't uncharge memsw again.
> 
> But i didn't find a definite testcase can confirm this situaction current.
> Maybe i missed something. Welcome point.
> 
> Signed-off-by: Bob Liu <lliubbo@gmail.com>
> ---
>  mm/memcontrol.c |   56 +++++++++++++++++++++++++++++++-----------------------
>  1 files changed, 32 insertions(+), 24 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index bc396e7..6ead0cd 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2416,7 +2416,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
>  	return memcg;
>  }
>  
> -static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
> +/*
> + * return -1 if cancel charge else return 0
> + */
> +static int __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
>  				       struct page *page,
>  				       unsigned int nr_pages,
>  				       struct page_cgroup *pc,
> @@ -2426,7 +2429,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
>  	if (unlikely(PageCgroupUsed(pc))) {
>  		unlock_page_cgroup(pc);
>  		__mem_cgroup_cancel_charge(memcg, nr_pages);
> -		return;
> +		return -1;
>  	}
>  	/*
>  	 * we don't need page_cgroup_lock about tail pages, becase they are not
> @@ -2463,6 +2466,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
>  	 * if they exceeds softlimit.
>  	 */
>  	memcg_check_events(memcg, page);
> +	return 0;
>  }
>  
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> @@ -2690,20 +2694,21 @@ static void
>  __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
>  					enum charge_type ctype);
>  
> -static void
> +static int
>  __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
>  					enum charge_type ctype)
>  {
>  	struct page_cgroup *pc = lookup_page_cgroup(page);
> +	int ret;
>  	/*
>  	 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
>  	 * is already on LRU. It means the page may on some other page_cgroup's
>  	 * LRU. Take care of it.
>  	 */
>  	mem_cgroup_lru_del_before_commit(page);
> -	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
> +	ret = __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
>  	mem_cgroup_lru_add_after_commit(page);
> -	return;
> +	return ret;
>  }
>  
>  int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
> @@ -2792,13 +2797,14 @@ static void
>  __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
>  					enum charge_type ctype)
>  {
> +	int ret;
>  	if (mem_cgroup_disabled())
>  		return;
>  	if (!memcg)
>  		return;
>  	cgroup_exclude_rmdir(&memcg->css);
>  
> -	__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
> +	ret = __mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
>  	/*
>  	 * Now swap is on-memory. This means this page may be
>  	 * counted both as mem and swap....double count.
> @@ -2807,25 +2813,27 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
>  	 * may call delete_from_swap_cache() before reach here.
>  	 */
>  	if (do_swap_account && PageSwapCache(page)) {
> -		swp_entry_t ent = {.val = page_private(page)};
> -		struct mem_cgroup *swap_memcg;
> -		unsigned short id;
> +		if(!ret) {
> +			swp_entry_t ent = {.val = page_private(page)};
> +			struct mem_cgroup *swap_memcg;
> +			unsigned short id;
>  
> -		id = swap_cgroup_record(ent, 0);
> -		rcu_read_lock();
> -		swap_memcg = mem_cgroup_lookup(id);
> -		if (swap_memcg) {
> -			/*
> -			 * This recorded memcg can be obsolete one. So, avoid
> -			 * calling css_tryget
> -			 */
> -			if (!mem_cgroup_is_root(swap_memcg))
> -				res_counter_uncharge(&swap_memcg->memsw,
> -						     PAGE_SIZE);
> -			mem_cgroup_swap_statistics(swap_memcg, false);
> -			mem_cgroup_put(swap_memcg);
> +			id = swap_cgroup_record(ent, 0);
> +			rcu_read_lock();
> +			swap_memcg = mem_cgroup_lookup(id);
> +			if (swap_memcg) {
> +				/*
> +				 * This recorded memcg can be obsolete one. So, avoid
> +				 * calling css_tryget
> +				 */
> +				if (!mem_cgroup_is_root(swap_memcg))
> +					res_counter_uncharge(&swap_memcg->memsw,
> +							PAGE_SIZE);
> +				mem_cgroup_swap_statistics(swap_memcg, false);
> +				mem_cgroup_put(swap_memcg);
> +			}
> +			rcu_read_unlock();
>  		}
> -		rcu_read_unlock();
>  	}
>  	/*
>  	 * At swapin, we may charge account against cgroup which has no tasks.
> -- 
> 1.7.0.4
> 
> 



* Re: [RFC][BUGFIX] memcg: fix memsw uncharged twice in do_swap_page
  2011-12-13  8:10 ` Daisuke Nishimura
@ 2011-12-14  2:08   ` Bob Liu
  0 siblings, 0 replies; 4+ messages in thread
From: Bob Liu @ 2011-12-14  2:08 UTC (permalink / raw)
  To: Daisuke Nishimura; +Cc: linux-mm, kamezawa.hiroyu, mhocko, hannes, bsingharora

On Tue, Dec 13, 2011 at 4:10 PM, Daisuke Nishimura
<nishimura@mxp.nes.nec.co.jp> wrote:
> Hi,
>
> On Tue, 13 Dec 2011 15:55:25 +0800
> Bob Liu <lliubbo@gmail.com> wrote:
>
>> As the document memcg_test.txt said:
>> In do_swap_page(), following events occur when pte is unchanged.
>>       (1) the page (SwapCache) is looked up.
>>       (2) lock_page()
>>       (3) try_charge_swapin()
>>       (4) reuse_swap_page() (may call delete_swap_cache())
>>       (5) commit_charge_swapin()
>>       (6) swap_free().
>>
>> And below situation:
>> (C) The page has been charged before (2) and reuse_swap_page() doesn't
>>       call delete_from_swap_cache().
>>
>> In this case, __mem_cgroup_commit_charge_swapin() may uncharge memsw twice.
>> See below two uncharge place:
>>
>> __mem_cgroup_commit_charge_swapin {
>>       => __mem_cgroup_commit_charge_lrucare
>>               => __mem_cgroup_commit_charge()    <== PageCgroupUsed
>>                       => __mem_cgroup_cancel_charge()
>>                                               <== 1.uncharge memsw here
>>
>>       if (do_swap_account && PageSwapCache(page)) {
>>               if (swap_memcg) {
> IIRC, if the page(swapcache) has been already charged as memory, swap_cgroup_record(ent, 0)
> returns 0, so swap_memcg is NULL.
>

Got it, sorry for the noise.
Thank you all.

> Thanks,
> Daisuke Nishimura.
>
>>                       if (!mem_cgroup_is_root(swap_memcg))
>>                               res_counter_uncharge(&swap_memcg->memsw,
>>                                               PAGE_SIZE);
>>                                               <== 2.uncharged memsw again here
>>
>>                       mem_cgroup_swap_statistics(swap_memcg, false);
>>                       mem_cgroup_put(swap_memcg);
>>               }
>>       }
>> }
>>
>> This patch added a return val for __mem_cgroup_commit_charge(), if canceled then
>> don't uncharge memsw again.
>>
>> But i didn't find a definite testcase can confirm this situaction current.
>> Maybe i missed something. Welcome point.
>>
>> Signed-off-by: Bob Liu <lliubbo@gmail.com>
>> ---
>>  mm/memcontrol.c |   56 +++++++++++++++++++++++++++++++-----------------------
>>  1 files changed, 32 insertions(+), 24 deletions(-)
>>
>> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
>> index bc396e7..6ead0cd 100644
>> --- a/mm/memcontrol.c
>> +++ b/mm/memcontrol.c
>> @@ -2416,7 +2416,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
>>       return memcg;
>>  }
>>
>> -static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
>> +/*
>> + * return -1 if cancel charge else return 0
>> + */
>> +static int __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
>>                                      struct page *page,
>>                                      unsigned int nr_pages,
>>                                      struct page_cgroup *pc,
>> @@ -2426,7 +2429,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
>>       if (unlikely(PageCgroupUsed(pc))) {
>>               unlock_page_cgroup(pc);
>>               __mem_cgroup_cancel_charge(memcg, nr_pages);
>> -             return;
>> +             return -1;
>>       }
>>       /*
>>        * we don't need page_cgroup_lock about tail pages, becase they are not
>> @@ -2463,6 +2466,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
>>        * if they exceeds softlimit.
>>        */
>>       memcg_check_events(memcg, page);
>> +     return 0;
>>  }
>>
>>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> @@ -2690,20 +2694,21 @@ static void
>>  __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
>>                                       enum charge_type ctype);
>>
>> -static void
>> +static int
>>  __mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
>>                                       enum charge_type ctype)
>>  {
>>       struct page_cgroup *pc = lookup_page_cgroup(page);
>> +     int ret;
>>       /*
>>        * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
>>        * is already on LRU. It means the page may on some other page_cgroup's
>>        * LRU. Take care of it.
>>        */
>>       mem_cgroup_lru_del_before_commit(page);
>> -     __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
>> +     ret = __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
>>       mem_cgroup_lru_add_after_commit(page);
>> -     return;
>> +     return ret;
>>  }
>>
>>  int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
>> @@ -2792,13 +2797,14 @@ static void
>>  __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
>>                                       enum charge_type ctype)
>>  {
>> +     int ret;
>>       if (mem_cgroup_disabled())
>>               return;
>>       if (!memcg)
>>               return;
>>       cgroup_exclude_rmdir(&memcg->css);
>>
>> -     __mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
>> +     ret = __mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
>>       /*
>>        * Now swap is on-memory. This means this page may be
>>        * counted both as mem and swap....double count.
>> @@ -2807,25 +2813,27 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
>>        * may call delete_from_swap_cache() before reach here.
>>        */
>>       if (do_swap_account && PageSwapCache(page)) {
>> -             swp_entry_t ent = {.val = page_private(page)};
>> -             struct mem_cgroup *swap_memcg;
>> -             unsigned short id;
>> +             if(!ret) {
>> +                     swp_entry_t ent = {.val = page_private(page)};
>> +                     struct mem_cgroup *swap_memcg;
>> +                     unsigned short id;
>>
>> -             id = swap_cgroup_record(ent, 0);
>> -             rcu_read_lock();
>> -             swap_memcg = mem_cgroup_lookup(id);
>> -             if (swap_memcg) {
>> -                     /*
>> -                      * This recorded memcg can be obsolete one. So, avoid
>> -                      * calling css_tryget
>> -                      */
>> -                     if (!mem_cgroup_is_root(swap_memcg))
>> -                             res_counter_uncharge(&swap_memcg->memsw,
>> -                                                  PAGE_SIZE);
>> -                     mem_cgroup_swap_statistics(swap_memcg, false);
>> -                     mem_cgroup_put(swap_memcg);
>> +                     id = swap_cgroup_record(ent, 0);
>> +                     rcu_read_lock();
>> +                     swap_memcg = mem_cgroup_lookup(id);
>> +                     if (swap_memcg) {
>> +                             /*
>> +                              * This recorded memcg can be obsolete one. So, avoid
>> +                              * calling css_tryget
>> +                              */
>> +                             if (!mem_cgroup_is_root(swap_memcg))
>> +                                     res_counter_uncharge(&swap_memcg->memsw,
>> +                                                     PAGE_SIZE);
>> +                             mem_cgroup_swap_statistics(swap_memcg, false);
>> +                             mem_cgroup_put(swap_memcg);
>> +                     }
>> +                     rcu_read_unlock();
>>               }
>> -             rcu_read_unlock();
>>       }
>>       /*
>>        * At swapin, we may charge account against cgroup which has no tasks.
>> --
>> 1.7.0.4
>>
>>

-- 
Regards,
--Bob


Thread overview: 4+ messages
2011-12-13  7:55 [RFC][BUGFIX] memcg: fix memsw uncharged twice in do_swap_page Bob Liu
2011-12-13  8:02 ` KAMEZAWA Hiroyuki
2011-12-13  8:10 ` Daisuke Nishimura
2011-12-14  2:08   ` Bob Liu
