linux-mm.kvack.org archive mirror
* Re: [patch] mm, memcg: sync allocation and memcg charge gfp flags for thp fix fix
       [not found] <058201d06de5$9e15edc0$da41c940$@alibaba-inc.com>
@ 2015-04-03  8:14 ` Hillf Danton
  0 siblings, 0 replies; 4+ messages in thread
From: Hillf Danton @ 2015-04-03  8:14 UTC (permalink / raw)
  To: David Rientjes; +Cc: linux-mm, linux-kernel, Andrew Morton

> 
> "mm, memcg: sync allocation and memcg charge gfp flags for THP" in -mm
> introduces a formal parameter to pass the gfp mask for khugepaged's hugepage
> allocation.  This is just too ugly to live.
> 
> alloc_hugepage_gfpmask() cannot differ between NUMA and UMA configs by
> anything in GFP_RECLAIM_MASK, which is the only thing that matters for
> memcg reclaim, so just determine the gfp flags once in
> collapse_huge_page() and avoid the complexity.
> 
> Signed-off-by: David Rientjes <rientjes@google.com>
> ---
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>

>  -mm: intended to be folded into
>       mm-memcg-sync-allocation-and-memcg-charge-gfp-flags-for-thp.patch
> 
>  mm/huge_memory.c | 21 ++++++++-------------
>  1 file changed, 8 insertions(+), 13 deletions(-)
> 
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2373,16 +2373,12 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
>  }
> 
>  static struct page *
> -khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
> +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
>  		       struct vm_area_struct *vma, unsigned long address,
>  		       int node)
>  {
>  	VM_BUG_ON_PAGE(*hpage, *hpage);
> 
> -	/* Only allocate from the target node */
> -	*gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
> -	        __GFP_THISNODE;
> -
>  	/*
>  	 * Before allocating the hugepage, release the mmap_sem read lock.
>  	 * The allocation can take potentially a long time if it involves
> @@ -2391,7 +2387,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
>  	 */
>  	up_read(&mm->mmap_sem);
> 
> -	*hpage = alloc_pages_exact_node(node, *gfp, HPAGE_PMD_ORDER);
> +	*hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
>  	if (unlikely(!*hpage)) {
>  		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
>  		*hpage = ERR_PTR(-ENOMEM);
> @@ -2445,18 +2441,13 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
>  }
> 
>  static struct page *
> -khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
> +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
>  		       struct vm_area_struct *vma, unsigned long address,
>  		       int node)
>  {
>  	up_read(&mm->mmap_sem);
>  	VM_BUG_ON(!*hpage);
> 
> -	/*
> -	 * khugepaged_alloc_hugepage is doing the preallocation, use the same
> -	 * gfp flags here.
> -	 */
> -	*gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), 0);
>  	return  *hpage;
>  }
>  #endif
> @@ -2495,8 +2486,12 @@ static void collapse_huge_page(struct mm_struct *mm,
> 
>  	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
> 
> +	/* Only allocate from the target node */
> +	gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
> +		__GFP_THISNODE;
> +
>  	/* release the mmap_sem read lock. */
> -	new_page = khugepaged_alloc_page(hpage, &gfp, mm, vma, address, node);
> +	new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
>  	if (!new_page)
>  		return;
> 
> 

* Re: [patch] mm, memcg: sync allocation and memcg charge gfp flags for thp fix fix
  2015-04-03  1:41             ` [patch] mm, memcg: sync allocation and memcg charge gfp flags for thp fix fix David Rientjes
  2015-04-03  8:38               ` Vlastimil Babka
@ 2015-04-03 10:50               ` Michal Hocko
  1 sibling, 0 replies; 4+ messages in thread
From: Michal Hocko @ 2015-04-03 10:50 UTC (permalink / raw)
  To: David Rientjes
  Cc: Andrew Morton, Vlastimil Babka, Johannes Weiner, linux-mm, linux-kernel

On Thu 02-04-15 18:41:18, David Rientjes wrote:
> "mm, memcg: sync allocation and memcg charge gfp flags for THP" in -mm 
> introduces a formal parameter to pass the gfp mask for khugepaged's hugepage
> allocation.  This is just too ugly to live.
> 
> alloc_hugepage_gfpmask() cannot differ between NUMA and UMA configs by 
> anything in GFP_RECLAIM_MASK, which is the only thing that matters for 
> memcg reclaim, so just determine the gfp flags once in 
> collapse_huge_page() and avoid the complexity.
> 
> Signed-off-by: David Rientjes <rientjes@google.com>

Thanks for this cleanup!

Acked-by: Michal Hocko <mhocko@suse.cz>
> ---
>  -mm: intended to be folded into
>       mm-memcg-sync-allocation-and-memcg-charge-gfp-flags-for-thp.patch
> 
>  mm/huge_memory.c | 21 ++++++++-------------
>  1 file changed, 8 insertions(+), 13 deletions(-)
> 
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2373,16 +2373,12 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
>  }
>  
>  static struct page *
> -khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
> +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
>  		       struct vm_area_struct *vma, unsigned long address,
>  		       int node)
>  {
>  	VM_BUG_ON_PAGE(*hpage, *hpage);
>  
> -	/* Only allocate from the target node */
> -	*gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
> -	        __GFP_THISNODE;
> -
>  	/*
>  	 * Before allocating the hugepage, release the mmap_sem read lock.
>  	 * The allocation can take potentially a long time if it involves
> @@ -2391,7 +2387,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
>  	 */
>  	up_read(&mm->mmap_sem);
>  
> -	*hpage = alloc_pages_exact_node(node, *gfp, HPAGE_PMD_ORDER);
> +	*hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
>  	if (unlikely(!*hpage)) {
>  		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
>  		*hpage = ERR_PTR(-ENOMEM);
> @@ -2445,18 +2441,13 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
>  }
>  
>  static struct page *
> -khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
> +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
>  		       struct vm_area_struct *vma, unsigned long address,
>  		       int node)
>  {
>  	up_read(&mm->mmap_sem);
>  	VM_BUG_ON(!*hpage);
>  
> -	/*
> -	 * khugepaged_alloc_hugepage is doing the preallocation, use the same
> -	 * gfp flags here.
> -	 */
> -	*gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), 0);
>  	return  *hpage;
>  }
>  #endif
> @@ -2495,8 +2486,12 @@ static void collapse_huge_page(struct mm_struct *mm,
>  
>  	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
>  
> +	/* Only allocate from the target node */
> +	gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
> +		__GFP_THISNODE;
> +
>  	/* release the mmap_sem read lock. */
> -	new_page = khugepaged_alloc_page(hpage, &gfp, mm, vma, address, node);
> +	new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
>  	if (!new_page)
>  		return;
>  
> 

-- 
Michal Hocko
SUSE Labs


* Re: [patch] mm, memcg: sync allocation and memcg charge gfp flags for thp fix fix
  2015-04-03  1:41             ` [patch] mm, memcg: sync allocation and memcg charge gfp flags for thp fix fix David Rientjes
@ 2015-04-03  8:38               ` Vlastimil Babka
  2015-04-03 10:50               ` Michal Hocko
  1 sibling, 0 replies; 4+ messages in thread
From: Vlastimil Babka @ 2015-04-03  8:38 UTC (permalink / raw)
  To: David Rientjes, Andrew Morton
  Cc: Michal Hocko, Johannes Weiner, linux-mm, linux-kernel

On 04/03/2015 03:41 AM, David Rientjes wrote:
> "mm, memcg: sync allocation and memcg charge gfp flags for THP" in -mm
> introduces a formal parameter to pass the gfp mask for khugepaged's hugepage
> allocation.  This is just too ugly to live.
>
> alloc_hugepage_gfpmask() cannot differ between NUMA and UMA configs by
> anything in GFP_RECLAIM_MASK, which is the only thing that matters for
> memcg reclaim, so just determine the gfp flags once in
> collapse_huge_page() and avoid the complexity.
>
> Signed-off-by: David Rientjes <rientjes@google.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>


* [patch] mm, memcg: sync allocation and memcg charge gfp flags for thp fix fix
  2015-03-18 16:14           ` [PATCH -v2] " Michal Hocko
@ 2015-04-03  1:41             ` David Rientjes
  2015-04-03  8:38               ` Vlastimil Babka
  2015-04-03 10:50               ` Michal Hocko
  0 siblings, 2 replies; 4+ messages in thread
From: David Rientjes @ 2015-04-03  1:41 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Michal Hocko, Vlastimil Babka, Johannes Weiner, linux-mm, linux-kernel

"mm, memcg: sync allocation and memcg charge gfp flags for THP" in -mm 
introduces a formal parameter to pass the gfp mask for khugepaged's hugepage
allocation.  This is just too ugly to live.

alloc_hugepage_gfpmask() cannot differ between NUMA and UMA configs by 
anything in GFP_RECLAIM_MASK, which is the only thing that matters for 
memcg reclaim, so just determine the gfp flags once in 
collapse_huge_page() and avoid the complexity.
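
For reference, alloc_hugepage_gfpmask() and GFP_RECLAIM_MASK at this
point read roughly as below -- a sketch of the era's mm/huge_memory.c
and mm/internal.h, not part of this patch.  The NUMA and UMA callers
differ only in the placement bits passed as extra_gfp (__GFP_OTHER_NODE,
__GFP_THISNODE), and none of those are in GFP_RECLAIM_MASK, so the memcg
charge sees the same reclaim-relevant flags either way:

	/* mm/huge_memory.c (sketch): defrag only toggles __GFP_WAIT */
	static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
	{
		return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
	}

	/* mm/internal.h (sketch): the only bits that matter for reclaim */
	#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
				__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
				__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)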

Signed-off-by: David Rientjes <rientjes@google.com>
---
 -mm: intended to be folded into
      mm-memcg-sync-allocation-and-memcg-charge-gfp-flags-for-thp.patch

 mm/huge_memory.c | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2373,16 +2373,12 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 }
 
 static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
 		       struct vm_area_struct *vma, unsigned long address,
 		       int node)
 {
 	VM_BUG_ON_PAGE(*hpage, *hpage);
 
-	/* Only allocate from the target node */
-	*gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
-	        __GFP_THISNODE;
-
 	/*
 	 * Before allocating the hugepage, release the mmap_sem read lock.
 	 * The allocation can take potentially a long time if it involves
@@ -2391,7 +2387,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
 	 */
 	up_read(&mm->mmap_sem);
 
-	*hpage = alloc_pages_exact_node(node, *gfp, HPAGE_PMD_ORDER);
+	*hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
 	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
@@ -2445,18 +2441,13 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 }
 
 static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
 		       struct vm_area_struct *vma, unsigned long address,
 		       int node)
 {
 	up_read(&mm->mmap_sem);
 	VM_BUG_ON(!*hpage);
 
-	/*
-	 * khugepaged_alloc_hugepage is doing the preallocation, use the same
-	 * gfp flags here.
-	 */
-	*gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), 0);
 	return  *hpage;
 }
 #endif
@@ -2495,8 +2486,12 @@ static void collapse_huge_page(struct mm_struct *mm,
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
+	/* Only allocate from the target node */
+	gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
+		__GFP_THISNODE;
+
 	/* release the mmap_sem read lock. */
-	new_page = khugepaged_alloc_page(hpage, &gfp, mm, vma, address, node);
+	new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
 	if (!new_page)
 		return;
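
With this folded in, the allocation and the memcg charge in
collapse_huge_page() use one mask, computed once.  The combined flow
reads roughly as follows -- a sketch assuming the parent patch's
mem_cgroup_try_charge() call, not a literal excerpt of the result:

	/* Only allocate from the target node */
	gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
		__GFP_THISNODE;

	/* release the mmap_sem read lock. */
	new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
	if (!new_page)
		return;

	/* charge with the same reclaim-relevant flags used for allocation */
	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg)))
		return;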
 



Thread overview: 4+ messages
     [not found] <058201d06de5$9e15edc0$da41c940$@alibaba-inc.com>
2015-04-03  8:14 ` [patch] mm, memcg: sync allocation and memcg charge gfp flags for thp fix fix Hillf Danton
2015-03-16 14:08 [PATCH] mm, memcg: sync allocation and memcg charge gfp flags for THP Michal Hocko
2015-03-18 14:34 ` Vlastimil Babka
2015-03-18 15:02   ` Michal Hocko
2015-03-18 15:40     ` Vlastimil Babka
2015-03-18 15:59       ` Michal Hocko
2015-03-18 16:09         ` Vlastimil Babka
2015-03-18 16:14           ` [PATCH -v2] " Michal Hocko
2015-04-03  1:41             ` [patch] mm, memcg: sync allocation and memcg charge gfp flags for thp fix fix David Rientjes
2015-04-03  8:38               ` Vlastimil Babka
2015-04-03 10:50               ` Michal Hocko
