From: Baolin Wang <baolin.wang@linux.alibaba.com>
To: Lance Yang <ioworker0@gmail.com>, akpm@linux-foundation.org
Cc: willy@infradead.org, sj@kernel.org, maskray@google.com,
	ziy@nvidia.com, ryan.roberts@arm.com, david@redhat.com,
	21cnbao@gmail.com, mhocko@suse.com, fengwei.yin@intel.com,
	zokeefe@google.com, shy828301@gmail.com, xiehuan09@gmail.com,
	libang.li@antgroup.com, wangkefeng.wang@huawei.com,
	songmuchun@bytedance.com, peterx@redhat.com, minchan@kernel.org,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v4 2/3] mm/rmap: integrate PMD-mapped folio splitting into pagewalk loop
Date: Tue, 7 May 2024 11:40:15 +0800
Message-ID: <cc9fd23f-7d87-48a7-a737-acbea8e95fb7@linux.alibaba.com>
In-Reply-To: <20240501042700.83974-3-ioworker0@gmail.com>



On 2024/5/1 12:26, Lance Yang wrote:
> In preparation for supporting try_to_unmap_one() to unmap PMD-mapped
> folios, start the pagewalk first, then call split_huge_pmd_address()
> to split the folio.
> 
> Suggested-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Lance Yang <ioworker0@gmail.com>
> ---
>   include/linux/huge_mm.h | 20 ++++++++++++++++++++
>   mm/huge_memory.c        | 42 +++++++++++++++++++++--------------------
>   mm/rmap.c               | 24 +++++++++++++++++------
>   3 files changed, 60 insertions(+), 26 deletions(-)
> 
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index c8d3ec116e29..38c4b5537715 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -409,6 +409,20 @@ static inline bool thp_migration_supported(void)
>   	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
>   }
>   
> +void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
> +			   pmd_t *pmd, bool freeze, struct folio *folio);
> +
> +static inline void align_huge_pmd_range(struct vm_area_struct *vma,
> +					unsigned long *start,
> +					unsigned long *end)
> +{
> +	*start = ALIGN(*start, HPAGE_PMD_SIZE);
> +	*end = ALIGN_DOWN(*end, HPAGE_PMD_SIZE);
> +
> +	VM_WARN_ON_ONCE(vma->vm_start > *start);
> +	VM_WARN_ON_ONCE(vma->vm_end < *end);
> +}
> +
>   #else /* CONFIG_TRANSPARENT_HUGEPAGE */
>   
>   static inline bool folio_test_pmd_mappable(struct folio *folio)
> @@ -471,6 +485,12 @@ static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
>   		unsigned long address, bool freeze, struct folio *folio) {}
>   static inline void split_huge_pmd_address(struct vm_area_struct *vma,
>   		unsigned long address, bool freeze, struct folio *folio) {}
> +static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
> +					 unsigned long address, pmd_t *pmd,
> +					 bool freeze, struct folio *folio) {}
> +static inline void align_huge_pmd_range(struct vm_area_struct *vma,
> +					unsigned long *start,
> +					unsigned long *end) {}
>   
>   #define split_huge_pud(__vma, __pmd, __address)	\
>   	do { } while (0)
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 8261b5669397..145505a1dd05 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2584,6 +2584,27 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
>   	pmd_populate(mm, pmd, pgtable);
>   }
>   
> +void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
> +			   pmd_t *pmd, bool freeze, struct folio *folio)
> +{
> +	VM_WARN_ON_ONCE(folio && !folio_test_pmd_mappable(folio));
> +	VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
> +	VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
> +	VM_BUG_ON(freeze && !folio);
> +
> +	/*
> +	 * When the caller requests to set up a migration entry, we
> +	 * require a folio to check the PMD against. Otherwise, there
> +	 * is a risk of replacing the wrong folio.
> +	 */
> +	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
> +	    is_pmd_migration_entry(*pmd)) {
> +		if (folio && folio != pmd_folio(*pmd))
> +			return;
> +		__split_huge_pmd_locked(vma, pmd, address, freeze);
> +	}
> +}
> +
>   void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
>   		unsigned long address, bool freeze, struct folio *folio)
>   {
> @@ -2595,26 +2616,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
>   				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
>   	mmu_notifier_invalidate_range_start(&range);
>   	ptl = pmd_lock(vma->vm_mm, pmd);
> -
> -	/*
> -	 * If caller asks to setup a migration entry, we need a folio to check
> -	 * pmd against. Otherwise we can end up replacing wrong folio.
> -	 */
> -	VM_BUG_ON(freeze && !folio);
> -	VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
> -
> -	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
> -	    is_pmd_migration_entry(*pmd)) {
> -		/*
> -		 * It's safe to call pmd_page when folio is set because it's
> -		 * guaranteed that pmd is present.
> -		 */
> -		if (folio && folio != pmd_folio(*pmd))
> -			goto out;
> -		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
> -	}
> -
> -out:
> +	split_huge_pmd_locked(vma, range.start, pmd, freeze, folio);
>   	spin_unlock(ptl);
>   	mmu_notifier_invalidate_range_end(&range);
>   }
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 7e2575d669a9..432601154583 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1636,9 +1636,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>   	if (flags & TTU_SYNC)
>   		pvmw.flags = PVMW_SYNC;
>   
> -	if (flags & TTU_SPLIT_HUGE_PMD)
> -		split_huge_pmd_address(vma, address, false, folio);
> -
>   	/*
>   	 * For THP, we have to assume the worse case ie pmd for invalidation.
>   	 * For hugetlb, it could be much worse if we need to do pud
> @@ -1650,6 +1647,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>   	range.end = vma_address_end(&pvmw);
>   	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
>   				address, range.end);
> +	if (flags & TTU_SPLIT_HUGE_PMD)
> +		align_huge_pmd_range(vma, &range.start, &range.end);

I am not sure why this alignment is needed:
(1) For a partially mapped THP, 'range.start' and 'range.end' can extend 
beyond the VMA limits. For a PMD-mapped THP, I think the address returned 
from vma_address(&folio->page, vma) is already THP-size aligned.
(2) 'range.end' is not used.
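
That is, when page_vma_mapped_walk() stops at a PMD-mapped THP, 
pvmw.address should already be PMD-aligned, so the split in the loop 
below could simply take that address and the helper goes away. 
Something like this (untested, just to illustrate the idea):

		if (!pvmw.pte && (flags & TTU_SPLIT_HUGE_PMD)) {
			/* pvmw.address is PMD-aligned for a PMD-mapped THP */
			split_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
					      false, folio);
			...
		}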

>   	if (folio_test_hugetlb(folio)) {
>   		/*
>   		 * If sharing is possible, start and end will be adjusted
> @@ -1664,9 +1663,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>   	mmu_notifier_invalidate_range_start(&range);
>   
>   	while (page_vma_mapped_walk(&pvmw)) {
> -		/* Unexpected PMD-mapped THP? */
> -		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
> -
>   		/*
>   		 * If the folio is in an mlock()d vma, we must not swap it out.
>   		 */
> @@ -1678,6 +1674,22 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>   			goto walk_done_err;
>   		}
>   
> +		if (!pvmw.pte && (flags & TTU_SPLIT_HUGE_PMD)) {
> +			/*
> +			 * We temporarily have to drop the PTL and start once
> +			 * again from that now-PTE-mapped page table.
> +			 */
> +			split_huge_pmd_locked(vma, range.start, pvmw.pmd, false,
> +					      folio);
> +			pvmw.pmd = NULL;
> +			spin_unlock(pvmw.ptl);

IMO, you should also set 'pvmw.ptl = NULL;' after unlocking, as 
page_vma_mapped_walk() does, in case some corner case is hit.
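
i.e., something like this (untested sketch, same logic as this hunk 
with the cached ptl cleared as well):

		if (!pvmw.pte && (flags & TTU_SPLIT_HUGE_PMD)) {
			/*
			 * Temporarily drop the PTL and restart from the
			 * now-PTE-mapped page table.
			 */
			split_huge_pmd_locked(vma, range.start, pvmw.pmd,
					      false, folio);
			pvmw.pmd = NULL;
			spin_unlock(pvmw.ptl);
			pvmw.ptl = NULL;	/* as page_vma_mapped_walk() does */
			flags &= ~TTU_SPLIT_HUGE_PMD;
			continue;
		}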

> +			flags &= ~TTU_SPLIT_HUGE_PMD;
> +			continue;
> +		}
> +
> +		/* Unexpected PMD-mapped THP? */
> +		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
> +
>   		pfn = pte_pfn(ptep_get(pvmw.pte));
>   		subpage = folio_page(folio, pfn - folio_pfn(folio));
>   		address = pvmw.address;



Thread overview: 37+ messages
2024-05-01  4:26 [PATCH v4 0/3] Reclaim lazyfree THP without splitting Lance Yang
2024-05-01  4:26 ` [PATCH v4 1/3] mm/rmap: remove duplicated exit code in pagewalk loop Lance Yang
2024-05-07 14:51   ` Zi Yan
2024-05-07 14:53     ` Lance Yang
2024-05-01  4:26 ` [PATCH v4 2/3] mm/rmap: integrate PMD-mapped folio splitting into " Lance Yang
2024-05-07  3:40   ` Baolin Wang [this message]
2024-05-07  4:37     ` Lance Yang
2024-05-07  8:17       ` David Hildenbrand
2024-05-07  8:38         ` Lance Yang
2024-05-07 17:22           ` Andrew Morton
2024-05-07 17:33             ` David Hildenbrand
2024-05-07 17:38               ` Andrew Morton
2024-05-07 18:30                 ` David Hildenbrand
2024-05-07 15:26   ` Zi Yan
2024-05-08  5:43     ` Lance Yang
2024-05-08 14:07       ` Zi Yan
2024-05-08 14:35         ` Lance Yang
2024-05-08 14:48           ` Zi Yan
2024-05-08 14:56             ` Zi Yan
2024-05-08 15:52               ` Jason Gunthorpe
2024-05-08 16:22                 ` Zi Yan
2024-05-08 16:35                   ` Jason Gunthorpe
2024-05-09  8:21                     ` Lance Yang
2024-05-09 14:53                       ` Zi Yan
2024-05-09  8:56                     ` Lance Yang
2024-05-01  4:27 ` [PATCH v4 3/3] mm/vmscan: avoid split lazyfree THP during shrink_folio_list() Lance Yang
2024-05-07  4:00   ` Baolin Wang
2024-05-07  6:32     ` Lance Yang
2024-05-07  8:26       ` Lance Yang
2024-05-07  9:33         ` Baolin Wang
2024-05-07 11:37           ` Lance Yang
2024-05-09  9:36             ` Baolin Wang
2024-05-09 12:17               ` Lance Yang
2024-05-07 16:20   ` Zi Yan
2024-05-08  5:14     ` Lance Yang
2024-05-01 16:08 ` [PATCH v4 0/3] Reclaim lazyfree THP without splitting SeongJae Park
2024-05-02  0:30   ` Lance Yang
