From: "Yin, Fengwei" <fengwei.yin@intel.com>
To: David Hildenbrand <david@redhat.com>, <linux-kernel@vger.kernel.org>
Cc: <linux-mm@kvack.org>, Andrew Morton <akpm@linux-foundation.org>,
	"Matthew Wilcox (Oracle)" <willy@infradead.org>,
	Hugh Dickins <hughd@google.com>,
	Ryan Roberts <ryan.roberts@arm.com>,
	Mike Kravetz <mike.kravetz@oracle.com>,
	Muchun Song <muchun.song@linux.dev>, Peter Xu <peterx@redhat.com>
Subject: Re: [PATCH v1 14/39] mm/rmap: introduce folio_add_anon_rmap_[pte|ptes|pmd]()
Date: Fri, 15 Dec 2023 10:26:32 +0800
Message-ID: <941c2269-98f0-43a7-a516-cb5b4909ae7f@intel.com>
In-Reply-To: <20231211155652.131054-15-david@redhat.com>



On 12/11/2023 11:56 PM, David Hildenbrand wrote:
> Let's mimic what we did with folio_add_file_rmap_*() so we can similarly
> replace page_add_anon_rmap() next.
> 
> Make the compiler always special-case on the granularity by using
> __always_inline.
> 
> Note that the new functions ignore the RMAP_COMPOUND flag, which we will
> remove as soon as page_add_anon_rmap() is gone.
> 
> Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>

With a small question below.
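
For context, a minimal sketch of a call-site conversion under the new
interface (the call site itself is illustrative; only the function
names and flags come from the quoted patch):

	/* Before: granularity chosen at runtime via RMAP_COMPOUND. */
	page_add_anon_rmap(page, vma, address, RMAP_COMPOUND);

	/* After: granularity is encoded in the function name; the new
	 * helpers ignore RMAP_COMPOUND, which goes away with the old API.
	 */
	folio_add_anon_rmap_pmd(folio, page, vma, address, RMAP_NONE);

	/* PTE granularity: a single page ... */
	folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
	/* ... or a batch of nr_pages consecutive pages of the folio. */
	folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
				 RMAP_NONE);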

> ---
>   include/linux/rmap.h |   6 +++
>   mm/rmap.c            | 118 ++++++++++++++++++++++++++++++-------------
>   2 files changed, 88 insertions(+), 36 deletions(-)
> 
> diff --git a/include/linux/rmap.h b/include/linux/rmap.h
> index 7198905dc8be..3b5357cb1c09 100644
> --- a/include/linux/rmap.h
> +++ b/include/linux/rmap.h
> @@ -234,6 +234,12 @@ static inline void __folio_rmap_sanity_checks(struct folio *folio,
>    * rmap interfaces called when adding or removing pte of page
>    */
>   void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
> +void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
> +		struct vm_area_struct *, unsigned long address, rmap_t flags);
> +#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
> +	folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
> +void folio_add_anon_rmap_pmd(struct folio *, struct page *,
> +		struct vm_area_struct *, unsigned long address, rmap_t flags);
>   void page_add_anon_rmap(struct page *, struct vm_area_struct *,
>   		unsigned long address, rmap_t flags);
>   void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
> diff --git a/mm/rmap.c b/mm/rmap.c
> index c5761986a411..7787499fa2ad 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1300,38 +1300,20 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
>   		unsigned long address, rmap_t flags)
>   {
>   	struct folio *folio = page_folio(page);
> -	atomic_t *mapped = &folio->_nr_pages_mapped;
> -	int nr = 0, nr_pmdmapped = 0;
> -	bool compound = flags & RMAP_COMPOUND;
> -	bool first;
>   
> -	/* Is page being mapped by PTE? Is this its first map to be added? */
> -	if (likely(!compound)) {
> -		first = atomic_inc_and_test(&page->_mapcount);
> -		nr = first;
> -		if (first && folio_test_large(folio)) {
> -			nr = atomic_inc_return_relaxed(mapped);
> -			nr = (nr < COMPOUND_MAPPED);
> -		}
> -	} else if (folio_test_pmd_mappable(folio)) {
> -		/* That test is redundant: it's for safety or to optimize out */
> +	if (likely(!(flags & RMAP_COMPOUND)))
> +		folio_add_anon_rmap_pte(folio, page, vma, address, flags);
> +	else
> +		folio_add_anon_rmap_pmd(folio, page, vma, address, flags);
> +}
>   
> -		first = atomic_inc_and_test(&folio->_entire_mapcount);
> -		if (first) {
> -			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
> -			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
> -				nr_pmdmapped = folio_nr_pages(folio);
> -				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
> -				/* Raced ahead of a remove and another add? */
> -				if (unlikely(nr < 0))
> -					nr = 0;
> -			} else {
> -				/* Raced ahead of a remove of COMPOUND_MAPPED */
> -				nr = 0;
> -			}
> -		}
> -	}
> +static __always_inline void __folio_add_anon_rmap(struct folio *folio,
> +		struct page *page, int nr_pages, struct vm_area_struct *vma,
> +		unsigned long address, rmap_t flags, enum rmap_mode mode)
> +{
> +	unsigned int i, nr, nr_pmdmapped = 0;
>   
> +	nr = __folio_add_rmap(folio, page, nr_pages, mode, &nr_pmdmapped);
>   	if (nr_pmdmapped)
>   		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
>   	if (nr)
> @@ -1345,18 +1327,34 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
>   		 * folio->index right when not given the address of the head
>   		 * page.
>   		 */
> -		VM_WARN_ON_FOLIO(folio_test_large(folio) && !compound, folio);
> +		VM_WARN_ON_FOLIO(folio_test_large(folio) &&
> +				 mode != RMAP_MODE_PMD, folio);
>   		__folio_set_anon(folio, vma, address,
>   				 !!(flags & RMAP_EXCLUSIVE));
>   	} else if (likely(!folio_test_ksm(folio))) {
>   		__page_check_anon_rmap(folio, page, vma, address);
>   	}
> -	if (flags & RMAP_EXCLUSIVE)
> -		SetPageAnonExclusive(page);
> -	/* While PTE-mapping a THP we have a PMD and a PTE mapping. */
> -	VM_WARN_ON_FOLIO((atomic_read(&page->_mapcount) > 0 ||
> -			  (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) &&
> -			 PageAnonExclusive(page), folio);
> +
> +	if (flags & RMAP_EXCLUSIVE) {
> +		switch (mode) {
> +		case RMAP_MODE_PTE:
> +			for (i = 0; i < nr_pages; i++)
> +				SetPageAnonExclusive(page + i);
> +			break;
> +		case RMAP_MODE_PMD:
> +			SetPageAnonExclusive(page);
> +			break;
> +		}
> +	}
> +	for (i = 0; i < nr_pages; i++) {
> +		struct page *cur_page = page + i;
> +
> +		/* While PTE-mapping a THP we have a PMD and a PTE mapping. */
> +		VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 ||
> +				  (folio_test_large(folio) &&
> +				   folio_entire_mapcount(folio) > 1)) &&
> +				 PageAnonExclusive(cur_page), folio);
> +	}
This change iterates over all pages in the PMD case, whereas the
original code only checked the single page that was passed in. Is this
change intentional? Thanks.
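
To spell out the difference, condensed from the hunks above (variable
names as in the patch):

	/* Old: one check against the page that was passed in; for a
	 * PMD mapping that is effectively just the head page.
	 */
	VM_WARN_ON_FOLIO((atomic_read(&page->_mapcount) > 0 ||
			  (folio_test_large(folio) &&
			   folio_entire_mapcount(folio) > 1)) &&
			 PageAnonExclusive(page), folio);

	/* New: the loop walks the whole range, so the PMD path now
	 * checks all HPAGE_PMD_NR subpages instead of only the first.
	 */
	for (i = 0; i < nr_pages; i++) {
		struct page *cur_page = page + i;

		VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 ||
				  (folio_test_large(folio) &&
				   folio_entire_mapcount(folio) > 1)) &&
				 PageAnonExclusive(cur_page), folio);
	}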

>   
>   	/*
>   	 * For large folio, only mlock it if it's fully mapped to VMA. It's
> @@ -1368,6 +1366,54 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
>   		mlock_vma_folio(folio, vma);
>   }
>   
> +/**
> + * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio
> + * @folio:	The folio to add the mappings to
> + * @page:	The first page to add
> + * @nr_pages:	The number of pages which will be mapped
> + * @vma:	The vm area in which the mappings are added
> + * @address:	The user virtual address of the first page to map
> + * @flags:	The rmap flags
> + *
> + * The page range of folio is defined by [first_page, first_page + nr_pages)
> + *
> + * The caller needs to hold the page table lock, and the page must be locked in
> + * the anon_vma case: to serialize mapping,index checking after setting,
> + * and to ensure that an anon folio is not being upgraded racily to a KSM folio
> + * (but KSM folios are never downgraded).
> + */
> +void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
> +		int nr_pages, struct vm_area_struct *vma, unsigned long address,
> +		rmap_t flags)
> +{
> +	__folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags,
> +			      RMAP_MODE_PTE);
> +}
> +
> +/**
> + * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio
> + * @folio:	The folio to add the mapping to
> + * @page:	The first page to add
> + * @vma:	The vm area in which the mapping is added
> + * @address:	The user virtual address of the first page to map
> + * @flags:	The rmap flags
> + *
> + * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR)
> + *
> + * The caller needs to hold the page table lock, and the page must be locked in
> + * the anon_vma case: to serialize mapping,index checking after setting.
> + */
> +void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page,
> +		struct vm_area_struct *vma, unsigned long address, rmap_t flags)
> +{
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +	__folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags,
> +			      RMAP_MODE_PMD);
> +#else
> +	WARN_ON_ONCE(true);
> +#endif
> +}
> +
>   /**
>    * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
>    * @folio:	The folio to add the mapping to.
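
A footnote on the changelog's __always_inline remark: because each
wrapper passes a compile-time-constant mode, the compiler can prove the
other switch arm dead and specialize each caller. A minimal standalone
model of that pattern (toy userspace C; only the enum and mode names
follow the patch):

	#define __always_inline inline __attribute__((__always_inline__))

	enum rmap_mode { RMAP_MODE_PTE, RMAP_MODE_PMD };

	static __always_inline void do_add(int nr_pages, enum rmap_mode mode)
	{
		switch (mode) {
		case RMAP_MODE_PTE:
			/* per-PTE bookkeeping for nr_pages pages */
			break;
		case RMAP_MODE_PMD:
			/* one entire-mapping update for the whole range */
			break;
		}
		(void)nr_pages;
	}

	/* Constant 'mode' at each call site: after inlining, the switch
	 * folds away and each wrapper compiles to straight-line code.
	 */
	void add_pte(void) { do_add(1, RMAP_MODE_PTE); }
	void add_pmd(void) { do_add(512, RMAP_MODE_PMD); }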

