From: "Huang, Ying" <ying.huang@intel.com>
To: Barry Song <21cnbao@gmail.com>
Cc: akpm@linux-foundation.org, linux-mm@kvack.org,
	baolin.wang@linux.alibaba.com, chrisl@kernel.org,
	david@redhat.com, hanchuanhua@oppo.com, hannes@cmpxchg.org,
	hughd@google.com, kasong@tencent.com,
	linux-kernel@vger.kernel.org, ryan.roberts@arm.com,
	surenb@google.com, v-songbaohua@oppo.com, willy@infradead.org,
	xiang@kernel.org, yosryahmed@google.com, yuzhao@google.com,
	ziy@nvidia.com
Subject: Re: [PATCH v4 6/6] mm: swap: entirely map large folios found in swapcache
Date: Thu, 09 May 2024 15:44:25 +0800
Message-ID: <875xvnig4m.fsf@yhuang6-desk2.ccr.corp.intel.com>
In-Reply-To: <20240508224040.190469-7-21cnbao@gmail.com> (Barry Song's message of "Thu, 9 May 2024 10:40:40 +1200")

Barry Song <21cnbao@gmail.com> writes:

> From: Chuanhua Han <hanchuanhua@oppo.com>
>
> When a large folio is found in the swapcache, the current implementation
> takes nr_pages separate page faults, each calling do_swap_page() to map a
> single base page. This patch instead maps the entire large folio in one
> fault, minimizing the number of page faults. Additionally, redundant
> checks and early exits for arm64 MTE tag restoration are removed.
>
> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

LGTM, Thanks!  Feel free to add

Reviewed-by: "Huang, Ying" <ying.huang@intel.com>

in a future version.
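
For anyone skimming the thread, below is a minimal standalone sketch (plain
userspace C with made-up addresses, simplified constants, and a hypothetical
folio_fits() helper, not the kernel helpers used in the patch) of the
containment check the patch performs before deciding to batch-map: the whole
folio must lie inside both the faulting VMA and the PTE table covered by a
single PMD entry.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(512UL * PAGE_SIZE)	/* 2 MiB with 4K base pages */
#define PMD_MASK	(~(PMD_SIZE - 1))

/*
 * Return true if a folio of @nr pages, whose @idx-th page is mapped at
 * @fault_addr, lies entirely inside [@vm_start, @vm_end) and inside the
 * PTE table spanned by the PMD entry covering @fault_addr.
 */
static bool folio_fits(unsigned long fault_addr, unsigned long idx,
		       unsigned long nr, unsigned long vm_start,
		       unsigned long vm_end)
{
	unsigned long folio_start = fault_addr - idx * PAGE_SIZE;
	unsigned long folio_end = folio_start + nr * PAGE_SIZE;
	unsigned long pmd_start = fault_addr & PMD_MASK;
	unsigned long pmd_end = pmd_start + PMD_SIZE;

	if (folio_start < vm_start || folio_start < pmd_start)
		return false;	/* folio begins before the VMA or PTE table */
	if (folio_end > vm_end || folio_end > pmd_end)
		return false;	/* folio ends past the VMA or PTE table */
	return true;
}

int main(void)
{
	/* hypothetical 64 KiB (16-page) folio, fault on its third page */
	unsigned long vm_start = 0x700000000000UL;
	unsigned long vm_end   = vm_start + 32 * PMD_SIZE;
	unsigned long fault    = vm_start + 0x10000UL + 2 * PAGE_SIZE;

	printf("can batch-map: %d\n",
	       folio_fits(fault, 2, 16, vm_start, vm_end));
	return 0;
}

When that check (or the swap-entry batch check) fails, the patch falls back
to the existing single-page path via the check_folio label with nr_pages
left at 1, so the batching is purely an optimization for fully covered
folios.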

> ---
>  mm/memory.c | 59 +++++++++++++++++++++++++++++++++++++++++++----------
>  1 file changed, 48 insertions(+), 11 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index d9434df24d62..8b9e4cab93ed 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  	pte_t pte;
>  	vm_fault_t ret = 0;
>  	void *shadow = NULL;
> +	int nr_pages;
> +	unsigned long page_idx;
> +	unsigned long address;
> +	pte_t *ptep;
>  
>  	if (!pte_unmap_same(vmf))
>  		goto out;
> @@ -4166,6 +4170,38 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  		goto out_nomap;
>  	}
>  
> +	nr_pages = 1;
> +	page_idx = 0;
> +	address = vmf->address;
> +	ptep = vmf->pte;
> +	if (folio_test_large(folio) && folio_test_swapcache(folio)) {
> +		int nr = folio_nr_pages(folio);
> +		unsigned long idx = folio_page_idx(folio, page);
> +		unsigned long folio_start = address - idx * PAGE_SIZE;
> +		unsigned long folio_end = folio_start + nr * PAGE_SIZE;
> +		pte_t *folio_ptep;
> +		pte_t folio_pte;
> +
> +		if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
> +			goto check_folio;
> +		if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
> +			goto check_folio;
> +
> +		folio_ptep = vmf->pte - idx;
> +		folio_pte = ptep_get(folio_ptep);
> +		if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
> +		    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
> +			goto check_folio;
> +
> +		page_idx = idx;
> +		address = folio_start;
> +		ptep = folio_ptep;
> +		nr_pages = nr;
> +		entry = folio->swap;
> +		page = &folio->page;
> +	}
> +
> +check_folio:
>  	/*
>  	 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
>  	 * must never point at an anonymous page in the swapcache that is
> @@ -4225,12 +4261,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  	 * We're already holding a reference on the page but haven't mapped it
>  	 * yet.
>  	 */
> -	swap_free(entry);
> +	swap_free_nr(entry, nr_pages);
>  	if (should_try_to_free_swap(folio, vma, vmf->flags))
>  		folio_free_swap(folio);
>  
> -	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> -	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> +	add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> +	add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
>  	pte = mk_pte(page, vma->vm_page_prot);
>  
>  	/*
> @@ -4247,27 +4283,28 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  		}
>  		rmap_flags |= RMAP_EXCLUSIVE;
>  	}
> -	flush_icache_page(vma, page);
> +	folio_ref_add(folio, nr_pages - 1);
> +	flush_icache_pages(vma, page, nr_pages);
>  	if (pte_swp_soft_dirty(vmf->orig_pte))
>  		pte = pte_mksoft_dirty(pte);
>  	if (pte_swp_uffd_wp(vmf->orig_pte))
>  		pte = pte_mkuffd_wp(pte);
> -	vmf->orig_pte = pte;
> +	vmf->orig_pte = pte_advance_pfn(pte, page_idx);
>  
>  	/* ksm created a completely new copy */
>  	if (unlikely(folio != swapcache && swapcache)) {
> -		folio_add_new_anon_rmap(folio, vma, vmf->address);
> +		folio_add_new_anon_rmap(folio, vma, address);
>  		folio_add_lru_vma(folio, vma);
>  	} else {
> -		folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
> +		folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
>  					rmap_flags);
>  	}
>  
>  	VM_BUG_ON(!folio_test_anon(folio) ||
>  			(pte_write(pte) && !PageAnonExclusive(page)));
> -	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
> -	arch_do_swap_page_nr(vma->vm_mm, vma, vmf->address,
> -			pte, vmf->orig_pte, 1);
> +	set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
> +	arch_do_swap_page_nr(vma->vm_mm, vma, address,
> +			pte, pte, nr_pages);
>  
>  	folio_unlock(folio);
>  	if (folio != swapcache && swapcache) {
> @@ -4291,7 +4328,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  	}
>  
>  	/* No need to invalidate - it was non-present before */
> -	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
> +	update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
>  unlock:
>  	if (vmf->pte)
>  		pte_unmap_unlock(vmf->pte, vmf->ptl);

--
Best Regards,
Huang, Ying


Thread overview: 11+ messages
2024-05-08 22:40 [PATCH v4 0/6] large folios swap-in: handle refault cases first Barry Song
2024-05-08 22:40 ` [PATCH v4 1/6] mm: swap: introduce swap_free_nr() for batched swap_free() Barry Song
2024-05-08 22:40 ` [PATCH v4 2/6] mm: remove the implementation of swap_free() and always use swap_free_nr() Barry Song
2024-05-10  9:11   ` Ryan Roberts
2024-05-08 22:40 ` [PATCH v4 3/6] mm: introduce pte_move_swp_offset() helper which can move offset bidirectionally Barry Song
2024-05-08 22:40 ` [PATCH v4 4/6] mm: introduce arch_do_swap_page_nr() which allows restore metadata for nr pages Barry Song
2024-05-08 22:40 ` [PATCH v4 5/6] mm: swap: make should_try_to_free_swap() support large-folio Barry Song
2024-05-08 22:40 ` [PATCH v4 6/6] mm: swap: entirely map large folios found in swapcache Barry Song
2024-05-09  7:44   ` Huang, Ying [this message]
2024-05-21 21:21 ` [PATCH v4 0/6] large folios swap-in: handle refault cases first Barry Song
2024-05-21 21:59   ` Andrew Morton
