From: Anshuman Khandual <anshuman.khandual@arm.com>
To: Ryan Roberts <ryan.roberts@arm.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>,
	Muchun Song <muchun.song@linux.dev>,
	Pasha Tatashin <pasha.tatashin@soleen.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Uladzislau Rezki <urezki@gmail.com>,
	Christoph Hellwig <hch@infradead.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Ard Biesheuvel <ardb@kernel.org>, Dev Jain <dev.jain@arm.com>,
	Alexandre Ghiti <alexghiti@rivosinc.com>,
	Steve Capper <steve.capper@linaro.org>,
	Kevin Brodsky <kevin.brodsky@arm.com>
Cc: linux-arm-kernel@lists.infradead.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH v1 14/16] mm/vmalloc: Batch arch_sync_kernel_mappings() more efficiently
Date: Mon, 10 Feb 2025 12:41:13 +0530
Message-ID: <7e184caf-2447-48d4-8d7c-1b63deb0f418@arm.com>
In-Reply-To: <20250205151003.88959-15-ryan.roberts@arm.com>

On 2/5/25 20:39, Ryan Roberts wrote:
> When page_shift is greater than PAGE_SHIFT, __vmap_pages_range_noflush()
> will call vmap_range_noflush() for each individual huge page. But
> vmap_range_noflush() previously called arch_sync_kernel_mappings()
> directly, so it ended up being called once per huge page.
> 
> We can do better than this; refactor the call into the outer
> __vmap_pages_range_noflush() so that it is only called once for the
> entire batch operation.

This makes sense.
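
For context, the hook in question is the generic one from
<linux/pgtable.h>: an architecture opts in by defining
ARCH_PAGE_TABLE_SYNC_MASK as the set of PGTBL_*_MODIFIED bits it cares
about and by implementing the sync routine. A minimal sketch (the mask
choice here is purely illustrative; the actual arm64 opt-in comes later
in this series):

	/* Which kernel page-table levels need an arch sync when modified. */
	#define ARCH_PAGE_TABLE_SYNC_MASK	(PGTBL_PMD_MODIFIED | PGTBL_PUD_MODIFIED)

	/*
	 * Core mm calls this after modifying kernel page tables in
	 * [start, end) at any level set in ARCH_PAGE_TABLE_SYNC_MASK.
	 */
	void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
	{
		/* arch-specific work, e.g. syncing top-level entries */
	}

The fewer times that routine fires per batch, the better, hence this
patch.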

> 
> This will benefit performance on arm64, which is about to opt in to
> using the hook.
> 
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
>  mm/vmalloc.c | 60 ++++++++++++++++++++++++++--------------------------
>  1 file changed, 30 insertions(+), 30 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 68950b1824d0..50fd44439875 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -285,40 +285,38 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
>  
>  static int vmap_range_noflush(unsigned long addr, unsigned long end,
>  			phys_addr_t phys_addr, pgprot_t prot,
> -			unsigned int max_page_shift)
> +			unsigned int max_page_shift, pgtbl_mod_mask *mask)
>  {
>  	pgd_t *pgd;
> -	unsigned long start;
>  	unsigned long next;
>  	int err;
> -	pgtbl_mod_mask mask = 0;
>  
>  	might_sleep();
>  	BUG_ON(addr >= end);
>  
> -	start = addr;
>  	pgd = pgd_offset_k(addr);
>  	do {
>  		next = pgd_addr_end(addr, end);
>  		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
> -					max_page_shift, &mask);
> +					max_page_shift, mask);
>  		if (err)
>  			break;
>  	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
>  
> -	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
> -		arch_sync_kernel_mappings(start, end);
> -
>  	return err;
>  }

The arch_sync_kernel_mappings() call gets dropped here and moved into
vmap_range_noflush()'s existing callers instead.

>  
>  int vmap_page_range(unsigned long addr, unsigned long end,
>  		    phys_addr_t phys_addr, pgprot_t prot)
>  {
> +	pgtbl_mod_mask mask = 0;
>  	int err;
>  
>  	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
> -				 ioremap_max_page_shift);
> +				 ioremap_max_page_shift, &mask);
> +	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
> +		arch_sync_kernel_mappings(addr, end);
> +

arch_sync_kernel_mappings() gets moved here.

>  	flush_cache_vmap(addr, end);
>  	if (!err)
>  		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
> @@ -587,29 +585,24 @@ static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
>  }
>  
>  static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
> -		pgprot_t prot, struct page **pages)
> +		pgprot_t prot, struct page **pages, pgtbl_mod_mask *mask)
>  {
> -	unsigned long start = addr;
>  	pgd_t *pgd;
>  	unsigned long next;
>  	int err = 0;
>  	int nr = 0;
> -	pgtbl_mod_mask mask = 0;
>  
>  	BUG_ON(addr >= end);
>  	pgd = pgd_offset_k(addr);
>  	do {
>  		next = pgd_addr_end(addr, end);
>  		if (pgd_bad(*pgd))
> -			mask |= PGTBL_PGD_MODIFIED;
> -		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
> +			*mask |= PGTBL_PGD_MODIFIED;
> +		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, mask);
>  		if (err)
>  			break;
>  	} while (pgd++, addr = next, addr != end);
>  
> -	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
> -		arch_sync_kernel_mappings(start, end);
> -
>  	return err;
>  }
>  
> @@ -626,26 +619,33 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
>  		pgprot_t prot, struct page **pages, unsigned int page_shift)
>  {
>  	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
> +	unsigned long start = addr;
> +	pgtbl_mod_mask mask = 0;
> +	int err = 0;
>  
>  	WARN_ON(page_shift < PAGE_SHIFT);
>  
>  	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
> -			page_shift == PAGE_SHIFT)
> -		return vmap_small_pages_range_noflush(addr, end, prot, pages);
> -
> -	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
> -		int err;
> -
> -		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
> -					page_to_phys(pages[i]), prot,
> -					page_shift);
> -		if (err)
> -			return err;
> +			page_shift == PAGE_SHIFT) {
> +		err = vmap_small_pages_range_noflush(addr, end, prot, pages,
> +						&mask);

Unlike before, this path no longer returns directly; it falls through
so that arch_sync_kernel_mappings() is still covered at the end.

> +	} else {
> +		for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
> +			err = vmap_range_noflush(addr,
> +						addr + (1UL << page_shift),
> +						page_to_phys(pages[i]), prot,
> +						page_shift, &mask);
> +			if (err)
> +				break;
>  
> -		addr += 1UL << page_shift;
> +			addr += 1UL << page_shift;
> +		}
>  	}
>  
> -	return 0;
> +	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
> +		arch_sync_kernel_mappings(start, end);

arch_sync_kernel_mappings() is consolidated here after being dropped
from both vmap_range_noflush() and vmap_small_pages_range_noflush().
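
In other words, the net shape of __vmap_pages_range_noflush() becomes
(condensed from the hunks above; the per-huge-page loop is elided and
only the small-page path is shown, for brevity):

	unsigned long start = addr;
	pgtbl_mod_mask mask = 0;
	int err;

	/*
	 * Both the small-page path and the huge-page loop now only
	 * accumulate modifications into 'mask'...
	 */
	err = vmap_small_pages_range_noflush(addr, end, prot, pages, &mask);

	/* ...so the sync runs exactly once per batch. */
	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;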

> +
> +	return err;
>  }
>  
>  int vmap_pages_range_noflush(unsigned long addr, unsigned long end,

LGTM, and this can stand on its own as well.

Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>

