From: Anshuman Khandual <anshuman.khandual@arm.com>
To: Ryan Roberts <ryan.roberts@arm.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>,
Pasha Tatashin <pasha.tatashin@soleen.com>,
Andrew Morton <akpm@linux-foundation.org>,
Uladzislau Rezki <urezki@gmail.com>,
Christoph Hellwig <hch@infradead.org>,
David Hildenbrand <david@redhat.com>,
"Matthew Wilcox (Oracle)" <willy@infradead.org>,
Mark Rutland <mark.rutland@arm.com>,
Alexandre Ghiti <alexghiti@rivosinc.com>,
Kevin Brodsky <kevin.brodsky@arm.com>
Cc: linux-arm-kernel@lists.infradead.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org
Subject: Re: [PATCH v4 04/11] arm64/mm: Refactor __set_ptes() and __ptep_get_and_clear()
Date: Thu, 24 Apr 2025 14:56:32 +0530
Message-ID: <dbe04199-51e9-4339-8522-cb440092ab33@arm.com>
In-Reply-To: <20250422081822.1836315-5-ryan.roberts@arm.com>
On 4/22/25 13:48, Ryan Roberts wrote:
> Refactor __set_ptes(), set_pmd_at() and set_pud_at() so that they are
> all thin wrappers around a new common __set_ptes_anysz(), which takes
> a pgsize parameter. Additionally, refactor __ptep_get_and_clear() and
> pmdp_huge_get_and_clear() to use a new common
> __ptep_get_and_clear_anysz(), which also takes a pgsize parameter.
>
> These changes will permit the huge_pte API to efficiently batch-set
> pgtable entries and take advantage of the barrier optimizations
> introduced later in this series. Additionally, since the new *_anysz()
> helpers call the correct page_table_check_*_set() API based on pgsize,
> huge_ptes will now get proper coverage. Currently, the huge_pte API
> always uses the pte API, which assumes an entry only covers a single
> page.
>
> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
> arch/arm64/include/asm/pgtable.h | 114 ++++++++++++++++++++-----------
> 1 file changed, 73 insertions(+), 41 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index d3b538be1500..d80aa9ba0a16 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -423,23 +423,6 @@ static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
> return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
> }
>
> -static inline void __set_ptes(struct mm_struct *mm,
> - unsigned long __always_unused addr,
> - pte_t *ptep, pte_t pte, unsigned int nr)
> -{
> - page_table_check_ptes_set(mm, ptep, pte, nr);
> - __sync_cache_and_tags(pte, nr);
> -
> - for (;;) {
> - __check_safe_pte_update(mm, ptep, pte);
> - __set_pte(ptep, pte);
> - if (--nr == 0)
> - break;
> - ptep++;
> - pte = pte_advance_pfn(pte, 1);
> - }
> -}
> -
> /*
> * Hugetlb definitions.
> */
> @@ -649,30 +632,62 @@ static inline pgprot_t pud_pgprot(pud_t pud)
> return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
> }
>
> -static inline void __set_pte_at(struct mm_struct *mm,
> - unsigned long __always_unused addr,
> - pte_t *ptep, pte_t pte, unsigned int nr)
> +static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep,
> + pte_t pte, unsigned int nr,
> + unsigned long pgsize)
> {
> - __sync_cache_and_tags(pte, nr);
> - __check_safe_pte_update(mm, ptep, pte);
> - __set_pte(ptep, pte);
> + unsigned long stride = pgsize >> PAGE_SHIFT;
> +
> + switch (pgsize) {
> + case PAGE_SIZE:
> + page_table_check_ptes_set(mm, ptep, pte, nr);
> + break;
> + case PMD_SIZE:
> + page_table_check_pmds_set(mm, (pmd_t *)ptep, pte_pmd(pte), nr);
> + break;
> +#ifndef __PAGETABLE_PMD_FOLDED
> + case PUD_SIZE:
> + page_table_check_puds_set(mm, (pud_t *)ptep, pte_pud(pte), nr);
> + break;
> +#endif
> + default:
> + VM_WARN_ON(1);
> + }
> +
> + __sync_cache_and_tags(pte, nr * stride);
> +
> + for (;;) {
> + __check_safe_pte_update(mm, ptep, pte);
> + __set_pte(ptep, pte);
> + if (--nr == 0)
> + break;
> + ptep++;
> + pte = pte_advance_pfn(pte, stride);
> + }
> +}
> +
> +static inline void __set_ptes(struct mm_struct *mm,
> + unsigned long __always_unused addr,
> + pte_t *ptep, pte_t pte, unsigned int nr)
> +{
> + __set_ptes_anysz(mm, ptep, pte, nr, PAGE_SIZE);
> }
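
Nice cleanup. To make the batching benefit concrete, here is a rough
sketch of how a huge_pte style caller could now set a block of
contiguous entries with a single call (the function below is
hypothetical and purely illustrative - the real conversion happens in
patch 05/11):

static inline void example_set_cont_pmds(struct mm_struct *mm,
					 pte_t *ptep, pte_t pte,
					 unsigned int ncont)
{
	/*
	 * One call covers all 'ncont' PMD-sized entries, so the
	 * page_table_check call and the cache/tag maintenance are
	 * performed once per batch instead of once per entry.
	 */
	__set_ptes_anysz(mm, ptep, pte, ncont, PMD_SIZE);
}
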
>
> -static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
> - pmd_t *pmdp, pmd_t pmd)
> +static inline void __set_pmds(struct mm_struct *mm,
> + unsigned long __always_unused addr,
> + pmd_t *pmdp, pmd_t pmd, unsigned int nr)
> {
> - page_table_check_pmd_set(mm, pmdp, pmd);
> - return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
> - PMD_SIZE >> PAGE_SHIFT);
> + __set_ptes_anysz(mm, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
> }
> +#define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1)
>
> -static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
> - pud_t *pudp, pud_t pud)
> +static inline void __set_puds(struct mm_struct *mm,
> + unsigned long __always_unused addr,
> + pud_t *pudp, pud_t pud, unsigned int nr)
> {
> - page_table_check_pud_set(mm, pudp, pud);
> - return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
> - PUD_SIZE >> PAGE_SHIFT);
> + __set_ptes_anysz(mm, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
> }
> +#define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1)
>
> #define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d))
> #define __phys_to_p4d_val(phys) __phys_to_pte_val(phys)
> @@ -1301,16 +1316,37 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
> }
> #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
>
> -static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
> - unsigned long address, pte_t *ptep)
> +static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
> + pte_t *ptep,
> + unsigned long pgsize)
> {
> pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
>
> - page_table_check_pte_clear(mm, pte);
> + switch (pgsize) {
> + case PAGE_SIZE:
> + page_table_check_pte_clear(mm, pte);
> + break;
> + case PMD_SIZE:
> + page_table_check_pmd_clear(mm, pte_pmd(pte));
> + break;
> +#ifndef __PAGETABLE_PMD_FOLDED
> + case PUD_SIZE:
> + page_table_check_pud_clear(mm, pte_pud(pte));
> + break;
> +#endif
> + default:
> + VM_WARN_ON(1);
> + }
>
> return pte;
> }
>
> +static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
> + unsigned long address, pte_t *ptep)
> +{
> + return __ptep_get_and_clear_anysz(mm, ptep, PAGE_SIZE);
> +}
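
Similarly, a sketch of how a batched clear at huge granularity can sit
on top of __ptep_get_and_clear_anysz(), folding the access and dirty
bits from the whole block into the returned entry (again hypothetical,
assuming nr >= 1, just to illustrate the intended usage):

static inline pte_t example_clear_cont_ptes(struct mm_struct *mm,
					    pte_t *ptep, unsigned int nr,
					    unsigned long pgsize)
{
	pte_t pte, tmp_pte;

	pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
	while (--nr) {
		ptep++;
		tmp_pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
		/* Accumulate access/dirty from every cleared entry. */
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}
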
> +
> static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
> pte_t *ptep, unsigned int nr, int full)
> {
> @@ -1347,11 +1383,7 @@ static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
> static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
> unsigned long address, pmd_t *pmdp)
> {
> - pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));
> -
> - page_table_check_pmd_clear(mm, pmd);
> -
> - return pmd;
> + return pte_pmd(__ptep_get_and_clear_anysz(mm, (pte_t *)pmdp, PMD_SIZE));
> }
> #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>
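One small observation, not a problem with the patch: the #ifndef
__PAGETABLE_PMD_FOLDED guard around the PUD_SIZE cases is needed
because, with the PMD level folded, PUD_SIZE and PMD_SIZE expand to the
same value, so the duplicate case label would break the build.

Otherwise this all looks good.
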
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>