From: Anshuman Khandual <anshuman.khandual@arm.com>
To: Ryan Roberts <ryan.roberts@arm.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>,
Muchun Song <muchun.song@linux.dev>,
Pasha Tatashin <pasha.tatashin@soleen.com>,
Andrew Morton <akpm@linux-foundation.org>,
Uladzislau Rezki <urezki@gmail.com>,
Christoph Hellwig <hch@infradead.org>,
Mark Rutland <mark.rutland@arm.com>,
Ard Biesheuvel <ardb@kernel.org>, Dev Jain <dev.jain@arm.com>,
Alexandre Ghiti <alexghiti@rivosinc.com>,
Steve Capper <steve.capper@linaro.org>,
Kevin Brodsky <kevin.brodsky@arm.com>
Cc: linux-arm-kernel@lists.infradead.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org
Subject: Re: [PATCH v1 05/16] mm/page_table_check: Batch-check pmds/puds just like ptes
Date: Thu, 6 Feb 2025 16:25:04 +0530
Message-ID: <d86b6d7f-0ab3-4737-85cd-70cddcffc1e8@arm.com>
In-Reply-To: <20250205151003.88959-6-ryan.roberts@arm.com>
On 2/5/25 20:39, Ryan Roberts wrote:
> Convert page_table_check_p[mu]d_set(...) to
> page_table_check_p[mu]ds_set(..., nr) to allow checking a contiguous set
> of pmds/puds in a single batch. We retain page_table_check_p[mu]d_set(...)
> as macros that call the new batch functions with nr=1 for compatibility.
>
> arm64 is about to reorganise its pte/pmd/pud helpers to reuse more code
> and to allow the implementation for huge_pte to more efficiently set
> ptes/pmds/puds in batches. We need these batch-helpers to make the
> refactoring possible.
A very small nit: although the justification here is reasonable, I'm
not sure platform-specific requirements need to be spelled out in such
detail for a generic MM change.
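
For illustration, a batched arm64-side caller might end up looking
roughly like this (hypothetical sketch only; set_pmds() and the
pmd_val() arithmetic below are mine, not something from this series):

	/*
	 * Write nr contiguous pmds, checking them in a single batch up
	 * front rather than with one page_table_check call per entry.
	 */
	static void set_pmds(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
			     unsigned int nr)
	{
		unsigned int i;

		page_table_check_pmds_set(mm, pmdp, pmd, nr);

		for (i = 0; i < nr; i++) {
			set_pmd(pmdp + i, pmd);
			/* Advance the output address to the next PMD block. */
			pmd = __pmd(pmd_val(pmd) + PMD_SIZE);
		}
	}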
>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
Regardless, LGTM.
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
> include/linux/page_table_check.h | 30 +++++++++++++++++-----------
> mm/page_table_check.c | 34 +++++++++++++++++++-------------
> 2 files changed, 38 insertions(+), 26 deletions(-)
>
> diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
> index 6722941c7cb8..289620d4aad3 100644
> --- a/include/linux/page_table_check.h
> +++ b/include/linux/page_table_check.h
> @@ -19,8 +19,10 @@ void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
> void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
> void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
> unsigned int nr);
> -void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd);
> -void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud);
> +void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
> + unsigned int nr);
> +void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
> + unsigned int nr);
> void __page_table_check_pte_clear_range(struct mm_struct *mm,
> unsigned long addr,
> pmd_t pmd);
> @@ -74,22 +76,22 @@ static inline void page_table_check_ptes_set(struct mm_struct *mm,
> __page_table_check_ptes_set(mm, ptep, pte, nr);
> }
>
> -static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
> - pmd_t pmd)
> +static inline void page_table_check_pmds_set(struct mm_struct *mm,
> + pmd_t *pmdp, pmd_t pmd, unsigned int nr)
> {
> if (static_branch_likely(&page_table_check_disabled))
> return;
>
> - __page_table_check_pmd_set(mm, pmdp, pmd);
> + __page_table_check_pmds_set(mm, pmdp, pmd, nr);
> }
>
> -static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
> - pud_t pud)
> +static inline void page_table_check_puds_set(struct mm_struct *mm,
> + pud_t *pudp, pud_t pud, unsigned int nr)
> {
> if (static_branch_likely(&page_table_check_disabled))
> return;
>
> - __page_table_check_pud_set(mm, pudp, pud);
> + __page_table_check_puds_set(mm, pudp, pud, nr);
> }
>
> static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
> @@ -129,13 +131,13 @@ static inline void page_table_check_ptes_set(struct mm_struct *mm,
> {
> }
>
> -static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
> - pmd_t pmd)
> +static inline void page_table_check_pmds_set(struct mm_struct *mm,
> + pmd_t *pmdp, pmd_t pmd, unsigned int nr)
> {
> }
>
> -static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
> - pud_t pud)
> +static inline void page_table_check_puds_set(struct mm_struct *mm,
> + pud_t *pudp, pud_t pud, unsigned int nr)
> {
> }
>
> @@ -146,4 +148,8 @@ static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
> }
>
> #endif /* CONFIG_PAGE_TABLE_CHECK */
> +
> +#define page_table_check_pmd_set(mm, pmdp, pmd) page_table_check_pmds_set(mm, pmdp, pmd, 1)
> +#define page_table_check_pud_set(mm, pudp, pud) page_table_check_puds_set(mm, pudp, pud, 1)
> +
> #endif /* __LINUX_PAGE_TABLE_CHECK_H */
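
Also nice that existing call sites stay source compatible via the
macros, e.g. (illustrative only):

	/* An unchanged single-entry caller ... */
	page_table_check_pmd_set(mm, pmdp, pmd);
	/* ... now expands to the batched form with nr = 1: */
	page_table_check_pmds_set(mm, pmdp, pmd, 1);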
> diff --git a/mm/page_table_check.c b/mm/page_table_check.c
> index 509c6ef8de40..dae4a7d776b3 100644
> --- a/mm/page_table_check.c
> +++ b/mm/page_table_check.c
> @@ -234,33 +234,39 @@ static inline void page_table_check_pmd_flags(pmd_t pmd)
> WARN_ON_ONCE(swap_cached_writable(pmd_to_swp_entry(pmd)));
> }
>
> -void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd)
> +void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
> + unsigned int nr)
> {
> + unsigned int i;
> + unsigned long stride = PMD_SIZE >> PAGE_SHIFT;
> +
> if (&init_mm == mm)
> return;
>
> page_table_check_pmd_flags(pmd);
>
> - __page_table_check_pmd_clear(mm, *pmdp);
> - if (pmd_user_accessible_page(pmd)) {
> - page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
> - pmd_write(pmd));
> - }
> + for (i = 0; i < nr; i++)
> + __page_table_check_pmd_clear(mm, *(pmdp + i));
> + if (pmd_user_accessible_page(pmd))
> + page_table_check_set(pmd_pfn(pmd), stride * nr, pmd_write(pmd));
> }
> -EXPORT_SYMBOL(__page_table_check_pmd_set);
> +EXPORT_SYMBOL(__page_table_check_pmds_set);
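
One observation (not a problem for the intended callers): the batch
path clears each old entry individually but marks the new range with a
single page_table_check_set() spanning stride * nr pages starting at
pmd_pfn(pmd), i.e. it assumes the nr entries map physically contiguous
memory. Given that contiguity, the batch is equivalent to nr single
calls, roughly (the pfn advance is shown only as pseudo-code):

	__page_table_check_pmds_set(mm, pmdp, pmd, nr);
	/*
	 * ~= for (i = 0; i < nr; i++)
	 *        __page_table_check_pmd_set(mm, pmdp + i,
	 *                <pmd with pfn advanced by i * stride>);
	 */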
>
> -void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud)
> +void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
> + unsigned int nr)
> {
> + unsigned int i;
> + unsigned long stride = PUD_SIZE >> PAGE_SHIFT;
> +
> if (&init_mm == mm)
> return;
>
> - __page_table_check_pud_clear(mm, *pudp);
> - if (pud_user_accessible_page(pud)) {
> - page_table_check_set(pud_pfn(pud), PUD_SIZE >> PAGE_SHIFT,
> - pud_write(pud));
> - }
> + for (i = 0; i < nr; i++)
> + __page_table_check_pud_clear(mm, *(pudp + i));
> + if (pud_user_accessible_page(pud))
> + page_table_check_set(pud_pfn(pud), stride * nr, pud_write(pud));
> }
> -EXPORT_SYMBOL(__page_table_check_pud_set);
> +EXPORT_SYMBOL(__page_table_check_puds_set);
>
> void __page_table_check_pte_clear_range(struct mm_struct *mm,
> unsigned long addr,