* [PATCH v3 01/10] mm/ppc: Define pXd_large() with pXd_leaf()
2024-03-05 4:37 [PATCH v3 00/10] mm/treewide: Replace pXd_large() with pXd_leaf() peterx
@ 2024-03-05 4:37 ` peterx
2024-03-05 17:29 ` Christophe Leroy
2024-03-06 6:15 ` Mike Rapoport
2024-03-05 4:37 ` [PATCH v3 02/10] mm/ppc: Replace pXd_is_leaf() " peterx
` (8 subsequent siblings)
9 siblings, 2 replies; 24+ messages in thread
From: peterx @ 2024-03-05 4:37 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Christophe Leroy, x86, Kirill A . Shutemov, Jason Gunthorpe,
Yang Shi, peterx, Andrew Morton, linuxppc-dev, Muchun Song,
Michael Ellerman, Nicholas Piggin, Aneesh Kumar K.V,
Naveen N. Rao
From: Peter Xu <peterx@redhat.com>
The two definitions are the same. The only difference is that pXd_large()
is only defined with THP selected, and only on 64-bit book3s.

Instead of implementing it twice, make pXd_large() a macro alias of
pXd_leaf(), and define it unconditionally just like pXd_leaf(). This
prepares for merging the two APIs.
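To illustrate, the resulting pattern looks like this (a sketch assembled
from the hunks below, not verbatim kernel code):

  /* book3s/64/pgtable.h: one implementation, two names */
  #define pmd_leaf pmd_is_leaf
  #define pmd_large pmd_leaf
  static inline bool pmd_is_leaf(pmd_t pmd)
  {
          return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
  }

  /*
   * asm/pgtable.h: the fallback now keys off the macro itself rather
   * than CONFIG_TRANSPARENT_HUGEPAGE, so it stays out of the way on
   * any config that defines pmd_large as above.
   */
  #ifndef pmd_large
  #define pmd_large(pmd)  0
  #endif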
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h | 16 ++--------------
 arch/powerpc/include/asm/pgtable.h           |  2 +-
 2 files changed, 3 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 927d585652bc..d1318e8582ac 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1157,20 +1157,6 @@ pud_hugepage_update(struct mm_struct *mm, unsigned long addr, pud_t *pudp,
return pud_val(*pudp);
}
-/*
- * returns true for pmd migration entries, THP, devmap, hugetlb
- * But compile time dependent on THP config
- */
-static inline int pmd_large(pmd_t pmd)
-{
- return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
-}
-
-static inline int pud_large(pud_t pud)
-{
- return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
-}
-
/*
* For radix we should always find H_PAGE_HASHPTE zero. Hence
* the below will work for radix too
@@ -1455,6 +1441,7 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
*/
#define pmd_is_leaf pmd_is_leaf
#define pmd_leaf pmd_is_leaf
+#define pmd_large pmd_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
@@ -1462,6 +1449,7 @@ static inline bool pmd_is_leaf(pmd_t pmd)
#define pud_is_leaf pud_is_leaf
#define pud_leaf pud_is_leaf
+#define pud_large pud_leaf
static inline bool pud_is_leaf(pud_t pud)
{
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 7a1ba8889aea..5928b3c1458d 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -101,7 +101,7 @@ void poking_init(void);
extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];
-#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef pmd_large
#define pmd_large(pmd) 0
#endif
--
2.44.0
* Re: [PATCH v3 01/10] mm/ppc: Define pXd_large() with pXd_leaf()
2024-03-05 4:37 ` [PATCH v3 01/10] mm/ppc: Define " peterx
@ 2024-03-05 17:29 ` Christophe Leroy
2024-03-06 6:15 ` Mike Rapoport
1 sibling, 0 replies; 24+ messages in thread
From: Christophe Leroy @ 2024-03-05 17:29 UTC (permalink / raw)
To: peterx, linux-mm, linux-kernel
Cc: x86, Kirill A . Shutemov, Jason Gunthorpe, Yang Shi,
Andrew Morton, linuxppc-dev, Muchun Song, Michael Ellerman,
Nicholas Piggin, Aneesh Kumar K.V, Naveen N. Rao
On 05/03/2024 at 05:37, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> The two definitions are the same. The only difference is that pXd_large()
> is only defined with THP selected, and only on 64-bit book3s.
>
> Instead of implementing it twice, make pXd_large() a macro alias of
> pXd_leaf(), and define it unconditionally just like pXd_leaf(). This
> prepares for merging the two APIs.
>
> Cc: Michael Ellerman <mpe@ellerman.id.au>
> Cc: Nicholas Piggin <npiggin@gmail.com>
> Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
> Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
> Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
> Cc: linuxppc-dev@lists.ozlabs.org
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
[...]
* Re: [PATCH v3 01/10] mm/ppc: Define pXd_large() with pXd_leaf()
2024-03-05 4:37 ` [PATCH v3 01/10] mm/ppc: Define " peterx
2024-03-05 17:29 ` Christophe Leroy
@ 2024-03-06 6:15 ` Mike Rapoport
1 sibling, 0 replies; 24+ messages in thread
From: Mike Rapoport @ 2024-03-06 6:15 UTC (permalink / raw)
To: peterx
Cc: linux-mm, linux-kernel, Christophe Leroy, x86,
Kirill A . Shutemov, Jason Gunthorpe, Yang Shi, Andrew Morton,
linuxppc-dev, Muchun Song, Michael Ellerman, Nicholas Piggin,
Aneesh Kumar K.V, Naveen N. Rao
On Tue, Mar 05, 2024 at 12:37:41PM +0800, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> The two definitions are the same. The only difference is that pXd_large()
> is only defined with THP selected, and only on 64-bit book3s.
>
> Instead of implementing it twice, make pXd_large() a macro alias of
> pXd_leaf(), and define it unconditionally just like pXd_leaf(). This
> prepares for merging the two APIs.
>
> Cc: Michael Ellerman <mpe@ellerman.id.au>
> Cc: Nicholas Piggin <npiggin@gmail.com>
> Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
> Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
> Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
> Cc: linuxppc-dev@lists.ozlabs.org
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
[...]
--
Sincerely yours,
Mike.
* [PATCH v3 02/10] mm/ppc: Replace pXd_is_leaf() with pXd_leaf()
2024-03-05 4:37 [PATCH v3 00/10] mm/treewide: Replace pXd_large() with pXd_leaf() peterx
2024-03-05 4:37 ` [PATCH v3 01/10] mm/ppc: Define " peterx
@ 2024-03-05 4:37 ` peterx
2024-03-05 17:31 ` Christophe Leroy
2024-03-06 6:20 ` Mike Rapoport
2024-03-05 4:37 ` [PATCH v3 03/10] mm/x86: Replace p4d_large() with p4d_leaf() peterx
` (7 subsequent siblings)
9 siblings, 2 replies; 24+ messages in thread
From: peterx @ 2024-03-05 4:37 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Christophe Leroy, x86, Kirill A . Shutemov, Jason Gunthorpe,
Yang Shi, peterx, Andrew Morton, linuxppc-dev, Muchun Song,
Michael Ellerman, Nicholas Piggin, Aneesh Kumar K.V,
Naveen N. Rao
From: Peter Xu <peterx@redhat.com>
They're the same macros underneath. Drop pXd_is_leaf() and always use
pXd_leaf() instead.

Meanwhile, rather than renaming them, drop the pXd_is_leaf() fallback
definitions directly in arch/powerpc/include/asm/pgtable.h, because similar
fallback macros for pXd_leaf() are already defined in
include/linux/pgtable.h.
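For reference, the generic fallbacks in include/linux/pgtable.h have this
shape (a sketch, not a verbatim copy), which is exactly the "never a leaf"
answer the dropped powerpc stubs used to give:

  #ifndef pmd_leaf
  #define pmd_leaf(x)     0
  #endif
  #ifndef pud_leaf
  #define pud_leaf(x)     0
  #endif
  #ifndef p4d_leaf
  #define p4d_leaf(x)     0
  #endif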
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h | 10 ++++----
 arch/powerpc/include/asm/pgtable.h           | 24 --------------------
 arch/powerpc/kvm/book3s_64_mmu_radix.c       | 12 +++++-----
 arch/powerpc/mm/book3s64/radix_pgtable.c     | 14 ++++++------
 arch/powerpc/mm/pgtable.c                    |  6 ++---
 arch/powerpc/mm/pgtable_64.c                 |  6 ++---
 arch/powerpc/xmon/xmon.c                     |  6 ++---
 7 files changed, 26 insertions(+), 52 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index d1318e8582ac..3e99e409774a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1439,18 +1439,16 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
/*
* Like pmd_huge() and pmd_large(), but works regardless of config options
*/
-#define pmd_is_leaf pmd_is_leaf
-#define pmd_leaf pmd_is_leaf
+#define pmd_leaf pmd_leaf
#define pmd_large pmd_leaf
-static inline bool pmd_is_leaf(pmd_t pmd)
+static inline bool pmd_leaf(pmd_t pmd)
{
return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}
-#define pud_is_leaf pud_is_leaf
-#define pud_leaf pud_is_leaf
+#define pud_leaf pud_leaf
#define pud_large pud_leaf
-static inline bool pud_is_leaf(pud_t pud)
+static inline bool pud_leaf(pud_t pud)
{
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 5928b3c1458d..e6edf1cdbc5b 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -182,30 +182,6 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
}
#endif
-#ifndef pmd_is_leaf
-#define pmd_is_leaf pmd_is_leaf
-static inline bool pmd_is_leaf(pmd_t pmd)
-{
- return false;
-}
-#endif
-
-#ifndef pud_is_leaf
-#define pud_is_leaf pud_is_leaf
-static inline bool pud_is_leaf(pud_t pud)
-{
- return false;
-}
-#endif
-
-#ifndef p4d_is_leaf
-#define p4d_is_leaf p4d_is_leaf
-static inline bool p4d_is_leaf(p4d_t p4d)
-{
- return false;
-}
-#endif
-
#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 4a1abb9f7c05..408d98f8a514 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -503,7 +503,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
if (!pmd_present(*p))
continue;
- if (pmd_is_leaf(*p)) {
+ if (pmd_leaf(*p)) {
if (full) {
pmd_clear(p);
} else {
@@ -532,7 +532,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
if (!pud_present(*p))
continue;
- if (pud_is_leaf(*p)) {
+ if (pud_leaf(*p)) {
pud_clear(p);
} else {
pmd_t *pmd;
@@ -635,12 +635,12 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
new_pud = pud_alloc_one(kvm->mm, gpa);
pmd = NULL;
- if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
+ if (pud && pud_present(*pud) && !pud_leaf(*pud))
pmd = pmd_offset(pud, gpa);
else if (level <= 1)
new_pmd = kvmppc_pmd_alloc();
- if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
+ if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_leaf(*pmd)))
new_ptep = kvmppc_pte_alloc();
/* Check if we might have been invalidated; let the guest retry if so */
@@ -658,7 +658,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
new_pud = NULL;
}
pud = pud_offset(p4d, gpa);
- if (pud_is_leaf(*pud)) {
+ if (pud_leaf(*pud)) {
unsigned long hgpa = gpa & PUD_MASK;
/* Check if we raced and someone else has set the same thing */
@@ -709,7 +709,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
new_pmd = NULL;
}
pmd = pmd_offset(pud, gpa);
- if (pmd_is_leaf(*pmd)) {
+ if (pmd_leaf(*pmd)) {
unsigned long lgpa = gpa & PMD_MASK;
/* Check if we raced and someone else has set the same thing */
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index c6a4ac766b2b..1f8db10693e3 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -204,14 +204,14 @@ static void radix__change_memory_range(unsigned long start, unsigned long end,
pudp = pud_alloc(&init_mm, p4dp, idx);
if (!pudp)
continue;
- if (pud_is_leaf(*pudp)) {
+ if (pud_leaf(*pudp)) {
ptep = (pte_t *)pudp;
goto update_the_pte;
}
pmdp = pmd_alloc(&init_mm, pudp, idx);
if (!pmdp)
continue;
- if (pmd_is_leaf(*pmdp)) {
+ if (pmd_leaf(*pmdp)) {
ptep = pmdp_ptep(pmdp);
goto update_the_pte;
}
@@ -767,7 +767,7 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
if (!pmd_present(*pmd))
continue;
- if (pmd_is_leaf(*pmd)) {
+ if (pmd_leaf(*pmd)) {
if (IS_ALIGNED(addr, PMD_SIZE) &&
IS_ALIGNED(next, PMD_SIZE)) {
if (!direct)
@@ -807,7 +807,7 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
if (!pud_present(*pud))
continue;
- if (pud_is_leaf(*pud)) {
+ if (pud_leaf(*pud)) {
if (!IS_ALIGNED(addr, PUD_SIZE) ||
!IS_ALIGNED(next, PUD_SIZE)) {
WARN_ONCE(1, "%s: unaligned range\n", __func__);
@@ -845,7 +845,7 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct,
if (!p4d_present(*p4d))
continue;
- if (p4d_is_leaf(*p4d)) {
+ if (p4d_leaf(*p4d)) {
if (!IS_ALIGNED(addr, P4D_SIZE) ||
!IS_ALIGNED(next, P4D_SIZE)) {
WARN_ONCE(1, "%s: unaligned range\n", __func__);
@@ -1554,7 +1554,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
int pud_clear_huge(pud_t *pud)
{
- if (pud_is_leaf(*pud)) {
+ if (pud_leaf(*pud)) {
pud_clear(pud);
return 1;
}
@@ -1601,7 +1601,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
int pmd_clear_huge(pmd_t *pmd)
{
- if (pmd_is_leaf(*pmd)) {
+ if (pmd_leaf(*pmd)) {
pmd_clear(pmd);
return 1;
}
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 549a440ed7f6..9e7ba9c3851f 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -410,7 +410,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
if (p4d_none(p4d))
return NULL;
- if (p4d_is_leaf(p4d)) {
+ if (p4d_leaf(p4d)) {
ret_pte = (pte_t *)p4dp;
goto out;
}
@@ -432,7 +432,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
if (pud_none(pud))
return NULL;
- if (pud_is_leaf(pud)) {
+ if (pud_leaf(pud)) {
ret_pte = (pte_t *)pudp;
goto out;
}
@@ -471,7 +471,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
goto out;
}
- if (pmd_is_leaf(pmd)) {
+ if (pmd_leaf(pmd)) {
ret_pte = (pte_t *)pmdp;
goto out;
}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 1b366526f4f2..386c6b06eab7 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
/* 4 level page table */
struct page *p4d_page(p4d_t p4d)
{
- if (p4d_is_leaf(p4d)) {
+ if (p4d_leaf(p4d)) {
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
VM_WARN_ON(!p4d_huge(p4d));
return pte_page(p4d_pte(p4d));
@@ -111,7 +111,7 @@ struct page *p4d_page(p4d_t p4d)
struct page *pud_page(pud_t pud)
{
- if (pud_is_leaf(pud)) {
+ if (pud_leaf(pud)) {
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
VM_WARN_ON(!pud_huge(pud));
return pte_page(pud_pte(pud));
@@ -125,7 +125,7 @@ struct page *pud_page(pud_t pud)
*/
struct page *pmd_page(pmd_t pmd)
{
- if (pmd_is_leaf(pmd)) {
+ if (pmd_leaf(pmd)) {
/*
* vmalloc_to_page may be called on any vmap address (not only
* vmalloc), and it uses pmd_page() etc., when huge vmap is
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index b3b94cd37713..9669c9925225 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -3342,7 +3342,7 @@ static void show_pte(unsigned long addr)
return;
}
- if (p4d_is_leaf(*p4dp)) {
+ if (p4d_leaf(*p4dp)) {
format_pte(p4dp, p4d_val(*p4dp));
return;
}
@@ -3356,7 +3356,7 @@ static void show_pte(unsigned long addr)
return;
}
- if (pud_is_leaf(*pudp)) {
+ if (pud_leaf(*pudp)) {
format_pte(pudp, pud_val(*pudp));
return;
}
@@ -3370,7 +3370,7 @@ static void show_pte(unsigned long addr)
return;
}
- if (pmd_is_leaf(*pmdp)) {
+ if (pmd_leaf(*pmdp)) {
format_pte(pmdp, pmd_val(*pmdp));
return;
}
--
2.44.0
* Re: [PATCH v3 02/10] mm/ppc: Replace pXd_is_leaf() with pXd_leaf()
2024-03-05 4:37 ` [PATCH v3 02/10] mm/ppc: Replace pXd_is_leaf() " peterx
@ 2024-03-05 17:31 ` Christophe Leroy
2024-03-06 6:20 ` Mike Rapoport
1 sibling, 0 replies; 24+ messages in thread
From: Christophe Leroy @ 2024-03-05 17:31 UTC (permalink / raw)
To: peterx, linux-mm, linux-kernel
Cc: x86, Kirill A . Shutemov, Jason Gunthorpe, Yang Shi,
Andrew Morton, linuxppc-dev, Muchun Song, Michael Ellerman,
Nicholas Piggin, Aneesh Kumar K.V, Naveen N. Rao
On 05/03/2024 at 05:37, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> They're the same macros underneath. Drop pXd_is_leaf() and always use
> pXd_leaf() instead.
>
> Meanwhile, rather than renaming them, drop the pXd_is_leaf() fallback
> definitions directly in arch/powerpc/include/asm/pgtable.h, because similar
> fallback macros for pXd_leaf() are already defined in
> include/linux/pgtable.h.
>
> Cc: Michael Ellerman <mpe@ellerman.id.au>
> Cc: Nicholas Piggin <npiggin@gmail.com>
> Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
> Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
> Cc: linuxppc-dev@lists.ozlabs.org
> Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
In case you post a new version: in the subject line we usually use
powerpc, not ppc.
[...]
* Re: [PATCH v3 02/10] mm/ppc: Replace pXd_is_leaf() with pXd_leaf()
2024-03-05 4:37 ` [PATCH v3 02/10] mm/ppc: Replace pXd_is_leaf() " peterx
2024-03-05 17:31 ` Christophe Leroy
@ 2024-03-06 6:20 ` Mike Rapoport
1 sibling, 0 replies; 24+ messages in thread
From: Mike Rapoport @ 2024-03-06 6:20 UTC (permalink / raw)
To: peterx
Cc: linux-mm, linux-kernel, Christophe Leroy, x86,
Kirill A . Shutemov, Jason Gunthorpe, Yang Shi, Andrew Morton,
linuxppc-dev, Muchun Song, Michael Ellerman, Nicholas Piggin,
Aneesh Kumar K.V, Naveen N. Rao
On Tue, Mar 05, 2024 at 12:37:42PM +0800, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> They're the same macros underneath. Drop pXd_is_leaf() and always use
> pXd_leaf() instead.
>
> Meanwhile, rather than renaming them, drop the pXd_is_leaf() fallback
> definitions directly in arch/powerpc/include/asm/pgtable.h, because similar
> fallback macros for pXd_leaf() are already defined in
> include/linux/pgtable.h.
>
> Cc: Michael Ellerman <mpe@ellerman.id.au>
> Cc: Nicholas Piggin <npiggin@gmail.com>
> Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
> Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
> Cc: linuxppc-dev@lists.ozlabs.org
> Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
[...]
--
Sincerely yours,
Mike.
* [PATCH v3 03/10] mm/x86: Replace p4d_large() with p4d_leaf()
2024-03-05 4:37 [PATCH v3 00/10] mm/treewide: Replace pXd_large() with pXd_leaf() peterx
2024-03-05 4:37 ` [PATCH v3 01/10] mm/ppc: Define " peterx
2024-03-05 4:37 ` [PATCH v3 02/10] mm/ppc: Replace pXd_is_leaf() " peterx
@ 2024-03-05 4:37 ` peterx
2024-03-06 6:23 ` Mike Rapoport
2024-03-05 4:37 ` [PATCH v3 04/10] mm/x86: Replace pgd_large() with pgd_leaf() peterx
` (6 subsequent siblings)
9 siblings, 1 reply; 24+ messages in thread
From: peterx @ 2024-03-05 4:37 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Christophe Leroy, x86, Kirill A . Shutemov, Jason Gunthorpe,
Yang Shi, peterx, Andrew Morton, linuxppc-dev, Muchun Song,
Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen
From: Peter Xu <peterx@redhat.com>
p4d_large() is always defined as p4d_leaf(). Merge their usages. Choose
p4d_leaf() because it is a global API, while p4d_large() is not.

Only x86 has p4d_leaf() defined as of now, so this patch also removes all
remaining p4d_large() usages.
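As a reminder of what callers rely on: a leaf entry terminates the walk at
that level. The common pattern (sketched after the lookup_address_in_pgd()
hunk below, not verbatim code) is:

  p4d = p4d_offset(pgd, address);
  if (p4d_none(*p4d))
          return NULL;
  /*
   * A leaf p4d maps the range directly; there is no pud table
   * underneath it, so the entry itself is returned as the "pte".
   */
  if (p4d_leaf(*p4d))
          return (pte_t *)p4d;
  pud = pud_offset(p4d, address);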
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: x86@kernel.org
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 arch/x86/mm/fault.c          | 4 ++--
 arch/x86/mm/init_64.c        | 2 +-
 arch/x86/mm/pat/set_memory.c | 4 ++--
 arch/x86/mm/pti.c            | 2 +-
 arch/x86/power/hibernate.c   | 2 +-
 arch/x86/xen/mmu_pv.c        | 2 +-
 6 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 679b09cfe241..8b69ce3f4115 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -368,7 +368,7 @@ static void dump_pagetable(unsigned long address)
goto bad;
pr_cont("P4D %lx ", p4d_val(*p4d));
- if (!p4d_present(*p4d) || p4d_large(*p4d))
+ if (!p4d_present(*p4d) || p4d_leaf(*p4d))
goto out;
pud = pud_offset(p4d, address);
@@ -1039,7 +1039,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address)
if (!p4d_present(*p4d))
return 0;
- if (p4d_large(*p4d))
+ if (p4d_leaf(*p4d))
return spurious_kernel_fault_check(error_code, (pte_t *) p4d);
pud = pud_offset(p4d, address);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ebdbcae48011..d691e7992a9a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1197,7 +1197,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
if (!p4d_present(*p4d))
continue;
- BUILD_BUG_ON(p4d_large(*p4d));
+ BUILD_BUG_ON(p4d_leaf(*p4d));
pud_base = pud_offset(p4d, 0);
remove_pud_table(pud_base, addr, next, altmap, direct);
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index e9b448d1b1b7..5359a9c88099 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -676,7 +676,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
return NULL;
*level = PG_LEVEL_512G;
- if (p4d_large(*p4d) || !p4d_present(*p4d))
+ if (p4d_leaf(*p4d) || !p4d_present(*p4d))
return (pte_t *)p4d;
pud = pud_offset(p4d, address);
@@ -739,7 +739,7 @@ pmd_t *lookup_pmd_address(unsigned long address)
return NULL;
p4d = p4d_offset(pgd, address);
- if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
+ if (p4d_none(*p4d) || p4d_leaf(*p4d) || !p4d_present(*p4d))
return NULL;
pud = pud_offset(p4d, address);
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 669ba1c345b3..dc0a81f5f60e 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -206,7 +206,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
if (!p4d)
return NULL;
- BUILD_BUG_ON(p4d_large(*p4d) != 0);
+ BUILD_BUG_ON(p4d_leaf(*p4d) != 0);
if (p4d_none(*p4d)) {
unsigned long new_pud_page = __get_free_page(gfp);
if (WARN_ON_ONCE(!new_pud_page))
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 6f955eb1e163..28153789f873 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -165,7 +165,7 @@ int relocate_restore_code(void)
pgd = (pgd_t *)__va(read_cr3_pa()) +
pgd_index(relocated_restore_code);
p4d = p4d_offset(pgd, relocated_restore_code);
- if (p4d_large(*p4d)) {
+ if (p4d_leaf(*p4d)) {
set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
goto out;
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index e21974f2cf2d..12a43a4abebf 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1104,7 +1104,7 @@ static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
pud_t *pud_tbl;
int i;
- if (p4d_large(*p4d)) {
+ if (p4d_leaf(*p4d)) {
pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, P4D_SIZE);
return;
--
2.44.0
* Re: [PATCH v3 03/10] mm/x86: Replace p4d_large() with p4d_leaf()
2024-03-05 4:37 ` [PATCH v3 03/10] mm/x86: Replace p4d_large() with p4d_leaf() peterx
@ 2024-03-06 6:23 ` Mike Rapoport
0 siblings, 0 replies; 24+ messages in thread
From: Mike Rapoport @ 2024-03-06 6:23 UTC (permalink / raw)
To: peterx
Cc: linux-mm, linux-kernel, Christophe Leroy, x86,
Kirill A . Shutemov, Jason Gunthorpe, Yang Shi, Andrew Morton,
linuxppc-dev, Muchun Song, Thomas Gleixner, Ingo Molnar,
Borislav Petkov, Dave Hansen
On Tue, Mar 05, 2024 at 12:37:43PM +0800, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> p4d_large() is always defined as p4d_leaf(). Merge their usages. Choose
> p4d_leaf() because it is a global API, while p4d_large() is not.
>
> Only x86 has p4d_leaf() defined as of now, so this patch also removes all
> remaining p4d_large() usages.
>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: x86@kernel.org
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
[...]
--
Sincerely yours,
Mike.
* [PATCH v3 04/10] mm/x86: Replace pgd_large() with pgd_leaf()
2024-03-05 4:37 [PATCH v3 00/10] mm/treewide: Replace pXd_large() with pXd_leaf() peterx
` (2 preceding siblings ...)
2024-03-05 4:37 ` [PATCH v3 03/10] mm/x86: Replace p4d_large() with p4d_leaf() peterx
@ 2024-03-05 4:37 ` peterx
2024-03-05 16:05 ` Jason Gunthorpe
2024-03-06 6:23 ` Mike Rapoport
2024-03-05 4:37 ` [PATCH v3 05/10] mm/x86: Drop two unnecessary pud_leaf() definitions peterx
` (5 subsequent siblings)
9 siblings, 2 replies; 24+ messages in thread
From: peterx @ 2024-03-05 4:37 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Christophe Leroy, x86, Kirill A . Shutemov, Jason Gunthorpe,
Yang Shi, peterx, Andrew Morton, linuxppc-dev, Muchun Song,
Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen
From: Peter Xu <peterx@redhat.com>
pgd_leaf() is a global API while pgd_large() is not. Always use
the global pgd_leaf(), then drop pgd_large().
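Note that x86 never has leaf pgd entries, so pgd_leaf() stays a constant 0.
That constant folding is what lets the check in pti.c below remain a
compile-time BUILD_BUG_ON() rather than a runtime test (sketch of the two
pieces from the hunks below):

  #define pgd_leaf pgd_leaf
  static inline int pgd_leaf(pgd_t pgd) { return 0; }

  BUILD_BUG_ON(pgd_leaf(*pgd) != 0);      /* folds to BUILD_BUG_ON(0) */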
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: x86@kernel.org
Signed-off-by: Peter Xu <peterx@redhat.com>
---
 arch/x86/include/asm/pgtable.h | 4 ++--
 arch/x86/mm/pti.c              | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 69ed0ea0641b..d6e993a5659f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1418,8 +1418,8 @@ static inline bool pgdp_maps_userspace(void *__ptr)
return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}
-#define pgd_leaf pgd_large
-static inline int pgd_large(pgd_t pgd) { return 0; }
+#define pgd_leaf pgd_leaf
+static inline int pgd_leaf(pgd_t pgd) { return 0; }
#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index dc0a81f5f60e..c17aab24c1b3 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -185,7 +185,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
}
- BUILD_BUG_ON(pgd_large(*pgd) != 0);
+ BUILD_BUG_ON(pgd_leaf(*pgd) != 0);
return p4d_offset(pgd, address);
}
--
2.44.0
* Re: [PATCH v3 04/10] mm/x86: Replace pgd_large() with pgd_leaf()
2024-03-05 4:37 ` [PATCH v3 04/10] mm/x86: Replace pgd_large() with pgd_leaf() peterx
@ 2024-03-05 16:05 ` Jason Gunthorpe
2024-03-06 6:23 ` Mike Rapoport
1 sibling, 0 replies; 24+ messages in thread
From: Jason Gunthorpe @ 2024-03-05 16:05 UTC (permalink / raw)
To: peterx
Cc: linux-mm, linux-kernel, Christophe Leroy, x86,
Kirill A . Shutemov, Yang Shi, Andrew Morton, linuxppc-dev,
Muchun Song, Thomas Gleixner, Ingo Molnar, Borislav Petkov,
Dave Hansen
On Tue, Mar 05, 2024 at 12:37:44PM +0800, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> pgd_leaf() is a global API while pgd_large() is not. Always use
> the global pgd_leaf(), then drop pgd_large().
>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: x86@kernel.org
> Signed-off-by: Peter Xu <peterx@redhat.com>
[...]
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Jason
* Re: [PATCH v3 04/10] mm/x86: Replace pgd_large() with pgd_leaf()
2024-03-05 4:37 ` [PATCH v3 04/10] mm/x86: Replace pgd_large() with pgd_leaf() peterx
2024-03-05 16:05 ` Jason Gunthorpe
@ 2024-03-06 6:23 ` Mike Rapoport
1 sibling, 0 replies; 24+ messages in thread
From: Mike Rapoport @ 2024-03-06 6:23 UTC (permalink / raw)
To: peterx
Cc: linux-mm, linux-kernel, Christophe Leroy, x86,
Kirill A . Shutemov, Jason Gunthorpe, Yang Shi, Andrew Morton,
linuxppc-dev, Muchun Song, Thomas Gleixner, Ingo Molnar,
Borislav Petkov, Dave Hansen
On Tue, Mar 05, 2024 at 12:37:44PM +0800, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> pgd_leaf() is a global API while pgd_large() is not. Always use
> the global pgd_leaf(), then drop pgd_large().
>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: x86@kernel.org
> Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
[...]
--
Sincerely yours,
Mike.
* [PATCH v3 05/10] mm/x86: Drop two unnecessary pud_leaf() definitions
2024-03-05 4:37 [PATCH v3 00/10] mm/treewide: Replace pXd_large() with pXd_leaf() peterx
` (3 preceding siblings ...)
2024-03-05 4:37 ` [PATCH v3 04/10] mm/x86: Replace pgd_large() with pgd_leaf() peterx
@ 2024-03-05 4:37 ` peterx
2024-03-06 6:27 ` Mike Rapoport
2024-03-05 4:37 ` [PATCH v3 06/10] mm/kasan: Use pXd_leaf() in shadow_mapped() peterx
` (4 subsequent siblings)
9 siblings, 1 reply; 24+ messages in thread
From: peterx @ 2024-03-05 4:37 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Christophe Leroy, x86, Kirill A . Shutemov, Jason Gunthorpe,
Yang Shi, peterx, Andrew Morton, linuxppc-dev, Muchun Song,
Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen
From: Peter Xu <peterx@redhat.com>
pud_leaf() already has a fallback macro defined in include/linux/pgtable.h.
Drop the two extra definitions for x86.
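With only the generic fallback left, callers can keep using pud_leaf()
unconditionally; on configurations without a real pud level it evaluates
to 0 and the leaf branch is compiled away, e.g. (sketch):

  if (pud_leaf(*pud))             /* constant 0 when the pud is folded */
          return (pte_t *)pud;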
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: x86@kernel.org
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
arch/x86/include/asm/pgtable.h | 1 -
include/asm-generic/pgtable-nopmd.h | 1 -
2 files changed, 2 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index d6e993a5659f..9db7a38a0e9f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1097,7 +1097,6 @@ static inline int pud_bad(pud_t pud)
return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
-#define pud_leaf pud_large
static inline int pud_large(pud_t pud)
{
return 0;
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 8ffd64e7a24c..fa27e16bbe1b 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -31,7 +31,6 @@ static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_user(pud_t pud) { return 0; }
-static inline int pud_leaf(pud_t pud) { return 0; }
static inline void pud_clear(pud_t *pud) { }
#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
--
2.44.0
^ permalink raw reply [flat|nested] 24+ messages in thread

* Re: [PATCH v3 05/10] mm/x86: Drop two unnecessary pud_leaf() definitions
2024-03-05 4:37 ` [PATCH v3 05/10] mm/x86: Drop two unnecessary pud_leaf() definitions peterx
@ 2024-03-06 6:27 ` Mike Rapoport
0 siblings, 0 replies; 24+ messages in thread
From: Mike Rapoport @ 2024-03-06 6:27 UTC (permalink / raw)
To: peterx
Cc: linux-mm, linux-kernel, Christophe Leroy, x86,
Kirill A . Shutemov, Jason Gunthorpe, Yang Shi, Andrew Morton,
linuxppc-dev, Muchun Song, Thomas Gleixner, Ingo Molnar,
Borislav Petkov, Dave Hansen
On Tue, Mar 05, 2024 at 12:37:45PM +0800, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> pud_leaf() has a fallback macro defined in include/linux/pgtable.h already.
> Drop the extra two for x86.
>
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Cc: x86@kernel.org
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
> Acked-by: Thomas Gleixner <tglx@linutronix.de>
> Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
> ---
> arch/x86/include/asm/pgtable.h | 1 -
> include/asm-generic/pgtable-nopmd.h | 1 -
> 2 files changed, 2 deletions(-)
>
> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
> index d6e993a5659f..9db7a38a0e9f 100644
> --- a/arch/x86/include/asm/pgtable.h
> +++ b/arch/x86/include/asm/pgtable.h
> @@ -1097,7 +1097,6 @@ static inline int pud_bad(pud_t pud)
> return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
> }
> #else
> -#define pud_leaf pud_large
> static inline int pud_large(pud_t pud)
> {
> return 0;
> diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
> index 8ffd64e7a24c..fa27e16bbe1b 100644
> --- a/include/asm-generic/pgtable-nopmd.h
> +++ b/include/asm-generic/pgtable-nopmd.h
> @@ -31,7 +31,6 @@ static inline int pud_none(pud_t pud) { return 0; }
> static inline int pud_bad(pud_t pud) { return 0; }
> static inline int pud_present(pud_t pud) { return 1; }
> static inline int pud_user(pud_t pud) { return 0; }
> -static inline int pud_leaf(pud_t pud) { return 0; }
> static inline void pud_clear(pud_t *pud) { }
> #define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
>
> --
> 2.44.0
>
>
--
Sincerely yours,
Mike.
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH v3 06/10] mm/kasan: Use pXd_leaf() in shadow_mapped()
2024-03-05 4:37 [PATCH v3 00/10] mm/treewide: Replace pXd_large() with pXd_leaf() peterx
` (4 preceding siblings ...)
2024-03-05 4:37 ` [PATCH v3 05/10] mm/x86: Drop two unnecessary pud_leaf() definitions peterx
@ 2024-03-05 4:37 ` peterx
2024-03-06 6:28 ` Mike Rapoport
2024-03-05 4:37 ` [PATCH v3 07/10] mm/treewide: Replace pmd_large() with pmd_leaf() peterx
` (3 subsequent siblings)
9 siblings, 1 reply; 24+ messages in thread
From: peterx @ 2024-03-05 4:37 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Christophe Leroy, x86, Kirill A . Shutemov, Jason Gunthorpe,
Yang Shi, peterx, Andrew Morton, linuxppc-dev, Muchun Song,
Andrey Ryabinin, Alexander Potapenko, Andrey Konovalov,
Dmitry Vyukov, Vincenzo Frascino, kasan-dev
From: Peter Xu <peterx@redhat.com>
There is an old trick in shadow_mapped() to use pXd_bad() to detect huge
pages. After commit 93fab1b22ef7 ("mm: add generic p?d_leaf() macros") we
have a global API for huge mappings. Use that to replace the trick.
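For context on why the trick worked at all: pXd_bad() is meant to reject entries that cannot be dereferenced as a lower-level page table, and a leaf entry is exactly that. On x86, for instance, a huge mapping carries flag bits such as _PAGE_PSE that fall outside the mask of a sane table pointer, so pXd_bad() fires on it. A sketch based on the pud_bad() context visible in patch 05 above (the flag names are x86-specific):

/* x86 (sketch): _KERNPG_TABLE covers the flags of a normal table
 * pointer (PRESENT, RW, ACCESSED, DIRTY, ...); a leaf pud also has
 * _PAGE_PSE set, which survives the mask and makes the test nonzero. */
static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}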
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: kasan-dev@googlegroups.com
Signed-off-by: Peter Xu <peterx@redhat.com>
---
mm/kasan/shadow.c | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 9ef84f31833f..d6210ca48dda 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -199,19 +199,12 @@ static bool shadow_mapped(unsigned long addr)
pud = pud_offset(p4d, addr);
if (pud_none(*pud))
return false;
-
- /*
- * We can't use pud_large() or pud_huge(), the first one is
- * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
- * pud_bad(), if pud is bad then it's bad because it's huge.
- */
- if (pud_bad(*pud))
+ if (pud_leaf(*pud))
return true;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
return false;
-
- if (pmd_bad(*pmd))
+ if (pmd_leaf(*pmd))
return true;
pte = pte_offset_kernel(pmd, addr);
return !pte_none(ptep_get(pte));
--
2.44.0
^ permalink raw reply [flat|nested] 24+ messages in thread

* Re: [PATCH v3 06/10] mm/kasan: Use pXd_leaf() in shadow_mapped()
2024-03-05 4:37 ` [PATCH v3 06/10] mm/kasan: Use pXd_leaf() in shadow_mapped() peterx
@ 2024-03-06 6:28 ` Mike Rapoport
0 siblings, 0 replies; 24+ messages in thread
From: Mike Rapoport @ 2024-03-06 6:28 UTC (permalink / raw)
To: peterx
Cc: linux-mm, linux-kernel, Christophe Leroy, x86,
Kirill A . Shutemov, Jason Gunthorpe, Yang Shi, Andrew Morton,
linuxppc-dev, Muchun Song, Andrey Ryabinin, Alexander Potapenko,
Andrey Konovalov, Dmitry Vyukov, Vincenzo Frascino, kasan-dev
On Tue, Mar 05, 2024 at 12:37:46PM +0800, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> There is an old trick in shadow_mapped() to use pXd_bad() to detect huge
> pages. After commit 93fab1b22ef7 ("mm: add generic p?d_leaf() macros") we
> have a global API for huge mappings. Use that to replace the trick.
>
> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
> Cc: Alexander Potapenko <glider@google.com>
> Cc: Andrey Konovalov <andreyknvl@gmail.com>
> Cc: Dmitry Vyukov <dvyukov@google.com>
> Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
> Cc: kasan-dev@googlegroups.com
> Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
> ---
> mm/kasan/shadow.c | 11 ++---------
> 1 file changed, 2 insertions(+), 9 deletions(-)
>
> diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
> index 9ef84f31833f..d6210ca48dda 100644
> --- a/mm/kasan/shadow.c
> +++ b/mm/kasan/shadow.c
> @@ -199,19 +199,12 @@ static bool shadow_mapped(unsigned long addr)
> pud = pud_offset(p4d, addr);
> if (pud_none(*pud))
> return false;
> -
> - /*
> - * We can't use pud_large() or pud_huge(), the first one is
> - * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
> - * pud_bad(), if pud is bad then it's bad because it's huge.
> - */
> - if (pud_bad(*pud))
> + if (pud_leaf(*pud))
> return true;
> pmd = pmd_offset(pud, addr);
> if (pmd_none(*pmd))
> return false;
> -
> - if (pmd_bad(*pmd))
> + if (pmd_leaf(*pmd))
> return true;
> pte = pte_offset_kernel(pmd, addr);
> return !pte_none(ptep_get(pte));
> --
> 2.44.0
>
>
--
Sincerely yours,
Mike.
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH v3 07/10] mm/treewide: Replace pmd_large() with pmd_leaf()
2024-03-05 4:37 [PATCH v3 00/10] mm/treewide: Replace pXd_large() with pXd_leaf() peterx
` (5 preceding siblings ...)
2024-03-05 4:37 ` [PATCH v3 06/10] mm/kasan: Use pXd_leaf() in shadow_mapped() peterx
@ 2024-03-05 4:37 ` peterx
2024-03-06 6:31 ` Mike Rapoport
2024-03-05 4:37 ` [PATCH v3 08/10] mm/treewide: Replace pud_large() with pud_leaf() peterx
` (2 subsequent siblings)
9 siblings, 1 reply; 24+ messages in thread
From: peterx @ 2024-03-05 4:37 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Christophe Leroy, x86, Kirill A . Shutemov, Jason Gunthorpe,
Yang Shi, peterx, Andrew Morton, linuxppc-dev, Muchun Song
From: Peter Xu <peterx@redhat.com>
pmd_large() is always defined as pmd_leaf(). Merge their usages. Choose
pmd_leaf() because it is a global API, while pmd_large() is not.
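By this point in the series every architecture that still provides pmd_large() defines it, conceptually, as nothing more than an alias (illustrative, not a verbatim header):

#define pmd_large(pmd)	pmd_leaf(pmd)

which is what makes the treewide replacement below purely mechanical, with no functional change.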
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
arch/arm/mm/dump.c | 4 ++--
arch/powerpc/mm/book3s64/pgtable.c | 2 +-
arch/powerpc/mm/book3s64/radix_pgtable.c | 2 +-
arch/powerpc/mm/pgtable_64.c | 2 +-
arch/s390/boot/vmem.c | 2 +-
arch/s390/include/asm/pgtable.h | 8 ++++----
arch/s390/mm/gmap.c | 12 ++++++------
arch/s390/mm/hugetlbpage.c | 2 +-
arch/s390/mm/pageattr.c | 2 +-
arch/s390/mm/pgtable.c | 6 +++---
arch/s390/mm/vmem.c | 6 +++---
arch/sparc/mm/init_64.c | 4 ++--
arch/x86/boot/compressed/ident_map_64.c | 2 +-
arch/x86/kvm/mmu/mmu.c | 2 +-
arch/x86/mm/fault.c | 8 ++++----
arch/x86/mm/init_32.c | 2 +-
arch/x86/mm/init_64.c | 8 ++++----
arch/x86/mm/kasan_init_64.c | 2 +-
arch/x86/mm/mem_encrypt_identity.c | 4 ++--
arch/x86/mm/pat/set_memory.c | 4 ++--
arch/x86/mm/pgtable.c | 2 +-
arch/x86/mm/pti.c | 4 ++--
arch/x86/power/hibernate.c | 2 +-
arch/x86/xen/mmu_pv.c | 4 ++--
drivers/misc/sgi-gru/grufault.c | 2 +-
25 files changed, 49 insertions(+), 49 deletions(-)
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index a9381095ab36..cd032522d902 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -349,12 +349,12 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
domain = get_domain_name(pmd);
- if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
+ if (pmd_none(*pmd) || pmd_leaf(*pmd) || !pmd_present(*pmd))
note_page(st, addr, 4, pmd_val(*pmd), domain);
else
walk_pte(st, pmd, addr, domain);
- if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
+ if (SECTION_SIZE < PMD_SIZE && pmd_leaf(pmd[1])) {
addr += SECTION_SIZE;
pmd++;
domain = get_domain_name(pmd);
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 3438ab72c346..45f526547b27 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -113,7 +113,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
assert_spin_locked(pmd_lockptr(mm, pmdp));
- WARN_ON(!(pmd_large(pmd)));
+ WARN_ON(!(pmd_leaf(pmd)));
#endif
trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 1f8db10693e3..5cc4008329be 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -924,7 +924,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
unsigned long addr, unsigned long next)
{
- int large = pmd_large(*pmdp);
+ int large = pmd_leaf(*pmdp);
if (large)
vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 386c6b06eab7..9b99113cb51a 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -132,7 +132,7 @@ struct page *pmd_page(pmd_t pmd)
* enabled so these checks can't be used.
*/
if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
- VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+ VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd)));
return pte_page(pmd_pte(pmd));
}
return virt_to_page(pmd_page_vaddr(pmd));
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index e3a4500a5a75..348ab02b1028 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -333,7 +333,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
}
pte = boot_pte_alloc();
pmd_populate(&init_mm, pmd, pte);
- } else if (pmd_large(*pmd)) {
+ } else if (pmd_leaf(*pmd)) {
continue;
}
pgtable_pte_populate(pmd, addr, next, mode);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4b91e65c85d9..431d03d5116b 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -721,7 +721,7 @@ static inline int pmd_large(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
- if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
+ if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
return 1;
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
@@ -820,8 +820,8 @@ static inline int pte_protnone(pte_t pte)
static inline int pmd_protnone(pmd_t pmd)
{
- /* pmd_large(pmd) implies pmd_present(pmd) */
- return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
+ /* pmd_leaf(pmd) implies pmd_present(pmd) */
+ return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif
@@ -1385,7 +1385,7 @@ static inline unsigned long pmd_deref(pmd_t pmd)
unsigned long origin_mask;
origin_mask = _SEGMENT_ENTRY_ORIGIN;
- if (pmd_large(pmd))
+ if (pmd_leaf(pmd))
origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 4d2674f89322..b78ded9d3bf7 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -603,7 +603,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
pmd = pmd_offset(pud, vmaddr);
VM_BUG_ON(pmd_none(*pmd));
/* Are we allowed to use huge pages? */
- if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
+ if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
return -EFAULT;
/* Link gmap segment table entry location to page table. */
rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
@@ -615,7 +615,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
rc = radix_tree_insert(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT, table);
if (!rc) {
- if (pmd_large(*pmd)) {
+ if (pmd_leaf(*pmd)) {
*table = (pmd_val(*pmd) &
_SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
| _SEGMENT_ENTRY_GMAP_UC;
@@ -945,7 +945,7 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
}
/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
- if (!pmd_large(*pmdp))
+ if (!pmd_leaf(*pmdp))
spin_unlock(&gmap->guest_table_lock);
return pmdp;
}
@@ -957,7 +957,7 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
*/
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
- if (pmd_large(*pmdp))
+ if (pmd_leaf(*pmdp))
spin_unlock(&gmap->guest_table_lock);
}
@@ -1068,7 +1068,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
rc = -EAGAIN;
pmdp = gmap_pmd_op_walk(gmap, gaddr);
if (pmdp) {
- if (!pmd_large(*pmdp)) {
+ if (!pmd_leaf(*pmdp)) {
rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
bits);
if (!rc) {
@@ -2500,7 +2500,7 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
if (!pmdp)
return;
- if (pmd_large(*pmdp)) {
+ if (pmd_leaf(*pmdp)) {
if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
bitmap_fill(bitmap, _PAGE_ENTRIES);
} else {
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 297a6d897d5a..1ccb5b40fe92 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -235,7 +235,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
int pmd_huge(pmd_t pmd)
{
- return pmd_large(pmd);
+ return pmd_leaf(pmd);
}
int pud_huge(pud_t pud)
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 631e3a4ee2de..9f55d5a3210c 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -185,7 +185,7 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
if (pmd_none(*pmdp))
return -EINVAL;
next = pmd_addr_end(addr, end);
- if (pmd_large(*pmdp)) {
+ if (pmd_leaf(*pmdp)) {
need_split = !!(flags & SET_MEMORY_4K);
need_split |= !!(addr & ~PMD_MASK);
need_split |= !!(addr + PMD_SIZE > next);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b71432b15d66..9ac66304d776 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -827,7 +827,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
return key ? -EFAULT : 0;
}
- if (pmd_large(*pmdp)) {
+ if (pmd_leaf(*pmdp)) {
paddr = pmd_val(*pmdp) & HPAGE_MASK;
paddr |= addr & ~HPAGE_MASK;
/*
@@ -938,7 +938,7 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
return 0;
}
- if (pmd_large(*pmdp)) {
+ if (pmd_leaf(*pmdp)) {
paddr = pmd_val(*pmdp) & HPAGE_MASK;
paddr |= addr & ~HPAGE_MASK;
cc = page_reset_referenced(paddr);
@@ -1002,7 +1002,7 @@ int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
return 0;
}
- if (pmd_large(*pmdp)) {
+ if (pmd_leaf(*pmdp)) {
paddr = pmd_val(*pmdp) & HPAGE_MASK;
paddr |= addr & ~HPAGE_MASK;
*key = page_get_storage_key(paddr);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index eb100479f7be..afe5edf2a604 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -236,7 +236,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
if (!add) {
if (pmd_none(*pmd))
continue;
- if (pmd_large(*pmd)) {
+ if (pmd_leaf(*pmd)) {
if (IS_ALIGNED(addr, PMD_SIZE) &&
IS_ALIGNED(next, PMD_SIZE)) {
if (!direct)
@@ -281,7 +281,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
if (!pte)
goto out;
pmd_populate(&init_mm, pmd, pte);
- } else if (pmd_large(*pmd)) {
+ } else if (pmd_leaf(*pmd)) {
if (!direct)
vmemmap_use_sub_pmd(addr, next);
continue;
@@ -610,7 +610,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
if (!pte)
goto out;
pmd_populate(&init_mm, pmd, pte);
- } else if (WARN_ON_ONCE(pmd_large(*pmd))) {
+ } else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
goto out;
}
ptep = pte_offset_kernel(pmd, addr);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f83017992eaa..5e067b6a4464 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1672,7 +1672,7 @@ bool kern_addr_valid(unsigned long addr)
if (pmd_none(*pmd))
return false;
- if (pmd_large(*pmd))
+ if (pmd_leaf(*pmd))
return pfn_valid(pmd_pfn(*pmd));
pte = pte_offset_kernel(pmd, addr);
@@ -2968,7 +2968,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
struct mm_struct *mm;
pmd_t entry = *pmd;
- if (!pmd_large(entry) || !pmd_young(entry))
+ if (!pmd_leaf(entry) || !pmd_young(entry))
return;
pte = pmd_val(entry);
diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
index d040080d7edb..71c6e2fdcec7 100644
--- a/arch/x86/boot/compressed/ident_map_64.c
+++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -284,7 +284,7 @@ static int set_clr_page_flags(struct x86_mapping_info *info,
pudp = pud_offset(p4dp, address);
pmdp = pmd_offset(pudp, address);
- if (pmd_large(*pmdp))
+ if (pmd_leaf(*pmdp))
ptep = split_large_pmd(info, pmdp, address);
else
ptep = pte_offset_kernel(pmdp, address);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2d6cdeab1f8a..c15123248c52 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3135,7 +3135,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
if (pmd_none(pmd) || !pmd_present(pmd))
goto out;
- if (pmd_large(pmd))
+ if (pmd_leaf(pmd))
level = PG_LEVEL_2M;
out:
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8b69ce3f4115..09417f950343 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -250,7 +250,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (!pmd_k)
return -1;
- if (pmd_large(*pmd_k))
+ if (pmd_leaf(*pmd_k))
return 0;
pte_k = pte_offset_kernel(pmd_k, address);
@@ -319,7 +319,7 @@ static void dump_pagetable(unsigned long address)
* And let's rather not kmap-atomic the pte, just in case
* it's allocated already:
*/
- if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
+ if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_leaf(*pmd))
goto out;
pte = pte_offset_kernel(pmd, address);
@@ -384,7 +384,7 @@ static void dump_pagetable(unsigned long address)
goto bad;
pr_cont("PMD %lx ", pmd_val(*pmd));
- if (!pmd_present(*pmd) || pmd_large(*pmd))
+ if (!pmd_present(*pmd) || pmd_leaf(*pmd))
goto out;
pte = pte_offset_kernel(pmd, address);
@@ -1053,7 +1053,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address)
if (!pmd_present(*pmd))
return 0;
- if (pmd_large(*pmd))
+ if (pmd_leaf(*pmd))
return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
pte = pte_offset_kernel(pmd, address);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 5c736b707cae..ac41b1e0940d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -463,7 +463,7 @@ void __init native_pagetable_init(void)
break;
/* should not be large page here */
- if (pmd_large(*pmd)) {
+ if (pmd_leaf(*pmd)) {
pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
pfn, pmd, __pa(pmd));
BUG_ON(1);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d691e7992a9a..2c5490e58f41 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -530,7 +530,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
}
if (!pmd_none(*pmd)) {
- if (!pmd_large(*pmd)) {
+ if (!pmd_leaf(*pmd)) {
spin_lock(&init_mm.page_table_lock);
pte = (pte_t *)pmd_page_vaddr(*pmd);
paddr_last = phys_pte_init(pte, paddr,
@@ -1114,7 +1114,7 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
if (!pmd_present(*pmd))
continue;
- if (pmd_large(*pmd)) {
+ if (pmd_leaf(*pmd)) {
if (IS_ALIGNED(addr, PMD_SIZE) &&
IS_ALIGNED(next, PMD_SIZE)) {
if (!direct)
@@ -1520,9 +1520,9 @@ void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
unsigned long addr, unsigned long next)
{
- int large = pmd_large(*pmd);
+ int large = pmd_leaf(*pmd);
- if (pmd_large(*pmd)) {
+ if (pmd_leaf(*pmd)) {
vmemmap_verify((pte_t *)pmd, node, addr, next);
vmemmap_use_sub_pmd(addr, next);
}
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0302491d799d..f41d26bc9161 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -95,7 +95,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
- if (!pmd_large(*pmd))
+ if (!pmd_leaf(*pmd))
kasan_populate_pmd(pmd, addr, next, nid);
} while (pmd++, addr = next, addr != end);
}
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index d73aeb16417f..bca4fea80579 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -161,7 +161,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
return;
pmd = pmd_offset(pud, ppd->vaddr);
- if (pmd_large(*pmd))
+ if (pmd_leaf(*pmd))
return;
set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
@@ -185,7 +185,7 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
}
- if (pmd_large(*pmd))
+ if (pmd_leaf(*pmd))
return;
pte = pte_offset_kernel(pmd, ppd->vaddr);
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 5359a9c88099..b4037fe08eed 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -692,7 +692,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
return NULL;
*level = PG_LEVEL_2M;
- if (pmd_large(*pmd) || !pmd_present(*pmd))
+ if (pmd_leaf(*pmd) || !pmd_present(*pmd))
return (pte_t *)pmd;
*level = PG_LEVEL_4K;
@@ -1229,7 +1229,7 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
* Try to unmap in 2M chunks.
*/
while (end - start >= PMD_SIZE) {
- if (pmd_large(*pmd))
+ if (pmd_leaf(*pmd))
pmd_clear(pmd);
else
__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 0cbc1b8e8e3d..d05dd86ceb41 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -792,7 +792,7 @@ int pud_clear_huge(pud_t *pud)
*/
int pmd_clear_huge(pmd_t *pmd)
{
- if (pmd_large(*pmd)) {
+ if (pmd_leaf(*pmd)) {
pmd_clear(pmd);
return 1;
}
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index c17aab24c1b3..0442e8f479a6 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -252,7 +252,7 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
return NULL;
/* We can't do anything sensible if we hit a large mapping. */
- if (pmd_large(*pmd)) {
+ if (pmd_leaf(*pmd)) {
WARN_ON(1);
return NULL;
}
@@ -341,7 +341,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
continue;
}
- if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
+ if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
target_pmd = pti_user_pagetable_walk_pmd(addr);
if (WARN_ON(!target_pmd))
return;
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 28153789f873..277eaf610e0e 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -175,7 +175,7 @@ int relocate_restore_code(void)
goto out;
}
pmd = pmd_offset(pud, relocated_restore_code);
- if (pmd_large(*pmd)) {
+ if (pmd_leaf(*pmd)) {
set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
goto out;
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 12a43a4abebf..dde551bbd231 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1059,7 +1059,7 @@ static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
pte_t *pte_tbl;
int i;
- if (pmd_large(*pmd)) {
+ if (pmd_leaf(*pmd)) {
pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, PMD_SIZE);
return;
@@ -1871,7 +1871,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
if (!pmd_present(pmd))
return 0;
pa = pmd_val(pmd) & PTE_PFN_MASK;
- if (pmd_large(pmd))
+ if (pmd_leaf(pmd))
return pa + (vaddr & ~PMD_MASK);
pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 629edb6486de..3557d78ee47a 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -227,7 +227,7 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
if (unlikely(pmd_none(*pmdp)))
goto err;
#ifdef CONFIG_X86_64
- if (unlikely(pmd_large(*pmdp)))
+ if (unlikely(pmd_leaf(*pmdp)))
pte = ptep_get((pte_t *)pmdp);
else
#endif
--
2.44.0
^ permalink raw reply [flat|nested] 24+ messages in thread

* Re: [PATCH v3 07/10] mm/treewide: Replace pmd_large() with pmd_leaf()
2024-03-05 4:37 ` [PATCH v3 07/10] mm/treewide: Replace pmd_large() with pmd_leaf() peterx
@ 2024-03-06 6:31 ` Mike Rapoport
0 siblings, 0 replies; 24+ messages in thread
From: Mike Rapoport @ 2024-03-06 6:31 UTC (permalink / raw)
To: peterx
Cc: linux-mm, linux-kernel, Christophe Leroy, x86,
Kirill A . Shutemov, Jason Gunthorpe, Yang Shi, Andrew Morton,
linuxppc-dev, Muchun Song
On Tue, Mar 05, 2024 at 12:37:47PM +0800, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> pmd_large() is always defined as pmd_leaf(). Merge their usages. Choose
> pmd_leaf() because it is a global API, while pmd_large() is not.
>
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
> arch/arm/mm/dump.c | 4 ++--
> arch/powerpc/mm/book3s64/pgtable.c | 2 +-
> arch/powerpc/mm/book3s64/radix_pgtable.c | 2 +-
> arch/powerpc/mm/pgtable_64.c | 2 +-
> arch/s390/boot/vmem.c | 2 +-
> arch/s390/include/asm/pgtable.h | 8 ++++----
> arch/s390/mm/gmap.c | 12 ++++++------
> arch/s390/mm/hugetlbpage.c | 2 +-
> arch/s390/mm/pageattr.c | 2 +-
> arch/s390/mm/pgtable.c | 6 +++---
> arch/s390/mm/vmem.c | 6 +++---
> arch/sparc/mm/init_64.c | 4 ++--
> arch/x86/boot/compressed/ident_map_64.c | 2 +-
> arch/x86/kvm/mmu/mmu.c | 2 +-
> arch/x86/mm/fault.c | 8 ++++----
> arch/x86/mm/init_32.c | 2 +-
> arch/x86/mm/init_64.c | 8 ++++----
> arch/x86/mm/kasan_init_64.c | 2 +-
> arch/x86/mm/mem_encrypt_identity.c | 4 ++--
> arch/x86/mm/pat/set_memory.c | 4 ++--
> arch/x86/mm/pgtable.c | 2 +-
> arch/x86/mm/pti.c | 4 ++--
> arch/x86/power/hibernate.c | 2 +-
> arch/x86/xen/mmu_pv.c | 4 ++--
> drivers/misc/sgi-gru/grufault.c | 2 +-
> 25 files changed, 49 insertions(+), 49 deletions(-)
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
>
> diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
> index a9381095ab36..cd032522d902 100644
> --- a/arch/arm/mm/dump.c
> +++ b/arch/arm/mm/dump.c
> @@ -349,12 +349,12 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
> for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
> addr = start + i * PMD_SIZE;
> domain = get_domain_name(pmd);
> - if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
> + if (pmd_none(*pmd) || pmd_leaf(*pmd) || !pmd_present(*pmd))
> note_page(st, addr, 4, pmd_val(*pmd), domain);
> else
> walk_pte(st, pmd, addr, domain);
>
> - if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
> + if (SECTION_SIZE < PMD_SIZE && pmd_leaf(pmd[1])) {
> addr += SECTION_SIZE;
> pmd++;
> domain = get_domain_name(pmd);
> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
> index 3438ab72c346..45f526547b27 100644
> --- a/arch/powerpc/mm/book3s64/pgtable.c
> +++ b/arch/powerpc/mm/book3s64/pgtable.c
> @@ -113,7 +113,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
>
> WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
> assert_spin_locked(pmd_lockptr(mm, pmdp));
> - WARN_ON(!(pmd_large(pmd)));
> + WARN_ON(!(pmd_leaf(pmd)));
> #endif
> trace_hugepage_set_pmd(addr, pmd_val(pmd));
> return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
> diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
> index 1f8db10693e3..5cc4008329be 100644
> --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
> +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
> @@ -924,7 +924,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
> int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
> unsigned long addr, unsigned long next)
> {
> - int large = pmd_large(*pmdp);
> + int large = pmd_leaf(*pmdp);
>
> if (large)
> vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
> diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
> index 386c6b06eab7..9b99113cb51a 100644
> --- a/arch/powerpc/mm/pgtable_64.c
> +++ b/arch/powerpc/mm/pgtable_64.c
> @@ -132,7 +132,7 @@ struct page *pmd_page(pmd_t pmd)
> * enabled so these checks can't be used.
> */
> if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
> - VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
> + VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd)));
> return pte_page(pmd_pte(pmd));
> }
> return virt_to_page(pmd_page_vaddr(pmd));
> diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
> index e3a4500a5a75..348ab02b1028 100644
> --- a/arch/s390/boot/vmem.c
> +++ b/arch/s390/boot/vmem.c
> @@ -333,7 +333,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
> }
> pte = boot_pte_alloc();
> pmd_populate(&init_mm, pmd, pte);
> - } else if (pmd_large(*pmd)) {
> + } else if (pmd_leaf(*pmd)) {
> continue;
> }
> pgtable_pte_populate(pmd, addr, next, mode);
> diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
> index 4b91e65c85d9..431d03d5116b 100644
> --- a/arch/s390/include/asm/pgtable.h
> +++ b/arch/s390/include/asm/pgtable.h
> @@ -721,7 +721,7 @@ static inline int pmd_large(pmd_t pmd)
>
> static inline int pmd_bad(pmd_t pmd)
> {
> - if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
> + if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
> return 1;
> return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
> }
> @@ -820,8 +820,8 @@ static inline int pte_protnone(pte_t pte)
>
> static inline int pmd_protnone(pmd_t pmd)
> {
> - /* pmd_large(pmd) implies pmd_present(pmd) */
> - return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
> + /* pmd_leaf(pmd) implies pmd_present(pmd) */
> + return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
> }
> #endif
>
> @@ -1385,7 +1385,7 @@ static inline unsigned long pmd_deref(pmd_t pmd)
> unsigned long origin_mask;
>
> origin_mask = _SEGMENT_ENTRY_ORIGIN;
> - if (pmd_large(pmd))
> + if (pmd_leaf(pmd))
> origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
> return (unsigned long)__va(pmd_val(pmd) & origin_mask);
> }
> diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
> index 4d2674f89322..b78ded9d3bf7 100644
> --- a/arch/s390/mm/gmap.c
> +++ b/arch/s390/mm/gmap.c
> @@ -603,7 +603,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
> pmd = pmd_offset(pud, vmaddr);
> VM_BUG_ON(pmd_none(*pmd));
> /* Are we allowed to use huge pages? */
> - if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
> + if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
> return -EFAULT;
> /* Link gmap segment table entry location to page table. */
> rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
> @@ -615,7 +615,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
> rc = radix_tree_insert(&gmap->host_to_guest,
> vmaddr >> PMD_SHIFT, table);
> if (!rc) {
> - if (pmd_large(*pmd)) {
> + if (pmd_leaf(*pmd)) {
> *table = (pmd_val(*pmd) &
> _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
> | _SEGMENT_ENTRY_GMAP_UC;
> @@ -945,7 +945,7 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
> }
>
> /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
> - if (!pmd_large(*pmdp))
> + if (!pmd_leaf(*pmdp))
> spin_unlock(&gmap->guest_table_lock);
> return pmdp;
> }
> @@ -957,7 +957,7 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
> */
> static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
> {
> - if (pmd_large(*pmdp))
> + if (pmd_leaf(*pmdp))
> spin_unlock(&gmap->guest_table_lock);
> }
>
> @@ -1068,7 +1068,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
> rc = -EAGAIN;
> pmdp = gmap_pmd_op_walk(gmap, gaddr);
> if (pmdp) {
> - if (!pmd_large(*pmdp)) {
> + if (!pmd_leaf(*pmdp)) {
> rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
> bits);
> if (!rc) {
> @@ -2500,7 +2500,7 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
> if (!pmdp)
> return;
>
> - if (pmd_large(*pmdp)) {
> + if (pmd_leaf(*pmdp)) {
> if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
> bitmap_fill(bitmap, _PAGE_ENTRIES);
> } else {
> diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
> index 297a6d897d5a..1ccb5b40fe92 100644
> --- a/arch/s390/mm/hugetlbpage.c
> +++ b/arch/s390/mm/hugetlbpage.c
> @@ -235,7 +235,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
>
> int pmd_huge(pmd_t pmd)
> {
> - return pmd_large(pmd);
> + return pmd_leaf(pmd);
> }
>
> int pud_huge(pud_t pud)
> diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
> index 631e3a4ee2de..9f55d5a3210c 100644
> --- a/arch/s390/mm/pageattr.c
> +++ b/arch/s390/mm/pageattr.c
> @@ -185,7 +185,7 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
> if (pmd_none(*pmdp))
> return -EINVAL;
> next = pmd_addr_end(addr, end);
> - if (pmd_large(*pmdp)) {
> + if (pmd_leaf(*pmdp)) {
> need_split = !!(flags & SET_MEMORY_4K);
> need_split |= !!(addr & ~PMD_MASK);
> need_split |= !!(addr + PMD_SIZE > next);
> diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
> index b71432b15d66..9ac66304d776 100644
> --- a/arch/s390/mm/pgtable.c
> +++ b/arch/s390/mm/pgtable.c
> @@ -827,7 +827,7 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
> return key ? -EFAULT : 0;
> }
>
> - if (pmd_large(*pmdp)) {
> + if (pmd_leaf(*pmdp)) {
> paddr = pmd_val(*pmdp) & HPAGE_MASK;
> paddr |= addr & ~HPAGE_MASK;
> /*
> @@ -938,7 +938,7 @@ int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
> return 0;
> }
>
> - if (pmd_large(*pmdp)) {
> + if (pmd_leaf(*pmdp)) {
> paddr = pmd_val(*pmdp) & HPAGE_MASK;
> paddr |= addr & ~HPAGE_MASK;
> cc = page_reset_referenced(paddr);
> @@ -1002,7 +1002,7 @@ int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
> return 0;
> }
>
> - if (pmd_large(*pmdp)) {
> + if (pmd_leaf(*pmdp)) {
> paddr = pmd_val(*pmdp) & HPAGE_MASK;
> paddr |= addr & ~HPAGE_MASK;
> *key = page_get_storage_key(paddr);
> diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
> index eb100479f7be..afe5edf2a604 100644
> --- a/arch/s390/mm/vmem.c
> +++ b/arch/s390/mm/vmem.c
> @@ -236,7 +236,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
> if (!add) {
> if (pmd_none(*pmd))
> continue;
> - if (pmd_large(*pmd)) {
> + if (pmd_leaf(*pmd)) {
> if (IS_ALIGNED(addr, PMD_SIZE) &&
> IS_ALIGNED(next, PMD_SIZE)) {
> if (!direct)
> @@ -281,7 +281,7 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
> if (!pte)
> goto out;
> pmd_populate(&init_mm, pmd, pte);
> - } else if (pmd_large(*pmd)) {
> + } else if (pmd_leaf(*pmd)) {
> if (!direct)
> vmemmap_use_sub_pmd(addr, next);
> continue;
> @@ -610,7 +610,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
> if (!pte)
> goto out;
> pmd_populate(&init_mm, pmd, pte);
> - } else if (WARN_ON_ONCE(pmd_large(*pmd))) {
> + } else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
> goto out;
> }
> ptep = pte_offset_kernel(pmd, addr);
> diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
> index f83017992eaa..5e067b6a4464 100644
> --- a/arch/sparc/mm/init_64.c
> +++ b/arch/sparc/mm/init_64.c
> @@ -1672,7 +1672,7 @@ bool kern_addr_valid(unsigned long addr)
> if (pmd_none(*pmd))
> return false;
>
> - if (pmd_large(*pmd))
> + if (pmd_leaf(*pmd))
> return pfn_valid(pmd_pfn(*pmd));
>
> pte = pte_offset_kernel(pmd, addr);
> @@ -2968,7 +2968,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
> struct mm_struct *mm;
> pmd_t entry = *pmd;
>
> - if (!pmd_large(entry) || !pmd_young(entry))
> + if (!pmd_leaf(entry) || !pmd_young(entry))
> return;
>
> pte = pmd_val(entry);
> diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
> index d040080d7edb..71c6e2fdcec7 100644
> --- a/arch/x86/boot/compressed/ident_map_64.c
> +++ b/arch/x86/boot/compressed/ident_map_64.c
> @@ -284,7 +284,7 @@ static int set_clr_page_flags(struct x86_mapping_info *info,
> pudp = pud_offset(p4dp, address);
> pmdp = pmd_offset(pudp, address);
>
> - if (pmd_large(*pmdp))
> + if (pmd_leaf(*pmdp))
> ptep = split_large_pmd(info, pmdp, address);
> else
> ptep = pte_offset_kernel(pmdp, address);
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 2d6cdeab1f8a..c15123248c52 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3135,7 +3135,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
> if (pmd_none(pmd) || !pmd_present(pmd))
> goto out;
>
> - if (pmd_large(pmd))
> + if (pmd_leaf(pmd))
> level = PG_LEVEL_2M;
>
> out:
> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
> index 8b69ce3f4115..09417f950343 100644
> --- a/arch/x86/mm/fault.c
> +++ b/arch/x86/mm/fault.c
> @@ -250,7 +250,7 @@ static noinline int vmalloc_fault(unsigned long address)
> if (!pmd_k)
> return -1;
>
> - if (pmd_large(*pmd_k))
> + if (pmd_leaf(*pmd_k))
> return 0;
>
> pte_k = pte_offset_kernel(pmd_k, address);
> @@ -319,7 +319,7 @@ static void dump_pagetable(unsigned long address)
> * And let's rather not kmap-atomic the pte, just in case
> * it's allocated already:
> */
> - if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
> + if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_leaf(*pmd))
> goto out;
>
> pte = pte_offset_kernel(pmd, address);
> @@ -384,7 +384,7 @@ static void dump_pagetable(unsigned long address)
> goto bad;
>
> pr_cont("PMD %lx ", pmd_val(*pmd));
> - if (!pmd_present(*pmd) || pmd_large(*pmd))
> + if (!pmd_present(*pmd) || pmd_leaf(*pmd))
> goto out;
>
> pte = pte_offset_kernel(pmd, address);
> @@ -1053,7 +1053,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address)
> if (!pmd_present(*pmd))
> return 0;
>
> - if (pmd_large(*pmd))
> + if (pmd_leaf(*pmd))
> return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
>
> pte = pte_offset_kernel(pmd, address);
> diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
> index 5c736b707cae..ac41b1e0940d 100644
> --- a/arch/x86/mm/init_32.c
> +++ b/arch/x86/mm/init_32.c
> @@ -463,7 +463,7 @@ void __init native_pagetable_init(void)
> break;
>
> /* should not be large page here */
> - if (pmd_large(*pmd)) {
> + if (pmd_leaf(*pmd)) {
> pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
> pfn, pmd, __pa(pmd));
> BUG_ON(1);
> diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
> index d691e7992a9a..2c5490e58f41 100644
> --- a/arch/x86/mm/init_64.c
> +++ b/arch/x86/mm/init_64.c
> @@ -530,7 +530,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
> }
>
> if (!pmd_none(*pmd)) {
> - if (!pmd_large(*pmd)) {
> + if (!pmd_leaf(*pmd)) {
> spin_lock(&init_mm.page_table_lock);
> pte = (pte_t *)pmd_page_vaddr(*pmd);
> paddr_last = phys_pte_init(pte, paddr,
> @@ -1114,7 +1114,7 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
> if (!pmd_present(*pmd))
> continue;
>
> - if (pmd_large(*pmd)) {
> + if (pmd_leaf(*pmd)) {
> if (IS_ALIGNED(addr, PMD_SIZE) &&
> IS_ALIGNED(next, PMD_SIZE)) {
> if (!direct)
> @@ -1520,9 +1520,9 @@ void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
> int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
> unsigned long addr, unsigned long next)
> {
> - int large = pmd_large(*pmd);
> + int large = pmd_leaf(*pmd);
>
> - if (pmd_large(*pmd)) {
> + if (pmd_leaf(*pmd)) {
> vmemmap_verify((pte_t *)pmd, node, addr, next);
> vmemmap_use_sub_pmd(addr, next);
> }
> diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
> index 0302491d799d..f41d26bc9161 100644
> --- a/arch/x86/mm/kasan_init_64.c
> +++ b/arch/x86/mm/kasan_init_64.c
> @@ -95,7 +95,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
> pmd = pmd_offset(pud, addr);
> do {
> next = pmd_addr_end(addr, end);
> - if (!pmd_large(*pmd))
> + if (!pmd_leaf(*pmd))
> kasan_populate_pmd(pmd, addr, next, nid);
> } while (pmd++, addr = next, addr != end);
> }
> diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
> index d73aeb16417f..bca4fea80579 100644
> --- a/arch/x86/mm/mem_encrypt_identity.c
> +++ b/arch/x86/mm/mem_encrypt_identity.c
> @@ -161,7 +161,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
> return;
>
> pmd = pmd_offset(pud, ppd->vaddr);
> - if (pmd_large(*pmd))
> + if (pmd_leaf(*pmd))
> return;
>
> set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
> @@ -185,7 +185,7 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
> set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
> }
>
> - if (pmd_large(*pmd))
> + if (pmd_leaf(*pmd))
> return;
>
> pte = pte_offset_kernel(pmd, ppd->vaddr);
> diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
> index 5359a9c88099..b4037fe08eed 100644
> --- a/arch/x86/mm/pat/set_memory.c
> +++ b/arch/x86/mm/pat/set_memory.c
> @@ -692,7 +692,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
> return NULL;
>
> *level = PG_LEVEL_2M;
> - if (pmd_large(*pmd) || !pmd_present(*pmd))
> + if (pmd_leaf(*pmd) || !pmd_present(*pmd))
> return (pte_t *)pmd;
>
> *level = PG_LEVEL_4K;
> @@ -1229,7 +1229,7 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
> * Try to unmap in 2M chunks.
> */
> while (end - start >= PMD_SIZE) {
> - if (pmd_large(*pmd))
> + if (pmd_leaf(*pmd))
> pmd_clear(pmd);
> else
> __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
> diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
> index 0cbc1b8e8e3d..d05dd86ceb41 100644
> --- a/arch/x86/mm/pgtable.c
> +++ b/arch/x86/mm/pgtable.c
> @@ -792,7 +792,7 @@ int pud_clear_huge(pud_t *pud)
> */
> int pmd_clear_huge(pmd_t *pmd)
> {
> - if (pmd_large(*pmd)) {
> + if (pmd_leaf(*pmd)) {
> pmd_clear(pmd);
> return 1;
> }
> diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
> index c17aab24c1b3..0442e8f479a6 100644
> --- a/arch/x86/mm/pti.c
> +++ b/arch/x86/mm/pti.c
> @@ -252,7 +252,7 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
> return NULL;
>
> /* We can't do anything sensible if we hit a large mapping. */
> - if (pmd_large(*pmd)) {
> + if (pmd_leaf(*pmd)) {
> WARN_ON(1);
> return NULL;
> }
> @@ -341,7 +341,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
> continue;
> }
>
> - if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
> + if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
> target_pmd = pti_user_pagetable_walk_pmd(addr);
> if (WARN_ON(!target_pmd))
> return;
> diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
> index 28153789f873..277eaf610e0e 100644
> --- a/arch/x86/power/hibernate.c
> +++ b/arch/x86/power/hibernate.c
> @@ -175,7 +175,7 @@ int relocate_restore_code(void)
> goto out;
> }
> pmd = pmd_offset(pud, relocated_restore_code);
> - if (pmd_large(*pmd)) {
> + if (pmd_leaf(*pmd)) {
> set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
> goto out;
> }
> diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
> index 12a43a4abebf..dde551bbd231 100644
> --- a/arch/x86/xen/mmu_pv.c
> +++ b/arch/x86/xen/mmu_pv.c
> @@ -1059,7 +1059,7 @@ static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
> pte_t *pte_tbl;
> int i;
>
> - if (pmd_large(*pmd)) {
> + if (pmd_leaf(*pmd)) {
> pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
> xen_free_ro_pages(pa, PMD_SIZE);
> return;
> @@ -1871,7 +1871,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
> if (!pmd_present(pmd))
> return 0;
> pa = pmd_val(pmd) & PTE_PFN_MASK;
> - if (pmd_large(pmd))
> + if (pmd_leaf(pmd))
> return pa + (vaddr & ~PMD_MASK);
>
> pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
> diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
> index 629edb6486de..3557d78ee47a 100644
> --- a/drivers/misc/sgi-gru/grufault.c
> +++ b/drivers/misc/sgi-gru/grufault.c
> @@ -227,7 +227,7 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
> if (unlikely(pmd_none(*pmdp)))
> goto err;
> #ifdef CONFIG_X86_64
> - if (unlikely(pmd_large(*pmdp)))
> + if (unlikely(pmd_leaf(*pmdp)))
> pte = ptep_get((pte_t *)pmdp);
> else
> #endif
> --
> 2.44.0
>
>
--
Sincerely yours,
Mike.
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH v3 08/10] mm/treewide: Replace pud_large() with pud_leaf()
2024-03-05 4:37 [PATCH v3 00/10] mm/treewide: Replace pXd_large() with pXd_leaf() peterx
` (6 preceding siblings ...)
2024-03-05 4:37 ` [PATCH v3 07/10] mm/treewide: Replace pmd_large() with pmd_leaf() peterx
@ 2024-03-05 4:37 ` peterx
2024-03-05 4:37 ` [PATCH v3 09/10] mm/treewide: Drop pXd_large() peterx
2024-03-05 4:37 ` [PATCH v3 10/10] mm/treewide: Align up pXd_leaf() retval across archs peterx
9 siblings, 0 replies; 24+ messages in thread
From: peterx @ 2024-03-05 4:37 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Christophe Leroy, x86, Kirill A . Shutemov, Jason Gunthorpe,
Yang Shi, peterx, Andrew Morton, linuxppc-dev, Muchun Song
From: Peter Xu <peterx@redhat.com>
pud_large() is always defined as pud_leaf(). Merge their usages. Choose
pud_leaf() because it is a global API, while pud_large() is not.
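As with pmd_leaf() in the previous patch, the replacement is mechanical. For a picture of the typical call site: a leaf pud maps a whole huge region (1 GiB on x86), so walkers use pud_leaf() to stop descending and compute the target directly. A sketch modeled on the xen_early_virt_to_phys() hunk below (pa, vaddr and pud as in that function):

/* sketch: translate vaddr when the pud is a leaf */
pa = pud_val(pud) & PTE_PFN_MASK;	/* physical base of the mapping */
if (pud_leaf(pud))
	return pa + (vaddr & ~PUD_MASK);	/* offset inside the huge page */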
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
---
arch/powerpc/mm/book3s64/pgtable.c | 2 +-
arch/s390/boot/vmem.c | 2 +-
arch/s390/include/asm/pgtable.h | 4 ++--
arch/s390/mm/gmap.c | 2 +-
arch/s390/mm/hugetlbpage.c | 4 ++--
arch/s390/mm/pageattr.c | 2 +-
arch/s390/mm/pgtable.c | 2 +-
arch/s390/mm/vmem.c | 6 +++---
arch/sparc/mm/init_64.c | 2 +-
arch/x86/kvm/mmu/mmu.c | 2 +-
arch/x86/mm/fault.c | 4 ++--
arch/x86/mm/ident_map.c | 2 +-
arch/x86/mm/init_64.c | 4 ++--
arch/x86/mm/kasan_init_64.c | 2 +-
arch/x86/mm/mem_encrypt_identity.c | 2 +-
arch/x86/mm/pat/set_memory.c | 6 +++---
arch/x86/mm/pgtable.c | 2 +-
arch/x86/mm/pti.c | 2 +-
arch/x86/power/hibernate.c | 2 +-
arch/x86/xen/mmu_pv.c | 4 ++--
20 files changed, 29 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 45f526547b27..83823db3488b 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -130,7 +130,7 @@ void set_pud_at(struct mm_struct *mm, unsigned long addr,
WARN_ON(pte_hw_valid(pud_pte(*pudp)));
assert_spin_locked(pud_lockptr(mm, pudp));
- WARN_ON(!(pud_large(pud)));
+ WARN_ON(!(pud_leaf(pud)));
#endif
trace_hugepage_set_pud(addr, pud_val(pud));
return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 348ab02b1028..09b10bb6e4d0 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -366,7 +366,7 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
}
pmd = boot_crst_alloc(_SEGMENT_ENTRY_EMPTY);
pud_populate(&init_mm, pud, pmd);
- } else if (pud_large(*pud)) {
+ } else if (pud_leaf(*pud)) {
continue;
}
pgtable_pmd_populate(pud, addr, next, mode);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 431d03d5116b..a5f16a244a64 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -730,7 +730,7 @@ static inline int pud_bad(pud_t pud)
{
unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
- if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
+ if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
return 1;
if (type < _REGION_ENTRY_TYPE_R3)
return 0;
@@ -1400,7 +1400,7 @@ static inline unsigned long pud_deref(pud_t pud)
unsigned long origin_mask;
origin_mask = _REGION_ENTRY_ORIGIN;
- if (pud_large(pud))
+ if (pud_leaf(pud))
origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
return (unsigned long)__va(pud_val(pud) & origin_mask);
}
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index b78ded9d3bf7..ac28a042d101 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -598,7 +598,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
pud = pud_offset(p4d, vmaddr);
VM_BUG_ON(pud_none(*pud));
/* large puds cannot yet be handled */
- if (pud_large(*pud))
+ if (pud_leaf(*pud))
return -EFAULT;
pmd = pmd_offset(pud, vmaddr);
VM_BUG_ON(pmd_none(*pmd));
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 1ccb5b40fe92..c2e8242bd15d 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -224,7 +224,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
if (p4d_present(*p4dp)) {
pudp = pud_offset(p4dp, addr);
if (pud_present(*pudp)) {
- if (pud_large(*pudp))
+ if (pud_leaf(*pudp))
return (pte_t *) pudp;
pmdp = pmd_offset(pudp, addr);
}
@@ -240,7 +240,7 @@ int pmd_huge(pmd_t pmd)
int pud_huge(pud_t pud)
{
- return pud_large(pud);
+ return pud_leaf(pud);
}
bool __init arch_hugetlb_valid_size(unsigned long size)
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 9f55d5a3210c..01bc8fad64d6 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -274,7 +274,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
if (pud_none(*pudp))
return -EINVAL;
next = pud_addr_end(addr, end);
- if (pud_large(*pudp)) {
+ if (pud_leaf(*pudp)) {
need_split = !!(flags & SET_MEMORY_4K);
need_split |= !!(addr & ~PUD_MASK);
need_split |= !!(addr + PUD_SIZE > next);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 9ac66304d776..2c944bafb030 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -470,7 +470,7 @@ static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
return -ENOENT;
/* Large PUDs are not supported yet. */
- if (pud_large(*pud))
+ if (pud_leaf(*pud))
return -EFAULT;
*pmdp = pmd_offset(pud, addr);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index afe5edf2a604..85cddf904cb2 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -329,7 +329,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
if (!add) {
if (pud_none(*pud))
continue;
- if (pud_large(*pud)) {
+ if (pud_leaf(*pud)) {
if (IS_ALIGNED(addr, PUD_SIZE) &&
IS_ALIGNED(next, PUD_SIZE)) {
pud_clear(pud);
@@ -350,7 +350,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
if (!pmd)
goto out;
pud_populate(&init_mm, pud, pmd);
- } else if (pud_large(*pud)) {
+ } else if (pud_leaf(*pud)) {
continue;
}
ret = modify_pmd_table(pud, addr, next, add, direct, altmap);
@@ -599,7 +599,7 @@ pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
if (!pmd)
goto out;
pud_populate(&init_mm, pud, pmd);
- } else if (WARN_ON_ONCE(pud_large(*pud))) {
+ } else if (WARN_ON_ONCE(pud_leaf(*pud))) {
goto out;
}
pmd = pmd_offset(pud, addr);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 5e067b6a4464..1ca9054d9b97 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1665,7 +1665,7 @@ bool kern_addr_valid(unsigned long addr)
if (pud_none(*pud))
return false;
- if (pud_large(*pud))
+ if (pud_leaf(*pud))
return pfn_valid(pud_pfn(*pud));
pmd = pmd_offset(pud, addr);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c15123248c52..5cb5bc4a72c4 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3126,7 +3126,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
if (pud_none(pud) || !pud_present(pud))
goto out;
- if (pud_large(pud)) {
+ if (pud_leaf(pud)) {
level = PG_LEVEL_1G;
goto out;
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 09417f950343..2cc6fa5dc561 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -376,7 +376,7 @@ static void dump_pagetable(unsigned long address)
goto bad;
pr_cont("PUD %lx ", pud_val(*pud));
- if (!pud_present(*pud) || pud_large(*pud))
+ if (!pud_present(*pud) || pud_leaf(*pud))
goto out;
pmd = pmd_offset(pud, address);
@@ -1046,7 +1046,7 @@ spurious_kernel_fault(unsigned long error_code, unsigned long address)
if (!pud_present(*pud))
return 0;
- if (pud_large(*pud))
+ if (pud_leaf(*pud))
return spurious_kernel_fault_check(error_code, (pte_t *) pud);
pmd = pmd_offset(pud, address);
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index f50cc210a981..a204a332c71f 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -33,7 +33,7 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
next = end;
/* if this is already a gbpage, this portion is already mapped */
- if (pud_large(*pud))
+ if (pud_leaf(*pud))
continue;
/* Is using a gbpage allowed? */
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2c5490e58f41..7e177856ee4f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -617,7 +617,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
}
if (!pud_none(*pud)) {
- if (!pud_large(*pud)) {
+ if (!pud_leaf(*pud)) {
pmd = pmd_offset(pud, 0);
paddr_last = phys_pmd_init(pmd, paddr,
paddr_end,
@@ -1163,7 +1163,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
if (!pud_present(*pud))
continue;
- if (pud_large(*pud) &&
+ if (pud_leaf(*pud) &&
IS_ALIGNED(addr, PUD_SIZE) &&
IS_ALIGNED(next, PUD_SIZE)) {
spin_lock(&init_mm.page_table_lock);
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index f41d26bc9161..9dddf19a5571 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -115,7 +115,7 @@ static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
pud = pud_offset(p4d, addr);
do {
next = pud_addr_end(addr, end);
- if (!pud_large(*pud))
+ if (!pud_leaf(*pud))
kasan_populate_pud(pud, addr, next, nid);
} while (pud++, addr = next, addr != end);
}
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index bca4fea80579..7dd30e16294d 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -145,7 +145,7 @@ static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
}
- if (pud_large(*pud))
+ if (pud_leaf(*pud))
return NULL;
return pud;
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index b4037fe08eed..e3a26f2c7781 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -684,7 +684,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
return NULL;
*level = PG_LEVEL_1G;
- if (pud_large(*pud) || !pud_present(*pud))
+ if (pud_leaf(*pud) || !pud_present(*pud))
return (pte_t *)pud;
pmd = pmd_offset(pud, address);
@@ -743,7 +743,7 @@ pmd_t *lookup_pmd_address(unsigned long address)
return NULL;
pud = pud_offset(p4d, address);
- if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
+ if (pud_none(*pud) || pud_leaf(*pud) || !pud_present(*pud))
return NULL;
return pmd_offset(pud, address);
@@ -1274,7 +1274,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
*/
while (end - start >= PUD_SIZE) {
- if (pud_large(*pud))
+ if (pud_leaf(*pud))
pud_clear(pud);
else
unmap_pmd_range(pud, start, start + PUD_SIZE);
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d05dd86ceb41..ff690ddc2334 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -777,7 +777,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
*/
int pud_clear_huge(pud_t *pud)
{
- if (pud_large(*pud)) {
+ if (pud_leaf(*pud)) {
pud_clear(pud);
return 1;
}
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 0442e8f479a6..2e69abf4f852 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -217,7 +217,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
pud = pud_offset(p4d, address);
/* The user page tables do not use large mappings: */
- if (pud_large(*pud)) {
+ if (pud_leaf(*pud)) {
WARN_ON(1);
return NULL;
}
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 277eaf610e0e..5b81d19cd114 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -170,7 +170,7 @@ int relocate_restore_code(void)
goto out;
}
pud = pud_offset(p4d, relocated_restore_code);
- if (pud_large(*pud)) {
+ if (pud_leaf(*pud)) {
set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
goto out;
}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index dde551bbd231..54e0d311dcc9 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1082,7 +1082,7 @@ static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
pmd_t *pmd_tbl;
int i;
- if (pud_large(*pud)) {
+ if (pud_leaf(*pud)) {
pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
xen_free_ro_pages(pa, PUD_SIZE);
return;
@@ -1863,7 +1863,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
if (!pud_present(pud))
return 0;
pa = pud_val(pud) & PTE_PFN_MASK;
- if (pud_large(pud))
+ if (pud_leaf(pud))
return pa + (vaddr & ~PUD_MASK);
pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
--
2.44.0
^ permalink raw reply [flat|nested] 24+ messages in thread

* [PATCH v3 09/10] mm/treewide: Drop pXd_large()
2024-03-05 4:37 [PATCH v3 00/10] mm/treewide: Replace pXd_large() with pXd_leaf() peterx
` (7 preceding siblings ...)
2024-03-05 4:37 ` [PATCH v3 08/10] mm/treewide: Replace pud_large() with pud_leaf() peterx
@ 2024-03-05 4:37 ` peterx
2024-03-06 6:34 ` Mike Rapoport
2024-03-05 4:37 ` [PATCH v3 10/10] mm/treewide: Align up pXd_leaf() retval across archs peterx
9 siblings, 1 reply; 24+ messages in thread
From: peterx @ 2024-03-05 4:37 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Christophe Leroy, x86, Kirill A . Shutemov, Jason Gunthorpe,
Yang Shi, peterx, Andrew Morton, linuxppc-dev, Muchun Song
From: Peter Xu <peterx@redhat.com>
They're not used anymore, drop all of them.
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
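For illustration only (not part of this patch): with the pXd_large()
variants gone, a page table walker relies solely on pXd_leaf(), which
include/linux/pgtable.h guarantees to exist at every level via the
generic fallbacks (stubbed to 0 where an arch has no leaves at that
level). A minimal sketch, assuming <linux/pgtable.h> is included;
lvl_of() is a made-up name, not an API this series adds:

static int lvl_of(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return 0;
	if (pgd_leaf(*pgd))		/* generic fallback stubs this out */
		return 5;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return 0;
	if (p4d_leaf(*p4d))
		return 4;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return 0;
	if (pud_leaf(*pud))		/* was pud_large() before this series */
		return 3;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_leaf(*pmd))		/* was pmd_large() before this series */
		return 2;
	return 1;			/* mapped, if at all, at pte level */
}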
---
arch/arm/include/asm/pgtable-2level.h | 1 -
arch/arm/include/asm/pgtable-3level.h | 1 -
arch/loongarch/kvm/mmu.c | 2 +-
arch/powerpc/include/asm/book3s/64/pgtable.h | 4 +---
arch/powerpc/include/asm/pgtable.h | 4 ----
arch/s390/include/asm/pgtable.h | 8 ++++----
arch/sparc/include/asm/pgtable_64.h | 8 ++++----
arch/x86/include/asm/pgtable.h | 19 +++++++------------
arch/x86/kvm/mmu/mmu.c | 2 +-
9 files changed, 18 insertions(+), 31 deletions(-)
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index ce543cd9380c..b0a262566eb9 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -213,7 +213,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
#define pmd_pfn(pmd) (__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-#define pmd_large(pmd) (pmd_val(pmd) & 2)
#define pmd_leaf(pmd) (pmd_val(pmd) & 2)
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define pmd_present(pmd) (pmd_val(pmd))
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 71c3add6417f..4b1d9eb3908a 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -118,7 +118,6 @@
PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
-#define pmd_large(pmd) pmd_sect(pmd)
#define pmd_leaf(pmd) pmd_sect(pmd)
#define pud_clear(pudp) \
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 50a6acd7ffe4..a556cff35740 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -723,7 +723,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
/*
* Read each entry once. As above, a non-leaf entry can be promoted to
* a huge page _during_ this walk. Re-reading the entry could send the
- * walk into the weeds, e.g. p*d_large() returns false (sees the old
+ * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
* value) and then p*d_offset() walks into the target huge page instead
* of the old page table (sees the new value).
*/
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 3e99e409774a..df66dce8306f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1437,17 +1437,15 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
}
/*
- * Like pmd_huge() and pmd_large(), but works regardless of config options
+ * Like pmd_huge(), but works regardless of config options
*/
#define pmd_leaf pmd_leaf
-#define pmd_large pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}
#define pud_leaf pud_leaf
-#define pud_large pud_leaf
static inline bool pud_leaf(pud_t pud)
{
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index e6edf1cdbc5b..239709a2f68e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -101,10 +101,6 @@ void poking_init(void);
extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];
-#ifndef pmd_large
-#define pmd_large(pmd) 0
-#endif
-
/* can we use this in kvm */
unsigned long vmalloc_to_phys(void *vmalloc_addr);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index a5f16a244a64..9e08af5b9247 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -705,16 +705,16 @@ static inline int pud_none(pud_t pud)
return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}
-#define pud_leaf pud_large
-static inline int pud_large(pud_t pud)
+#define pud_leaf pud_leaf
+static inline int pud_leaf(pud_t pud)
{
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
return 0;
return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}
-#define pmd_leaf pmd_large
-static inline int pmd_large(pmd_t pmd)
+#define pmd_leaf pmd_leaf
+static inline int pmd_leaf(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 652af9d63fa2..6ff0a28d5fd1 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -680,8 +680,8 @@ static inline unsigned long pte_special(pte_t pte)
return pte_val(pte) & _PAGE_SPECIAL;
}
-#define pmd_leaf pmd_large
-static inline unsigned long pmd_large(pmd_t pmd)
+#define pmd_leaf pmd_leaf
+static inline unsigned long pmd_leaf(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
@@ -867,8 +867,8 @@ static inline pmd_t *pud_pgtable(pud_t pud)
/* only used by the stubbed out hugetlb gup code, should never be called */
#define p4d_page(p4d) NULL
-#define pud_leaf pud_large
-static inline unsigned long pud_large(pud_t pud)
+#define pud_leaf pud_leaf
+static inline unsigned long pud_leaf(pud_t pud)
{
pte_t pte = __pte(pud_val(pud));
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 9db7a38a0e9f..cfc84c55d0e6 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -251,8 +251,8 @@ static inline unsigned long pgd_pfn(pgd_t pgd)
return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
-#define p4d_leaf p4d_large
-static inline int p4d_large(p4d_t p4d)
+#define p4d_leaf p4d_leaf
+static inline int p4d_leaf(p4d_t p4d)
{
/* No 512 GiB pages yet */
return 0;
@@ -260,14 +260,14 @@ static inline int p4d_large(p4d_t p4d)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
-#define pmd_leaf pmd_large
-static inline int pmd_large(pmd_t pte)
+#define pmd_leaf pmd_leaf
+static inline int pmd_leaf(pmd_t pte)
{
return pmd_flags(pte) & _PAGE_PSE;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */
+/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_leaf */
static inline int pmd_trans_huge(pmd_t pmd)
{
return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
@@ -1085,8 +1085,8 @@ static inline pmd_t *pud_pgtable(pud_t pud)
*/
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
-#define pud_leaf pud_large
-static inline int pud_large(pud_t pud)
+#define pud_leaf pud_leaf
+static inline int pud_leaf(pud_t pud)
{
return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
@@ -1096,11 +1096,6 @@ static inline int pud_bad(pud_t pud)
{
return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
-#else
-static inline int pud_large(pud_t pud)
-{
- return 0;
-}
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#if CONFIG_PGTABLE_LEVELS > 3
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 5cb5bc4a72c4..58f5e6b637b4 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3110,7 +3110,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
/*
* Read each entry once. As above, a non-leaf entry can be promoted to
* a huge page _during_ this walk. Re-reading the entry could send the
- * walk into the weeds, e.g. p*d_large() returns false (sees the old
+ * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
* value) and then p*d_offset() walks into the target huge page instead
* of the old page table (sees the new value).
*/
--
2.44.0
^ permalink raw reply [flat|nested] 24+ messages in thread

* Re: [PATCH v3 09/10] mm/treewide: Drop pXd_large()
2024-03-05 4:37 ` [PATCH v3 09/10] mm/treewide: Drop pXd_large() peterx
@ 2024-03-06 6:34 ` Mike Rapoport
0 siblings, 0 replies; 24+ messages in thread
From: Mike Rapoport @ 2024-03-06 6:34 UTC (permalink / raw)
To: peterx
Cc: linux-mm, linux-kernel, Christophe Leroy, x86,
Kirill A . Shutemov, Jason Gunthorpe, Yang Shi, Andrew Morton,
linuxppc-dev, Muchun Song
On Tue, Mar 05, 2024 at 12:37:49PM +0800, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> They're not used anymore, drop all of them.
>
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
> ---
> arch/arm/include/asm/pgtable-2level.h | 1 -
> arch/arm/include/asm/pgtable-3level.h | 1 -
> arch/loongarch/kvm/mmu.c | 2 +-
> arch/powerpc/include/asm/book3s/64/pgtable.h | 4 +---
> arch/powerpc/include/asm/pgtable.h | 4 ----
> arch/s390/include/asm/pgtable.h | 8 ++++----
> arch/sparc/include/asm/pgtable_64.h | 8 ++++----
> arch/x86/include/asm/pgtable.h | 19 +++++++------------
> arch/x86/kvm/mmu/mmu.c | 2 +-
> 9 files changed, 18 insertions(+), 31 deletions(-)
>
> diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
> index ce543cd9380c..b0a262566eb9 100644
> --- a/arch/arm/include/asm/pgtable-2level.h
> +++ b/arch/arm/include/asm/pgtable-2level.h
> @@ -213,7 +213,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
>
> #define pmd_pfn(pmd) (__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
>
> -#define pmd_large(pmd) (pmd_val(pmd) & 2)
> #define pmd_leaf(pmd) (pmd_val(pmd) & 2)
> #define pmd_bad(pmd) (pmd_val(pmd) & 2)
> #define pmd_present(pmd) (pmd_val(pmd))
> diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
> index 71c3add6417f..4b1d9eb3908a 100644
> --- a/arch/arm/include/asm/pgtable-3level.h
> +++ b/arch/arm/include/asm/pgtable-3level.h
> @@ -118,7 +118,6 @@
> PMD_TYPE_TABLE)
> #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
> PMD_TYPE_SECT)
> -#define pmd_large(pmd) pmd_sect(pmd)
> #define pmd_leaf(pmd) pmd_sect(pmd)
>
> #define pud_clear(pudp) \
> diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
> index 50a6acd7ffe4..a556cff35740 100644
> --- a/arch/loongarch/kvm/mmu.c
> +++ b/arch/loongarch/kvm/mmu.c
> @@ -723,7 +723,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
> /*
> * Read each entry once. As above, a non-leaf entry can be promoted to
> * a huge page _during_ this walk. Re-reading the entry could send the
> - * walk into the weeds, e.g. p*d_large() returns false (sees the old
> + * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
> * value) and then p*d_offset() walks into the target huge page instead
> * of the old page table (sees the new value).
> */
> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
> index 3e99e409774a..df66dce8306f 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
> @@ -1437,17 +1437,15 @@ static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_va
> }
>
> /*
> - * Like pmd_huge() and pmd_large(), but works regardless of config options
> + * Like pmd_huge(), but works regardless of config options
> */
> #define pmd_leaf pmd_leaf
> -#define pmd_large pmd_leaf
> static inline bool pmd_leaf(pmd_t pmd)
> {
> return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
> }
>
> #define pud_leaf pud_leaf
> -#define pud_large pud_leaf
> static inline bool pud_leaf(pud_t pud)
> {
> return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
> index e6edf1cdbc5b..239709a2f68e 100644
> --- a/arch/powerpc/include/asm/pgtable.h
> +++ b/arch/powerpc/include/asm/pgtable.h
> @@ -101,10 +101,6 @@ void poking_init(void);
> extern unsigned long ioremap_bot;
> extern const pgprot_t protection_map[16];
>
> -#ifndef pmd_large
> -#define pmd_large(pmd) 0
> -#endif
> -
> /* can we use this in kvm */
> unsigned long vmalloc_to_phys(void *vmalloc_addr);
>
> diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
> index a5f16a244a64..9e08af5b9247 100644
> --- a/arch/s390/include/asm/pgtable.h
> +++ b/arch/s390/include/asm/pgtable.h
> @@ -705,16 +705,16 @@ static inline int pud_none(pud_t pud)
> return pud_val(pud) == _REGION3_ENTRY_EMPTY;
> }
>
> -#define pud_leaf pud_large
> -static inline int pud_large(pud_t pud)
> +#define pud_leaf pud_leaf
> +static inline int pud_leaf(pud_t pud)
> {
> if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
> return 0;
> return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
> }
>
> -#define pmd_leaf pmd_large
> -static inline int pmd_large(pmd_t pmd)
> +#define pmd_leaf pmd_leaf
> +static inline int pmd_leaf(pmd_t pmd)
> {
> return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
> }
> diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
> index 652af9d63fa2..6ff0a28d5fd1 100644
> --- a/arch/sparc/include/asm/pgtable_64.h
> +++ b/arch/sparc/include/asm/pgtable_64.h
> @@ -680,8 +680,8 @@ static inline unsigned long pte_special(pte_t pte)
> return pte_val(pte) & _PAGE_SPECIAL;
> }
>
> -#define pmd_leaf pmd_large
> -static inline unsigned long pmd_large(pmd_t pmd)
> +#define pmd_leaf pmd_leaf
> +static inline unsigned long pmd_leaf(pmd_t pmd)
> {
> pte_t pte = __pte(pmd_val(pmd));
>
> @@ -867,8 +867,8 @@ static inline pmd_t *pud_pgtable(pud_t pud)
> /* only used by the stubbed out hugetlb gup code, should never be called */
> #define p4d_page(p4d) NULL
>
> -#define pud_leaf pud_large
> -static inline unsigned long pud_large(pud_t pud)
> +#define pud_leaf pud_leaf
> +static inline unsigned long pud_leaf(pud_t pud)
> {
> pte_t pte = __pte(pud_val(pud));
>
> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
> index 9db7a38a0e9f..cfc84c55d0e6 100644
> --- a/arch/x86/include/asm/pgtable.h
> +++ b/arch/x86/include/asm/pgtable.h
> @@ -251,8 +251,8 @@ static inline unsigned long pgd_pfn(pgd_t pgd)
> return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
> }
>
> -#define p4d_leaf p4d_large
> -static inline int p4d_large(p4d_t p4d)
> +#define p4d_leaf p4d_leaf
> +static inline int p4d_leaf(p4d_t p4d)
> {
> /* No 512 GiB pages yet */
> return 0;
> @@ -260,14 +260,14 @@ static inline int p4d_large(p4d_t p4d)
>
> #define pte_page(pte) pfn_to_page(pte_pfn(pte))
>
> -#define pmd_leaf pmd_large
> -static inline int pmd_large(pmd_t pte)
> +#define pmd_leaf pmd_leaf
> +static inline int pmd_leaf(pmd_t pte)
> {
> return pmd_flags(pte) & _PAGE_PSE;
> }
>
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */
> +/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_leaf */
> static inline int pmd_trans_huge(pmd_t pmd)
> {
> return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
> @@ -1085,8 +1085,8 @@ static inline pmd_t *pud_pgtable(pud_t pud)
> */
> #define pud_page(pud) pfn_to_page(pud_pfn(pud))
>
> -#define pud_leaf pud_large
> -static inline int pud_large(pud_t pud)
> +#define pud_leaf pud_leaf
> +static inline int pud_leaf(pud_t pud)
> {
> return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
> (_PAGE_PSE | _PAGE_PRESENT);
> @@ -1096,11 +1096,6 @@ static inline int pud_bad(pud_t pud)
> {
> return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
> }
> -#else
> -static inline int pud_large(pud_t pud)
> -{
> - return 0;
> -}
> #endif /* CONFIG_PGTABLE_LEVELS > 2 */
>
> #if CONFIG_PGTABLE_LEVELS > 3
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index 5cb5bc4a72c4..58f5e6b637b4 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -3110,7 +3110,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
> /*
> * Read each entry once. As above, a non-leaf entry can be promoted to
> * a huge page _during_ this walk. Re-reading the entry could send the
> - * walk into the weeds, e.g. p*d_large() returns false (sees the old
> + * walk into the weeds, e.g. p*d_leaf() returns false (sees the old
> * value) and then p*d_offset() walks into the target huge page instead
> * of the old page table (sees the new value).
> */
> --
> 2.44.0
>
>
--
Sincerely yours,
Mike.
^ permalink raw reply [flat|nested] 24+ messages in thread
* [PATCH v3 10/10] mm/treewide: Align up pXd_leaf() retval across archs
2024-03-05 4:37 [PATCH v3 00/10] mm/treewide: Replace pXd_large() with pXd_leaf() peterx
` (8 preceding siblings ...)
2024-03-05 4:37 ` [PATCH v3 09/10] mm/treewide: Drop pXd_large() peterx
@ 2024-03-05 4:37 ` peterx
2024-03-05 14:33 ` Jason Gunthorpe
2024-03-06 6:35 ` Mike Rapoport
9 siblings, 2 replies; 24+ messages in thread
From: peterx @ 2024-03-05 4:37 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Christophe Leroy, x86, Kirill A . Shutemov, Jason Gunthorpe,
Yang Shi, peterx, Andrew Morton, linuxppc-dev, Muchun Song
From: Peter Xu <peterx@redhat.com>
Even though the pXd_leaf() API is defined globally, its return type is
inconsistent: three different types are currently in use (bool, int, and
unsigned long).
Always return a boolean from all pXd_leaf() APIs.
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
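For background (not part of this patch): a bool retval lets callers
store or test the result without caring how an arch computes it. With
the old sparc-style "unsigned long" variant the leaf flag lives in a
high bit of the entry, so assigning the raw retval to a narrower
integer silently truncates it to zero. A contrived standalone sketch of
that hazard; FAKE_LEAF_BIT and both helper names are invented here and
deliberately not sparc's real definitions:

#include <stdbool.h>
#include <stdio.h>

#define FAKE_LEAF_BIT	(1UL << 37)	/* hypothetical high flag bit */

/* old style: hands back the raw masked value */
static unsigned long leaf_ulong(unsigned long entry)
{
	return entry & FAKE_LEAF_BIT;
}

/* new style: result is normalized to true/false */
static bool leaf_bool(unsigned long entry)
{
	return entry & FAKE_LEAF_BIT;
}

int main(void)
{
	unsigned long entry = FAKE_LEAF_BIT;
	int a = leaf_ulong(entry);	/* bit 37 lost in the int conversion: 0 */
	int b = leaf_bool(entry);	/* bool collapses to 1 before widening */

	printf("ulong retval as int: %d, bool retval as int: %d\n", a, b);
	return 0;
}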
---
arch/riscv/include/asm/pgtable-64.h | 2 +-
arch/riscv/include/asm/pgtable.h | 2 +-
arch/s390/include/asm/pgtable.h | 4 ++--
arch/sparc/include/asm/pgtable_64.h | 4 ++--
arch/x86/include/asm/pgtable.h | 8 ++++----
include/linux/pgtable.h | 8 ++++----
6 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index b42017d76924..2c7e1661db01 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -190,7 +190,7 @@ static inline int pud_bad(pud_t pud)
}
#define pud_leaf pud_leaf
-static inline int pud_leaf(pud_t pud)
+static inline bool pud_leaf(pud_t pud)
{
return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
}
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index add5cd30ab34..6839520dbcb1 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -241,7 +241,7 @@ static inline int pmd_bad(pmd_t pmd)
}
#define pmd_leaf pmd_leaf
-static inline int pmd_leaf(pmd_t pmd)
+static inline bool pmd_leaf(pmd_t pmd)
{
return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 9e08af5b9247..60950e7a25f5 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -706,7 +706,7 @@ static inline int pud_none(pud_t pud)
}
#define pud_leaf pud_leaf
-static inline int pud_leaf(pud_t pud)
+static inline bool pud_leaf(pud_t pud)
{
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
return 0;
@@ -714,7 +714,7 @@ static inline int pud_leaf(pud_t pud)
}
#define pmd_leaf pmd_leaf
-static inline int pmd_leaf(pmd_t pmd)
+static inline bool pmd_leaf(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 6ff0a28d5fd1..4d1bafaba942 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -681,7 +681,7 @@ static inline unsigned long pte_special(pte_t pte)
}
#define pmd_leaf pmd_leaf
-static inline unsigned long pmd_leaf(pmd_t pmd)
+static inline bool pmd_leaf(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
@@ -868,7 +868,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
#define p4d_page(p4d) NULL
#define pud_leaf pud_leaf
-static inline unsigned long pud_leaf(pud_t pud)
+static inline bool pud_leaf(pud_t pud)
{
pte_t pte = __pte(pud_val(pud));
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index cfc84c55d0e6..7621a5acb13e 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -252,7 +252,7 @@ static inline unsigned long pgd_pfn(pgd_t pgd)
}
#define p4d_leaf p4d_leaf
-static inline int p4d_leaf(p4d_t p4d)
+static inline bool p4d_leaf(p4d_t p4d)
{
/* No 512 GiB pages yet */
return 0;
@@ -261,7 +261,7 @@ static inline int p4d_leaf(p4d_t p4d)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define pmd_leaf pmd_leaf
-static inline int pmd_leaf(pmd_t pte)
+static inline bool pmd_leaf(pmd_t pte)
{
return pmd_flags(pte) & _PAGE_PSE;
}
@@ -1086,7 +1086,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define pud_leaf pud_leaf
-static inline int pud_leaf(pud_t pud)
+static inline bool pud_leaf(pud_t pud)
{
return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
@@ -1413,7 +1413,7 @@ static inline bool pgdp_maps_userspace(void *__ptr)
}
#define pgd_leaf pgd_leaf
-static inline int pgd_leaf(pgd_t pgd) { return 0; }
+static inline bool pgd_leaf(pgd_t pgd) { return false; }
#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index a36cf4e124b0..85fc7554cd52 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1777,16 +1777,16 @@ typedef unsigned int pgtbl_mod_mask;
* Only meaningful when called on a valid entry.
*/
#ifndef pgd_leaf
-#define pgd_leaf(x) 0
+#define pgd_leaf(x) false
#endif
#ifndef p4d_leaf
-#define p4d_leaf(x) 0
+#define p4d_leaf(x) false
#endif
#ifndef pud_leaf
-#define pud_leaf(x) 0
+#define pud_leaf(x) false
#endif
#ifndef pmd_leaf
-#define pmd_leaf(x) 0
+#define pmd_leaf(x) false
#endif
#ifndef pgd_leaf_size
--
2.44.0
^ permalink raw reply [flat|nested] 24+ messages in thread

* Re: [PATCH v3 10/10] mm/treewide: Align up pXd_leaf() retval across archs
2024-03-05 4:37 ` [PATCH v3 10/10] mm/treewide: Align up pXd_leaf() retval across archs peterx
@ 2024-03-05 14:33 ` Jason Gunthorpe
2024-03-06 6:35 ` Mike Rapoport
1 sibling, 0 replies; 24+ messages in thread
From: Jason Gunthorpe @ 2024-03-05 14:33 UTC (permalink / raw)
To: peterx
Cc: linux-mm, linux-kernel, Christophe Leroy, x86,
Kirill A . Shutemov, Yang Shi, Andrew Morton, linuxppc-dev,
Muchun Song
On Tue, Mar 05, 2024 at 12:37:50PM +0800, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> Even though the pXd_leaf() API is defined globally, its return type is
> inconsistent: three different types are currently in use (bool, int, and
> unsigned long).
>
> Always return a boolean from all pXd_leaf() APIs.
>
> Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
> ---
> arch/riscv/include/asm/pgtable-64.h | 2 +-
> arch/riscv/include/asm/pgtable.h | 2 +-
> arch/s390/include/asm/pgtable.h | 4 ++--
> arch/sparc/include/asm/pgtable_64.h | 4 ++--
> arch/x86/include/asm/pgtable.h | 8 ++++----
> include/linux/pgtable.h | 8 ++++----
> 6 files changed, 14 insertions(+), 14 deletions(-)
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Jason
^ permalink raw reply [flat|nested] 24+ messages in thread
* Re: [PATCH v3 10/10] mm/treewide: Align up pXd_leaf() retval across archs
2024-03-05 4:37 ` [PATCH v3 10/10] mm/treewide: Align up pXd_leaf() retval across archs peterx
2024-03-05 14:33 ` Jason Gunthorpe
@ 2024-03-06 6:35 ` Mike Rapoport
1 sibling, 0 replies; 24+ messages in thread
From: Mike Rapoport @ 2024-03-06 6:35 UTC (permalink / raw)
To: peterx
Cc: linux-mm, linux-kernel, Christophe Leroy, x86,
Kirill A . Shutemov, Jason Gunthorpe, Yang Shi, Andrew Morton,
linuxppc-dev, Muchun Song
On Tue, Mar 05, 2024 at 12:37:50PM +0800, peterx@redhat.com wrote:
> From: Peter Xu <peterx@redhat.com>
>
> Even though the pXd_leaf() API is defined globally, its return type is
> inconsistent: three different types are currently in use (bool, int, and
> unsigned long).
>
> Always return a boolean from all pXd_leaf() APIs.
>
> Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
> Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
> ---
> arch/riscv/include/asm/pgtable-64.h | 2 +-
> arch/riscv/include/asm/pgtable.h | 2 +-
> arch/s390/include/asm/pgtable.h | 4 ++--
> arch/sparc/include/asm/pgtable_64.h | 4 ++--
> arch/x86/include/asm/pgtable.h | 8 ++++----
> include/linux/pgtable.h | 8 ++++----
> 6 files changed, 14 insertions(+), 14 deletions(-)
>
> diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
> index b42017d76924..2c7e1661db01 100644
> --- a/arch/riscv/include/asm/pgtable-64.h
> +++ b/arch/riscv/include/asm/pgtable-64.h
> @@ -190,7 +190,7 @@ static inline int pud_bad(pud_t pud)
> }
>
> #define pud_leaf pud_leaf
> -static inline int pud_leaf(pud_t pud)
> +static inline bool pud_leaf(pud_t pud)
> {
> return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
> }
> diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
> index add5cd30ab34..6839520dbcb1 100644
> --- a/arch/riscv/include/asm/pgtable.h
> +++ b/arch/riscv/include/asm/pgtable.h
> @@ -241,7 +241,7 @@ static inline int pmd_bad(pmd_t pmd)
> }
>
> #define pmd_leaf pmd_leaf
> -static inline int pmd_leaf(pmd_t pmd)
> +static inline bool pmd_leaf(pmd_t pmd)
> {
> return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
> }
> diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
> index 9e08af5b9247..60950e7a25f5 100644
> --- a/arch/s390/include/asm/pgtable.h
> +++ b/arch/s390/include/asm/pgtable.h
> @@ -706,7 +706,7 @@ static inline int pud_none(pud_t pud)
> }
>
> #define pud_leaf pud_leaf
> -static inline int pud_leaf(pud_t pud)
> +static inline bool pud_leaf(pud_t pud)
> {
> if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
> return 0;
> @@ -714,7 +714,7 @@ static inline int pud_leaf(pud_t pud)
> }
>
> #define pmd_leaf pmd_leaf
> -static inline int pmd_leaf(pmd_t pmd)
> +static inline bool pmd_leaf(pmd_t pmd)
> {
> return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
> }
> diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
> index 6ff0a28d5fd1..4d1bafaba942 100644
> --- a/arch/sparc/include/asm/pgtable_64.h
> +++ b/arch/sparc/include/asm/pgtable_64.h
> @@ -681,7 +681,7 @@ static inline unsigned long pte_special(pte_t pte)
> }
>
> #define pmd_leaf pmd_leaf
> -static inline unsigned long pmd_leaf(pmd_t pmd)
> +static inline bool pmd_leaf(pmd_t pmd)
> {
> pte_t pte = __pte(pmd_val(pmd));
>
> @@ -868,7 +868,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
> #define p4d_page(p4d) NULL
>
> #define pud_leaf pud_leaf
> -static inline unsigned long pud_leaf(pud_t pud)
> +static inline bool pud_leaf(pud_t pud)
> {
> pte_t pte = __pte(pud_val(pud));
>
> diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
> index cfc84c55d0e6..7621a5acb13e 100644
> --- a/arch/x86/include/asm/pgtable.h
> +++ b/arch/x86/include/asm/pgtable.h
> @@ -252,7 +252,7 @@ static inline unsigned long pgd_pfn(pgd_t pgd)
> }
>
> #define p4d_leaf p4d_leaf
> -static inline int p4d_leaf(p4d_t p4d)
> +static inline bool p4d_leaf(p4d_t p4d)
> {
> /* No 512 GiB pages yet */
> return 0;
> @@ -261,7 +261,7 @@ static inline int p4d_leaf(p4d_t p4d)
> #define pte_page(pte) pfn_to_page(pte_pfn(pte))
>
> #define pmd_leaf pmd_leaf
> -static inline int pmd_leaf(pmd_t pte)
> +static inline bool pmd_leaf(pmd_t pte)
> {
> return pmd_flags(pte) & _PAGE_PSE;
> }
> @@ -1086,7 +1086,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
> #define pud_page(pud) pfn_to_page(pud_pfn(pud))
>
> #define pud_leaf pud_leaf
> -static inline int pud_leaf(pud_t pud)
> +static inline bool pud_leaf(pud_t pud)
> {
> return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
> (_PAGE_PSE | _PAGE_PRESENT);
> @@ -1413,7 +1413,7 @@ static inline bool pgdp_maps_userspace(void *__ptr)
> }
>
> #define pgd_leaf pgd_leaf
> -static inline int pgd_leaf(pgd_t pgd) { return 0; }
> +static inline bool pgd_leaf(pgd_t pgd) { return false; }
>
> #ifdef CONFIG_PAGE_TABLE_ISOLATION
> /*
> diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
> index a36cf4e124b0..85fc7554cd52 100644
> --- a/include/linux/pgtable.h
> +++ b/include/linux/pgtable.h
> @@ -1777,16 +1777,16 @@ typedef unsigned int pgtbl_mod_mask;
> * Only meaningful when called on a valid entry.
> */
> #ifndef pgd_leaf
> -#define pgd_leaf(x) 0
> +#define pgd_leaf(x) false
> #endif
> #ifndef p4d_leaf
> -#define p4d_leaf(x) 0
> +#define p4d_leaf(x) false
> #endif
> #ifndef pud_leaf
> -#define pud_leaf(x) 0
> +#define pud_leaf(x) false
> #endif
> #ifndef pmd_leaf
> -#define pmd_leaf(x) 0
> +#define pmd_leaf(x) false
> #endif
>
> #ifndef pgd_leaf_size
> --
> 2.44.0
>
>
--
Sincerely yours,
Mike.
^ permalink raw reply [flat|nested] 24+ messages in thread