linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
* [PATCH v2] sparc: Use vmemmap_populate_hugepages for vmemmap_populate
@ 2025-12-18 13:09 chengkaitao
  2025-12-18 16:27 ` Liam R. Howlett
  0 siblings, 1 reply; 2+ messages in thread
From: chengkaitao @ 2025-12-18 13:09 UTC (permalink / raw)
  To: davem, andreas, akpm, david, lorenzo.stoakes, Liam.Howlett,
	vbabka, rppt, surenb, mhocko
  Cc: kevin.brodsky, dave.hansen, ziy, chengkaitao, willy,
	zhengqi.arch, sparclinux, linux-kernel, linux-mm

From: Chengkaitao <chengkaitao@kylinos.cn>

1. Added the vmemmap_pte_fallback_allowed function to accommodate
architectures that do not support basepages.
2. Reimplemented vmemmap_populate for the SPARC architecture using
vmemmap_populate_hugepages.

Signed-off-by: Chengkaitao <chengkaitao@kylinos.cn>
---
v2:
1. Revert the whitespace deletions
2. Change vmemmap_false_pmd to vmemmap_pte_fallback_allowed

Link to V1:
https://lore.kernel.org/all/20251217120858.18713-1-pilgrimtao@gmail.com/

 arch/sparc/mm/init_64.c | 50 +++++++++++++++--------------------------
 include/linux/mm.h      |  1 +
 mm/sparse-vmemmap.c     |  7 +++++-
 3 files changed, 25 insertions(+), 33 deletions(-)

diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index df9f7c444c39..86b11150e701 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2581,8 +2581,8 @@ unsigned long _PAGE_CACHE __read_mostly;
 EXPORT_SYMBOL(_PAGE_CACHE);
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
-			       int node, struct vmem_altmap *altmap)
+void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+			       unsigned long addr, unsigned long next)
 {
 	unsigned long pte_base;
 
@@ -2595,39 +2595,25 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
 
 	pte_base |= _PAGE_PMD_HUGE;
 
-	vstart = vstart & PMD_MASK;
-	vend = ALIGN(vend, PMD_SIZE);
-	for (; vstart < vend; vstart += PMD_SIZE) {
-		pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
-		unsigned long pte;
-		p4d_t *p4d;
-		pud_t *pud;
-		pmd_t *pmd;
-
-		if (!pgd)
-			return -ENOMEM;
-
-		p4d = vmemmap_p4d_populate(pgd, vstart, node);
-		if (!p4d)
-			return -ENOMEM;
-
-		pud = vmemmap_pud_populate(p4d, vstart, node);
-		if (!pud)
-			return -ENOMEM;
-
-		pmd = pmd_offset(pud, vstart);
-		pte = pmd_val(*pmd);
-		if (!(pte & _PAGE_VALID)) {
-			void *block = vmemmap_alloc_block(PMD_SIZE, node);
+	pmd_val(*pmd) = pte_base | __pa(p);
+}
 
-			if (!block)
-				return -ENOMEM;
+bool __meminit vmemmap_pte_fallback_allowed(void)
+{
+	return false;
+}
 
-			pmd_val(*pmd) = pte_base | __pa(block);
-		}
-	}
+int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
+				unsigned long addr, unsigned long next)
+{
+	vmemmap_verify((pte_t *)pmdp, node, addr, next);
+	return 1;
+}
 
-	return 0;
+int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+			       int node, struct vmem_altmap *altmap)
+{
+	return vmemmap_populate_hugepages(vstart, vend, node, altmap);
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 15076261d0c2..ca159b029a5d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4248,6 +4248,7 @@ void *vmemmap_alloc_block_buf(unsigned long size, int node,
 void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
 		     unsigned long addr, unsigned long next);
+bool vmemmap_pte_fallback_allowed(void);
 int vmemmap_check_pmd(pmd_t *pmd, int node,
 		      unsigned long addr, unsigned long next);
 int vmemmap_populate_basepages(unsigned long start, unsigned long end,
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 37522d6cb398..45eb38048949 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -407,6 +407,11 @@ void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
 {
 }
 
+bool __weak __meminit vmemmap_pte_fallback_allowed(void)
+{
+	return true;
+}
+
 int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
 				       unsigned long addr, unsigned long next)
 {
@@ -446,7 +451,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
 			if (p) {
 				vmemmap_set_pmd(pmd, p, node, addr, next);
 				continue;
-			} else if (altmap) {
+			} else if (altmap || !vmemmap_pte_fallback_allowed()) {
 				/*
 				 * No fallback: In any case we care about, the
 				 * altmap should be reasonably sized and aligned
-- 
2.50.1 (Apple Git-155)



^ permalink raw reply	[flat|nested] 2+ messages in thread

* Re: [PATCH v2] sparc: Use vmemmap_populate_hugepages for vmemmap_populate
  2025-12-18 13:09 [PATCH v2] sparc: Use vmemmap_populate_hugepages for vmemmap_populate chengkaitao
@ 2025-12-18 16:27 ` Liam R. Howlett
  0 siblings, 0 replies; 2+ messages in thread
From: Liam R. Howlett @ 2025-12-18 16:27 UTC (permalink / raw)
  To: chengkaitao
  Cc: davem, andreas, akpm, david, lorenzo.stoakes, vbabka, rppt,
	surenb, mhocko, kevin.brodsky, dave.hansen, ziy, chengkaitao,
	willy, zhengqi.arch, sparclinux, linux-kernel, linux-mm

* chengkaitao <pilgrimtao@gmail.com> [251218 08:10]:
> From: Chengkaitao <chengkaitao@kylinos.cn>
> 
> 1. Added the vmemmap_pte_fallback_allowed function to accommodate
> architectures that do not support basepages.
> 2. Reimplemented vmemmap_populate for the SPARC architecture using
> vmemmap_populate_hugepages.
> 
> Signed-off-by: Chengkaitao <chengkaitao@kylinos.cn>

Can you please fix your email client?  Your SoB does not match the
sender and your responses do not match the From of the patch.

I assume a v3 will include Mike's suggestions on v1 so this is already
out of date?

Please rewrite the change log to include the information that you
discussed with Mike.


> ---
> v2:
> 1. Revert the whitespace deletions
> 2. Change vmemmap_false_pmd to vmemmap_pte_fallback_allowed
> 
> Link to V1:
> https://lore.kernel.org/all/20251217120858.18713-1-pilgrimtao@gmail.com/
> 
>  arch/sparc/mm/init_64.c | 50 +++++++++++++++--------------------------
>  include/linux/mm.h      |  1 +
>  mm/sparse-vmemmap.c     |  7 +++++-
>  3 files changed, 25 insertions(+), 33 deletions(-)
> 
> diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
> index df9f7c444c39..86b11150e701 100644
> --- a/arch/sparc/mm/init_64.c
> +++ b/arch/sparc/mm/init_64.c
> @@ -2581,8 +2581,8 @@ unsigned long _PAGE_CACHE __read_mostly;
>  EXPORT_SYMBOL(_PAGE_CACHE);
>  
>  #ifdef CONFIG_SPARSEMEM_VMEMMAP
> -int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
> -			       int node, struct vmem_altmap *altmap)
> +void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
> +			       unsigned long addr, unsigned long next)
>  {
>  	unsigned long pte_base;
>  
> @@ -2595,39 +2595,25 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
>  
>  	pte_base |= _PAGE_PMD_HUGE;
>  
> -	vstart = vstart & PMD_MASK;
> -	vend = ALIGN(vend, PMD_SIZE);
> -	for (; vstart < vend; vstart += PMD_SIZE) {
> -		pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
> -		unsigned long pte;
> -		p4d_t *p4d;
> -		pud_t *pud;
> -		pmd_t *pmd;
> -
> -		if (!pgd)
> -			return -ENOMEM;
> -
> -		p4d = vmemmap_p4d_populate(pgd, vstart, node);
> -		if (!p4d)
> -			return -ENOMEM;
> -
> -		pud = vmemmap_pud_populate(p4d, vstart, node);
> -		if (!pud)
> -			return -ENOMEM;
> -
> -		pmd = pmd_offset(pud, vstart);
> -		pte = pmd_val(*pmd);
> -		if (!(pte & _PAGE_VALID)) {
> -			void *block = vmemmap_alloc_block(PMD_SIZE, node);
> +	pmd_val(*pmd) = pte_base | __pa(p);
> +}
>  
> -			if (!block)
> -				return -ENOMEM;
> +bool __meminit vmemmap_pte_fallback_allowed(void)
> +{
> +	return false;
> +}
>  
> -			pmd_val(*pmd) = pte_base | __pa(block);
> -		}
> -	}
> +int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
> +				unsigned long addr, unsigned long next)
> +{
> +	vmemmap_verify((pte_t *)pmdp, node, addr, next);
> +	return 1;
> +}
>  
> -	return 0;
> +int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
> +			       int node, struct vmem_altmap *altmap)
> +{
> +	return vmemmap_populate_hugepages(vstart, vend, node, altmap);
>  }
>  #endif /* CONFIG_SPARSEMEM_VMEMMAP */
>  
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 15076261d0c2..ca159b029a5d 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -4248,6 +4248,7 @@ void *vmemmap_alloc_block_buf(unsigned long size, int node,
>  void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
>  void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
>  		     unsigned long addr, unsigned long next);
> +bool vmemmap_pte_fallback_allowed(void);
>  int vmemmap_check_pmd(pmd_t *pmd, int node,
>  		      unsigned long addr, unsigned long next);
>  int vmemmap_populate_basepages(unsigned long start, unsigned long end,
> diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
> index 37522d6cb398..45eb38048949 100644
> --- a/mm/sparse-vmemmap.c
> +++ b/mm/sparse-vmemmap.c
> @@ -407,6 +407,11 @@ void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
>  {
>  }
>  
> +bool __weak __meminit vmemmap_pte_fallback_allowed(void)
> +{
> +	return true;
> +}
> +
>  int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
>  				       unsigned long addr, unsigned long next)
>  {
> @@ -446,7 +451,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
>  			if (p) {
>  				vmemmap_set_pmd(pmd, p, node, addr, next);
>  				continue;
> -			} else if (altmap) {
> +			} else if (altmap || !vmemmap_pte_fallback_allowed()) {
>  				/*
>  				 * No fallback: In any case we care about, the
>  				 * altmap should be reasonably sized and aligned
> -- 
> 2.50.1 (Apple Git-155)
> 


^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2025-12-18 16:28 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-12-18 13:09 [PATCH v2] sparc: Use vmemmap_populate_hugepages for vmemmap_populate chengkaitao
2025-12-18 16:27 ` Liam R. Howlett

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox