[PATCH] x86, NUMA: Get the number of RAM pages directly in numa_meminfo_cover_memory()
From: Liam Ni @ 2023-06-15 14:20 UTC
  To: dave.hansen, luto, peterz, tglx, mingo, bp, hpa, akpm, rppt
  Cc: x86, linux-kernel, linux-mm, zhiguangni01

In the previous implementation, the number of RAM pages was derived
indirectly: the code counted the pages in the holes and subtracted them
from the span of each range. Since the hole calculation already walks
the memblock ranges that describe the present RAM, the number of RAM
pages can be counted directly instead.

Introduce __available_pages_in_range() and available_pages_in_range()
as counterparts of __absent_pages_in_range() and absent_pages_in_range(),
and use them in numa_meminfo_cover_memory().
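
For illustration, the idea as a stand-alone userspace sketch (the
ranges, the clamp_pfn() helper and the two counting functions below are
invented for the example; only the counting pattern mirrors the kernel
code):

#include <stdio.h>

/* Made-up present-memory ranges (in PFNs), standing in for memblock. */
struct range { unsigned long start_pfn, end_pfn; };

static const struct range present[] = {
	{ 0x000, 0x0a0 },
	{ 0x100, 0x800 },
	{ 0x900, 0xe00 },
};

static unsigned long clamp_pfn(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Indirect: start from the span and subtract each present chunk, the
 * same pattern __absent_pages_in_range() uses while walking memblock. */
static unsigned long absent_pages(unsigned long lo, unsigned long hi)
{
	unsigned long nr_absent = hi - lo;
	unsigned long i;

	for (i = 0; i < sizeof(present) / sizeof(present[0]); i++)
		nr_absent -= clamp_pfn(present[i].end_pfn, lo, hi) -
			     clamp_pfn(present[i].start_pfn, lo, hi);
	return nr_absent;
}

/* Direct: sum the present chunks, the same walk without the detour. */
static unsigned long available_pages(unsigned long lo, unsigned long hi)
{
	unsigned long nr_available = 0;
	unsigned long i;

	for (i = 0; i < sizeof(present) / sizeof(present[0]); i++)
		nr_available += clamp_pfn(present[i].end_pfn, lo, hi) -
				clamp_pfn(present[i].start_pfn, lo, hi);
	return nr_available;
}

int main(void)
{
	unsigned long lo = 0, hi = 0x1000;

	/* Both lines print 3232: span - absent == available. */
	printf("span - absent: %lu\n", (hi - lo) - absent_pages(lo, hi));
	printf("available:     %lu\n", available_pages(lo, hi));
	return 0;
}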

Signed-off-by: Liam Ni <zhiguangni01@gmail.com>
---
 arch/x86/mm/numa.c |  5 ++---
 include/linux/mm.h |  4 ++++
 mm/mm_init.c       | 33 +++++++++++++++++++++++++++++++++
 3 files changed, 39 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 2aadb2019b4f..8ea0e956e3d7 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -461,12 +461,11 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
 		u64 s = mi->blk[i].start >> PAGE_SHIFT;
 		u64 e = mi->blk[i].end >> PAGE_SHIFT;
-		numaram += e - s;
-		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
+		numaram += __available_pages_in_range(mi->blk[i].nid, s, e);
 		if ((s64)numaram < 0)
 			numaram = 0;
 	}
 
-	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
+	e820ram = available_pages_in_range(0, max_pfn);
 
 	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
 	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0daef3f2f029..f7f8c3476d1e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3041,8 +3041,12 @@ void free_area_init(unsigned long *max_zone_pfn);
 unsigned long node_map_pfn_alignment(void);
 unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
 						unsigned long end_pfn);
+unsigned long __available_pages_in_range(int nid, unsigned long start_pfn,
+						unsigned long end_pfn);
 extern unsigned long absent_pages_in_range(unsigned long start_pfn,
 						unsigned long end_pfn);
+extern unsigned long available_pages_in_range(unsigned long start_pfn,
+						unsigned long end_pfn);
 extern void get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn);
 
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 3ddd18a89b66..33451afbc040 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1165,6 +1165,39 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn,
 	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
 }
 
+/*
+ * Return the number of available pages in a range on a node. If nid is MAX_NUMNODES,
+ * then all available pages in the requested range will be accounted for.
+ */
+unsigned long __init __available_pages_in_range(int nid,
+				unsigned long range_start_pfn,
+				unsigned long range_end_pfn)
+{
+	unsigned long nr_available = 0;
+	unsigned long start_pfn, end_pfn;
+	int i;
+
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
+		nr_available += end_pfn - start_pfn;
+	}
+	return nr_available;
+}
+
+/**
+ * available_pages_in_range - Return the number of available page frames within a range
+ * @start_pfn: The start PFN to start counting available pages
+ * @end_pfn: The end PFN to stop counting available pages
+ *
+ * Return: the number of available page frames within a range.
+ */
+unsigned long __init available_pages_in_range(unsigned long start_pfn,
+							unsigned long end_pfn)
+{
+	return __available_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
+}
+
 /* Return the number of page frames in holes in a zone on a node */
 static unsigned long __init zone_absent_pages_in_node(int nid,
 					unsigned long zone_type,
-- 
2.25.1


