From: Pratyush Yadav <pratyush@kernel.org>
To: Pasha Tatashin <pasha.tatashin@soleen.com>,
Mike Rapoport <rppt@kernel.org>,
Pratyush Yadav <pratyush@kernel.org>,
Andrew Morton <akpm@linux-foundation.org>,
David Hildenbrand <david@kernel.org>,
Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
"Liam R. Howlett" <Liam.Howlett@oracle.com>,
Vlastimil Babka <vbabka@suse.cz>,
Suren Baghdasaryan <surenb@google.com>,
Michal Hocko <mhocko@suse.com>, Jonathan Corbet <corbet@lwn.net>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
Dave Hansen <dave.hansen@linux.intel.com>,
x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
Muchun Song <muchun.song@linux.dev>,
Oscar Salvador <osalvador@suse.de>,
Alexander Graf <graf@amazon.com>,
David Matlack <dmatlack@google.com>,
David Rientjes <rientjes@google.com>,
Jason Gunthorpe <jgg@nvidia.com>,
Samiullah Khawaja <skhawaja@google.com>,
Vipin Sharma <vipinsh@google.com>,
Zhu Yanjun <yanjun.zhu@linux.dev>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
linux-doc@vger.kernel.org, kexec@lists.infradead.org
Subject: [RFC PATCH 05/10] mm: hugetlb: export some functions to hugetlb-internal header
Date: Sun, 7 Dec 2025 00:02:15 +0100
Message-ID: <20251206230222.853493-6-pratyush@kernel.org>
In-Reply-To: <20251206230222.853493-1-pratyush@kernel.org>

A later commit will add support for live updating a memfd backed by
HugeTLB. It needs access to these internal functions to prepare the
folios and properly queue them to the hstate and the file. Move them
out into a separate hugetlb-internal header.

include/linux/hugetlb.h already exists, but it contains higher-level
routines and prefixes function names to make it clear they belong to
hugetlb. The functions moved here are low-level routines that do not
need to be exposed in the public API, and renaming them with a hugetlb
prefix would cause a lot of code churn. So create mm/hugetlb_internal.h
to hold these definitions.

Signed-off-by: Pratyush Yadav <pratyush@kernel.org>
---
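A minimal sketch, purely for illustration and not part of this patch, of
how a later liveupdate consumer in mm/ might use the reservation helpers
exported here to account a preserved page range against a hugetlb file's
resv_map. The example_reserve_range() name is made up, and subpool/cgroup
charging plus its error handling are omitted:

#include <linux/hugetlb.h>
#include "hugetlb_internal.h"

static long example_reserve_range(struct inode *inode, struct hstate *h,
				  long from, long to)
{
	struct resv_map *resv = inode_resv_map(inode);
	long regions_needed;
	long chg;

	/* First pass: find out how many pages the range still needs. */
	chg = region_chg(resv, from, to, &regions_needed);
	if (chg < 0)
		return chg;

	/*
	 * If subpool or cgroup charging failed at this point, the caller
	 * would undo the first pass with:
	 *	region_abort(resv, from, to, regions_needed);
	 */

	/* Second pass: commit the reservation. */
	return region_add(resv, from, to, regions_needed, h, NULL);
}
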
 MAINTAINERS           |  1 +
 mm/hugetlb.c          | 33 +++++++++------------------------
 mm/hugetlb_internal.h | 35 +++++++++++++++++++++++++++++++++++
 3 files changed, 45 insertions(+), 24 deletions(-)
 create mode 100644 mm/hugetlb_internal.h
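
A second sketch, again illustrative only: how the folio helpers could be
used by a restore path that adopts already-allocated huge folios. The
example_adopt_folios() name and its array parameter are invented for
this example:

#include <linux/list.h>
#include <linux/hugetlb.h>
#include "hugetlb_internal.h"

static void example_adopt_folios(struct hstate *h, struct folio **folios,
				 unsigned long nr)
{
	LIST_HEAD(folio_list);
	unsigned long i;

	for (i = 0; i < nr; i++) {
		/* Mark the folio as hugetlb and initialize its bookkeeping. */
		init_new_hugetlb_folio(folios[i]);
		list_add(&folios[i]->lru, &folio_list);
	}

	/*
	 * Accounts each folio to the hstate under the hugetlb lock and
	 * releases it to the free pool.
	 */
	prep_and_add_allocated_folios(h, &folio_list);
}
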
diff --git a/MAINTAINERS b/MAINTAINERS
index 2722f98d0ed7..fc23a0381e19 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11540,6 +11540,7 @@ F:	mm/hugetlb.c
 F:	mm/hugetlb_cgroup.c
 F:	mm/hugetlb_cma.c
 F:	mm/hugetlb_cma.h
+F:	mm/hugetlb_internal.h
 F:	mm/hugetlb_vmemmap.c
 F:	mm/hugetlb_vmemmap.h
 F:	tools/testing/selftests/cgroup/test_hugetlb_memcg.c
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0455119716ec..0f818086bf4f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -55,6 +55,8 @@
 #include "hugetlb_cma.h"
 #include <linux/page-isolation.h>
 
+#include "hugetlb_internal.h"
+
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
@@ -733,9 +735,8 @@ static int allocate_file_region_entries(struct resv_map *resv,
  * fail; region_chg will always allocate at least 1 entry and a region_add for
  * 1 page will only require at most 1 entry.
  */
-static long region_add(struct resv_map *resv, long f, long t,
-		       long in_regions_needed, struct hstate *h,
-		       struct hugetlb_cgroup *h_cg)
+long region_add(struct resv_map *resv, long f, long t, long in_regions_needed,
+		struct hstate *h, struct hugetlb_cgroup *h_cg)
 {
 	long add = 0, actual_regions_needed = 0;
 
@@ -800,8 +801,7 @@ static long region_add(struct resv_map *resv, long f, long t,
  * zero. -ENOMEM is returned if a new file_region structure or cache entry
  * is needed and can not be allocated.
  */
-static long region_chg(struct resv_map *resv, long f, long t,
-		       long *out_regions_needed)
+long region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed)
 {
 	long chg = 0;
 
@@ -836,8 +836,7 @@ static long region_chg(struct resv_map *resv, long f, long t,
  * routine. They are kept to make reading the calling code easier as
  * arguments will match the associated region_chg call.
  */
-static void region_abort(struct resv_map *resv, long f, long t,
-			 long regions_needed)
+void region_abort(struct resv_map *resv, long f, long t, long regions_needed)
 {
 	spin_lock(&resv->lock);
 	VM_BUG_ON(!resv->region_cache_count);
@@ -1162,19 +1161,6 @@ void resv_map_release(struct kref *ref)
 	kfree(resv_map);
 }
 
-static inline struct resv_map *inode_resv_map(struct inode *inode)
-{
-	/*
-	 * At inode evict time, i_mapping may not point to the original
-	 * address space within the inode. This original address space
-	 * contains the pointer to the resv_map. So, always use the
-	 * address space embedded within the inode.
-	 * The VERY common case is inode->mapping == &inode->i_data but,
-	 * this may not be true for device special inodes.
-	 */
-	return (struct resv_map *)(&inode->i_data)->i_private_data;
-}
-
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
@@ -1887,14 +1873,14 @@ void free_huge_folio(struct folio *folio)
 /*
  * Must be called with the hugetlb lock held
  */
-static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
+void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 {
 	lockdep_assert_held(&hugetlb_lock);
 	h->nr_huge_pages++;
 	h->nr_huge_pages_node[folio_nid(folio)]++;
 }
 
-static void init_new_hugetlb_folio(struct folio *folio)
+void init_new_hugetlb_folio(struct folio *folio)
 {
 	__folio_set_hugetlb(folio);
 	INIT_LIST_HEAD(&folio->lru);
@@ -2006,8 +1992,7 @@ static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
 	return folio;
 }
 
-static void prep_and_add_allocated_folios(struct hstate *h,
-					   struct list_head *folio_list)
+void prep_and_add_allocated_folios(struct hstate *h, struct list_head *folio_list)
 {
 	unsigned long flags;
 	struct folio *folio, *tmp_f;
diff --git a/mm/hugetlb_internal.h b/mm/hugetlb_internal.h
new file mode 100644
index 000000000000..edfb4eb75828
--- /dev/null
+++ b/mm/hugetlb_internal.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025 Pratyush Yadav <pratyush@kernel.org>
+ */
+#ifndef __HUGETLB_INTERNAL_H
+#define __HUGETLB_INTERNAL_H
+
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/hugetlb_cgroup.h>
+#include <linux/list.h>
+
+void init_new_hugetlb_folio(struct folio *folio);
+void account_new_hugetlb_folio(struct hstate *h, struct folio *folio);
+
+long region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed);
+long region_add(struct resv_map *resv, long f, long t, long in_regions_needed,
+		struct hstate *h, struct hugetlb_cgroup *h_cg);
+void region_abort(struct resv_map *resv, long f, long t, long regions_needed);
+void prep_and_add_allocated_folios(struct hstate *h, struct list_head *folio_list);
+
+static inline struct resv_map *inode_resv_map(struct inode *inode)
+{
+	/*
+	 * At inode evict time, i_mapping may not point to the original
+	 * address space within the inode. This original address space
+	 * contains the pointer to the resv_map. So, always use the
+	 * address space embedded within the inode.
+	 * The VERY common case is inode->mapping == &inode->i_data but,
+	 * this may not be true for device special inodes.
+	 */
+	return (struct resv_map *)(&inode->i_data)->i_private_data;
+}
+
+#endif /* __HUGETLB_INTERNAL_H */
--
2.43.0
Thread overview (11+ messages):
2025-12-06 23:02 [RFC PATCH 00/10] liveupdate: hugetlb support Pratyush Yadav
2025-12-06 23:02 ` [RFC PATCH 01/10] kho: drop restriction on maximum page order Pratyush Yadav
2025-12-06 23:02 ` [RFC PATCH 02/10] kho: disable scratch-only earlier in boot Pratyush Yadav
2025-12-06 23:02 ` [RFC PATCH 03/10] liveupdate: do early initialization before hugepages are allocated Pratyush Yadav
2025-12-06 23:02 ` [RFC PATCH 04/10] liveupdate: flb: allow getting FLB data in early boot Pratyush Yadav
2025-12-06 23:02 ` Pratyush Yadav [this message]
2025-12-06 23:02 ` [RFC PATCH 06/10] liveupdate: hugetlb subsystem FLB state preservation Pratyush Yadav
2025-12-06 23:02 ` [RFC PATCH 07/10] mm: hugetlb: don't allocate pages already in live update Pratyush Yadav
2025-12-06 23:02 ` [RFC PATCH 08/10] mm: hugetlb: disable CMA if liveupdate is enabled Pratyush Yadav
2025-12-06 23:02 ` [RFC PATCH 09/10] mm: hugetlb: allow freezing the inode Pratyush Yadav
2025-12-06 23:02 ` [RFC PATCH 10/10] liveupdate: allow preserving hugetlb-backed memfd Pratyush Yadav