From: Jane Chu <jane.chu@oracle.com>
To: akpm@linux-foundation.org, david@kernel.org,
muchun.song@linux.dev, osalvador@suse.de
Cc: lorenzo.stoakes@oracle.com, Liam.Howlett@oracle.com,
vbabka@kernel.org, rppt@kernel.org, surenb@google.com,
mhocko@suse.com, corbet@lwn.net, skhan@linuxfoundation.org,
hughd@google.com, baolin.wang@linux.alibaba.com,
peterx@redhat.com, linux-mm@kvack.org, linux-doc@vger.kernel.org,
linux-kernel@vger.kernel.org
Subject: [PATCH 5/6] hugetlb: make hugetlb_add_to_page_cache() use PAGE_SIZE-based index
Date: Thu, 9 Apr 2026 17:41:56 -0600 [thread overview]
Message-ID: <20260409234158.837786-6-jane.chu@oracle.com> (raw)
In-Reply-To: <20260409234158.837786-1-jane.chu@oracle.com>
hugetlb_add_to_page_cache() currently takes a parameter named 'idx' in
huge-page units, but internally shifts it into the PAGE_SIZE-based
page-cache index units expected by __filemap_add_folio().
Make hugetlb_add_to_page_cache() take a PAGE_SIZE-based index directly
and update its callers accordingly. This removes the internal shift,
keeps the index units consistent with filemap_lock_folio() and
__filemap_add_folio(), and simplifies the surrounding code.
Note that the i_size check in hugetlb_mfill_atomic_pte() now compares
in PAGE_SIZE units rather than huge-page units; the two are equivalent
because hugetlbfs inode sizes are always huge-page aligned.
Signed-off-by: Jane Chu <jane.chu@oracle.com>
---
fs/hugetlbfs/inode.c | 2 +-
include/linux/hugetlb.h | 2 +-
mm/hugetlb.c | 21 ++++++++-------------
mm/memfd.c | 2 +-
4 files changed, 11 insertions(+), 16 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e24e9bf54e14..a72d46ff7980 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -825,7 +825,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
}
folio_zero_user(folio, addr);
__folio_mark_uptodate(folio);
- error = hugetlb_add_to_page_cache(folio, mapping, idx);
+ error = hugetlb_add_to_page_cache(folio, mapping, index);
if (unlikely(error)) {
restore_reserve_on_error(h, &pseudo_vma, addr, folio);
folio_put(folio);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 71691a2b6855..a51a5e12859c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -713,7 +713,7 @@ struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask);
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
- pgoff_t idx);
+ pgoff_t index);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
unsigned long address, struct folio *folio);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 138e5ecf818e..47ef41b6fb2e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5625,15 +5625,14 @@ bool hugetlbfs_pagecache_present(struct hstate *h,
}
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
- pgoff_t idx)
+ pgoff_t index)
{
struct inode *inode = mapping->host;
struct hstate *h = hstate_inode(inode);
int err;
- idx <<= huge_page_order(h);
__folio_set_locked(folio);
- err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
+ err = __filemap_add_folio(mapping, folio, index, GFP_KERNEL, NULL);
if (unlikely(err)) {
__folio_clear_locked(folio);
@@ -5724,7 +5723,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
* before we get page_table_lock.
*/
new_folio = false;
- folio = filemap_lock_folio(mapping, vmf->pgoff << huge_page_order(h));
+ folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio)) {
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (vmf->pgoff >= size)
@@ -5788,8 +5787,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
new_folio = true;
if (vma->vm_flags & VM_MAYSHARE) {
- int err = hugetlb_add_to_page_cache(folio, mapping,
- vmf->pgoff);
+ int err = hugetlb_add_to_page_cache(folio, mapping, index);
if (err) {
/*
* err can't be -EEXIST which implies someone
@@ -6173,7 +6171,6 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
uffd_flags_t flags,
struct folio **foliop)
{
- pgoff_t idx;
spinlock_t *ptl;
struct folio *folio;
pte_t _dst_pte, dst_ptep;
@@ -6183,13 +6180,11 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
struct mm_struct *dst_mm = dst_vma->vm_mm;
bool wp_enabled = (flags & MFILL_ATOMIC_WP);
int vm_shared = dst_vma->vm_flags & VM_SHARED;
+ pgoff_t index = linear_page_index(dst_vma, dst_addr);
struct address_space *mapping = dst_vma->vm_file->f_mapping;
bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
int ret = -ENOMEM;
- idx = linear_page_index(dst_vma, dst_addr);
- idx >>= huge_page_order(h);
-
if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
ptl = huge_pte_lock(h, dst_mm, dst_pte);
@@ -6211,7 +6206,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
if (is_continue) {
ret = -EFAULT;
- folio = filemap_lock_folio(mapping, idx << huge_page_order(h));
+ folio = filemap_lock_folio(mapping, index);
if (IS_ERR(folio))
goto out;
folio_in_pagecache = true;
@@ -6307,7 +6302,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
/* Add shared, newly allocated pages to the page cache. */
if (vm_shared && !is_continue) {
ret = -EFAULT;
- if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
+ if (index >= (i_size_read(mapping->host) >> PAGE_SHIFT))
goto out_release_nounlock;
/*
@@ -6316,7 +6311,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
* hugetlb_fault_mutex_table that here must be hold by
* the caller.
*/
- ret = hugetlb_add_to_page_cache(folio, mapping, idx);
+ ret = hugetlb_add_to_page_cache(folio, mapping, index);
if (ret)
goto out_release_nounlock;
folio_in_pagecache = true;
diff --git a/mm/memfd.c b/mm/memfd.c
index 911ff8220d05..56c8833c4195 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -122,7 +122,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t index)
err = hugetlb_add_to_page_cache(folio,
memfd->f_mapping,
- idx);
+ index);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
--
2.43.5
next prev parent reply other threads:[~2026-04-09 23:42 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-09 23:41 [PATCH 0/6] hugetlb: normalize exported interfaces to use base-page indices Jane Chu
2026-04-09 23:41 ` [PATCH 1/6] hugetlb: open-code hugetlb folio lookup index conversion Jane Chu
2026-04-09 23:41 ` [PATCH 2/6] hugetlb: remove the hugetlb_linear_page_index() helper Jane Chu
2026-04-09 23:41 ` [PATCH 3/6] hugetlb: make hugetlb_fault_mutex_hash() take PAGE_SIZE index Jane Chu
2026-04-09 23:41 ` [PATCH 4/6] hugetlb: drop vma_hugecache_offset() in favor of linear_page_index() Jane Chu
2026-04-09 23:41 ` Jane Chu [this message]
2026-04-09 23:41 ` [PATCH 6/6] hugetlb: pass hugetlb reservation ranges in base-page indices Jane Chu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260409234158.837786-6-jane.chu@oracle.com \
--to=jane.chu@oracle.com \
--cc=Liam.Howlett@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=baolin.wang@linux.alibaba.com \
--cc=corbet@lwn.net \
--cc=david@kernel.org \
--cc=hughd@google.com \
--cc=linux-doc@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=mhocko@suse.com \
--cc=muchun.song@linux.dev \
--cc=osalvador@suse.de \
--cc=peterx@redhat.com \
--cc=rppt@kernel.org \
--cc=skhan@linuxfoundation.org \
--cc=surenb@google.com \
--cc=vbabka@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox