From: Baolin Wang <baolin.wang@linux.alibaba.com>
To: akpm@linux-foundation.org, hughd@google.com
Cc: willy@infradead.org, david@redhat.com,
wangkefeng.wang@huawei.com, 21cnbao@gmail.com,
ryan.roberts@arm.com, ioworker0@gmail.com, da.gomez@samsung.com,
baolin.wang@linux.alibaba.com, linux-mm@kvack.org,
linux-kernel@vger.kernel.org
Subject: [RFC PATCH v3 2/4] mm: shmem: change shmem_huge_global_enabled() to return huge order bitmap
Date: Thu, 10 Oct 2024 17:58:12 +0800
Message-ID: <b1804b973d3ee800a2f233be45732b71ce3917cb.1728548374.git.baolin.wang@linux.alibaba.com>
In-Reply-To: <cover.1728548374.git.baolin.wang@linux.alibaba.com>

Change shmem_huge_global_enabled() to return the suitable huge order
bitmap, and return 0 if huge pages are not allowed. This is a preparation
for adding a new huge option to support allocation of various huge orders
in the following patch.

No functional changes.
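For illustration only (not part of the diff below), a minimal sketch of
how a caller can interpret the returned order bitmap; with this patch the
helper still only ever returns 0 or BIT(HPAGE_PMD_ORDER):

	unsigned int orders;

	orders = shmem_huge_global_enabled(inode, index, write_end,
					   false, vma, vm_flags);
	if (!orders) {
		/* huge pages are not allowed, fall back to order-0 */
	} else if (orders & BIT(HPAGE_PMD_ORDER)) {
		/* a PMD-sized THP may be used for this inode/index */
	}
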
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
mm/shmem.c | 43 ++++++++++++++++++++++---------------------
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 0613421e09e7..f04935722457 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -548,48 +548,48 @@ static bool shmem_confirm_swap(struct address_space *mapping,
static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
-static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
- loff_t write_end, bool shmem_huge_force,
- struct vm_area_struct *vma,
- unsigned long vm_flags)
+static unsigned int __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ struct vm_area_struct *vma,
+ unsigned long vm_flags)
{
struct mm_struct *mm = vma ? vma->vm_mm : NULL;
loff_t i_size;
if (!S_ISREG(inode->i_mode))
- return false;
+ return 0;
if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags)))
- return false;
+ return 0;
if (shmem_huge == SHMEM_HUGE_DENY)
- return false;
+ return 0;
if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
- return true;
+ return BIT(HPAGE_PMD_ORDER);
switch (SHMEM_SB(inode->i_sb)->huge) {
case SHMEM_HUGE_ALWAYS:
- return true;
+ return BIT(HPAGE_PMD_ORDER);
case SHMEM_HUGE_WITHIN_SIZE:
index = round_up(index + 1, HPAGE_PMD_NR);
i_size = max(write_end, i_size_read(inode));
i_size = round_up(i_size, PAGE_SIZE);
if (i_size >> PAGE_SHIFT >= index)
- return true;
+ return BIT(HPAGE_PMD_ORDER);
fallthrough;
case SHMEM_HUGE_ADVISE:
if (mm && (vm_flags & VM_HUGEPAGE))
- return true;
+ return BIT(HPAGE_PMD_ORDER);
fallthrough;
default:
- return false;
+ return 0;
}
}
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
loff_t write_end, bool shmem_huge_force,
struct vm_area_struct *vma, unsigned long vm_flags)
{
if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
- return false;
+ return 0;
return __shmem_huge_global_enabled(inode, index, write_end,
shmem_huge_force, vma, vm_flags);
@@ -771,11 +771,11 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
return 0;
}
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
loff_t write_end, bool shmem_huge_force,
struct vm_area_struct *vma, unsigned long vm_flags)
{
- return false;
+ return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -1170,7 +1170,8 @@ static int shmem_getattr(struct mnt_idmap *idmap,
generic_fillattr(idmap, request_mask, inode, stat);
inode_unlock_shared(inode);
- if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
+ if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0) ==
+ BIT(HPAGE_PMD_ORDER))
stat->blksize = HPAGE_PMD_SIZE;
if (request_mask & STATX_BTIME) {
@@ -1679,7 +1680,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
unsigned long mask = READ_ONCE(huge_shmem_orders_always);
unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
unsigned long vm_flags = vma ? vma->vm_flags : 0;
- bool global_huge;
+ unsigned int global_order;
loff_t i_size;
int order;
@@ -1691,14 +1692,14 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
return 0;
- global_huge = shmem_huge_global_enabled(inode, index, write_end,
+ global_order = shmem_huge_global_enabled(inode, index, write_end,
shmem_huge_force, vma, vm_flags);
if (!vma || !vma_is_anon_shmem(vma)) {
/*
* For tmpfs, we now only support PMD sized THP if huge page
* is enabled, otherwise fallback to order 0.
*/
- return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
+ return global_order;
}
/*
@@ -1731,7 +1732,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
if (vm_flags & VM_HUGEPAGE)
mask |= READ_ONCE(huge_shmem_orders_madvise);
- if (global_huge)
+ if (global_order > 0)
mask |= READ_ONCE(huge_shmem_orders_inherit);
return THP_ORDERS_ALL_FILE_DEFAULT & mask;
--
2.39.3