From: Baolin Wang <baolin.wang@linux.alibaba.com>
To: akpm@linux-foundation.org, hughd@google.com
Cc: willy@infradead.org, david@redhat.com,
wangkefeng.wang@huawei.com, 21cnbao@gmail.com,
ryan.roberts@arm.com, ioworker0@gmail.com, da.gomez@samsung.com,
baolin.wang@linux.alibaba.com, linux-mm@kvack.org,
linux-kernel@vger.kernel.org
Subject: [PATCH v2 2/5] mm: shmem: change shmem_huge_global_enabled() to return huge order bitmap
Date: Tue, 12 Nov 2024 15:45:49 +0800
Message-ID: <fdfc5ccea1e15eb611bedebed6ec44287b462887.1731397290.git.baolin.wang@linux.alibaba.com>
In-Reply-To: <cover.1731397290.git.baolin.wang@linux.alibaba.com>
Change shmem_huge_global_enabled() to return the suitable huge order
bitmap, and return 0 if huge pages are not allowed. This is a
preparation for supporting various huge order allocations for tmpfs
in the following patches.
No functional changes.
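For reference, the returned value follows the same order-bitmap
convention as shmem_allowable_huge_orders(): bit N set means order-N
huge folios are allowed, so the PMD-sized case is encoded as
BIT(HPAGE_PMD_ORDER). Below is a minimal userspace sketch of that
convention only (not kernel code: BIT() and HPAGE_PMD_ORDER are
redefined locally, and the value 9 assumes x86-64 with 4K base pages):

#include <stdio.h>

/* Userspace stand-ins for the kernel definitions. HPAGE_PMD_ORDER is
 * 9 on x86-64 with 4K base pages (2M PMD / 4K page = 512 = 1 << 9). */
#define BIT(nr) (1UL << (nr))
#define HPAGE_PMD_ORDER 9

int main(void)
{
	/* What the SHMEM_HUGE_ALWAYS case now returns: only the
	 * PMD-sized order is allowed. */
	unsigned int global_orders = BIT(HPAGE_PMD_ORDER);

	/* Any bit set in the bitmap marks an allowed folio order;
	 * 0 means no huge orders are allowed. */
	for (int order = 0; order <= HPAGE_PMD_ORDER; order++)
		if (global_orders & BIT(order))
			printf("order %d allowed (%lu KiB folios)\n",
			       order, 4UL << order);
	return 0;
}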
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
mm/shmem.c | 40 ++++++++++++++++++++--------------------
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 579e58cb3262..86b2e417dc6f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -549,37 +549,37 @@ static bool shmem_confirm_swap(struct address_space *mapping,
static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
- loff_t write_end, bool shmem_huge_force,
- unsigned long vm_flags)
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ unsigned long vm_flags)
{
loff_t i_size;
if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
- return false;
+ return 0;
if (!S_ISREG(inode->i_mode))
- return false;
+ return 0;
if (shmem_huge == SHMEM_HUGE_DENY)
- return false;
+ return 0;
if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
- return true;
+ return BIT(HPAGE_PMD_ORDER);
switch (SHMEM_SB(inode->i_sb)->huge) {
case SHMEM_HUGE_ALWAYS:
- return true;
+ return BIT(HPAGE_PMD_ORDER);
case SHMEM_HUGE_WITHIN_SIZE:
index = round_up(index + 1, HPAGE_PMD_NR);
i_size = max(write_end, i_size_read(inode));
i_size = round_up(i_size, PAGE_SIZE);
if (i_size >> PAGE_SHIFT >= index)
- return true;
+ return BIT(HPAGE_PMD_ORDER);
fallthrough;
case SHMEM_HUGE_ADVISE:
if (vm_flags & VM_HUGEPAGE)
- return true;
+ return BIT(HPAGE_PMD_ORDER);
fallthrough;
default:
- return false;
+ return 0;
}
}
@@ -774,11 +774,11 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
return 0;
}
-static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
- loff_t write_end, bool shmem_huge_force,
- unsigned long vm_flags)
+static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force,
+ unsigned long vm_flags)
{
- return false;
+ return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -1682,21 +1682,21 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
unsigned long mask = READ_ONCE(huge_shmem_orders_always);
unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
unsigned long vm_flags = vma ? vma->vm_flags : 0;
- bool global_huge;
+ unsigned int global_orders;
loff_t i_size;
int order;
if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags)))
return 0;
- global_huge = shmem_huge_global_enabled(inode, index, write_end,
- shmem_huge_force, vm_flags);
+ global_orders = shmem_huge_global_enabled(inode, index, write_end,
+ shmem_huge_force, vm_flags);
if (!vma || !vma_is_anon_shmem(vma)) {
/*
* For tmpfs, we now only support PMD sized THP if huge page
* is enabled, otherwise fallback to order 0.
*/
- return global_huge ? BIT(HPAGE_PMD_ORDER) : 0;
+ return global_orders;
}
/*
@@ -1729,7 +1729,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
if (vm_flags & VM_HUGEPAGE)
mask |= READ_ONCE(huge_shmem_orders_madvise);
- if (global_huge)
+ if (global_orders > 0)
mask |= READ_ONCE(huge_shmem_orders_inherit);
return THP_ORDERS_ALL_FILE_DEFAULT & mask;
--
2.39.3
Thread overview: 23+ messages
2024-11-12 7:45 [PATCH v2 0/5] Support large folios for tmpfs Baolin Wang
2024-11-12 7:45 ` [PATCH v2 1/5] mm: factor out the order calculation into a new helper Baolin Wang
2024-11-15 13:54 ` Daniel Gomez
2024-11-12 7:45 ` Baolin Wang [this message]
2024-11-12 16:03 ` [PATCH v2 2/5] mm: shmem: change shmem_huge_global_enabled() to return huge order bitmap David Hildenbrand
2024-11-12 7:45 ` [PATCH v2 3/5] mm: shmem: add large folio support for tmpfs Baolin Wang
2024-11-12 16:19 ` David Hildenbrand
2024-11-12 16:21 ` David Hildenbrand
2024-11-13 3:07 ` Baolin Wang
2024-11-15 13:48 ` David Hildenbrand
2024-11-13 6:53 ` [PATCH] mm: shmem: add large folio support for tmpfs fix Baolin Wang
2024-11-12 7:45 ` [PATCH v2 4/5] mm: shmem: add a kernel command line to change the default huge policy for tmpfs Baolin Wang
2024-11-15 14:02 ` Daniel Gomez
2024-11-15 14:54 ` David Hildenbrand
2024-11-16 3:00 ` Baolin Wang
2024-11-12 7:45 ` [PATCH v2 5/5] docs: tmpfs: update the huge folios policy for tmpfs and shmem Baolin Wang
2024-11-13 6:57 ` [PATCH] docs: tmpfs: update the huge folios policy for tmpfs and shmem fix Baolin Wang
2024-11-20 21:35 ` Barry Song
2024-11-22 11:12 ` David Hildenbrand
2024-11-15 13:16 ` [PATCH v2 0/5] Support large folios for tmpfs Daniel Gomez
2024-11-15 13:35 ` David Hildenbrand
2024-11-15 15:35 ` Daniel Gomez
2024-11-15 15:44 ` David Hildenbrand