From: Dev Jain <dev.jain@arm.com>
To: akpm@linux-foundation.org, david@redhat.com, willy@infradead.org,
kirill.shutemov@linux.intel.com
Cc: npache@redhat.com, ryan.roberts@arm.com,
anshuman.khandual@arm.com, catalin.marinas@arm.com,
cl@gentwo.org, vbabka@suse.cz, mhocko@suse.com,
apopple@nvidia.com, dave.hansen@linux.intel.com, will@kernel.org,
baohua@kernel.org, jack@suse.cz, srivatsa@csail.mit.edu,
haowenchao22@gmail.com, hughd@google.com,
aneesh.kumar@kernel.org, yang@os.amperecomputing.com,
peterx@redhat.com, ioworker0@gmail.com,
wangkefeng.wang@huawei.com, ziy@nvidia.com, jglisse@google.com,
surenb@google.com, vishal.moola@gmail.com, zokeefe@google.com,
zhengqi.arch@bytedance.com, jhubbard@nvidia.com,
21cnbao@gmail.com, linux-mm@kvack.org,
linux-kernel@vger.kernel.org, Dev Jain <dev.jain@arm.com>
Subject: [PATCH v2 12/17] khugepaged: Enable variable-sized VMA collapse
Date: Tue, 11 Feb 2025 16:43:21 +0530
Message-ID: <20250211111326.14295-13-dev.jain@arm.com>
In-Reply-To: <20250211111326.14295-1-dev.jain@arm.com>
Applications in general may have many VMAs smaller than PMD size; khugepaged
currently skips these entirely, since it rounds each VMA's scannable range to
PMD_SIZE. It is therefore essential that khugepaged be able to collapse such
VMAs too.

Plumb the mask of allowable orders into hpage_collapse_scan_pmd(), and teach
khugepaged_scan_mm_slot() to select, per VMA, the highest order whose aligned
range still contains the scan address, falling back to lower orders via
next_order(). The scan cursor then advances by PAGE_SIZE << order. File-backed
(shmem) VMAs continue to be scanned at PMD order only, and madvise_collapse()
keeps its existing behaviour by passing BIT(HPAGE_PMD_ORDER) explicitly.
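
For reviewers unfamiliar with the order-bitmask helpers, below is a minimal
userspace sketch (not kernel code) of the order-selection walk this patch adds
to khugepaged_scan_mm_slot(). highest_order() and next_order() are modelled on
the helpers in include/linux/huge_mm.h; the PAGE_SHIFT value, the orders mask
and the addresses are made-up values for illustration only:

/*
 * Sketch of the per-VMA order-selection walk: given a bitmask of
 * candidate orders, pick the highest order whose rounded-down VMA
 * end still covers the scan address.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static int highest_order(unsigned long orders)
{
	return 8 * sizeof(orders) - 1 - __builtin_clzl(orders);
}

static int next_order(unsigned long *orders, int prev)
{
	*orders &= ~(1UL << prev);	/* discard the order that did not fit */
	return *orders ? highest_order(*orders) : 0;
}

int main(void)
{
	/* candidate orders: 9 (PMD on 4K pages), 4 (64K mTHP), 2 (16K mTHP) */
	unsigned long orders = (1UL << 9) | (1UL << 4) | (1UL << 2);
	unsigned long vm_end = 0x700000300000UL;	/* hypothetical VMA end */
	unsigned long scan   = 0x700000200000UL;	/* hypothetical scan address */
	unsigned long hend = 0;
	int order = highest_order(orders);

	/* highest order whose rounded-down end covers the scan address */
	while (orders) {
		hend = vm_end & ~((PAGE_SIZE << order) - 1);	/* round_down() */
		if (scan <= hend)
			break;
		order = next_order(&orders, order);
	}

	if (orders)
		printf("selected order %d, hend %#lx\n", order, hend);
	else
		printf("no order fits this VMA\n");
	return 0;
}

With these example values the walk settles on order 9 (PMD), because the
2MiB-rounded end of the VMA still covers the scan address; were the scan
address past the VMA's last 2MiB boundary, the walk would retry at the next
lower order in the mask (here 4).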
Signed-off-by: Dev Jain <dev.jain@arm.com>
---
mm/khugepaged.c | 68 +++++++++++++++++++++++++++++--------------------
1 file changed, 41 insertions(+), 27 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 37cfa7beba3d..048f990d8507 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1413,7 +1413,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
static int hpage_collapse_scan_pmd(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address, bool *mmap_locked,
- struct collapse_control *cc)
+ unsigned long orders, struct collapse_control *cc)
{
pmd_t *pmd;
pte_t *pte, *_pte;
@@ -1425,22 +1425,14 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
unsigned long _address, orig_address = address;
int node = NUMA_NO_NODE;
bool writable = false;
- unsigned long orders, orig_orders;
+ unsigned long orig_orders;
int order, prev_order;
bool all_pfns_present, all_pfns_contig, first_pfn_aligned;
pte_t prev_pteval;
- VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-
- orders = thp_vma_allowable_orders(vma, vma->vm_flags,
- TVA_IN_PF | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL_ANON);
- orders = thp_vma_suitable_orders(vma, address, orders);
orig_orders = orders;
order = highest_order(orders);
-
- /* MADV_COLLAPSE needs to work irrespective of sysfs setting */
- if (!cc->is_khugepaged)
- order = HPAGE_PMD_ORDER;
+ VM_BUG_ON(address & ((PAGE_SIZE << order) - 1));
scan_pte_range:
@@ -1667,7 +1659,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
decide_order:
/* Immediately exit on exhaustion of range */
- if (_address == orig_address + (PAGE_SIZE << HPAGE_PMD_ORDER))
+ if (_address == orig_address + (PAGE_SIZE << (highest_order(orig_orders))))
goto out;
/* Get highest order possible starting from address */
@@ -2636,6 +2628,9 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
struct mm_struct *mm;
struct vm_area_struct *vma;
int progress = 0;
+ unsigned long orders;
+ int order;
+ bool is_file_vma;
VM_BUG_ON(!pages);
lockdep_assert_held(&khugepaged_mm_lock);
@@ -2675,19 +2670,40 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
progress++;
break;
}
- if (!thp_vma_allowable_orders(vma, vma->vm_flags,
- TVA_ENFORCE_SYSFS, THP_ORDERS_ALL_ANON)) {
+ orders = thp_vma_allowable_orders(vma, vma->vm_flags,
+ TVA_ENFORCE_SYSFS, THP_ORDERS_ALL_ANON);
+ if (!orders) {
skip:
progress++;
continue;
}
- hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
- hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
+
+ /* We can collapse anonymous VMAs less than PMD_SIZE */
+ is_file_vma = IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma);
+ if (is_file_vma) {
+ order = HPAGE_PMD_ORDER;
+ if (!(orders & (1UL << order)))
+ goto skip;
+ hend = round_down(vma->vm_end, PAGE_SIZE << order);
+ }
+ else {
+ /* select the highest possible order for the VMA */
+ order = highest_order(orders);
+ while (orders) {
+ hend = round_down(vma->vm_end, PAGE_SIZE << order);
+ if (khugepaged_scan.address <= hend)
+ break;
+ order = next_order(&orders, order);
+ }
+ }
+ if (!orders)
+ goto skip;
if (khugepaged_scan.address > hend)
goto skip;
+ hstart = round_up(vma->vm_start, PAGE_SIZE << order);
if (khugepaged_scan.address < hstart)
khugepaged_scan.address = hstart;
- VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+ VM_BUG_ON(khugepaged_scan.address & ((PAGE_SIZE << order) - 1));
while (khugepaged_scan.address < hend) {
bool mmap_locked = true;
@@ -2697,13 +2713,9 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
goto breakouterloop;
VM_BUG_ON(khugepaged_scan.address < hstart ||
- khugepaged_scan.address + HPAGE_PMD_SIZE >
+ khugepaged_scan.address + (PAGE_SIZE << order) >
hend);
- if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
- if (!thp_vma_allowable_order(vma, vma->vm_flags,
- TVA_ENFORCE_SYSFS, PMD_ORDER))
- break;
-
+ if (is_file_vma) {
struct file *file = get_file(vma->vm_file);
pgoff_t pgoff = linear_page_index(vma,
khugepaged_scan.address);
@@ -2725,15 +2737,15 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
}
} else {
*result = hpage_collapse_scan_pmd(mm, vma,
- khugepaged_scan.address, &mmap_locked, cc);
+ khugepaged_scan.address, &mmap_locked, orders, cc);
}
if (*result == SCAN_SUCCEED)
++khugepaged_pages_collapsed;
/* move to next address */
- khugepaged_scan.address += HPAGE_PMD_SIZE;
- progress += HPAGE_PMD_NR;
+ khugepaged_scan.address += (PAGE_SIZE << order);
+ progress += (1UL << order);
if (!mmap_locked)
/*
* We released mmap_lock so break loop. Note
@@ -3060,7 +3072,9 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
fput(file);
} else {
result = hpage_collapse_scan_pmd(mm, vma, addr,
- &mmap_locked, cc);
+ &mmap_locked,
+ BIT(HPAGE_PMD_ORDER),
+ cc);
}
if (!mmap_locked)
*prev = NULL; /* Tell caller we dropped mmap_lock */
--
2.30.2
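
A side note on the VM_BUG_ON() and stepping changes in the hunks above: once
the order is variable, both the alignment check and the cursor advance derive
from PAGE_SIZE << order rather than HPAGE_PMD_SIZE. A standalone sketch of
that invariant follows; PAGE_SHIFT, the order and the start address are
assumptions for illustration:

/* Variable-order alignment/stepping invariant, shown in isolation. */
#include <assert.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	int order = 4;					/* e.g. a 64KiB mTHP on 4K pages */
	unsigned long step = PAGE_SIZE << order;	/* bytes scanned per iteration */
	unsigned long addr = 0x700000100000UL;		/* order-aligned scan cursor */

	assert((addr & (step - 1)) == 0);	/* the VM_BUG_ON() condition above */
	addr += step;				/* "move to next address" */
	assert((addr & (step - 1)) == 0);	/* advancing by step keeps alignment */
	return 0;
}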