* [PATCH v3 01/10] mm: Add pfn_swap_entry_folio()
2024-01-11 15:24 [PATCH v3 00/10] mm: convert mm counter to take a folio Matthew Wilcox (Oracle)
@ 2024-01-11 15:24 ` Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 02/10] proc: Use pfn_swap_entry_folio where obvious Matthew Wilcox (Oracle)
` (8 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-01-11 15:24 UTC (permalink / raw)
To: Andrew Morton
Cc: Matthew Wilcox (Oracle), linux-mm, Kefeng Wang, david, linux-s390
Thanks to the compound_head() hidden inside PageLocked(), this saves a
call to compound_head() over calling page_folio(pfn_swap_entry_to_page()).
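To make the saving concrete, the two call patterns compare roughly as
follows (illustrative only, not part of the diff below; 'entry' is a pfn
swap entry for a migration entry):

	/* old: pfn -> page -> folio */
	struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
	/* PageLocked() inside pfn_swap_entry_to_page() resolves the head
	 * page via compound_head(), and page_folio() resolves it again.
	 */

	/* new: pfn -> folio, resolving the head page only once */
	struct folio *folio = pfn_swap_entry_folio(entry);
	/* folio_test_locked() operates directly on the folio. */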
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/swapops.h | 13 +++++++++++++
mm/filemap.c | 2 +-
mm/huge_memory.c | 2 +-
3 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index bff1e8d97de0..48b700ba1d18 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -468,6 +468,19 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
return p;
}
+static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
+{
+ struct folio *folio = pfn_folio(swp_offset_pfn(entry));
+
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding folio is locked
+ */
+ BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));
+
+ return folio;
+}
+
/*
* A pfn swap entry is a special type of swap entry that always has a pfn stored
* in the swap offset. They are used to represent unaddressable device memory
diff --git a/mm/filemap.c b/mm/filemap.c
index 750e779c23db..c704085f29b6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1354,7 +1354,7 @@ void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
unsigned long pflags;
bool in_thrashing;
wait_queue_head_t *q;
- struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
+ struct folio *folio = pfn_swap_entry_folio(entry);
q = folio_waitqueue(folio);
if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 94ef5c02b459..5b2da360ae0c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2035,7 +2035,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
if (is_swap_pmd(*pmd)) {
swp_entry_t entry = pmd_to_swp_entry(*pmd);
- struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
+ struct folio *folio = pfn_swap_entry_folio(entry);
pmd_t newpmd;
VM_BUG_ON(!is_pmd_migration_entry(*pmd));
--
2.43.0
* [PATCH v3 02/10] proc: Use pfn_swap_entry_folio where obvious
2024-01-11 15:24 [PATCH v3 00/10] mm: convert mm counter to take a folio Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 01/10] mm: Add pfn_swap_entry_folio() Matthew Wilcox (Oracle)
@ 2024-01-11 15:24 ` Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 03/10] mprotect: Use pfn_swap_entry_folio Matthew Wilcox (Oracle)
` (7 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-01-11 15:24 UTC (permalink / raw)
To: Andrew Morton
Cc: Matthew Wilcox (Oracle), linux-mm, Kefeng Wang, david, linux-s390
These callers only pass the result to PageAnon(), so we can save the
extra call to compound_head() by using pfn_swap_entry_folio().
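For reference, an illustrative sketch (not part of the diff) of where the
compound_head() call disappears; PageAnon() on a page internally does
page_folio(page) before testing the anon bit:

	/* old: page-based test, hidden compound_head() in PageAnon() */
	if (!PageAnon(pfn_swap_entry_to_page(swp)))
		categories |= PAGE_IS_FILE;

	/* new: fetch the folio once and test it directly */
	if (!folio_test_anon(pfn_swap_entry_folio(swp)))
		categories |= PAGE_IS_FILE;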
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
fs/proc/task_mmu.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 62b16f42d5d2..6e75bfd6c3f8 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1807,7 +1807,7 @@ static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
if (p->masks_of_interest & PAGE_IS_FILE) {
swp = pte_to_swp_entry(pte);
if (is_pfn_swap_entry(swp) &&
- !PageAnon(pfn_swap_entry_to_page(swp)))
+ !folio_test_anon(pfn_swap_entry_folio(swp)))
categories |= PAGE_IS_FILE;
}
if (pte_swp_soft_dirty(pte))
@@ -1873,7 +1873,7 @@ static unsigned long pagemap_thp_category(struct pagemap_scan_private *p,
if (p->masks_of_interest & PAGE_IS_FILE) {
swp = pmd_to_swp_entry(pmd);
if (is_pfn_swap_entry(swp) &&
- !PageAnon(pfn_swap_entry_to_page(swp)))
+ !folio_test_anon(pfn_swap_entry_folio(swp)))
categories |= PAGE_IS_FILE;
}
}
--
2.43.0
* [PATCH v3 03/10] mprotect: Use pfn_swap_entry_folio
2024-01-11 15:24 [PATCH v3 00/10] mm: convert mm counter to take a folio Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 01/10] mm: Add pfn_swap_entry_folio() Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 02/10] proc: Use pfn_swap_entry_folio where obvious Matthew Wilcox (Oracle)
@ 2024-01-11 15:24 ` Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 04/10] s390: use pfn_swap_entry_folio() in ptep_zap_swap_entry() Matthew Wilcox (Oracle)
` (6 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-01-11 15:24 UTC (permalink / raw)
To: Andrew Morton
Cc: Matthew Wilcox (Oracle), linux-mm, Kefeng Wang, david, linux-s390
We only want to know whether the folio is anonymous, so use
pfn_swap_entry_folio() and save a call to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/mprotect.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 81991102f785..f8a4544b4601 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -198,13 +198,13 @@ static long change_pte_range(struct mmu_gather *tlb,
pte_t newpte;
if (is_writable_migration_entry(entry)) {
- struct page *page = pfn_swap_entry_to_page(entry);
+ struct folio *folio = pfn_swap_entry_folio(entry);
/*
* A protection check is difficult so
* just be safe and disable write
*/
- if (PageAnon(page))
+ if (folio_test_anon(folio))
entry = make_readable_exclusive_migration_entry(
swp_offset(entry));
else
--
2.43.0
* [PATCH v3 04/10] s390: use pfn_swap_entry_folio() in ptep_zap_swap_entry()
2024-01-11 15:24 [PATCH v3 00/10] mm: convert mm counter to take a folio Matthew Wilcox (Oracle)
` (2 preceding siblings ...)
2024-01-11 15:24 ` [PATCH v3 03/10] mprotect: Use pfn_swap_entry_folio Matthew Wilcox (Oracle)
@ 2024-01-11 15:24 ` Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 05/10] mm: use pfn_swap_entry_folio() in __split_huge_pmd_locked() Matthew Wilcox (Oracle)
` (5 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-01-11 15:24 UTC (permalink / raw)
To: Andrew Morton; +Cc: Kefeng Wang, linux-mm, david, linux-s390, Matthew Wilcox
From: Kefeng Wang <wangkefeng.wang@huawei.com>
Call pfn_swap_entry_folio() in ptep_zap_swap_entry() as preparation for
converting mm counter functions to take a folio.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
arch/s390/mm/pgtable.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 99422926efe1..7e5dd4b17664 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -721,9 +721,9 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
if (!non_swap_entry(entry))
dec_mm_counter(mm, MM_SWAPENTS);
else if (is_migration_entry(entry)) {
- struct page *page = pfn_swap_entry_to_page(entry);
+ struct folio *folio = pfn_swap_entry_folio(entry);
- dec_mm_counter(mm, mm_counter(page));
+ dec_mm_counter(mm, mm_counter(&folio->page));
}
free_swap_and_cache(entry);
}
--
2.43.0
* [PATCH v3 05/10] mm: use pfn_swap_entry_folio() in __split_huge_pmd_locked()
2024-01-11 15:24 [PATCH v3 00/10] mm: convert mm counter to take a folio Matthew Wilcox (Oracle)
` (3 preceding siblings ...)
2024-01-11 15:24 ` [PATCH v3 04/10] s390: use pfn_swap_entry_folio() in ptep_zap_swap_entry() Matthew Wilcox (Oracle)
@ 2024-01-11 15:24 ` Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 06/10] mm: use pfn_swap_entry_folio() in zap_huge_pmd() Matthew Wilcox (Oracle)
` (4 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-01-11 15:24 UTC (permalink / raw)
To: Andrew Morton; +Cc: Kefeng Wang, linux-mm, david, linux-s390, Matthew Wilcox
From: Kefeng Wang <wangkefeng.wang@huawei.com>
Call pfn_swap_entry_folio() in __split_huge_pmd_locked() as preparation
for converting mm counter functions to take a folio.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/huge_memory.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5b2da360ae0c..4ad1416b60e3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2432,7 +2432,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
swp_entry_t entry;
entry = pmd_to_swp_entry(old_pmd);
- page = pfn_swap_entry_to_page(entry);
+ folio = pfn_swap_entry_folio(entry);
} else {
page = pmd_page(old_pmd);
folio = page_folio(page);
@@ -2443,7 +2443,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
folio_remove_rmap_pmd(folio, page, vma);
folio_put(folio);
}
- add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
+ add_mm_counter(mm, mm_counter_file(&folio->page), -HPAGE_PMD_NR);
return;
}
--
2.43.0
* [PATCH v3 06/10] mm: use pfn_swap_entry_folio() in zap_huge_pmd()
2024-01-11 15:24 [PATCH v3 00/10] mm: convert mm counter to take a folio Matthew Wilcox (Oracle)
` (4 preceding siblings ...)
2024-01-11 15:24 ` [PATCH v3 05/10] mm: use pfn_swap_entry_folio() in __split_huge_pmd_locked() Matthew Wilcox (Oracle)
@ 2024-01-11 15:24 ` Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 07/10] mm: use pfn_swap_entry_folio() in copy_nonpresent_pte() Matthew Wilcox (Oracle)
` (3 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-01-11 15:24 UTC (permalink / raw)
To: Andrew Morton; +Cc: Kefeng Wang, linux-mm, david, linux-s390, Matthew Wilcox
From: Kefeng Wang <wangkefeng.wang@huawei.com>
Call pfn_swap_entry_folio() in zap_huge_pmd() as preparation for
converting mm counter functions to take a folio. Saves a call to
compound_head() embedded inside PageAnon().
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/huge_memory.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4ad1416b60e3..4a17306c7dda 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1895,12 +1895,14 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
zap_deposited_table(tlb->mm, pmd);
spin_unlock(ptl);
} else {
- struct page *page = NULL;
+ struct folio *folio = NULL;
int flush_needed = 1;
if (pmd_present(orig_pmd)) {
- page = pmd_page(orig_pmd);
- folio_remove_rmap_pmd(page_folio(page), page, vma);
+ struct page *page = pmd_page(orig_pmd);
+
+ folio = page_folio(page);
+ folio_remove_rmap_pmd(folio, page, vma);
VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
VM_BUG_ON_PAGE(!PageHead(page), page);
} else if (thp_migration_supported()) {
@@ -1908,23 +1910,24 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
entry = pmd_to_swp_entry(orig_pmd);
- page = pfn_swap_entry_to_page(entry);
+ folio = pfn_swap_entry_folio(entry);
flush_needed = 0;
} else
WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
- if (PageAnon(page)) {
+ if (folio_test_anon(folio)) {
zap_deposited_table(tlb->mm, pmd);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
} else {
if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
- add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
+ add_mm_counter(tlb->mm, mm_counter_file(&folio->page),
+ -HPAGE_PMD_NR);
}
spin_unlock(ptl);
if (flush_needed)
- tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
+ tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
}
return 1;
}
--
2.43.0
* [PATCH v3 07/10] mm: use pfn_swap_entry_folio() in copy_nonpresent_pte()
2024-01-11 15:24 [PATCH v3 00/10] mm: convert mm counter to take a folio Matthew Wilcox (Oracle)
` (5 preceding siblings ...)
2024-01-11 15:24 ` [PATCH v3 06/10] mm: use pfn_swap_entry_folio() in zap_huge_pmd() Matthew Wilcox (Oracle)
@ 2024-01-11 15:24 ` Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 08/10] mm: Convert to should_zap_page() to should_zap_folio() Matthew Wilcox (Oracle)
` (2 subsequent siblings)
9 siblings, 0 replies; 15+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-01-11 15:24 UTC (permalink / raw)
To: Andrew Morton; +Cc: Kefeng Wang, linux-mm, david, linux-s390, Matthew Wilcox
From: Kefeng Wang <wangkefeng.wang@huawei.com>
Call pfn_swap_entry_folio() as preparation for converting mm counter
functions to take a folio.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/memory.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 7e1f4849463a..60aa08f2ccdc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -806,9 +806,9 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
}
rss[MM_SWAPENTS]++;
} else if (is_migration_entry(entry)) {
- page = pfn_swap_entry_to_page(entry);
+ folio = pfn_swap_entry_folio(entry);
- rss[mm_counter(page)]++;
+ rss[mm_counter(&folio->page)]++;
if (!is_readable_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
--
2.43.0
* [PATCH v3 08/10] mm: Convert to should_zap_page() to should_zap_folio()
2024-01-11 15:24 [PATCH v3 00/10] mm: convert mm counter to take a folio Matthew Wilcox (Oracle)
` (6 preceding siblings ...)
2024-01-11 15:24 ` [PATCH v3 07/10] mm: use pfn_swap_entry_folio() in copy_nonpresent_pte() Matthew Wilcox (Oracle)
@ 2024-01-11 15:24 ` Matthew Wilcox (Oracle)
2024-01-12 5:03 ` kernel test robot
2024-01-11 15:24 ` [PATCH v3 09/10] mm: convert mm_counter() to take a folio Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 10/10] mm: convert mm_counter_file() " Matthew Wilcox (Oracle)
9 siblings, 1 reply; 15+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-01-11 15:24 UTC (permalink / raw)
To: Andrew Morton; +Cc: Kefeng Wang, linux-mm, david, linux-s390, Matthew Wilcox
From: Kefeng Wang <wangkefeng.wang@huawei.com>
Make should_zap_page() take a folio and rename it to should_zap_folio()
as preparation for converting mm counter functions to take a folio.
Saves a call to compound_head() hidden inside PageAnon().
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/memory.c | 29 ++++++++++++++++-------------
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 60aa08f2ccdc..b73322ab9fd6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1369,19 +1369,20 @@ static inline bool should_zap_cows(struct zap_details *details)
return details->even_cows;
}
-/* Decides whether we should zap this page with the page pointer specified */
-static inline bool should_zap_page(struct zap_details *details, struct page *page)
+/* Decides whether we should zap this folio with the folio pointer specified */
+static inline bool should_zap_folio(struct zap_details *details,
+ struct folio *folio)
{
- /* If we can make a decision without *page.. */
+ /* If we can make a decision without *folio.. */
if (should_zap_cows(details))
return true;
- /* E.g. the caller passes NULL for the case of a zero page */
- if (!page)
+ /* E.g. the caller passes NULL for the case of a zero folio */
+ if (!folio)
return true;
- /* Otherwise we should only zap non-anon pages */
- return !PageAnon(page);
+ /* Otherwise we should only zap non-anon folios */
+ return !folio_test_anon(folio);
}
static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
@@ -1447,7 +1448,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
unsigned int delay_rmap;
page = vm_normal_page(vma, addr, ptent);
- if (unlikely(!should_zap_page(details, page)))
+ if (page)
+ folio = page_folio(page);
+
+ if (unlikely(!should_zap_folio(details, folio)))
continue;
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
@@ -1460,7 +1464,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
continue;
}
- folio = page_folio(page);
delay_rmap = 0;
if (!folio_test_anon(folio)) {
if (pte_dirty(ptent)) {
@@ -1492,7 +1495,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
is_device_exclusive_entry(entry)) {
page = pfn_swap_entry_to_page(entry);
folio = page_folio(page);
- if (unlikely(!should_zap_page(details, page)))
+ if (unlikely(!should_zap_folio(details, folio)))
continue;
/*
* Both device private/exclusive mappings should only
@@ -1513,10 +1516,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (unlikely(!free_swap_and_cache(entry)))
print_bad_pte(vma, addr, ptent, NULL);
} else if (is_migration_entry(entry)) {
- page = pfn_swap_entry_to_page(entry);
- if (!should_zap_page(details, page))
+ folio = pfn_swap_entry_folio(entry);
+ if (!should_zap_folio(details, folio))
continue;
- rss[mm_counter(page)]--;
+ rss[mm_counter(&folio->page)]--;
} else if (pte_marker_entry_uffd_wp(entry)) {
/*
* For anon: always drop the marker; for file: only
--
2.43.0
* Re: [PATCH v3 08/10] mm: Convert to should_zap_page() to should_zap_folio()
2024-01-11 15:24 ` [PATCH v3 08/10] mm: Convert to should_zap_page() to should_zap_folio() Matthew Wilcox (Oracle)
@ 2024-01-12 5:03 ` kernel test robot
2024-01-12 10:14 ` Kefeng Wang
0 siblings, 1 reply; 15+ messages in thread
From: kernel test robot @ 2024-01-12 5:03 UTC (permalink / raw)
To: Matthew Wilcox (Oracle), Andrew Morton
Cc: llvm, oe-kbuild-all, Linux Memory Management List, Kefeng Wang,
david, linux-s390, Matthew Wilcox
Hi Matthew,
kernel test robot noticed the following build warnings:
[auto build test WARNING on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Matthew-Wilcox-Oracle/mm-Add-pfn_swap_entry_folio/20240111-232757
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20240111152429.3374566-9-willy%40infradead.org
patch subject: [PATCH v3 08/10] mm: Convert to should_zap_page() to should_zap_folio()
config: arm-milbeaut_m10v_defconfig (https://download.01.org/0day-ci/archive/20240112/202401121250.A221BL2D-lkp@intel.com/config)
compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git ae42196bc493ffe877a7e3dff8be32035dea4d07)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240112/202401121250.A221BL2D-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202401121250.A221BL2D-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> mm/memory.c:1451:8: warning: variable 'folio' is used uninitialized whenever 'if' condition is false [-Wsometimes-uninitialized]
if (page)
^~~~
mm/memory.c:1454:44: note: uninitialized use occurs here
if (unlikely(!should_zap_folio(details, folio)))
^~~~~
include/linux/compiler.h:77:42: note: expanded from macro 'unlikely'
# define unlikely(x) __builtin_expect(!!(x), 0)
^
mm/memory.c:1451:4: note: remove the 'if' if its condition is always true
if (page)
^~~~~~~~~
mm/memory.c:1438:22: note: initialize the variable 'folio' to silence this warning
struct folio *folio;
^
= NULL
1 warning generated.
vim +1451 mm/memory.c
1414
1415 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1416 struct vm_area_struct *vma, pmd_t *pmd,
1417 unsigned long addr, unsigned long end,
1418 struct zap_details *details)
1419 {
1420 struct mm_struct *mm = tlb->mm;
1421 int force_flush = 0;
1422 int rss[NR_MM_COUNTERS];
1423 spinlock_t *ptl;
1424 pte_t *start_pte;
1425 pte_t *pte;
1426 swp_entry_t entry;
1427
1428 tlb_change_page_size(tlb, PAGE_SIZE);
1429 init_rss_vec(rss);
1430 start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1431 if (!pte)
1432 return addr;
1433
1434 flush_tlb_batched_pending(mm);
1435 arch_enter_lazy_mmu_mode();
1436 do {
1437 pte_t ptent = ptep_get(pte);
1438 struct folio *folio;
1439 struct page *page;
1440
1441 if (pte_none(ptent))
1442 continue;
1443
1444 if (need_resched())
1445 break;
1446
1447 if (pte_present(ptent)) {
1448 unsigned int delay_rmap;
1449
1450 page = vm_normal_page(vma, addr, ptent);
> 1451 if (page)
1452 folio = page_folio(page);
1453
1454 if (unlikely(!should_zap_folio(details, folio)))
1455 continue;
1456 ptent = ptep_get_and_clear_full(mm, addr, pte,
1457 tlb->fullmm);
1458 arch_check_zapped_pte(vma, ptent);
1459 tlb_remove_tlb_entry(tlb, pte, addr);
1460 zap_install_uffd_wp_if_needed(vma, addr, pte, details,
1461 ptent);
1462 if (unlikely(!page)) {
1463 ksm_might_unmap_zero_page(mm, ptent);
1464 continue;
1465 }
1466
1467 delay_rmap = 0;
1468 if (!folio_test_anon(folio)) {
1469 if (pte_dirty(ptent)) {
1470 folio_set_dirty(folio);
1471 if (tlb_delay_rmap(tlb)) {
1472 delay_rmap = 1;
1473 force_flush = 1;
1474 }
1475 }
1476 if (pte_young(ptent) && likely(vma_has_recency(vma)))
1477 folio_mark_accessed(folio);
1478 }
1479 rss[mm_counter(page)]--;
1480 if (!delay_rmap) {
1481 folio_remove_rmap_pte(folio, page, vma);
1482 if (unlikely(page_mapcount(page) < 0))
1483 print_bad_pte(vma, addr, ptent, page);
1484 }
1485 if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) {
1486 force_flush = 1;
1487 addr += PAGE_SIZE;
1488 break;
1489 }
1490 continue;
1491 }
1492
1493 entry = pte_to_swp_entry(ptent);
1494 if (is_device_private_entry(entry) ||
1495 is_device_exclusive_entry(entry)) {
1496 page = pfn_swap_entry_to_page(entry);
1497 folio = page_folio(page);
1498 if (unlikely(!should_zap_folio(details, folio)))
1499 continue;
1500 /*
1501 * Both device private/exclusive mappings should only
1502 * work with anonymous page so far, so we don't need to
1503 * consider uffd-wp bit when zap. For more information,
1504 * see zap_install_uffd_wp_if_needed().
1505 */
1506 WARN_ON_ONCE(!vma_is_anonymous(vma));
1507 rss[mm_counter(page)]--;
1508 if (is_device_private_entry(entry))
1509 folio_remove_rmap_pte(folio, page, vma);
1510 folio_put(folio);
1511 } else if (!non_swap_entry(entry)) {
1512 /* Genuine swap entry, hence a private anon page */
1513 if (!should_zap_cows(details))
1514 continue;
1515 rss[MM_SWAPENTS]--;
1516 if (unlikely(!free_swap_and_cache(entry)))
1517 print_bad_pte(vma, addr, ptent, NULL);
1518 } else if (is_migration_entry(entry)) {
1519 folio = pfn_swap_entry_folio(entry);
1520 if (!should_zap_folio(details, folio))
1521 continue;
1522 rss[mm_counter(&folio->page)]--;
1523 } else if (pte_marker_entry_uffd_wp(entry)) {
1524 /*
1525 * For anon: always drop the marker; for file: only
1526 * drop the marker if explicitly requested.
1527 */
1528 if (!vma_is_anonymous(vma) &&
1529 !zap_drop_file_uffd_wp(details))
1530 continue;
1531 } else if (is_hwpoison_entry(entry) ||
1532 is_poisoned_swp_entry(entry)) {
1533 if (!should_zap_cows(details))
1534 continue;
1535 } else {
1536 /* We should have covered all the swap entry types */
1537 pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
1538 WARN_ON_ONCE(1);
1539 }
1540 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1541 zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
1542 } while (pte++, addr += PAGE_SIZE, addr != end);
1543
1544 add_mm_rss_vec(mm, rss);
1545 arch_leave_lazy_mmu_mode();
1546
1547 /* Do the actual TLB flush before dropping ptl */
1548 if (force_flush) {
1549 tlb_flush_mmu_tlbonly(tlb);
1550 tlb_flush_rmaps(tlb, vma);
1551 }
1552 pte_unmap_unlock(start_pte, ptl);
1553
1554 /*
1555 * If we forced a TLB flush (either due to running out of
1556 * batch buffers or because we needed to flush dirty TLB
1557 * entries before releasing the ptl), free the batched
1558 * memory too. Come back again if we didn't do everything.
1559 */
1560 if (force_flush)
1561 tlb_flush_mmu(tlb);
1562
1563 return addr;
1564 }
1565
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
* Re: [PATCH v3 08/10] mm: Convert to should_zap_page() to should_zap_folio()
2024-01-12 5:03 ` kernel test robot
@ 2024-01-12 10:14 ` Kefeng Wang
2024-01-22 17:19 ` Ryan Roberts
0 siblings, 1 reply; 15+ messages in thread
From: Kefeng Wang @ 2024-01-12 10:14 UTC (permalink / raw)
To: kernel test robot, Matthew Wilcox (Oracle), Andrew Morton
Cc: llvm, oe-kbuild-all, Linux Memory Management List, david, linux-s390
On 2024/1/12 13:03, kernel test robot wrote:
> Hi Matthew,
>
> kernel test robot noticed the following build warnings:
>
> [auto build test WARNING on akpm-mm/mm-everything]
>
> url: https://github.com/intel-lab-lkp/linux/commits/Matthew-Wilcox-Oracle/mm-Add-pfn_swap_entry_folio/20240111-232757
> base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
> patch link: https://lore.kernel.org/r/20240111152429.3374566-9-willy%40infradead.org
> patch subject: [PATCH v3 08/10] mm: Convert to should_zap_page() to should_zap_folio()
> config: arm-milbeaut_m10v_defconfig (https://download.01.org/0day-ci/archive/20240112/202401121250.A221BL2D-lkp@intel.com/config)
> compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git ae42196bc493ffe877a7e3dff8be32035dea4d07)
> reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240112/202401121250.A221BL2D-lkp@intel.com/reproduce)
>
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
> the same patch/commit), kindly add following tags
> | Reported-by: kernel test robot <lkp@intel.com>
> | Closes: https://lore.kernel.org/oe-kbuild-all/202401121250.A221BL2D-lkp@intel.com/
>
> All warnings (new ones prefixed by >>):
>
>>> mm/memory.c:1451:8: warning: variable 'folio' is used uninitialized whenever 'if' condition is false [-Wsometimes-uninitialized]
> if (page)
> ^~~~
> mm/memory.c:1454:44: note: uninitialized use occurs here
> if (unlikely(!should_zap_folio(details, folio)))
> ^~~~~
> include/linux/compiler.h:77:42: note: expanded from macro 'unlikely'
> # define unlikely(x) __builtin_expect(!!(x), 0)
> ^
> mm/memory.c:1451:4: note: remove the 'if' if its condition is always true
> if (page)
> ^~~~~~~~~
> mm/memory.c:1438:22: note: initialize the variable 'folio' to silence this warning
> struct folio *folio;
> ^
> = NULL
Hi Andrew, please help to squash the following change, thanks.
diff --git a/mm/memory.c b/mm/memory.c
index 998237b5600f..5e88d5379127 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1435,7 +1435,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = ptep_get(pte);
- struct folio *folio;
+ struct folio *folio = NULL;
struct page *page;
if (pte_none(ptent))
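For context, initializing folio to NULL keeps the existing behaviour for
ptes that vm_normal_page() reports as NULL (e.g. the zero page), because
should_zap_folio() - like should_zap_page() before it - treats a NULL
argument as "zap it":

	/* E.g. the caller passes NULL for the case of a zero folio */
	if (!folio)
		return true;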
* Re: [PATCH v3 08/10] mm: Convert to should_zap_page() to should_zap_folio()
2024-01-12 10:14 ` Kefeng Wang
@ 2024-01-22 17:19 ` Ryan Roberts
2024-01-22 17:32 ` Ryan Roberts
0 siblings, 1 reply; 15+ messages in thread
From: Ryan Roberts @ 2024-01-22 17:19 UTC (permalink / raw)
To: Kefeng Wang, kernel test robot, Matthew Wilcox (Oracle), Andrew Morton
Cc: llvm, oe-kbuild-all, Linux Memory Management List, david, linux-s390
On 12/01/2024 10:14, Kefeng Wang wrote:
>
>
> On 2024/1/12 13:03, kernel test robot wrote:
>> Hi Matthew,
>>
>> kernel test robot noticed the following build warnings:
>>
>> [auto build test WARNING on akpm-mm/mm-everything]
>>
>> url:
>> https://github.com/intel-lab-lkp/linux/commits/Matthew-Wilcox-Oracle/mm-Add-pfn_swap_entry_folio/20240111-232757
>> base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
>> patch link:
>> https://lore.kernel.org/r/20240111152429.3374566-9-willy%40infradead.org
>> patch subject: [PATCH v3 08/10] mm: Convert to should_zap_page() to
>> should_zap_folio()
>> config: arm-milbeaut_m10v_defconfig
>> (https://download.01.org/0day-ci/archive/20240112/202401121250.A221BL2D-lkp@intel.com/config)
>> compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git
>> ae42196bc493ffe877a7e3dff8be32035dea4d07)
>> reproduce (this is a W=1 build):
>> (https://download.01.org/0day-ci/archive/20240112/202401121250.A221BL2D-lkp@intel.com/reproduce)
>>
>> If you fix the issue in a separate patch/commit (i.e. not just a new version of
>> the same patch/commit), kindly add following tags
>> | Reported-by: kernel test robot <lkp@intel.com>
>> | Closes:
>> https://lore.kernel.org/oe-kbuild-all/202401121250.A221BL2D-lkp@intel.com/
>>
>> All warnings (new ones prefixed by >>):
>>
>>>> mm/memory.c:1451:8: warning: variable 'folio' is used uninitialized whenever
>>>> 'if' condition is false [-Wsometimes-uninitialized]
>> if (page)
>> ^~~~
>> mm/memory.c:1454:44: note: uninitialized use occurs here
>> if (unlikely(!should_zap_folio(details, folio)))
>> ^~~~~
>> include/linux/compiler.h:77:42: note: expanded from macro 'unlikely'
>> # define unlikely(x) __builtin_expect(!!(x), 0)
>> ^
>> mm/memory.c:1451:4: note: remove the 'if' if its condition is always true
>> if (page)
>> ^~~~~~~~~
>> mm/memory.c:1438:22: note: initialize the variable 'folio' to silence this
>> warning
>> struct folio *folio;
>> ^
>> = NULL
>
> Hi Andrew, please help to squash the following change, thanks.
I just independently found this issue during coincidental review of the code.
It's still a problem in mm-unstable, so I wondered if you missed the request, Andrew?
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 998237b5600f..5e88d5379127 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1435,7 +1435,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
> arch_enter_lazy_mmu_mode();
> do {
> pte_t ptent = ptep_get(pte);
> - struct folio *folio;
> + struct folio *folio = NULL;
> struct page *page;
>
> if (pte_none(ptent))
>
>
>
* Re: [PATCH v3 08/10] mm: Convert to should_zap_page() to should_zap_folio()
2024-01-22 17:19 ` Ryan Roberts
@ 2024-01-22 17:32 ` Ryan Roberts
0 siblings, 0 replies; 15+ messages in thread
From: Ryan Roberts @ 2024-01-22 17:32 UTC (permalink / raw)
To: Kefeng Wang, kernel test robot, Matthew Wilcox (Oracle), Andrew Morton
Cc: llvm, oe-kbuild-all, Linux Memory Management List, david, linux-s390
On 22/01/2024 17:19, Ryan Roberts wrote:
> On 12/01/2024 10:14, Kefeng Wang wrote:
>>
>>
>> On 2024/1/12 13:03, kernel test robot wrote:
>>> Hi Matthew,
>>>
>>> kernel test robot noticed the following build warnings:
>>>
>>> [auto build test WARNING on akpm-mm/mm-everything]
>>>
>>> url:
>>> https://github.com/intel-lab-lkp/linux/commits/Matthew-Wilcox-Oracle/mm-Add-pfn_swap_entry_folio/20240111-232757
>>> base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
>>> patch link:
>>> https://lore.kernel.org/r/20240111152429.3374566-9-willy%40infradead.org
>>> patch subject: [PATCH v3 08/10] mm: Convert to should_zap_page() to
>>> should_zap_folio()
>>> config: arm-milbeaut_m10v_defconfig
>>> (https://download.01.org/0day-ci/archive/20240112/202401121250.A221BL2D-lkp@intel.com/config)
>>> compiler: clang version 16.0.4 (https://github.com/llvm/llvm-project.git
>>> ae42196bc493ffe877a7e3dff8be32035dea4d07)
>>> reproduce (this is a W=1 build):
>>> (https://download.01.org/0day-ci/archive/20240112/202401121250.A221BL2D-lkp@intel.com/reproduce)
>>>
>>> If you fix the issue in a separate patch/commit (i.e. not just a new version of
>>> the same patch/commit), kindly add following tags
>>> | Reported-by: kernel test robot <lkp@intel.com>
>>> | Closes:
>>> https://lore.kernel.org/oe-kbuild-all/202401121250.A221BL2D-lkp@intel.com/
>>>
>>> All warnings (new ones prefixed by >>):
>>>
>>>>> mm/memory.c:1451:8: warning: variable 'folio' is used uninitialized whenever
>>>>> 'if' condition is false [-Wsometimes-uninitialized]
>>> if (page)
>>> ^~~~
>>> mm/memory.c:1454:44: note: uninitialized use occurs here
>>> if (unlikely(!should_zap_folio(details, folio)))
>>> ^~~~~
>>> include/linux/compiler.h:77:42: note: expanded from macro 'unlikely'
>>> # define unlikely(x) __builtin_expect(!!(x), 0)
>>> ^
>>> mm/memory.c:1451:4: note: remove the 'if' if its condition is always true
>>> if (page)
>>> ^~~~~~~~~
>>> mm/memory.c:1438:22: note: initialize the variable 'folio' to silence this
>>> warning
>>> struct folio *folio;
>>> ^
>>> = NULL
>>
>> Hi Andrew, please help to squash the following change, thanks.
>
> I just independently found this issue during coincidental review of the code.
> It's still a problem in mm-unstable, so I wondered if you missed the request, Andrew?
Sorry - please ignore this - I was confused. I see that it is in fact applied to
mm-unstable.
>
>>
>> diff --git a/mm/memory.c b/mm/memory.c
>> index 998237b5600f..5e88d5379127 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -1435,7 +1435,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
>> arch_enter_lazy_mmu_mode();
>> do {
>> pte_t ptent = ptep_get(pte);
>> - struct folio *folio;
>> + struct folio *folio = NULL;
>> struct page *page;
>>
>> if (pte_none(ptent))
>>
>>
>>
>
* [PATCH v3 09/10] mm: convert mm_counter() to take a folio
2024-01-11 15:24 [PATCH v3 00/10] mm: convert mm counter to take a folio Matthew Wilcox (Oracle)
` (7 preceding siblings ...)
2024-01-11 15:24 ` [PATCH v3 08/10] mm: Convert to should_zap_page() to should_zap_folio() Matthew Wilcox (Oracle)
@ 2024-01-11 15:24 ` Matthew Wilcox (Oracle)
2024-01-11 15:24 ` [PATCH v3 10/10] mm: convert mm_counter_file() " Matthew Wilcox (Oracle)
9 siblings, 0 replies; 15+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-01-11 15:24 UTC (permalink / raw)
To: Andrew Morton; +Cc: Kefeng Wang, linux-mm, david, linux-s390, Matthew Wilcox
From: Kefeng Wang <wangkefeng.wang@huawei.com>
Now that all callers of mm_counter() have a folio, convert mm_counter() to
take a folio. Saves a call to compound_head() hidden inside PageAnon().
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
arch/s390/mm/pgtable.c | 2 +-
include/linux/mm.h | 6 +++---
mm/memory.c | 10 +++++-----
mm/rmap.c | 8 ++++----
mm/userfaultfd.c | 2 +-
5 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7e5dd4b17664..b71432b15d66 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -723,7 +723,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
else if (is_migration_entry(entry)) {
struct folio *folio = pfn_swap_entry_folio(entry);
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(folio));
}
free_swap_and_cache(entry);
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f5a97dec5169..22e597b36b38 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2603,11 +2603,11 @@ static inline int mm_counter_file(struct page *page)
return MM_FILEPAGES;
}
-static inline int mm_counter(struct page *page)
+static inline int mm_counter(struct folio *folio)
{
- if (PageAnon(page))
+ if (folio_test_anon(folio))
return MM_ANONPAGES;
- return mm_counter_file(page);
+ return mm_counter_file(&folio->page);
}
static inline unsigned long get_mm_rss(struct mm_struct *mm)
diff --git a/mm/memory.c b/mm/memory.c
index b73322ab9fd6..53ef7ae96440 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -808,7 +808,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
} else if (is_migration_entry(entry)) {
folio = pfn_swap_entry_folio(entry);
- rss[mm_counter(&folio->page)]++;
+ rss[mm_counter(folio)]++;
if (!is_readable_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
@@ -840,7 +840,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* keep things as they are.
*/
folio_get(folio);
- rss[mm_counter(page)]++;
+ rss[mm_counter(folio)]++;
/* Cannot fail as these pages cannot get pinned. */
folio_try_dup_anon_rmap_pte(folio, page, src_vma);
@@ -1476,7 +1476,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
if (pte_young(ptent) && likely(vma_has_recency(vma)))
folio_mark_accessed(folio);
}
- rss[mm_counter(page)]--;
+ rss[mm_counter(folio)]--;
if (!delay_rmap) {
folio_remove_rmap_pte(folio, page, vma);
if (unlikely(page_mapcount(page) < 0))
@@ -1504,7 +1504,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
* see zap_install_uffd_wp_if_needed().
*/
WARN_ON_ONCE(!vma_is_anonymous(vma));
- rss[mm_counter(page)]--;
+ rss[mm_counter(folio)]--;
if (is_device_private_entry(entry))
folio_remove_rmap_pte(folio, page, vma);
folio_put(folio);
@@ -1519,7 +1519,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
folio = pfn_swap_entry_folio(entry);
if (!should_zap_folio(details, folio))
continue;
- rss[mm_counter(&folio->page)]--;
+ rss[mm_counter(folio)]--;
} else if (pte_marker_entry_uffd_wp(entry)) {
/*
* For anon: always drop the marker; for file: only
diff --git a/mm/rmap.c b/mm/rmap.c
index f5d43edad529..4648cf1d8178 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1780,7 +1780,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
set_huge_pte_at(mm, address, pvmw.pte, pteval,
hsz);
} else {
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(folio));
set_pte_at(mm, address, pvmw.pte, pteval);
}
@@ -1795,7 +1795,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* migration) will not expect userfaults on already
* copied pages.
*/
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(folio));
} else if (folio_test_anon(folio)) {
swp_entry_t entry = page_swap_entry(subpage);
pte_t swp_pte;
@@ -2181,7 +2181,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
set_huge_pte_at(mm, address, pvmw.pte, pteval,
hsz);
} else {
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(folio));
set_pte_at(mm, address, pvmw.pte, pteval);
}
@@ -2196,7 +2196,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
* migration) will not expect userfaults on already
* copied pages.
*/
- dec_mm_counter(mm, mm_counter(&folio->page));
+ dec_mm_counter(mm, mm_counter(folio));
} else {
swp_entry_t entry;
pte_t swp_pte;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 216ab4c8621f..662ab304cca3 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -124,7 +124,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
* Must happen after rmap, as mm_counter() checks mapping (via
* PageAnon()), which is set by __page_set_anon_rmap().
*/
- inc_mm_counter(dst_mm, mm_counter(page));
+ inc_mm_counter(dst_mm, mm_counter(folio));
set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
--
2.43.0
* [PATCH v3 10/10] mm: convert mm_counter_file() to take a folio
2024-01-11 15:24 [PATCH v3 00/10] mm: convert mm counter to take a folio Matthew Wilcox (Oracle)
` (8 preceding siblings ...)
2024-01-11 15:24 ` [PATCH v3 09/10] mm: convert mm_counter() to take a folio Matthew Wilcox (Oracle)
@ 2024-01-11 15:24 ` Matthew Wilcox (Oracle)
9 siblings, 0 replies; 15+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-01-11 15:24 UTC (permalink / raw)
To: Andrew Morton; +Cc: Kefeng Wang, linux-mm, david, linux-s390, Matthew Wilcox
From: Kefeng Wang <wangkefeng.wang@huawei.com>
Now that all callers of mm_counter_file() have a folio, convert
mm_counter_file() to take a folio. Saves a call to compound_head()
hidden inside PageSwapBacked().
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/mm.h | 8 ++++----
kernel/events/uprobes.c | 2 +-
mm/huge_memory.c | 4 ++--
mm/khugepaged.c | 4 ++--
mm/memory.c | 10 +++++-----
mm/rmap.c | 2 +-
6 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 22e597b36b38..ac6b71cbdffb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2595,10 +2595,10 @@ static inline void dec_mm_counter(struct mm_struct *mm, int member)
mm_trace_rss_stat(mm, member);
}
-/* Optimized variant when page is already known not to be PageAnon */
-static inline int mm_counter_file(struct page *page)
+/* Optimized variant when folio is already known not to be anon */
+static inline int mm_counter_file(struct folio *folio)
{
- if (PageSwapBacked(page))
+ if (folio_test_swapbacked(folio))
return MM_SHMEMPAGES;
return MM_FILEPAGES;
}
@@ -2607,7 +2607,7 @@ static inline int mm_counter(struct folio *folio)
{
if (folio_test_anon(folio))
return MM_ANONPAGES;
- return mm_counter_file(&folio->page);
+ return mm_counter_file(folio);
}
static inline unsigned long get_mm_rss(struct mm_struct *mm)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 485bb0389b48..948c2e064ca3 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -188,7 +188,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
dec_mm_counter(mm, MM_ANONPAGES);
if (!folio_test_anon(old_folio)) {
- dec_mm_counter(mm, mm_counter_file(old_page));
+ dec_mm_counter(mm, mm_counter_file(old_folio));
inc_mm_counter(mm, MM_ANONPAGES);
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4a17306c7dda..f40feb31b507 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1921,7 +1921,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
} else {
if (arch_needs_pgtable_deposit())
zap_deposited_table(tlb->mm, pmd);
- add_mm_counter(tlb->mm, mm_counter_file(&folio->page),
+ add_mm_counter(tlb->mm, mm_counter_file(folio),
-HPAGE_PMD_NR);
}
@@ -2446,7 +2446,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
folio_remove_rmap_pmd(folio, page, vma);
folio_put(folio);
}
- add_mm_counter(mm, mm_counter_file(&folio->page), -HPAGE_PMD_NR);
+ add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
return;
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 2b219acb528e..fe43fbc44525 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1634,7 +1634,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
/* step 3: set proper refcount and mm_counters. */
if (nr_ptes) {
folio_ref_sub(folio, nr_ptes);
- add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
+ add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
}
/* step 4: remove empty page table */
@@ -1665,7 +1665,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
if (nr_ptes) {
flush_tlb_mm(mm);
folio_ref_sub(folio, nr_ptes);
- add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
+ add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
}
if (start_pte)
pte_unmap_unlock(start_pte, ptl);
diff --git a/mm/memory.c b/mm/memory.c
index 53ef7ae96440..b8bc09696cc9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -966,7 +966,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
} else if (page) {
folio_get(folio);
folio_dup_file_rmap_pte(folio, page);
- rss[mm_counter_file(page)]++;
+ rss[mm_counter_file(folio)]++;
}
/*
@@ -1873,7 +1873,7 @@ static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
return -EBUSY;
/* Ok, finally just insert the thing.. */
folio_get(folio);
- inc_mm_counter(vma->vm_mm, mm_counter_file(page));
+ inc_mm_counter(vma->vm_mm, mm_counter_file(folio));
folio_add_file_rmap_pte(folio, page, vma);
set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
return 0;
@@ -3178,7 +3178,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
if (old_folio) {
if (!folio_test_anon(old_folio)) {
- dec_mm_counter(mm, mm_counter_file(&old_folio->page));
+ dec_mm_counter(mm, mm_counter_file(old_folio));
inc_mm_counter(mm, MM_ANONPAGES);
}
} else {
@@ -4463,7 +4463,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
if (write)
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
- add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
+ add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR);
folio_add_file_rmap_pmd(folio, page, vma);
/*
@@ -4526,7 +4526,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
folio_add_new_anon_rmap(folio, vma, addr);
folio_add_lru_vma(folio, vma);
} else {
- add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
+ add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
folio_add_file_rmap_ptes(folio, page, nr, vma);
}
set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
diff --git a/mm/rmap.c b/mm/rmap.c
index 4648cf1d8178..1cf2bffa48ed 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1903,7 +1903,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*
* See Documentation/mm/mmu_notifier.rst
*/
- dec_mm_counter(mm, mm_counter_file(&folio->page));
+ dec_mm_counter(mm, mm_counter_file(folio));
}
discard:
if (unlikely(folio_test_hugetlb(folio)))
--
2.43.0