On 04.09.25 03:06, Jinjiang Tu wrote:
On 2025/9/4 9:05, Jinjiang Tu wrote:
On 2025/9/3 17:16, David Hildenbrand wrote:
Maybe we can first count the references to add, and only call folio_ref_{add,sub} once before returning:

--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3693,6 +3693,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	}
 	vmf->pte = old_ptep;
+	folio_put(folio);
 	return ret;
 }
@@ -3705,7 +3706,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 	struct page *page = &folio->page;
 
 	if (PageHWPoison(page))
-		return ret;
+		goto out;
 
 	/* See comment of filemap_map_folio_range() */
 	if (!folio_test_workingset(folio))
@@ -3717,15 +3718,17 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 	 * the fault-around logic.
 	 */
 	if (!pte_none(ptep_get(vmf->pte)))
-		return ret;
+		goto out;
 
 	if (vmf->address == addr)
 		ret = VM_FAULT_NOPAGE;
 
 	set_pte_range(vmf, folio, page, 1, addr);
 	(*rss)++;
-	folio_ref_inc(folio);
+	return ret;
+
+out:
+	folio_put(folio);
We can use a folio_ref_dec() here instead: the folio is locked, so it cannot get truncated, and its refcount therefore cannot drop to zero:

	/* Locked folios cannot get truncated. */
	folio_ref_dec(folio);

 	return ret;
 }
@@ -3785,7 +3788,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 				nr_pages, &rss, &mmap_miss);
 
 		folio_unlock(folio);
-		folio_put(folio);
 	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
 	add_mm_counter(vma->vm_mm, folio_type, rss);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
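The net effect is an ownership transfer: the reference that next_uptodate_folio() hands to the caller is passed on to the new PTE mapping, instead of taking an extra reference for the mapping and dropping the caller's afterwards. Here is a minimal userspace sketch of that pattern (struct toy_folio and both helpers are made-up stand-ins, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for a folio's reference count. */
struct toy_folio { atomic_int refcount; };

/* Old scheme: take a reference for the new mapping, then the
 * caller drops its own reference afterwards: two atomic ops. */
static void map_then_put(struct toy_folio *f)
{
	atomic_fetch_add(&f->refcount, 1);	/* reference for the PTE */
	atomic_fetch_sub(&f->refcount, 1);	/* caller's reference dropped */
}

/* New scheme: the caller's reference is transferred to the PTE
 * mapping, so no atomic operation is needed at all. */
static void map_transfer(struct toy_folio *f)
{
	(void)f;	/* ownership moves; the count is already correct */
}

int main(void)
{
	struct toy_folio f = { .refcount = 2 };	/* pagecache ref + caller ref */

	map_then_put(&f);
	printf("old scheme ends with refcount %d\n", atomic_load(&f.refcount));

	f.refcount = 2;
	map_transfer(&f);
	printf("new scheme ends with refcount %d\n", atomic_load(&f.refcount));
	return 0;
}

Both variants end with the same count; only the number of atomic operations differs.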
I think we can optimize filemap_map_folio_range() as well:
diff --git a/mm/filemap.c b/mm/filemap.c
index b101405b770ae..d1fcddc72c5f6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3646,6 +3646,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 		unsigned long addr, unsigned int nr_pages,
 		unsigned long *rss, unsigned short *mmap_miss)
 {
+	bool ref_from_caller = true;
 	vm_fault_t ret = 0;
 	struct page *page = folio_page(folio, start);
 	unsigned int count = 0;
@@ -3679,7 +3680,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 		if (count) {
 			set_pte_range(vmf, folio, page, count, addr);
 			*rss += count;
-			folio_ref_add(folio, count);
+			if (count - ref_from_caller)
+				folio_ref_add(folio, count - ref_from_caller);
+			ref_from_caller = false;
 			if (in_range(vmf->address, addr, count * PAGE_SIZE))
 				ret = VM_FAULT_NOPAGE;
 		}
@@ -3694,13 +3697,19 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	if (count) {
 		set_pte_range(vmf, folio, page, count, addr);
 		*rss += count;
-		folio_ref_add(folio, count);
+		if (count - ref_from_caller)
+			folio_ref_add(folio, count - ref_from_caller);
+		ref_from_caller = false;
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
 	}
 
 	vmf->pte = old_ptep;
+	if (ref_from_caller)
+		/* Locked folios cannot get truncated. */
+		folio_ref_dec(folio);
+
 	return ret;
 }
This would save at least a folio_ref_dec(), and in corner cases (when only a single page is mapped) also a folio_ref_add(): a single batch of N pages then costs one folio_ref_add(N - 1) instead of folio_ref_add(N) plus the final folio_ref_dec(), and no atomic operation at all when N == 1.
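The `count - ref_from_caller` expression leans on C's bool-to-int promotion: the first batch absorbs the caller's reference, so only count - 1 references need to be added, and the atomic op disappears when that difference is zero. A small standalone sketch of just this arithmetic (refs_to_add() is a toy helper, not the kernel function):

#include <stdbool.h>
#include <stdio.h>

/* Toy helper mirroring the diff above: how many references must
 * actually be added for a batch of 'count' newly mapped pages. */
static unsigned int refs_to_add(unsigned int count, bool *ref_from_caller)
{
	unsigned int add = count - *ref_from_caller;	/* bool promotes to 0/1 */

	*ref_from_caller = false;	/* the first batch consumes it */
	return add;
}

int main(void)
{
	bool ref_from_caller = true;

	/* First batch of 1 page: fully covered by the caller's reference. */
	printf("batch of 1 -> add %u\n", refs_to_add(1, &ref_from_caller));
	/* Later batch of 4 pages: all 4 references must be added. */
	printf("batch of 4 -> add %u\n", refs_to_add(4, &ref_from_caller));
	return 0;
}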
I'm not a fan of that. I'm planning on moving the folio_ref_add() before the set_pte_range(), so we can minimize the number of false positives with our folio_ref_count() != folio_expected_ref_count() checks, and so I can sanity-check, when adjusting the mapcount, that the refcount is always >= the mapcount.
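To illustrate that ordering argument with a minimal userspace sketch (struct toy_folio, map_batch(), and the plain counters are made-up stand-ins for folio_ref_count()/folio_mapcount(), not kernel code):

#include <assert.h>
#include <stdio.h>

/* Toy model: 'refcount' and 'mapcount' stand in for
 * folio_ref_count() and folio_mapcount(). */
struct toy_folio { int refcount; int mapcount; };

/* Take the references *before* raising the mapcount, as the
 * planned reordering of folio_ref_add()/set_pte_range() would;
 * the invariant can then be asserted at every mapcount update. */
static void map_batch(struct toy_folio *f, int count)
{
	f->refcount += count;			/* folio_ref_add() first */
	f->mapcount += count;			/* then set_pte_range() */
	assert(f->refcount >= f->mapcount);	/* holds in this order */
}

int main(void)
{
	struct toy_folio f = { .refcount = 1, .mapcount = 0 };	/* pagecache ref */

	map_batch(&f, 4);
	printf("refcount=%d mapcount=%d\n", f.refcount, f.mapcount);
	return 0;
}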
I see. I will send v3 with the diff you suggested.
Thanks.