在 2025/9/3 17:16, David Hildenbrand 写道:
+++ b/mm/filemap.c
@@ -3693,6 +3693,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
      }
        vmf->pte = old_ptep;
+    folio_put(folio);
        return ret;
  }
@@ -3705,7 +3706,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
      struct page *page = &folio->page;
        if (PageHWPoison(page))
-        return ret;
+        goto out;
        /* See comment of filemap_map_folio_range() */
      if (!folio_test_workingset(folio))
@@ -3717,15 +3718,17 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
       * the fault-around logic.
       */
      if (!pte_none(ptep_get(vmf->pte)))
-        return ret;
+        goto out;
        if (vmf->address == addr)
          ret = VM_FAULT_NOPAGE;
        set_pte_range(vmf, folio, page, 1, addr);
      (*rss)++;
-    folio_ref_inc(folio);
+    return ret;
  +out:
+    folio_put(folio);

We could use a folio_ref_dec() here instead:

    /* Locked folios cannot get truncated. */
    folio_ref_dec(folio);

      return ret;
  }
  @@ -3785,7 +3788,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
                      nr_pages, &rss, &mmap_miss);
            folio_unlock(folio);
-        folio_put(folio);
      } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
      add_mm_counter(vma->vm_mm, folio_type, rss);
      pte_unmap_unlock(vmf->pte, vmf->ptl);


I think we can optimize filemap_map_folio_range() as well:

diff --git a/mm/filemap.c b/mm/filemap.c
index b101405b770ae..d1fcddc72c5f6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3646,6 +3646,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
                        unsigned long addr, unsigned int nr_pages,
                        unsigned long *rss, unsigned short *mmap_miss)
 {
+       bool ref_from_caller = true;
        vm_fault_t ret = 0;
        struct page *page = folio_page(folio, start);
        unsigned int count = 0;
@@ -3679,7 +3680,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
                if (count) {
                        set_pte_range(vmf, folio, page, count, addr);
                        *rss += count;
-                       folio_ref_add(folio, count);
+                       if (count - ref_from_caller)
+                               folio_ref_add(folio, count - ref_from_caller);
+                       ref_from_caller = false;
                        if (in_range(vmf->address, addr, count * PAGE_SIZE))
                                ret = VM_FAULT_NOPAGE;
                }
@@ -3694,13 +3697,19 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
        if (count) {
                set_pte_range(vmf, folio, page, count, addr);
                *rss += count;
-               folio_ref_add(folio, count);
+               if (count - ref_from_caller)
+                       folio_ref_add(folio, count - ref_from_caller);
+               ref_from_caller = false;
                if (in_range(vmf->address, addr, count * PAGE_SIZE))
                        ret = VM_FAULT_NOPAGE;
        }
 
        vmf->pte = old_ptep;
 
+       if (ref_from_caller)
+               /* Locked folios cannot get truncated. */
+               folio_ref_dec(folio);
+
        return ret;
 }


It would save at least a folio_ref_dec(), and in corner cases (when only a single
page is mapped) also a folio_ref_add().

Maybe we can first count the number of references to add, and only call folio_ref_{add,sub}() once before returning:


--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3643,6 +3643,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
        struct page *page = folio_page(folio, start);
        unsigned int count = 0;
        pte_t *old_ptep = vmf->pte;
+       int ref_to_add = -1;
 
        do {
                if (PageHWPoison(page + count))
@@ -3672,7 +3673,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
                if (count) {
                        set_pte_range(vmf, folio, page, count, addr);
                        *rss += count;
-                       folio_ref_add(folio, count);
+                       ref_to_add += count;
                        if (in_range(vmf->address, addr, count * PAGE_SIZE))
                                ret = VM_FAULT_NOPAGE;
                }
@@ -3687,12 +3688,17 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
        if (count) {
                set_pte_range(vmf, folio, page, count, addr);
                *rss += count;
-               folio_ref_add(folio, count);
+               ref_to_add += count;
                if (in_range(vmf->address, addr, count * PAGE_SIZE))
                        ret = VM_FAULT_NOPAGE;
        }
 
        vmf->pte = old_ptep;
+       /* Locked folios cannot get truncated. */
+       if (ref_to_add > 0)
+               folio_ref_add(folio, ref_to_add);
+       else if (ref_to_add < 0)
+               folio_ref_sub(folio, -ref_to_add);
 
        return ret;
 }