linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Jinjiang Tu <tujinjiang@huawei.com>
To: David Hildenbrand <david@redhat.com>, <willy@infradead.org>,
	<akpm@linux-foundation.org>, <linux-mm@kvack.org>
Cc: <wangkefeng.wang@huawei.com>
Subject: Re: [PATCH v2] filemap: optimize order0 folio in filemap_map_pages
Date: Thu, 4 Sep 2025 09:05:32 +0800	[thread overview]
Message-ID: <f5dd70ab-c610-4142-be57-1fa311672b5e@huawei.com> (raw)
In-Reply-To: <3c283054-b14e-4f36-966f-78cf3bc0f3af@redhat.com>

[-- Attachment #1: Type: text/plain, Size: 6210 bytes --]


在 2025/9/3 17:16, David Hildenbrand 写道:
>> +++ b/mm/filemap.c
>> @@ -3693,6 +3693,7 @@ static vm_fault_t 
>> filemap_map_folio_range(struct vm_fault *vmf,
>>       }
>>         vmf->pte = old_ptep;
>> +    folio_put(folio);
>>         return ret;
>>   }
>> @@ -3705,7 +3706,7 @@ static vm_fault_t 
>> filemap_map_order0_folio(struct vm_fault *vmf,
>>       struct page *page = &folio->page;
>>         if (PageHWPoison(page))
>> -        return ret;
>> +        goto out;
>>         /* See comment of filemap_map_folio_range() */
>>       if (!folio_test_workingset(folio))
>> @@ -3717,15 +3718,17 @@ static vm_fault_t 
>> filemap_map_order0_folio(struct vm_fault *vmf,
>>        * the fault-around logic.
>>        */
>>       if (!pte_none(ptep_get(vmf->pte)))
>> -        return ret;
>> +        goto out;
>>         if (vmf->address == addr)
>>           ret = VM_FAULT_NOPAGE;
>>         set_pte_range(vmf, folio, page, 1, addr);
>>       (*rss)++;
>> -    folio_ref_inc(folio);
>> +    return ret;
>>   +out:
>> +    folio_put(folio);
>
> We can use a folio_ref_dec() here
>
>     /* Locked folios cannot get truncated. */
>     folio_ref_dec(folio);
>
>>       return ret;
>>   }
>>   @@ -3785,7 +3788,6 @@ vm_fault_t filemap_map_pages(struct vm_fault 
>> *vmf,
>>                       nr_pages, &rss, &mmap_miss);
>>             folio_unlock(folio);
>> -        folio_put(folio);
>>       } while ((folio = next_uptodate_folio(&xas, mapping, 
>> end_pgoff)) != NULL);
>>       add_mm_counter(vma->vm_mm, folio_type, rss);
>>       pte_unmap_unlock(vmf->pte, vmf->ptl);
>
>
> I think we can optimize filemap_map_folio_range() as well:
>
> diff --git a/mm/filemap.c b/mm/filemap.c
> index b101405b770ae..d1fcddc72c5f6 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -3646,6 +3646,7 @@ static vm_fault_t filemap_map_folio_range(struct 
> vm_fault *vmf,
>                         unsigned long addr, unsigned int nr_pages,
>                         unsigned long *rss, unsigned short *mmap_miss)
>  {
> +       bool ref_from_caller = true;
>         vm_fault_t ret = 0;
>         struct page *page = folio_page(folio, start);
>         unsigned int count = 0;
> @@ -3679,7 +3680,9 @@ static vm_fault_t filemap_map_folio_range(struct 
> vm_fault *vmf,
>                 if (count) {
>                         set_pte_range(vmf, folio, page, count, addr);
>                         *rss += count;
> -                       folio_ref_add(folio, count);
> +                       if (count - ref_from_caller)
> +                               folio_ref_add(folio, count - 
> ref_from_caller);
> +                       ref_from_caller = false;
>                         if (in_range(vmf->address, addr, count * 
> PAGE_SIZE))
>                                 ret = VM_FAULT_NOPAGE;
>                 }
> @@ -3694,13 +3697,19 @@ static vm_fault_t 
> filemap_map_folio_range(struct vm_fault *vmf,
>         if (count) {
>                 set_pte_range(vmf, folio, page, count, addr);
>                 *rss += count;
> -               folio_ref_add(folio, count);
> +               if (count - ref_from_caller)
> +                       folio_ref_add(folio, count - ref_from_caller);
> +               ref_from_caller = false;
>                 if (in_range(vmf->address, addr, count * PAGE_SIZE))
>                         ret = VM_FAULT_NOPAGE;
>         }
>
>         vmf->pte = old_ptep;
>
> +       if (ref_from_caller)
> +               /* Locked folios cannot get truncated. */
> +               folio_ref_dec(folio);
> +
>         return ret;
>  }
>
>
> It would save at least a folio_ref_dec(), and in corner cases (only 
> map a single page)
> also a folio_ref_add().
>
Maybe we can first accumulate the refcount delta, and only call folio_ref_{add,sub} once before returning


--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3643,6 +3643,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
         struct page *page = folio_page(folio, start);
         unsigned int count = 0;
         pte_t *old_ptep = vmf->pte;
+       int ref_to_add = -1;
  
         do {
                 if (PageHWPoison(page + count))
@@ -3672,7 +3673,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
                 if (count) {
                         set_pte_range(vmf, folio, page, count, addr);
                         *rss += count;
-                       folio_ref_add(folio, count);
+                       ref_to_add += count;
                         if (in_range(vmf->address, addr, count * PAGE_SIZE))
                                 ret = VM_FAULT_NOPAGE;
                 }
@@ -3687,12 +3688,17 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
         if (count) {
                 set_pte_range(vmf, folio, page, count, addr);
                 *rss += count;
-               folio_ref_add(folio, count);
+               ref_to_add += count;
                 if (in_range(vmf->address, addr, count * PAGE_SIZE))
                         ret = VM_FAULT_NOPAGE;
         }
  
         vmf->pte = old_ptep;
+       /* Locked folios cannot get truncated. */
+       if (ref_to_add > 0)
+               folio_ref_add(folio, ref_to_add);
+       else if (ref_to_add < 0)
+               folio_ref_sub(folio, -ref_to_add);
  
         return ret;
  }


[-- Attachment #2: Type: text/html, Size: 8239 bytes --]

  reply	other threads:[~2025-09-04  1:06 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-09-03  8:42 Jinjiang Tu
2025-09-03  9:16 ` David Hildenbrand
2025-09-04  1:05   ` Jinjiang Tu [this message]
2025-09-04  1:06     ` Jinjiang Tu
2025-09-04  6:20       ` David Hildenbrand
2025-09-04 12:12         ` Jinjiang Tu

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=f5dd70ab-c610-4142-be57-1fa311672b5e@huawei.com \
    --to=tujinjiang@huawei.com \
    --cc=akpm@linux-foundation.org \
    --cc=david@redhat.com \
    --cc=linux-mm@kvack.org \
    --cc=wangkefeng.wang@huawei.com \
    --cc=willy@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox