linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Alistair Popple <apopple@nvidia.com>
To: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Dan Williams <dan.j.williams@intel.com>,
	 Vishal Verma <vishal.l.verma@intel.com>,
	Dave Jiang <dave.jiang@intel.com>,
	nvdimm@lists.linux.dev,  linux-cxl@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, linux-mm@kvack.org
Subject: Re: [PATCH 2/2] dax: Use folios more widely within DAX
Date: Tue, 17 Dec 2024 09:25:49 +1100	[thread overview]
Message-ID: <oepbp7g4qhbovnoquftr4hrddqylfvuxpo5elu2l6wmofiyvuy@ukzkii7dvdie> (raw)
In-Reply-To: <20241216155408.8102-2-willy@infradead.org>

On Mon, Dec 16, 2024 at 03:53:56PM +0000, Matthew Wilcox (Oracle) wrote:
> Convert from pfn to folio instead of page and use those folios
> throughout to avoid accesses to page->index and page->mapping.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  fs/dax.c | 53 +++++++++++++++++++++++++++--------------------------
>  1 file changed, 27 insertions(+), 26 deletions(-)
> 
> diff --git a/fs/dax.c b/fs/dax.c
> index 21b47402b3dc..972febc6fb9d 100644
> --- a/fs/dax.c
> +++ b/fs/dax.c
> @@ -320,38 +320,39 @@ static unsigned long dax_end_pfn(void *entry)
>  	for (pfn = dax_to_pfn(entry); \
>  			pfn < dax_end_pfn(entry); pfn++)
>  
> -static inline bool dax_page_is_shared(struct page *page)
> +static inline bool dax_folio_is_shared(struct folio *folio)
>  {
> -	return page->mapping == PAGE_MAPPING_DAX_SHARED;
> +	return folio->mapping == PAGE_MAPPING_DAX_SHARED;

This will conflict with my series which introduces compound ZONE_DEVICE
pages to free up a PTE bit and allow FS DAX pages to be refcounted
normally. The main change is here -
https://lore.kernel.org/linux-mm/39a896451e59b735f205e34da5510ead5e4cd47d.1732239628.git-series.apopple@nvidia.com/

I'm hoping we can get that in linux-next "soon".

>  }
>  
>  /*
> - * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the
> + * Set the folio->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the
>   * refcount.
>   */
> -static inline void dax_page_share_get(struct page *page)
> +static inline void dax_folio_share_get(struct folio *folio)
>  {
> -	if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
> +	if (folio->mapping != PAGE_MAPPING_DAX_SHARED) {
>  		/*
>  		 * Reset the index if the page was already mapped
>  		 * regularly before.
>  		 */
> -		if (page->mapping)
> -			page->share = 1;
> -		page->mapping = PAGE_MAPPING_DAX_SHARED;
> +		if (folio->mapping)
> +			folio->page.share = 1;

It also moves the share accounting to the folio, so we could remove the
whole page->index/share union once that's merged.

> +		folio->mapping = PAGE_MAPPING_DAX_SHARED;
>  	}
> -	page->share++;
> +	folio->page.share++;
>  }
>  
> -static inline unsigned long dax_page_share_put(struct page *page)
> +static inline unsigned long dax_folio_share_put(struct folio *folio)
>  {
> -	return --page->share;
> +	return --folio->page.share;
>  }
>  
>  /*
> - * When it is called in dax_insert_entry(), the shared flag will indicate that
> - * whether this entry is shared by multiple files.  If so, set the page->mapping
> - * PAGE_MAPPING_DAX_SHARED, and use page->share as refcount.
> + * When it is called in dax_insert_entry(), the shared flag will indicate
> + * that whether this entry is shared by multiple files.  If so, set
> + * the folio->mapping PAGE_MAPPING_DAX_SHARED, and use page->share
> + * as refcount.
>   */
>  static void dax_associate_entry(void *entry, struct address_space *mapping,
>  		struct vm_area_struct *vma, unsigned long address, bool shared)
> @@ -364,14 +365,14 @@ static void dax_associate_entry(void *entry, struct address_space *mapping,
>  
>  	index = linear_page_index(vma, address & ~(size - 1));
>  	for_each_mapped_pfn(entry, pfn) {
> -		struct page *page = pfn_to_page(pfn);
> +		struct folio *folio = pfn_folio(pfn);
>  
>  		if (shared) {
> -			dax_page_share_get(page);
> +			dax_folio_share_get(folio);
>  		} else {
> -			WARN_ON_ONCE(page->mapping);
> -			page->mapping = mapping;
> -			page->index = index + i++;
> +			WARN_ON_ONCE(folio->mapping);
> +			folio->mapping = mapping;
> +			folio->index = index + i++;
>  		}
>  	}
>  }
> @@ -385,17 +386,17 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
>  		return;
>  
>  	for_each_mapped_pfn(entry, pfn) {
> -		struct page *page = pfn_to_page(pfn);
> +		struct folio *folio = pfn_folio(pfn);
>  
> -		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
> -		if (dax_page_is_shared(page)) {
> +		WARN_ON_ONCE(trunc && folio_ref_count(folio) > 1);
> +		if (dax_folio_is_shared(folio)) {
>  			/* keep the shared flag if this page is still shared */
> -			if (dax_page_share_put(page) > 0)
> +			if (dax_folio_share_put(folio) > 0)
>  				continue;
>  		} else
> -			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
> -		page->mapping = NULL;
> -		page->index = 0;
> +			WARN_ON_ONCE(folio->mapping && folio->mapping != mapping);
> +		folio->mapping = NULL;
> +		folio->index = 0;
>  	}
>  }
>  
> -- 
> 2.45.2
> 
> 


  parent reply	other threads:[~2024-12-16 22:26 UTC|newest]

Thread overview: 10+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-12-16 15:53 [PATCH 1/2] dax: Remove access to page->index Matthew Wilcox (Oracle)
2024-12-16 15:53 ` [PATCH 2/2] dax: Use folios more widely within DAX Matthew Wilcox (Oracle)
2024-12-16 17:49   ` jane.chu
2024-12-16 22:25   ` Alistair Popple [this message]
2024-12-16 17:49 ` [PATCH 1/2] dax: Remove access to page->index jane.chu
2025-01-07  0:43 ` Dan Williams
2025-01-07 23:24   ` Alistair Popple
2025-02-14 16:16     ` Matthew Wilcox
2025-02-14 23:37     ` Andrew Morton
2025-02-14 21:44 ` Dan Williams

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=oepbp7g4qhbovnoquftr4hrddqylfvuxpo5elu2l6wmofiyvuy@ukzkii7dvdie \
    --to=apopple@nvidia.com \
    --cc=dan.j.williams@intel.com \
    --cc=dave.jiang@intel.com \
    --cc=linux-cxl@vger.kernel.org \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=nvdimm@lists.linux.dev \
    --cc=vishal.l.verma@intel.com \
    --cc=willy@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox