* [PATCH v2] mm: Convert DAX lock/unlock page to lock/unlock folio
From: Jane Chu @ 2023-09-08 19:52 UTC
To: willy, akpm, nvdimm, dan.j.williams, naoya.horiguchi, linux-mm
The one caller of DAX lock/unlock page already calls compound_head(),
so use page_folio() instead, then use a folio throughout the DAX code
to remove uses of page->mapping and page->index.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Jane Chu <jane.chu@oracle.com>
---
fs/dax.c | 24 ++++++++++++------------
include/linux/dax.h | 10 +++++-----
mm/memory-failure.c | 29 ++++++++++++++++-------------
3 files changed, 33 insertions(+), 30 deletions(-)
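As a quick illustration of the conversion pattern (a sketch, not a hunk
from this patch): a folio by definition never refers to a tail page, so
page_folio() subsumes the explicit compound_head() step the old caller
performed:

	struct page *page = pfn_to_page(pfn);
	/* old: normalize to the head page by hand */
	struct page *head = compound_head(page);
	/* new: a folio always refers to the head, subsuming the above */
	struct folio *folio = page_folio(page);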
diff --git a/fs/dax.c b/fs/dax.c
index 906ecbd541a3..c70d4da047db 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -426,23 +426,23 @@ static struct page *dax_busy_page(void *entry)
return NULL;
}
-/*
- * dax_lock_page - Lock the DAX entry corresponding to a page
- * @page: The page whose entry we want to lock
+/**
+ * dax_lock_folio - Lock the DAX entry corresponding to a folio
+ * @folio: The folio whose entry we want to lock
*
* Context: Process context.
- * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could
* not be locked.
*/
-dax_entry_t dax_lock_page(struct page *page)
+dax_entry_t dax_lock_folio(struct folio *folio)
{
XA_STATE(xas, NULL, 0);
void *entry;
- /* Ensure page->mapping isn't freed while we look at it */
+ /* Ensure folio->mapping isn't freed while we look at it */
rcu_read_lock();
for (;;) {
- struct address_space *mapping = READ_ONCE(page->mapping);
+ struct address_space *mapping = READ_ONCE(folio->mapping);
entry = NULL;
if (!mapping || !dax_mapping(mapping))
@@ -461,11 +461,11 @@ dax_entry_t dax_lock_page(struct page *page)
xas.xa = &mapping->i_pages;
xas_lock_irq(&xas);
- if (mapping != page->mapping) {
+ if (mapping != folio->mapping) {
xas_unlock_irq(&xas);
continue;
}
- xas_set(&xas, page->index);
+ xas_set(&xas, folio->index);
entry = xas_load(&xas);
if (dax_is_locked(entry)) {
rcu_read_unlock();
@@ -481,10 +481,10 @@ dax_entry_t dax_lock_page(struct page *page)
return (dax_entry_t)entry;
}
-void dax_unlock_page(struct page *page, dax_entry_t cookie)
+void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
- struct address_space *mapping = page->mapping;
- XA_STATE(xas, &mapping->i_pages, page->index);
+ struct address_space *mapping = folio->mapping;
+ XA_STATE(xas, &mapping->i_pages, folio->index);
if (S_ISCHR(mapping->host->i_mode))
return;
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 261944ec0887..711deb72c109 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -159,8 +159,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
-dax_entry_t dax_lock_page(struct page *page);
-void dax_unlock_page(struct page *page, dax_entry_t cookie);
+dax_entry_t dax_lock_folio(struct folio *folio);
+void dax_unlock_folio(struct folio *folio, dax_entry_t cookie);
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
unsigned long index, struct page **page);
void dax_unlock_mapping_entry(struct address_space *mapping,
@@ -182,14 +182,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
return -EOPNOTSUPP;
}
-static inline dax_entry_t dax_lock_page(struct page *page)
+static inline dax_entry_t dax_lock_folio(struct folio *folio)
{
- if (IS_DAX(page->mapping->host))
+ if (IS_DAX(folio->mapping->host))
return ~0UL;
return 0;
}
-static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
+static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index fe121fdb05f7..90e04fedebbc 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1710,20 +1710,23 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
}
+/*
+ * Only dev_pagemap pages get here: fsdax, when the filesystem either
+ * does not claim or fails to claim a hwpoison event, or devdax.
+ * The fsdax pages are initialized per base page, while the devdax
+ * pages could be initialized either as base pages or as compound
+ * pages with vmemmap optimization enabled. Devdax is simplistic in
+ * its handling of hwpoison: if a subpage of a compound page is
+ * poisoned, simply marking the compound head page is sufficient.
+ */
static int mf_generic_kill_procs(unsigned long long pfn, int flags,
struct dev_pagemap *pgmap)
{
- struct page *page = pfn_to_page(pfn);
+ struct folio *folio = page_folio(pfn_to_page(pfn));
LIST_HEAD(to_kill);
dax_entry_t cookie;
int rc = 0;
- /*
- * Pages instantiated by device-dax (not filesystem-dax)
- * may be compound pages.
- */
- page = compound_head(page);
-
/*
* Prevent the inode from being freed while we are interrogating
* the address_space, typically this would be handled by
@@ -1731,11 +1734,11 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
* also prevents changes to the mapping of this pfn until
* poison signaling is complete.
*/
- cookie = dax_lock_page(page);
+ cookie = dax_lock_folio(folio);
if (!cookie)
return -EBUSY;
- if (hwpoison_filter(page)) {
+ if (hwpoison_filter(&folio->page)) {
rc = -EOPNOTSUPP;
goto unlock;
}
@@ -1757,7 +1760,7 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
* Use this flag as an indication that the dax page has been
* remapped UC to prevent speculative consumption of poison.
*/
- SetPageHWPoison(page);
+ SetPageHWPoison(&folio->page);
/*
* Unlike System-RAM there is no possibility to swap in a
@@ -1766,11 +1769,11 @@ static int mf_generic_kill_procs(unsigned long long pfn, int flags,
* SIGBUS (i.e. MF_MUST_KILL)
*/
flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
- collect_procs(page, &to_kill, true);
+ collect_procs(&folio->page, &to_kill, true);
- unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags);
+ unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
unlock:
- dax_unlock_page(page, cookie);
+ dax_unlock_folio(folio, cookie);
return rc;
}
base-commit: 727dbda16b83600379061c4ca8270ef3e2f51922
--
2.18.4
* Re: [PATCH v2] mm: Convert DAX lock/unlock page to lock/unlock folio
From: Matthew Wilcox @ 2023-09-08 21:08 UTC
To: Jane Chu; +Cc: akpm, nvdimm, dan.j.williams, naoya.horiguchi, linux-mm
On Fri, Sep 08, 2023 at 01:52:15PM -0600, Jane Chu wrote:
You need to put a From: line at the top of this so that if someone
applies this it shows me as author rather than you.
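The first line of the body would then read something like:

	From: Matthew Wilcox (Oracle) <willy@infradead.org>

(address taken from the Signed-off-by below) so that git-am records the
authorship correctly.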
> The one caller of DAX lock/unlock page already calls compound_head(),
> so use page_folio() instead, then use a folio throughout the DAX code
> to remove uses of page->mapping and page->index.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Signed-off-by: Jane Chu <jane.chu@oracle.com>
> ---
You should say what changed from v1 here. Also Naoya Horiguchi offered
an Acked-by tag that would be appropriate to include.
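A sketch of where each piece would go (the changelog text itself is
yours to fill in):

	Acked-by: Naoya Horiguchi <...>	(with the other tags, above ---)
	---
	v1 -> v2: <summary of what changed>

keeping the version notes below the --- so they stay out of the commit
message.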
> fs/dax.c | 24 ++++++++++++------------
> include/linux/dax.h | 10 +++++-----
> mm/memory-failure.c | 29 ++++++++++++++++-------------
> 3 files changed, 33 insertions(+), 30 deletions(-)
> +++ b/mm/memory-failure.c
> @@ -1710,20 +1710,23 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
> kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
> }
>
> +/*
> + * Only dev_pagemap pages get here: fsdax, when the filesystem either
> + * does not claim or fails to claim a hwpoison event, or devdax.
> + * The fsdax pages are initialized per base page, while the devdax
> + * pages could be initialized either as base pages or as compound
> + * pages with vmemmap optimization enabled. Devdax is simplistic in
> + * its handling of hwpoison: if a subpage of a compound page is
> + * poisoned, simply marking the compound head page is sufficient.
> + */
> static int mf_generic_kill_procs(unsigned long long pfn, int flags,
> struct dev_pagemap *pgmap)
> {
> - struct page *page = pfn_to_page(pfn);
> + struct folio *folio = page_folio(pfn_to_page(pfn));
We have a pfn_folio() (which does the same thing, but may not always)
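For reference, pfn_folio() in include/linux/mm.h is (as of this
writing, if I'm reading it right) just a trivial wrapper:

	static inline struct folio *pfn_folio(unsigned long pfn)
	{
		return page_folio(pfn_to_page(pfn));
	}

so the two spellings are equivalent today; using the wrapper simply
insulates callers if that ever changes.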
* Re: [PATCH v2] mm: Convert DAX lock/unlock page to lock/unlock folio
From: Jane Chu @ 2023-09-08 21:56 UTC
To: Matthew Wilcox; +Cc: akpm, nvdimm, dan.j.williams, naoya.horiguchi, linux-mm
On 9/8/2023 2:08 PM, Matthew Wilcox wrote:
> On Fri, Sep 08, 2023 at 01:52:15PM -0600, Jane Chu wrote:
>
> You need to put a From: line at the top of this so that if someone
> applies this it shows me as author rather than you.
Sorry, will fix.
>
>> The one caller of DAX lock/unlock page already calls compound_head(),
>> so use page_folio() instead, then use a folio throughout the DAX code
>> to remove uses of page->mapping and page->index.
>>
>> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
>> Signed-off-by: Jane Chu <jane.chu@oracle.com>
>> ---
>
> You should say what changed from v1 here. Also Naoya Horiguchi offered
> an Acked-by tag that would be appropriate to include.
Sure.
>
>> fs/dax.c | 24 ++++++++++++------------
>> include/linux/dax.h | 10 +++++-----
>> mm/memory-failure.c | 29 ++++++++++++++++-------------
>> 3 files changed, 33 insertions(+), 30 deletions(-)
>
>> +++ b/mm/memory-failure.c
>> @@ -1710,20 +1710,23 @@ static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
>> kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
>> }
>>
>> +/*
>> + * Only dev_pagemap pages get here: fsdax, when the filesystem either
>> + * does not claim or fails to claim a hwpoison event, or devdax.
>> + * The fsdax pages are initialized per base page, while the devdax
>> + * pages could be initialized either as base pages or as compound
>> + * pages with vmemmap optimization enabled. Devdax is simplistic in
>> + * its handling of hwpoison: if a subpage of a compound page is
>> + * poisoned, simply marking the compound head page is sufficient.
>> + */
>> static int mf_generic_kill_procs(unsigned long long pfn, int flags,
>> struct dev_pagemap *pgmap)
>> {
>> - struct page *page = pfn_to_page(pfn);
>> + struct folio *folio = page_folio(pfn_to_page(pfn));
>
> We have a pfn_folio() (which does the same thing, but may not always)
>
It does the same thing, will replace in a respin.
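i.e., presumably:

	-	struct folio *folio = page_folio(pfn_to_page(pfn));
	+	struct folio *folio = pfn_folio(pfn);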
thanks!
-jane