* [PATCH] mm: Use folio more widely in __split_huge_page
@ 2024-02-27 18:05 Matthew Wilcox (Oracle)
2024-02-27 18:35 ` Sidhartha Kumar
0 siblings, 1 reply; 3+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-02-27 18:05 UTC (permalink / raw)
To: Andrew Morton, linux-mm; +Cc: Matthew Wilcox (Oracle)
We already have a folio; use it instead of the head page where reasonable.
Saves a couple of calls to compound_head() and eliminates a few
references to page->mapping.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
mm/huge_memory.c | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 28341a5067fb..aeb6671f7c44 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2884,7 +2884,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
struct lruvec *lruvec;
struct address_space *swap_cache = NULL;
unsigned long offset = 0;
- unsigned int nr = thp_nr_pages(head);
+ unsigned int nr = folio_nr_pages(folio);
int i, nr_dropped = 0;
/* complete memcg works before add pages to LRU */
@@ -2907,7 +2907,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
if (head[i].index >= end) {
struct folio *tail = page_folio(head + i);
- if (shmem_mapping(head->mapping))
+ if (shmem_mapping(folio->mapping))
nr_dropped++;
else if (folio_test_clear_dirty(tail))
folio_account_cleaned(tail,
@@ -2915,7 +2915,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
__filemap_remove_folio(tail, NULL);
folio_put(tail);
} else if (!PageAnon(page)) {
- __xa_store(&head->mapping->i_pages, head[i].index,
+ __xa_store(&folio->mapping->i_pages, head[i].index,
head + i, 0);
} else if (swap_cache) {
__xa_store(&swap_cache->i_pages, offset + i,
@@ -2930,23 +2930,23 @@ static void __split_huge_page(struct page *page, struct list_head *list,
split_page_owner(head, nr);
/* See comment in __split_huge_page_tail() */
- if (PageAnon(head)) {
+ if (folio_test_anon(folio)) {
/* Additional pin to swap cache */
- if (PageSwapCache(head)) {
- page_ref_add(head, 2);
+ if (folio_test_swapcache(folio)) {
+ folio_ref_add(folio, 2);
xa_unlock(&swap_cache->i_pages);
} else {
- page_ref_inc(head);
+ folio_ref_inc(folio);
}
} else {
/* Additional pin to page cache */
- page_ref_add(head, 2);
- xa_unlock(&head->mapping->i_pages);
+ folio_ref_add(folio, 2);
+ xa_unlock(&folio->mapping->i_pages);
}
local_irq_enable();
if (nr_dropped)
- shmem_uncharge(head->mapping->host, nr_dropped);
+ shmem_uncharge(folio->mapping->host, nr_dropped);
remap_page(folio, nr);
if (folio_test_swapcache(folio))
@@ -2954,9 +2954,10 @@ static void __split_huge_page(struct page *page, struct list_head *list,
for (i = 0; i < nr; i++) {
struct page *subpage = head + i;
+ struct folio *new_folio = page_folio(subpage);
if (subpage == page)
continue;
- unlock_page(subpage);
+ folio_unlock(new_folio);
/*
* Subpages may be freed if there wasn't any mapping
--
2.43.0
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] mm: Use folio more widely in __split_huge_page
2024-02-27 18:05 [PATCH] mm: Use folio more widely in __split_huge_page Matthew Wilcox (Oracle)
@ 2024-02-27 18:35 ` Sidhartha Kumar
0 siblings, 0 replies; 3+ messages in thread
From: Sidhartha Kumar @ 2024-02-27 18:35 UTC (permalink / raw)
To: Matthew Wilcox (Oracle), Andrew Morton, linux-mm
On 2/27/24 10:05 AM, Matthew Wilcox (Oracle) wrote:
> We already have a folio; use it instead of the head page where reasonable.
> Saves a couple of calls to compound_head() and elimimnates a few
> references to page->mapping.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
> mm/huge_memory.c | 23 ++++++++++++-----------
> 1 file changed, 12 insertions(+), 11 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 28341a5067fb..aeb6671f7c44 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2884,7 +2884,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
> struct lruvec *lruvec;
> struct address_space *swap_cache = NULL;
> unsigned long offset = 0;
> - unsigned int nr = thp_nr_pages(head);
> + unsigned int nr = folio_nr_pages(folio);
> int i, nr_dropped = 0;
>
> /* complete memcg works before add pages to LRU */
> @@ -2907,7 +2907,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
> if (head[i].index >= end) {
> struct folio *tail = page_folio(head + i);
>
> - if (shmem_mapping(head->mapping))
> + if (shmem_mapping(folio->mapping))
> nr_dropped++;
> else if (folio_test_clear_dirty(tail))
> folio_account_cleaned(tail,
> @@ -2915,7 +2915,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
> __filemap_remove_folio(tail, NULL);
> folio_put(tail);
> } else if (!PageAnon(page)) {
> - __xa_store(&head->mapping->i_pages, head[i].index,
> + __xa_store(&folio->mapping->i_pages, head[i].index,
> head + i, 0);
> } else if (swap_cache) {
> __xa_store(&swap_cache->i_pages, offset + i,
> @@ -2930,23 +2930,23 @@ static void __split_huge_page(struct page *page, struct list_head *list,
> split_page_owner(head, nr);
>
> /* See comment in __split_huge_page_tail() */
> - if (PageAnon(head)) {
> + if (folio_test_anon(folio)) {
> /* Additional pin to swap cache */
> - if (PageSwapCache(head)) {
> - page_ref_add(head, 2);
> + if (folio_test_swapcache(folio)) {
> + folio_ref_add(folio, 2);
> xa_unlock(&swap_cache->i_pages);
> } else {
> - page_ref_inc(head);
> + folio_ref_inc(folio);
> }
> } else {
> /* Additional pin to page cache */
> - page_ref_add(head, 2);
> - xa_unlock(&head->mapping->i_pages);
> + folio_ref_add(folio, 2);
> + xa_unlock(&folio->mapping->i_pages);
> }
> local_irq_enable();
>
> if (nr_dropped)
> - shmem_uncharge(head->mapping->host, nr_dropped);
> + shmem_uncharge(folio->mapping->host, nr_dropped);
> remap_page(folio, nr);
>
> if (folio_test_swapcache(folio))
> @@ -2954,9 +2954,10 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>
> for (i = 0; i < nr; i++) {
> struct page *subpage = head + i;
> + struct folio *new_folio = page_folio(subpage);
> if (subpage == page)
> continue;
> - unlock_page(subpage);
> + folio_unlock(new_folio);
>
> /*
> * Subpages may be freed if there wasn't any mapping
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
^ permalink raw reply [flat|nested] 3+ messages in thread
* [PATCH] mm: Use folio more widely in __split_huge_page
@ 2024-02-28 16:42 Matthew Wilcox (Oracle)
0 siblings, 0 replies; 3+ messages in thread
From: Matthew Wilcox (Oracle) @ 2024-02-28 16:42 UTC (permalink / raw)
To: Andrew Morton, linux-mm; +Cc: Matthew Wilcox (Oracle), Sidhartha Kumar
We already have a folio; use it instead of the head page where reasonable.
Saves a couple of calls to compound_head() and eliminates a few
references to page->mapping.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
---
mm/huge_memory.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 50d146eb248f..110730e855c2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2919,7 +2919,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
if (head[i].index >= end) {
struct folio *tail = page_folio(head + i);
- if (shmem_mapping(head->mapping))
+ if (shmem_mapping(folio->mapping))
nr_dropped++;
else if (folio_test_clear_dirty(tail))
folio_account_cleaned(tail,
@@ -2927,7 +2927,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
__filemap_remove_folio(tail, NULL);
folio_put(tail);
} else if (!PageAnon(page)) {
- __xa_store(&head->mapping->i_pages, head[i].index,
+ __xa_store(&folio->mapping->i_pages, head[i].index,
head + i, 0);
} else if (swap_cache) {
__xa_store(&swap_cache->i_pages, offset + i,
@@ -2948,23 +2948,23 @@ static void __split_huge_page(struct page *page, struct list_head *list,
split_page_owner(head, order, new_order);
/* See comment in __split_huge_page_tail() */
- if (PageAnon(head)) {
+ if (folio_test_anon(folio)) {
/* Additional pin to swap cache */
- if (PageSwapCache(head)) {
- page_ref_add(head, 1 + new_nr);
+ if (folio_test_swapcache(folio)) {
+ folio_ref_add(folio, 1 + new_nr);
xa_unlock(&swap_cache->i_pages);
} else {
- page_ref_inc(head);
+ folio_ref_inc(folio);
}
} else {
/* Additional pin to page cache */
- page_ref_add(head, 1 + new_nr);
- xa_unlock(&head->mapping->i_pages);
+ folio_ref_add(folio, 1 + new_nr);
+ xa_unlock(&folio->mapping->i_pages);
}
local_irq_enable();
if (nr_dropped)
- shmem_uncharge(head->mapping->host, nr_dropped);
+ shmem_uncharge(folio->mapping->host, nr_dropped);
remap_page(folio, nr);
if (folio_test_swapcache(folio))
@@ -2980,9 +2980,10 @@ static void __split_huge_page(struct page *page, struct list_head *list,
for (i = 0; i < nr; i += new_nr) {
struct page *subpage = head + i;
+ struct folio *new_folio = page_folio(subpage);
if (subpage == page)
continue;
- unlock_page(subpage);
+ folio_unlock(new_folio);
/*
* Subpages may be freed if there wasn't any mapping
--
2.43.0
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2024-02-28 16:43 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-02-27 18:05 [PATCH] mm: Use folio more widely in __split_huge_page Matthew Wilcox (Oracle)
2024-02-27 18:35 ` Sidhartha Kumar
2024-02-28 16:42 Matthew Wilcox (Oracle)
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox