* [PATCH] mm: migrate LRU_REFS_MASK bits in folio_migrate_flags
@ 2024-09-25 3:02 zhaoyang.huang
2024-09-25 9:42 ` Andrew Morton
0 siblings, 1 reply; 4+ messages in thread
From: zhaoyang.huang @ 2024-09-25 3:02 UTC (permalink / raw)
To: Andrew Morton, David Hildenbrand, Matthew Wilcox, Yu Zhao,
linux-mm, linux-kernel, Zhaoyang Huang, steve.kang
From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
The bits in LRU_REFS_MASK are not inherited during migration, which leads the
new_folio to start from tier 0. Fix this by migrating this bit range to the new folio.
Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
include/linux/mm_inline.h | 8 ++++++++
mm/migrate.c | 1 +
2 files changed, 9 insertions(+)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index f4fe593c1400..ecf07a2a8201 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -291,6 +291,12 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return true;
}
+static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
+{
+ unsigned long refs = READ_ONCE(folio->flags) & LRU_REFS_MASK;
+
+ set_mask_bits(&new_folio->flags, LRU_REFS_MASK, refs);
+}
#else /* !CONFIG_LRU_GEN */
static inline bool lru_gen_enabled(void)
@@ -313,6 +319,8 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return false;
}
+static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
+{}
#endif /* CONFIG_LRU_GEN */
static __always_inline
diff --git a/mm/migrate.c b/mm/migrate.c
index 923ea80ba744..60c97e235ae7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -618,6 +618,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
if (folio_test_idle(folio))
folio_set_idle(newfolio);
+ folio_migrate_refs(newfolio, folio);
/*
* Copy NUMA information to the new page, to prevent over-eager
* future migrations of this same page.
--
2.25.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] mm: migrate LRU_REFS_MASK bits in folio_migrate_flags
2024-09-25 3:02 [PATCH] mm: migrate LRU_REFS_MASK bits in folio_migrate_flags zhaoyang.huang
@ 2024-09-25 9:42 ` Andrew Morton
2024-09-25 11:49 ` Zhaoyang Huang
0 siblings, 1 reply; 4+ messages in thread
From: Andrew Morton @ 2024-09-25 9:42 UTC (permalink / raw)
To: zhaoyang.huang
Cc: David Hildenbrand, Matthew Wilcox, Yu Zhao, linux-mm,
linux-kernel, Zhaoyang Huang, steve.kang
On Wed, 25 Sep 2024 11:02:25 +0800 "zhaoyang.huang" <zhaoyang.huang@unisoc.com> wrote:
> From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
>
> Bits of LRU_REFS_MASK are not inherited during migration which lead to
> new_folio start from tier0. Fix this by migrate the bits domain.
I'm having trouble understanding this, sorry. Please more fully
describe the runtime effects of this flaw.
> --- a/include/linux/mm_inline.h
> +++ b/include/linux/mm_inline.h
> @@ -291,6 +291,12 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> return true;
> }
>
> +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
> +{
> + unsigned long refs = READ_ONCE(folio->flags) & LRU_REFS_MASK;
> +
> + set_mask_bits(&new_folio->flags, LRU_REFS_MASK, refs);
> +}
> #else /* !CONFIG_LRU_GEN */
>
> static inline bool lru_gen_enabled(void)
> @@ -313,6 +319,8 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> return false;
> }
>
> +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
> +{}
> #endif /* CONFIG_LRU_GEN */
>
> static __always_inline
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 923ea80ba744..60c97e235ae7 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -618,6 +618,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
> if (folio_test_idle(folio))
> folio_set_idle(newfolio);
>
> + folio_migrate_refs(newfolio, folio);
> /*
> * Copy NUMA information to the new page, to prevent over-eager
> * future migrations of this same page.
> --
> 2.25.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] mm: migrate LRU_REFS_MASK bits in folio_migrate_flags
2024-09-25 9:42 ` Andrew Morton
@ 2024-09-25 11:49 ` Zhaoyang Huang
2024-09-25 21:22 ` Yu Zhao
0 siblings, 1 reply; 4+ messages in thread
From: Zhaoyang Huang @ 2024-09-25 11:49 UTC (permalink / raw)
To: Andrew Morton
Cc: zhaoyang.huang, David Hildenbrand, Matthew Wilcox, Yu Zhao,
linux-mm, linux-kernel, steve.kang
On Wed, Sep 25, 2024 at 5:42 PM Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Wed, 25 Sep 2024 11:02:25 +0800 "zhaoyang.huang" <zhaoyang.huang@unisoc.com> wrote:
>
> > From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> >
> > Bits of LRU_REFS_MASK are not inherited during migration which lead to
> > new_folio start from tier0. Fix this by migrate the bits domain.
>
> I'm having trouble understanding this, sorry. Please more fully
> describe the runtime effects of this flaw.
Sorry for the confusion. According to my understanding, MGLRU
records how many times the VFS has accessed a page in a range of
bits (LRU_REFS_MASK) in folio->flags, and these bits have not been
migrated to the new folio so far. This commit would like to have the
new_folio inherit these bits from the old folio. Is this correct and
worth doing?
>
> > --- a/include/linux/mm_inline.h
> > +++ b/include/linux/mm_inline.h
> > @@ -291,6 +291,12 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> > return true;
> > }
> >
> > +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
> > +{
> > + unsigned long refs = READ_ONCE(folio->flags) & LRU_REFS_MASK;
> > +
> > + set_mask_bits(&new_folio->flags, LRU_REFS_MASK, refs);
> > +}
> > #else /* !CONFIG_LRU_GEN */
> >
> > static inline bool lru_gen_enabled(void)
> > @@ -313,6 +319,8 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> > return false;
> > }
> >
> > +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
> > +{}
> > #endif /* CONFIG_LRU_GEN */
> >
> > static __always_inline
> > diff --git a/mm/migrate.c b/mm/migrate.c
> > index 923ea80ba744..60c97e235ae7 100644
> > --- a/mm/migrate.c
> > +++ b/mm/migrate.c
> > @@ -618,6 +618,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
> > if (folio_test_idle(folio))
> > folio_set_idle(newfolio);
> >
> > + folio_migrate_refs(newfolio, folio);
> > /*
> > * Copy NUMA information to the new page, to prevent over-eager
> > * future migrations of this same page.
> > --
> > 2.25.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH] mm: migrate LRU_REFS_MASK bits in folio_migrate_flags
2024-09-25 11:49 ` Zhaoyang Huang
@ 2024-09-25 21:22 ` Yu Zhao
0 siblings, 0 replies; 4+ messages in thread
From: Yu Zhao @ 2024-09-25 21:22 UTC (permalink / raw)
To: Zhaoyang Huang
Cc: Andrew Morton, zhaoyang.huang, David Hildenbrand, Matthew Wilcox,
linux-mm, linux-kernel, steve.kang
On Wed, Sep 25, 2024 at 5:50 AM Zhaoyang Huang <huangzhaoyang@gmail.com> wrote:
>
> On Wed, Sep 25, 2024 at 5:42 PM Andrew Morton <akpm@linux-foundation.org> wrote:
> >
> > On Wed, 25 Sep 2024 11:02:25 +0800 "zhaoyang.huang" <zhaoyang.huang@unisoc.com> wrote:
> >
> > > From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
> > >
> > > Bits of LRU_REFS_MASK are not inherited during migration which lead to
> > > new_folio start from tier0. Fix this by migrate the bits domain.
> >
> > I'm having trouble understanding this, sorry. Please more fully
> > describe the runtime effects of this flaw.
> Sorry for bringing confusion. According to my understanding, MGLRU
> records how many times that vfs access this page in a range of bits
> domain(LRU_REFS_MASK) in folio->flags which are not being migrated to
> new_folios so far. This commit would like to do so to have the
> new_folio inherit these bits from the old folio. Is it right and
> worthy to do?
Correct. And this was not done because
1. While LRU_REFS_MASK is useful, it's not important, and
2. More important information, e.g., generations, is lost during migration.
So I'm not sure how much this patch can help. But if you think it does, why not.
> > > --- a/include/linux/mm_inline.h
> > > +++ b/include/linux/mm_inline.h
> > > @@ -291,6 +291,12 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> > > return true;
> > > }
> > >
> > > +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
Nitpick: folio_migrate_refs(struct folio *new, struct folio *old)
> > > +{
> > > + unsigned long refs = READ_ONCE(folio->flags) & LRU_REFS_MASK;
> > > +
> > > + set_mask_bits(&new_folio->flags, LRU_REFS_MASK, refs);
> > > +}
> > > #else /* !CONFIG_LRU_GEN */
> > >
> > > static inline bool lru_gen_enabled(void)
> > > @@ -313,6 +319,8 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
> > > return false;
> > > }
> > >
> > > +static inline void folio_migrate_refs(struct folio *new_folio, struct folio *folio)
Ditto.
> > > +{}
A line break between "{" and "}".
> > > #endif /* CONFIG_LRU_GEN */
> > >
> > > static __always_inline
> > > diff --git a/mm/migrate.c b/mm/migrate.c
> > > index 923ea80ba744..60c97e235ae7 100644
> > > --- a/mm/migrate.c
> > > +++ b/mm/migrate.c
> > > @@ -618,6 +618,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
> > > if (folio_test_idle(folio))
> > > folio_set_idle(newfolio);
> > >
> > > + folio_migrate_refs(newfolio, folio);
> > > /*
> > > * Copy NUMA information to the new page, to prevent over-eager
> > > * future migrations of this same page.
> > > --
> > > 2.25.1
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2024-09-25 21:23 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-09-25 3:02 [PATCH] mm: migrate LRU_REFS_MASK bits in folio_migrate_flags zhaoyang.huang
2024-09-25 9:42 ` Andrew Morton
2024-09-25 11:49 ` Zhaoyang Huang
2024-09-25 21:22 ` Yu Zhao
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox