* [PATCH v11 1/3] mm/khugepaged: recover from poisoned anonymous memory
2023-03-27 21:15 [PATCH v11 0/3] Memory poison recovery in khugepaged collapsing Jiaqi Yan
@ 2023-03-27 21:15 ` Jiaqi Yan
2023-03-28 15:58 ` Yang Shi
2023-03-27 21:15 ` [PATCH v11 2/3] mm/hwpoison: introduce copy_mc_highpage Jiaqi Yan
2023-03-27 21:15 ` [PATCH v11 3/3] mm/khugepaged: recover from poisoned file-backed memory Jiaqi Yan
2 siblings, 1 reply; 9+ messages in thread
From: Jiaqi Yan @ 2023-03-27 21:15 UTC (permalink / raw)
To: kirill.shutemov, kirill, shy828301, tongtiangen, tony.luck
Cc: naoya.horiguchi, linmiaohe, jiaqiyan, linux-mm, akpm, osalvador,
wangkefeng.wang, stevensd, hughd
Make __collapse_huge_page_copy return whether copying anonymous pages
succeeded, and make collapse_huge_page handle the return status.

Break the existing PTE scan loop into two for-loops. The first loop
copies source pages into the target huge page, and can fail gracefully
when it runs into memory errors in the source pages. If copying all
pages succeeds, the second loop releases and cleans up these normal
pages. Otherwise, the second loop rolls back the page table and page
states by:
- re-establishing the original PTEs-to-PMD connection.
- releasing source pages back to their LRU list.

Tested manually:
0. Enable khugepaged on the system under test.
1. Start a two-thread application. Each thread allocates a non-huge
   anonymous memory buffer.
2. Pick 4 random buffer locations (2 in each thread) and inject
   uncorrectable memory errors at the corresponding physical addresses.
3. Signal both threads to make their memory buffers collapsible, i.e.
   by calling madvise(MADV_HUGEPAGE).
4. Wait and check the kernel log: khugepaged recovers from the poisoned
   pages and skips collapsing them.
5. Signal both threads to inspect their buffer contents and verify that
   there is no data corruption.
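
For reference, a minimal single-threaded userspace sketch of steps 1-5
above. It assumes software poison injection via madvise(MADV_HWPOISON)
(needs CAP_SYS_ADMIN and CONFIG_MEMORY_FAILURE) as a stand-in for the
physical-address injection used in the actual test; buffer size and
poison offsets are illustrative:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define BUF_SIZE (4UL << 20)	/* two PMD-sized regions on x86_64 */

int main(void)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	/* Arbitrary page offsets; must stay inside the buffer. */
	long poisoned[2] = { 13 * pgsz, 777 * pgsz };
	char *buf;
	long off;
	int i;

	/* 1. Allocate a non-huge anonymous buffer and fault it in. */
	buf = mmap(NULL, BUF_SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0xa5, BUF_SIZE);

	/* 2. Poison two pages (software injection as a stand-in). */
	for (i = 0; i < 2; i++)
		if (madvise(buf + poisoned[i], pgsz, MADV_HWPOISON))
			perror("MADV_HWPOISON");

	/* 3. Make the buffer collapsible. */
	madvise(buf, BUF_SIZE, MADV_HUGEPAGE);

	/* 4. Give khugepaged time to scan; check dmesg by hand. */
	sleep(60);

	/* 5. Verify every page we did not poison; touching a poisoned
	 * page would raise SIGBUS, so skip those. */
	for (off = 0; off < (long)BUF_SIZE; off += pgsz)
		if (off != poisoned[0] && off != poisoned[1] &&
		    buf[off] != (char)0xa5)
			fprintf(stderr, "corruption at %#lx\n", off);
	return 0;
}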
Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
---
include/trace/events/huge_memory.h | 3 +-
mm/khugepaged.c | 114 +++++++++++++++++++++++++----
2 files changed, 103 insertions(+), 14 deletions(-)
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 3e6fb05852f9a..46cce509957ba 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -36,7 +36,8 @@
EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \
EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \
EM( SCAN_TRUNCATED, "truncated") \
- EMe(SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
+ EM( SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
+ EMe(SCAN_COPY_MC, "copy_poisoned_page") \
#undef EM
#undef EMe
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index bee7fd7db380a..bef68286345c8 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -55,6 +55,7 @@ enum scan_result {
SCAN_CGROUP_CHARGE_FAIL,
SCAN_TRUNCATED,
SCAN_PAGE_HAS_PRIVATE,
+ SCAN_COPY_MC,
};
#define CREATE_TRACE_POINTS
@@ -681,20 +682,22 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
return result;
}
-static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
- struct vm_area_struct *vma,
- unsigned long address,
- spinlock_t *ptl,
- struct list_head *compound_pagelist)
+static void __collapse_huge_page_copy_succeeded(pte_t *pte,
+ pmd_t *pmd,
+ struct vm_area_struct *vma,
+ unsigned long address,
+ spinlock_t *ptl,
+ struct list_head *compound_pagelist)
{
- struct page *src_page, *tmp;
+ struct page *src_page;
+ struct page *tmp;
pte_t *_pte;
- for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
- _pte++, page++, address += PAGE_SIZE) {
- pte_t pteval = *_pte;
+ pte_t pteval;
+ for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
+ _pte++, address += PAGE_SIZE) {
+ pteval = *_pte;
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
- clear_user_highpage(page, address);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
if (is_zero_pfn(pte_pfn(pteval))) {
/*
@@ -706,7 +709,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
}
} else {
src_page = pte_page(pteval);
- copy_user_highpage(page, src_page, address, vma);
if (!PageCompound(src_page))
release_pte_page(src_page);
/*
@@ -733,6 +735,88 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
}
}
+static void __collapse_huge_page_copy_failed(pte_t *pte,
+ pmd_t *pmd,
+ pmd_t orig_pmd,
+ struct vm_area_struct *vma,
+ unsigned long address,
+ struct list_head *compound_pagelist)
+{
+ spinlock_t *pmd_ptl;
+
+ /*
+ * Re-establish the PMD to point to the original page table
+ * entry. Restoring PMD needs to be done prior to releasing
+ * pages. Since pages are still isolated and locked here,
+ * acquiring anon_vma_lock_write is unnecessary.
+ */
+ pmd_ptl = pmd_lock(vma->vm_mm, pmd);
+ pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
+ spin_unlock(pmd_ptl);
+ /*
+ * Release both raw and compound pages isolated
+ * in __collapse_huge_page_isolate.
+ */
+ release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
+}
+
+/*
+ * __collapse_huge_page_copy - attempts to copy memory contents from raw
+ * pages to a hugepage. Cleans up the raw pages if copying succeeds;
+ * otherwise restores the original page table and releases isolated raw pages.
+ * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
+ *
+ * @pte: starting of the PTEs to copy from
+ * @page: the new hugepage to copy contents to
+ * @pmd: pointer to the new hugepage's PMD
+ * @orig_pmd: the original raw pages' PMD
+ * @vma: the original raw pages' virtual memory area
+ * @address: starting address to copy
+ * @pte_ptl: lock on raw pages' PTEs
+ * @compound_pagelist: list that stores compound pages
+ */
+static int __collapse_huge_page_copy(pte_t *pte,
+ struct page *page,
+ pmd_t *pmd,
+ pmd_t orig_pmd,
+ struct vm_area_struct *vma,
+ unsigned long address,
+ spinlock_t *pte_ptl,
+ struct list_head *compound_pagelist)
+{
+ struct page *src_page;
+ pte_t *_pte;
+ pte_t pteval;
+ unsigned long _address;
+ int result = SCAN_SUCCEED;
+
+ /*
+ * Copying pages' contents is subject to memory poison at any iteration.
+ */
+ for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
+ _pte++, page++, _address += PAGE_SIZE) {
+ pteval = *_pte;
+ if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+ clear_user_highpage(page, _address);
+ continue;
+ }
+ src_page = pte_page(pteval);
+ if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
+ result = SCAN_COPY_MC;
+ break;
+ }
+ }
+
+ if (likely(result == SCAN_SUCCEED))
+ __collapse_huge_page_copy_succeeded(pte, pmd, vma, address,
+ pte_ptl, compound_pagelist);
+ else
+ __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
+ address, compound_pagelist);
+
+ return result;
+}
+
static void khugepaged_alloc_sleep(void)
{
DEFINE_WAIT(wait);
@@ -1106,9 +1190,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
*/
anon_vma_unlock_write(vma->anon_vma);
- __collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
- &compound_pagelist);
+ result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
+ vma, address, pte_ptl,
+ &compound_pagelist);
pte_unmap(pte);
+ if (unlikely(result != SCAN_SUCCEED))
+ goto out_up_write;
+
/*
* spin_lock() below is not the equivalent of smp_wmb(), but
* the smp_wmb() inside __SetPageUptodate() can be reused to
--
2.40.0.348.gf938b09366-goog
* Re: [PATCH v11 1/3] mm/khugepaged: recover from poisoned anonymous memory
2023-03-27 21:15 ` [PATCH v11 1/3] mm/khugepaged: recover from poisoned anonymous memory Jiaqi Yan
@ 2023-03-28 15:58 ` Yang Shi
2023-03-29 0:13 ` Jiaqi Yan
0 siblings, 1 reply; 9+ messages in thread
From: Yang Shi @ 2023-03-28 15:58 UTC (permalink / raw)
To: Jiaqi Yan
Cc: kirill.shutemov, kirill, tongtiangen, tony.luck, naoya.horiguchi,
linmiaohe, linux-mm, akpm, osalvador, wangkefeng.wang, stevensd,
hughd
On Mon, Mar 27, 2023 at 2:15 PM Jiaqi Yan <jiaqiyan@google.com> wrote:
>
> [...]
>
> Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Just a nit below:
> [...]
> +static void __collapse_huge_page_copy_failed(pte_t *pte,
> + pmd_t *pmd,
> + pmd_t orig_pmd,
> + struct vm_area_struct *vma,
> + unsigned long address,
It looks like "address" is not used at all. It could be removed.
* Re: [PATCH v11 1/3] mm/khugepaged: recover from poisoned anonymous memory
2023-03-28 15:58 ` Yang Shi
@ 2023-03-29 0:13 ` Jiaqi Yan
0 siblings, 0 replies; 9+ messages in thread
From: Jiaqi Yan @ 2023-03-29 0:13 UTC (permalink / raw)
To: Yang Shi
Cc: kirill.shutemov, kirill, tongtiangen, tony.luck, naoya.horiguchi,
linmiaohe, linux-mm, akpm, osalvador, wangkefeng.wang, stevensd,
hughd
On Tue, Mar 28, 2023 at 8:59 AM Yang Shi <shy828301@gmail.com> wrote:
>
> On Mon, Mar 27, 2023 at 2:15 PM Jiaqi Yan <jiaqiyan@google.com> wrote:
> >
> > [...]
> >
> > Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
>
> Reviewed-by: Yang Shi <shy828301@gmail.com>
>
> Just a nit below:
>
> > [...]
> > +static void __collapse_huge_page_copy_failed(pte_t *pte,
> > + pmd_t *pmd,
> > + pmd_t orig_pmd,
> > + struct vm_area_struct *vma,
> > + unsigned long address,
>
> It looks like "address" is not used at all. It could be removed.
Thanks for catching this!
pmd in __collapse_huge_page_copy_succeeded can also be dropped.
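
For reference, the slimmed-down signatures implied by these two
observations would look roughly like this (a sketch of follow-up
material, not part of this v11 patch):

static void __collapse_huge_page_copy_succeeded(pte_t *pte,
				struct vm_area_struct *vma,
				unsigned long address,
				spinlock_t *ptl,
				struct list_head *compound_pagelist);

static void __collapse_huge_page_copy_failed(pte_t *pte,
				pmd_t *pmd,
				pmd_t orig_pmd,
				struct vm_area_struct *vma,
				struct list_head *compound_pagelist);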
* [PATCH v11 2/3] mm/hwpoison: introduce copy_mc_highpage
2023-03-27 21:15 [PATCH v11 0/3] Memory poison recovery in khugepaged collapsing Jiaqi Yan
2023-03-27 21:15 ` [PATCH v11 1/3] mm/khugepaged: recover from poisoned anonymous memory Jiaqi Yan
@ 2023-03-27 21:15 ` Jiaqi Yan
2023-03-28 16:00 ` Yang Shi
2023-03-27 21:15 ` [PATCH v11 3/3] mm/khugepaged: recover from poisoned file-backed memory Jiaqi Yan
2 siblings, 1 reply; 9+ messages in thread
From: Jiaqi Yan @ 2023-03-27 21:15 UTC (permalink / raw)
To: kirill.shutemov, kirill, shy828301, tongtiangen, tony.luck
Cc: naoya.horiguchi, linmiaohe, jiaqiyan, linux-mm, akpm, osalvador,
wangkefeng.wang, stevensd, hughd
Similar to how copy_mc_user_highpage is implemented as the #MC-handled
version of copy_user_highpage on architectures that support #MC,
introduce the #MC-handled version of copy_highpage.

This helper is immediately useful to khugepaged, which wants to copy
file-backed memory pages while tolerating #MC.
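
A sketch of the intended calling convention, mirroring how patch 3/3
consumes the helper; the surrounding names (dst, src, result) are
illustrative, not part of this patch. A nonzero return means a #MC was
consumed and the destination contents are incomplete:

	if (copy_mc_highpage(dst, src) > 0) {
		/* #MC hit while reading src; dst is unusable. */
		result = SCAN_COPY_MC;
		break;
	}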
Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
---
include/linux/highmem.h | 54 +++++++++++++++++++++++++++++++----------
1 file changed, 41 insertions(+), 13 deletions(-)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 9c7cdaa3de8cd..4de1dbcd3ef64 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -315,7 +315,29 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
#endif
+#ifndef __HAVE_ARCH_COPY_HIGHPAGE
+
+static inline void copy_highpage(struct page *to, struct page *from)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ copy_page(vto, vfrom);
+ kmsan_copy_page_meta(to, from);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+}
+
+#endif
+
#ifdef copy_mc_to_kernel
+/*
+ * If architecture supports machine check exception handling, define the
+ * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
+ * page with #MC in source page (@from) handled, and return the number
+ * of bytes not copied if there was a #MC, otherwise 0 for success.
+ */
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
@@ -332,29 +354,35 @@ static inline int copy_mc_user_highpage(struct page *to, struct page *from,
return ret;
}
-#else
-static inline int copy_mc_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma)
-{
- copy_user_highpage(to, from, vaddr, vma);
- return 0;
-}
-#endif
-#ifndef __HAVE_ARCH_COPY_HIGHPAGE
-
-static inline void copy_highpage(struct page *to, struct page *from)
+static inline int copy_mc_highpage(struct page *to, struct page *from)
{
+ unsigned long ret;
char *vfrom, *vto;
vfrom = kmap_local_page(from);
vto = kmap_local_page(to);
- copy_page(vto, vfrom);
- kmsan_copy_page_meta(to, from);
+ ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
+ if (!ret)
+ kmsan_copy_page_meta(to, from);
kunmap_local(vto);
kunmap_local(vfrom);
+
+ return ret;
+}
+#else
+static inline int copy_mc_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ copy_user_highpage(to, from, vaddr, vma);
+ return 0;
}
+static inline int copy_mc_highpage(struct page *to, struct page *from)
+{
+ copy_highpage(to, from);
+ return 0;
+}
#endif
static inline void memcpy_page(struct page *dst_page, size_t dst_off,
--
2.40.0.348.gf938b09366-goog
* Re: [PATCH v11 2/3] mm/hwpoison: introduce copy_mc_highpage
2023-03-27 21:15 ` [PATCH v11 2/3] mm/hwpoison: introduce copy_mc_highpage Jiaqi Yan
@ 2023-03-28 16:00 ` Yang Shi
0 siblings, 0 replies; 9+ messages in thread
From: Yang Shi @ 2023-03-28 16:00 UTC (permalink / raw)
To: Jiaqi Yan
Cc: kirill.shutemov, kirill, tongtiangen, tony.luck, naoya.horiguchi,
linmiaohe, linux-mm, akpm, osalvador, wangkefeng.wang, stevensd,
hughd
On Mon, Mar 27, 2023 at 2:15 PM Jiaqi Yan <jiaqiyan@google.com> wrote:
>
> [...]
>
> Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
BTW, you could preserve the reviewed/acked tags as long as there is no
significant change between the two versions.
* [PATCH v11 3/3] mm/khugepaged: recover from poisoned file-backed memory
2023-03-27 21:15 [PATCH v11 0/3] Memory poison recovery in khugepaged collapsing Jiaqi Yan
2023-03-27 21:15 ` [PATCH v11 1/3] mm/khugepaged: recover from poisoned anonymous memory Jiaqi Yan
2023-03-27 21:15 ` [PATCH v11 2/3] mm/hwpoison: introduce copy_mc_highpage Jiaqi Yan
@ 2023-03-27 21:15 ` Jiaqi Yan
2023-03-28 16:01 ` Yang Shi
2 siblings, 1 reply; 9+ messages in thread
From: Jiaqi Yan @ 2023-03-27 21:15 UTC (permalink / raw)
To: kirill.shutemov, kirill, shy828301, tongtiangen, tony.luck
Cc: naoya.horiguchi, linmiaohe, jiaqiyan, linux-mm, akpm, osalvador,
wangkefeng.wang, stevensd, hughd
Make collapse_file roll back when copying pages fails. More concretely:
- extract the copying operations into a separate loop
- postpone the updates of nr_none until both scanning and copying
  succeed
- postpone joining small xarray entries until both scanning and copying
  succeed
- postpone the updates to NR_XXX_THPS until both scanning and copying
  succeed
- for a non-SHMEM file, roll back filemap_nr_thps_inc if scanning
  succeeded but copying failed

Tested manually:
0. Enable khugepaged on the system under test. Mount tmpfs at
   /mnt/ramdisk.
1. Start a two-thread application. Each thread allocates a non-huge
   memory buffer from /mnt/ramdisk.
2. Pick 4 random buffer locations (2 in each thread) and inject
   uncorrectable memory errors at the corresponding physical addresses.
3. Signal both threads to make their memory buffers collapsible, i.e.
   by calling madvise(MADV_HUGEPAGE).
4. Wait and then check the kernel log: khugepaged recovers from the
   poisoned pages by skipping them.
5. Signal both threads to inspect their buffer contents and verify that
   there is no data corruption.
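
The only structural difference from the anonymous-memory test in patch
1/3 is how the buffer in step 1 is obtained; a sketch (file path and
BUF_SIZE illustrative, assuming <fcntl.h>, <unistd.h> and
<sys/mman.h>):

	/* Back the buffer with a tmpfs file instead of anonymous
	 * memory; a shared file mapping makes it file-backed. */
	int fd = open("/mnt/ramdisk/buf", O_CREAT | O_RDWR, 0600);

	if (fd < 0 || ftruncate(fd, BUF_SIZE))
		return 1;
	buf = mmap(NULL, BUF_SIZE, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, 0);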
Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
---
mm/khugepaged.c | 86 +++++++++++++++++++++++++++++++------------------
1 file changed, 54 insertions(+), 32 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index bef68286345c8..38c1655ce0a9e 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1874,6 +1874,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
{
struct address_space *mapping = file->f_mapping;
struct page *hpage;
+ struct page *page;
+ struct page *tmp;
+ struct folio *folio;
pgoff_t index = 0, end = start + HPAGE_PMD_NR;
LIST_HEAD(pagelist);
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1918,8 +1921,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
xas_set(&xas, start);
for (index = start; index < end; index++) {
- struct page *page = xas_next(&xas);
- struct folio *folio;
+ page = xas_next(&xas);
VM_BUG_ON(index != xas.xa_index);
if (is_shmem) {
@@ -2099,12 +2101,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
put_page(page);
goto xa_unlocked;
}
- nr = thp_nr_pages(hpage);
- if (is_shmem)
- __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
- else {
- __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+ if (!is_shmem) {
filemap_nr_thps_inc(mapping);
/*
* Paired with smp_mb() in do_dentry_open() to ensure
@@ -2115,21 +2113,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
smp_mb();
if (inode_is_open_for_write(mapping->host)) {
result = SCAN_FAIL;
- __mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
filemap_nr_thps_dec(mapping);
- goto xa_locked;
}
}
-
- if (nr_none) {
- __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
- /* nr_none is always 0 for non-shmem. */
- __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
- }
-
- /* Join all the small entries into a single multi-index entry */
- xas_set_order(&xas, start, HPAGE_PMD_ORDER);
- xas_store(&xas, hpage);
xa_locked:
xas_unlock_irq(&xas);
xa_unlocked:
@@ -2142,21 +2128,36 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
try_to_unmap_flush();
if (result == SCAN_SUCCEED) {
- struct page *page, *tmp;
- struct folio *folio;
-
/*
* Replacing old pages with new one has succeeded, now we
- * need to copy the content and free the old pages.
+ * attempt to copy the contents.
*/
index = start;
- list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+ list_for_each_entry(page, &pagelist, lru) {
while (index < page->index) {
clear_highpage(hpage + (index % HPAGE_PMD_NR));
index++;
}
- copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
- page);
+ if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR),
+ page) > 0) {
+ result = SCAN_COPY_MC;
+ break;
+ }
+ index++;
+ }
+ while (result == SCAN_SUCCEED && index < end) {
+ clear_highpage(hpage + (index % HPAGE_PMD_NR));
+ index++;
+ }
+ }
+
+ nr = thp_nr_pages(hpage);
+ if (result == SCAN_SUCCEED) {
+ /*
+ * Copying old pages to huge one has succeeded, now we
+ * need to free the old pages.
+ */
+ list_for_each_entry_safe(page, tmp, &pagelist, lru) {
list_del(&page->lru);
page->mapping = NULL;
page_ref_unfreeze(page, 1);
@@ -2164,12 +2165,23 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
ClearPageUnevictable(page);
unlock_page(page);
put_page(page);
- index++;
}
- while (index < end) {
- clear_highpage(hpage + (index % HPAGE_PMD_NR));
- index++;
+
+ xas_lock_irq(&xas);
+ if (is_shmem)
+ __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
+ else
+ __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
+
+ if (nr_none) {
+ __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
+ /* nr_none is always 0 for non-shmem. */
+ __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
}
+ /* Join all the small entries into a single multi-index entry. */
+ xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+ xas_store(&xas, hpage);
+ xas_unlock_irq(&xas);
folio = page_folio(hpage);
folio_mark_uptodate(folio);
@@ -2187,8 +2199,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
unlock_page(hpage);
hpage = NULL;
} else {
- struct page *page;
-
/* Something went wrong: roll back page cache changes */
xas_lock_irq(&xas);
if (nr_none) {
@@ -2222,6 +2232,18 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
xas_lock_irq(&xas);
}
VM_BUG_ON(nr_none);
+ /*
+ * Undo the updates of filemap_nr_thps_inc for non-SHMEM
+ * file only. This undo is not needed unless failure is
+ * due to SCAN_COPY_MC.
+ *
+ * Paired with smp_mb() in do_dentry_open() to ensure the
+ * update to nr_thps is visible.
+ */
+ smp_mb();
+ if (!is_shmem && result == SCAN_COPY_MC)
+ filemap_nr_thps_dec(mapping);
+
xas_unlock_irq(&xas);
hpage->mapping = NULL;
--
2.40.0.348.gf938b09366-goog
* Re: [PATCH v11 3/3] mm/khugepaged: recover from poisoned file-backed memory
2023-03-27 21:15 ` [PATCH v11 3/3] mm/khugepaged: recover from poisoned file-backed memory Jiaqi Yan
@ 2023-03-28 16:01 ` Yang Shi
2023-03-29 0:12 ` Jiaqi Yan
0 siblings, 1 reply; 9+ messages in thread
From: Yang Shi @ 2023-03-28 16:01 UTC (permalink / raw)
To: Jiaqi Yan
Cc: kirill.shutemov, kirill, tongtiangen, tony.luck, naoya.horiguchi,
linmiaohe, linux-mm, akpm, osalvador, wangkefeng.wang, stevensd,
hughd
On Mon, Mar 27, 2023 at 2:16 PM Jiaqi Yan <jiaqiyan@google.com> wrote:
>
> [...]
>
> Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
A nit below:
> [...]
> + /*
> + * Undo the updates of filemap_nr_thps_inc for non-SHMEM
> + * file only. This undo is not needed unless failure is
> + * due to SCAN_COPY_MC.
> + *
> + * Paired with smp_mb() in do_dentry_open() to ensure the
> + * update to nr_thps is visible.
> + */
> + smp_mb();
> + if (!is_shmem && result == SCAN_COPY_MC)
> + filemap_nr_thps_dec(mapping);
I think the memory barrier should be after the dec.
* Re: [PATCH v11 3/3] mm/khugepaged: recover from poisoned file-backed memory
2023-03-28 16:01 ` Yang Shi
@ 2023-03-29 0:12 ` Jiaqi Yan
0 siblings, 0 replies; 9+ messages in thread
From: Jiaqi Yan @ 2023-03-29 0:12 UTC (permalink / raw)
To: Yang Shi
Cc: kirill.shutemov, kirill, tongtiangen, tony.luck, naoya.horiguchi,
linmiaohe, linux-mm, akpm, osalvador, wangkefeng.wang, stevensd,
hughd
On Tue, Mar 28, 2023 at 9:02 AM Yang Shi <shy828301@gmail.com> wrote:
>
> On Mon, Mar 27, 2023 at 2:16 PM Jiaqi Yan <jiaqiyan@google.com> wrote:
> >
> > [...]
> >
> > Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
>
> Reviewed-by: Yang Shi <shy828301@gmail.com>
>
> A nit below:
>
> > [...]
> > VM_BUG_ON(nr_none);
> > + /*
> > + * Undo the updates of filemap_nr_thps_inc for non-SHMEM
> > + * file only. This undo is not needed unless failure is
> > + * due to SCAN_COPY_MC.
> > + *
> > + * Paired with smp_mb() in do_dentry_open() to ensure the
> > + * update to nr_thps is visible.
> > + */
> > + smp_mb();
> > + if (!is_shmem && result == SCAN_COPY_MC)
> > + filemap_nr_thps_dec(mapping);
>
> I think the memory barrier should be after the dec.
Ah, I will move smp_mb() into the if block and put it after
filemap_nr_thps_dec.
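
That is, a sketch of the reordered rollback for the next version:

	if (!is_shmem && result == SCAN_COPY_MC) {
		filemap_nr_thps_dec(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to ensure
		 * the update to nr_thps is visible.
		 */
		smp_mb();
	}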