* Re: [PATCH 1/2] mm/numa: no task_numa_fault() call if page table is changed
From: Zi Yan @ 2024-08-08 15:12 UTC
To: Kefeng Wang, Andrew Morton
Cc: David Hildenbrand, Baolin Wang, linux-mm, Huang, Ying,
linux-kernel, stable
On 8 Aug 2024, at 10:57, Zi Yan wrote:
> On 8 Aug 2024, at 10:36, Kefeng Wang wrote:
>
>> On 2024/8/8 22:21, Zi Yan wrote:
>>> On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
>>>
>>>> On 08.08.24 16:13, Zi Yan wrote:
>>>>> On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
>>>>>
>>>>>> On 08.08.24 05:19, Baolin Wang wrote:
>>>>>>>
>>>>>>>
>> ...
>>>>>> Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
>>>>>>
>>>>>> Then, just copy the 3 LOC.
>>>>>>
>>>>>> For mm/memory.c that would be:
>>>>>>
>>>>>> diff --git a/mm/memory.c b/mm/memory.c
>>>>>> index 67496dc5064f..410ba50ca746 100644
>>>>>> --- a/mm/memory.c
>>>>>> +++ b/mm/memory.c
>>>>>> @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>>> if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
>>>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>>> - goto out;
>>>>>> + return 0;
>>>>>> }
>>>>>> pte = pte_modify(old_pte, vma->vm_page_prot);
>>>>>> @@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>>> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>>>>>> vmf->address, &vmf->ptl);
>>>>>> if (unlikely(!vmf->pte))
>>>>>> - goto out;
>>>>>> + return 0;
>>>>>> if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
>>>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>>> - goto out;
>>>>>> + return 0;
>>>>>> }
>>>>>> goto out_map;
>>>>>> }
>>>>>> -out:
>>>>>> if (nid != NUMA_NO_NODE)
>>>>>> task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>>>>> return 0;
>>
>> Maybe drop this part too,
>>
>> diff --git a/mm/memory.c b/mm/memory.c
>> index 410ba50ca746..07343c1469e0 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>> if (!migrate_misplaced_folio(folio, vma, target_nid)) {
>> nid = target_nid;
>> flags |= TNF_MIGRATED;
>> + goto out;
>> } else {
>> flags |= TNF_MIGRATE_FAIL;
>> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>> @@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>> return 0;
>> }
>> - goto out_map;
>> }
>>
>> - if (nid != NUMA_NO_NODE)
>> - task_numa_fault(last_cpupid, nid, nr_pages, flags);
>> - return 0;
>> out_map:
>> /*
>> * Make it present again, depending on how arch implements
>> @@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>> numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
>> writable);
>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>> +out:
>> if (nid != NUMA_NO_NODE)
>> task_numa_fault(last_cpupid, nid, nr_pages, flags);
>> return 0;
>
> Even better. Thanks. The updated fixup is attached.
Updated the fixup again, this time to fix a typo that caused a compilation failure, in case Andrew wants to fold it in instead of a resend.
Best Regards,
Yan, Zi
[-- Attachment #1.2: 0001-fixup-mm-numa-no-task_numa_fault-call-if-page-table-.patch --]
From b42f0e90ed0b4117139cf66de2d6f83e3d8bcf8d Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@nvidia.com>
Date: Thu, 8 Aug 2024 10:18:42 -0400
Subject: [PATCH] fixup! mm/numa: no task_numa_fault() call if page table is
changed
---
mm/huge_memory.c | 18 +++++++-----------
mm/memory.c | 19 ++++++++-----------
2 files changed, 15 insertions(+), 22 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a3c018f2b554..4e4364a17e6d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1681,7 +1681,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
spin_unlock(vmf->ptl);
- goto out;
+ return 0;
}
pmd = pmd_modify(oldpmd, vma->vm_page_prot);
@@ -1724,23 +1724,16 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
if (!migrate_misplaced_folio(folio, vma, target_nid)) {
flags |= TNF_MIGRATED;
nid = target_nid;
+ goto out;
} else {
flags |= TNF_MIGRATE_FAIL;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
spin_unlock(vmf->ptl);
- goto out;
+ return 0;
}
- goto out_map;
}
-count_fault:
- if (nid != NUMA_NO_NODE)
- task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
-
-out:
- return 0;
-
out_map:
/* Restore the PMD */
pmd = pmd_modify(oldpmd, vma->vm_page_prot);
@@ -1750,7 +1743,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
spin_unlock(vmf->ptl);
- goto count_fault;
+out:
+ if (nid != NUMA_NO_NODE)
+ task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
+ return 0;
}
/*
diff --git a/mm/memory.c b/mm/memory.c
index 503d493263df..d9b1dff9dc57 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
- goto out;
+ return 0;
}
pte = pte_modify(old_pte, vma->vm_page_prot);
@@ -5523,24 +5523,18 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (!migrate_misplaced_folio(folio, vma, target_nid)) {
nid = target_nid;
flags |= TNF_MIGRATED;
+ goto out;
} else {
flags |= TNF_MIGRATE_FAIL;
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
if (unlikely(!vmf->pte))
- goto out;
+ return 0;
if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
- goto out;
+ return 0;
}
- goto out_map;
}
-
-count_fault:
- if (nid != NUMA_NO_NODE)
- task_numa_fault(last_cpupid, nid, nr_pages, flags);
-out:
- return 0;
out_map:
/*
* Make it present again, depending on how arch implements
@@ -5553,7 +5547,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
writable);
pte_unmap_unlock(vmf->pte, vmf->ptl);
- goto count_fault;
+out:
+ if (nid != NUMA_NO_NODE)
+ task_numa_fault(last_cpupid, nid, nr_pages, flags);
+ return 0;
}
static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
--
2.43.0
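The control-flow shape that the fixup above converges on can be reduced to a
small self-contained model (plain C with stubbed-out, hypothetical names; not
the kernel code itself): racy cases "return 0" immediately without any
accounting, a successful migration jumps straight to the shared accounting
tail, and a failed migration falls through the remap path into the same tail.

    #include <stdio.h>
    #include <stdbool.h>

    #define NO_NODE (-1)                    /* stands in for NUMA_NO_NODE */

    /* stands in for task_numa_fault() */
    static void account_fault(int nid)
    {
            printf("task_numa_fault() on nid %d\n", nid);
    }

    static int handle_fault(bool pte_changed, int folio_nid, bool migrated,
                            int target_nid)
    {
            int nid = NO_NODE;

            if (pte_changed)
                    return 0;       /* page table changed under us: no accounting */

            nid = folio_nid;
            if (migrated) {
                    nid = target_nid;
                    goto out;       /* migration succeeded: skip the remap */
            }

            /* migration failed (or never attempted): remap, then fall into tail */
            printf("rebuild the mapping\n");
    out:
            if (nid != NO_NODE)
                    account_fault(nid);
            return 0;
    }

    int main(void)
    {
            handle_fault(true, 0, false, 1);        /* raced: accounts nothing */
            handle_fault(false, 0, true, 1);        /* migrated: accounts nid 1 */
            handle_fault(false, 0, false, 1);       /* failed: remaps, accounts nid 0 */
            return 0;
    }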
* Re: [PATCH 2/2] mm/migrate: move common code to numa_migrate_check (was numa_migrate_prep)
From: Zi Yan @ 2024-08-08 23:13 UTC
To: Huang, Ying
Cc: linux-mm, Andrew Morton, David Hildenbrand, Baolin Wang,
Kefeng Wang, linux-kernel
On 8 Aug 2024, at 10:59, Zi Yan wrote:
> On 8 Aug 2024, at 0:14, Huang, Ying wrote:
>
>> Zi Yan <ziy@nvidia.com> writes:
>>
>>> do_numa_page() and do_huge_pmd_numa_page() share a lot of common code. To
>>> reduce redundancy, move common code to numa_migrate_prep() and rename
>>> the function to numa_migrate_check() to reflect its functionality.
>>>
>>> Now do_huge_pmd_numa_page() also checks shared folios to set TNF_SHARED
>>> flag.
>>>
>>> Suggested-by: David Hildenbrand <david@redhat.com>
>>> Signed-off-by: Zi Yan <ziy@nvidia.com>
>>> ---
>>> mm/huge_memory.c | 14 ++-------
>>> mm/internal.h | 5 ++--
>>> mm/memory.c | 76 ++++++++++++++++++++++++------------------------
>>> 3 files changed, 44 insertions(+), 51 deletions(-)
>>>
>>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>>> index a3c018f2b554..9b312cae6775 100644
>>> --- a/mm/huge_memory.c
>>> +++ b/mm/huge_memory.c
>>> @@ -1699,18 +1699,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
>>> if (!folio)
>>> goto out_map;
>>>
>>> - /* See similar comment in do_numa_page for explanation */
>>> - if (!writable)
>>> - flags |= TNF_NO_GROUP;
>>> -
>>> nid = folio_nid(folio);
>>
>> NITPICK: It appears that we can remove the "nid" local variable.
>
> I thought about it, but
>
> 1. if folio is NULL from vm_normal_folio_pmd(), nid remains NUMA_NO_NODE,
> 2. if migration succeeds, nid is changed to target_nid,
> 3. if migration fails, nid is the folio node id,
>
> all three are consumed by if (nid != NUMA_NO_NODE) before task_numa_fault().
>
> I will give it a try in next version and see if I can remove it.
>
>>
>>> - /*
>>> - * For memory tiering mode, cpupid of slow memory page is used
>>> - * to record page access time. So use default value.
>>> - */
>>> - if (!folio_use_access_time(folio))
>>> - last_cpupid = folio_last_cpupid(folio);
>>> - target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
>>> +
>>> + target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
>>> + &last_cpupid);
>>> if (target_nid == NUMA_NO_NODE)
>>> goto out_map;
>>> if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
>>> diff --git a/mm/internal.h b/mm/internal.h
>>> index 52f7fc4e8ac3..fb16e18c9761 100644
>>> --- a/mm/internal.h
>>> +++ b/mm/internal.h
>>> @@ -1191,8 +1191,9 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
>>>
>>> void __vunmap_range_noflush(unsigned long start, unsigned long end);
>>>
>>> -int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
>>> - unsigned long addr, int page_nid, int *flags);
>>> +int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
>>> + unsigned long addr, int *flags, bool writable,
>>> + int *last_cpupid);
>>>
>>> void free_zone_device_folio(struct folio *folio);
>>> int migrate_device_coherent_page(struct page *page);
>>> diff --git a/mm/memory.c b/mm/memory.c
>>> index 503d493263df..b093df652c11 100644
>>> --- a/mm/memory.c
>>> +++ b/mm/memory.c
>>> @@ -5368,16 +5368,43 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
>>> return ret;
>>> }
>>>
>>> -int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
>>> - unsigned long addr, int page_nid, int *flags)
>>> +int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
>>> + unsigned long addr, int *flags,
>>> + bool writable, int *last_cpupid)
>>> {
>>> struct vm_area_struct *vma = vmf->vma;
>>>
>>> + /*
>>> + * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
>>> + * much anyway since they can be in shared cache state. This misses
>>> + * the case where a mapping is writable but the process never writes
>>> + * to it but pte_write gets cleared during protection updates and
>>> + * pte_dirty has unpredictable behaviour between PTE scan updates,
>>> + * background writeback, dirty balancing and application behaviour.
>>> + */
>>> + if (!writable)
>>> + *flags |= TNF_NO_GROUP;
>>> +
>>> + /*
>>> + * Flag if the folio is shared between multiple address spaces. This
>>> + * is later used when determining whether to group tasks together
>>> + */
>>> + if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
>>> + *flags |= TNF_SHARED;
>>> + /*
>>> + * For memory tiering mode, cpupid of slow memory page is used
>>> + * to record page access time. So use default value.
>>> + */
>>> + if (folio_use_access_time(folio))
>>> + *last_cpupid = (-1 & LAST_CPUPID_MASK);
>>> + else
>>> + *last_cpupid = folio_last_cpupid(folio);
>>> +
>>> /* Record the current PID acceesing VMA */
>>> vma_set_access_pid_bit(vma);
>>>
>>> count_vm_numa_event(NUMA_HINT_FAULTS);
>>> - if (page_nid == numa_node_id()) {
>>> + if (folio_nid(folio) == numa_node_id()) {
>>> count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
>>> *flags |= TNF_FAULT_LOCAL;
>>> }
>>> @@ -5442,13 +5469,13 @@ static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_stru
>>> static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>> {
>>> struct vm_area_struct *vma = vmf->vma;
>>> + pte_t old_pte = vmf->orig_pte;
>>
>> The usage of old_pte is different from the other use cases, where:
>>
>> old_pte = *pte;
>> check old_pte and orig_pte
>> generate new_pte from old_pte
>> set new_pte
>>
>> We used this convention in do_numa_page() before, but we don't do so now.
>> I still think that it's better to follow the convention, at least partly,
>> where possible; it makes the code easier to read. I noticed that we don't
>> follow it in do_huge_pmd_numa_page() either; should we change that?
>
> Sure, since I am trying to make do_numa_page() and do_huge_pmd_numa_page()
> use the same pattern.
>
>>
>>> + pte_t pte;
>>> struct folio *folio = NULL;
>>> int nid = NUMA_NO_NODE;
>>> bool writable = false, ignore_writable = false;
>>> bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
>>> - int last_cpupid;
>>> - int target_nid;
>>> - pte_t pte, old_pte;
>>> + int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
>>
>> Because we will initialize last_cpupid in numa_migrate_check(), we don't
>> need to initialize it here?
>
> Will remove it.
>
> Thanks for the review.
I attached my fixup and the updated patch 2.
Let me know how it looks.
--
Best Regards,
Yan, Zi
[-- Attachment #1.2: 0001-mm-migrate-move-common-code-to-numa_migrate_check-wa.patch --]
From 9d2f9fc7a3e7d21af820c91aa239b6a2511d7840 Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@nvidia.com>
Date: Thu, 8 Aug 2024 11:17:03 -0400
Subject: [PATCH] mm/migrate: move common code to numa_migrate_check (was
numa_migrate_prep)
do_numa_page() and do_huge_pmd_numa_page() share a lot of common code. To
reduce redundancy, move common code to numa_migrate_prep() and rename
the function to numa_migrate_check() to reflect its functionality.
Now do_huge_pmd_numa_page() also checks shared folios to set TNF_SHARED
flag.
Suggested-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
mm/huge_memory.c | 29 +++++++++-------------
mm/internal.h | 5 ++--
mm/memory.c | 63 +++++++++++++++++++++++++-----------------------
3 files changed, 47 insertions(+), 50 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4e4364a17e6d..96a52e71d167 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1669,22 +1669,23 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- pmd_t oldpmd = vmf->orig_pmd;
- pmd_t pmd;
struct folio *folio;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
int nid = NUMA_NO_NODE;
- int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
+ int target_nid, last_cpupid;
+ pmd_t pmd, old_pmd;
bool writable = false;
int flags = 0;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
- if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ old_pmd = pmdp_get(vmf->pmd);
+
+ if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
spin_unlock(vmf->ptl);
return 0;
}
- pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+ pmd = pmd_modify(old_pmd, vma->vm_page_prot);
/*
* Detect now whether the PMD could be writable; this information
@@ -1699,18 +1700,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
if (!folio)
goto out_map;
- /* See similar comment in do_numa_page for explanation */
- if (!writable)
- flags |= TNF_NO_GROUP;
-
nid = folio_nid(folio);
- /*
- * For memory tiering mode, cpupid of slow memory page is used
- * to record page access time. So use default value.
- */
- if (!folio_use_access_time(folio))
- last_cpupid = folio_last_cpupid(folio);
- target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
+
+ target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
+ &last_cpupid);
if (target_nid == NUMA_NO_NODE)
goto out_map;
if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
@@ -1728,7 +1721,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
} else {
flags |= TNF_MIGRATE_FAIL;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
- if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
spin_unlock(vmf->ptl);
return 0;
}
@@ -1736,7 +1729,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
out_map:
/* Restore the PMD */
- pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+ pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
pmd = pmd_mkyoung(pmd);
if (writable)
pmd = pmd_mkwrite(pmd, vma);
diff --git a/mm/internal.h b/mm/internal.h
index 52f7fc4e8ac3..fb16e18c9761 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1191,8 +1191,9 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
void __vunmap_range_noflush(unsigned long start, unsigned long end);
-int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
- unsigned long addr, int page_nid, int *flags);
+int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
+ unsigned long addr, int *flags, bool writable,
+ int *last_cpupid);
void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_page(struct page *page);
diff --git a/mm/memory.c b/mm/memory.c
index d9b1dff9dc57..3441f60d54ef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5368,16 +5368,43 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
return ret;
}
-int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
- unsigned long addr, int page_nid, int *flags)
+int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
+ unsigned long addr, int *flags,
+ bool writable, int *last_cpupid)
{
struct vm_area_struct *vma = vmf->vma;
+ /*
+ * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
+ * much anyway since they can be in shared cache state. This misses
+ * the case where a mapping is writable but the process never writes
+ * to it but pte_write gets cleared during protection updates and
+ * pte_dirty has unpredictable behaviour between PTE scan updates,
+ * background writeback, dirty balancing and application behaviour.
+ */
+ if (!writable)
+ *flags |= TNF_NO_GROUP;
+
+ /*
+ * Flag if the folio is shared between multiple address spaces. This
+ * is later used when determining whether to group tasks together
+ */
+ if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
+ *flags |= TNF_SHARED;
+ /*
+ * For memory tiering mode, cpupid of slow memory page is used
+ * to record page access time. So use default value.
+ */
+ if (folio_use_access_time(folio))
+ *last_cpupid = (-1 & LAST_CPUPID_MASK);
+ else
+ *last_cpupid = folio_last_cpupid(folio);
+
/* Record the current PID acceesing VMA */
vma_set_access_pid_bit(vma);
count_vm_numa_event(NUMA_HINT_FAULTS);
- if (page_nid == numa_node_id()) {
+ if (folio_nid(folio) == numa_node_id()) {
count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
*flags |= TNF_FAULT_LOCAL;
}
@@ -5479,35 +5506,11 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (!folio || folio_is_zone_device(folio))
goto out_map;
- /*
- * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
- * much anyway since they can be in shared cache state. This misses
- * the case where a mapping is writable but the process never writes
- * to it but pte_write gets cleared during protection updates and
- * pte_dirty has unpredictable behaviour between PTE scan updates,
- * background writeback, dirty balancing and application behaviour.
- */
- if (!writable)
- flags |= TNF_NO_GROUP;
-
- /*
- * Flag if the folio is shared between multiple address spaces. This
- * is later used when determining whether to group tasks together
- */
- if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
- flags |= TNF_SHARED;
-
nid = folio_nid(folio);
nr_pages = folio_nr_pages(folio);
- /*
- * For memory tiering mode, cpupid of slow memory page is used
- * to record page access time. So use default value.
- */
- if (folio_use_access_time(folio))
- last_cpupid = (-1 & LAST_CPUPID_MASK);
- else
- last_cpupid = folio_last_cpupid(folio);
- target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags);
+
+ target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
+ writable, &last_cpupid);
if (target_nid == NUMA_NO_NODE)
goto out_map;
if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
--
2.43.0
[-- Attachment #1.3: patch2_fixup.patch --]
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5610289865a7..96a52e71d167 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1669,22 +1669,23 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- pmd_t oldpmd = vmf->orig_pmd;
- pmd_t pmd;
struct folio *folio;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
int nid = NUMA_NO_NODE;
- int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
+ int target_nid, last_cpupid;
+ pmd_t pmd, old_pmd;
bool writable = false;
int flags = 0;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
- if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ old_pmd = pmdp_get(vmf->pmd);
+
+ if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
spin_unlock(vmf->ptl);
return 0;
}
- pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+ pmd = pmd_modify(old_pmd, vma->vm_page_prot);
/*
* Detect now whether the PMD could be writable; this information
@@ -1720,7 +1721,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
} else {
flags |= TNF_MIGRATE_FAIL;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
- if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+ if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
spin_unlock(vmf->ptl);
return 0;
}
@@ -1728,7 +1729,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
out_map:
/* Restore the PMD */
- pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+ pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
pmd = pmd_mkyoung(pmd);
if (writable)
pmd = pmd_mkwrite(pmd, vma);
diff --git a/mm/memory.c b/mm/memory.c
index f186a8d8c992..3441f60d54ef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5469,13 +5469,13 @@ static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_stru
static vm_fault_t do_numa_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- pte_t old_pte = vmf->orig_pte;
- pte_t pte;
struct folio *folio = NULL;
int nid = NUMA_NO_NODE;
bool writable = false, ignore_writable = false;
bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
- int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
+ int last_cpupid;
+ int target_nid;
+ pte_t pte, old_pte;
int flags = 0, nr_pages;
/*
@@ -5483,7 +5483,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
* table lock, that its contents have not changed during fault handling.
*/
spin_lock(vmf->ptl);
- if (unlikely(!pte_same(old_pte, ptep_get(vmf->pte)))) {
+ /* Read the live PTE from the page tables: */
+ old_pte = ptep_get(vmf->pte);
+
+ if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
}
@@ -5530,7 +5533,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
vmf->address, &vmf->ptl);
if (unlikely(!vmf->pte))
return 0;
- if (unlikely(!pte_same(old_pte, ptep_get(vmf->pte)))) {
+ if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
}
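The convention Ying asks for, and which patch2_fixup.patch above adopts for
do_numa_page(), boils down to four steps (condensed from the diff; the
identifiers are the ones used in the hunks, error paths abbreviated):

    spin_lock(vmf->ptl);
    /* 1. read the live PTE from the page table, under the PT lock */
    old_pte = ptep_get(vmf->pte);
    /* 2. compare it against the snapshot taken when the fault happened */
    if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
            pte_unmap_unlock(vmf->pte, vmf->ptl);
            return 0;       /* changed under us: someone else handled it */
    }
    /* 3. derive the new PTE from the live value, not from the snapshot */
    pte = pte_modify(old_pte, vma->vm_page_prot);
    /* 4. install it (done by the numa_rebuild_*_mapping() helpers here) */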
* Re: [PATCH 1/2] mm/numa: no task_numa_fault() call if page table is changed
From: Huang, Ying @ 2024-08-09 1:25 UTC
To: Kefeng Wang
Cc: Zi Yan, David Hildenbrand, Andrew Morton, Baolin Wang, linux-mm,
linux-kernel, stable
Kefeng Wang <wangkefeng.wang@huawei.com> writes:
> On 2024/8/8 22:21, Zi Yan wrote:
>> On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
>>
>>> On 08.08.24 16:13, Zi Yan wrote:
>>>> On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
>>>>
>>>>> On 08.08.24 05:19, Baolin Wang wrote:
>>>>>>
>>>>>>
> ...
>>>>> Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
>>>>>
>>>>> Then, just copy the 3 LOC.
>>>>>
>>>>> For mm/memory.c that would be:
>>>>>
>>>>> diff --git a/mm/memory.c b/mm/memory.c
>>>>> index 67496dc5064f..410ba50ca746 100644
>>>>> --- a/mm/memory.c
>>>>> +++ b/mm/memory.c
>>>>> @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>> if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
>>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>> - goto out;
>>>>> + return 0;
>>>>> }
>>>>> pte = pte_modify(old_pte, vma->vm_page_prot);
>>>>> @@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>>>>> vmf->address, &vmf->ptl);
>>>>> if (unlikely(!vmf->pte))
>>>>> - goto out;
>>>>> + return 0;
>>>>> if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
>>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>> - goto out;
>>>>> + return 0;
>>>>> }
>>>>> goto out_map;
>>>>> }
>>>>> -out:
>>>>> if (nid != NUMA_NO_NODE)
>>>>> task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>>>> return 0;
>
> Maybe drop this part too,
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 410ba50ca746..07343c1469e0 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
> if (!migrate_misplaced_folio(folio, vma, target_nid)) {
> nid = target_nid;
> flags |= TNF_MIGRATED;
> + goto out;
> } else {
> flags |= TNF_MIGRATE_FAIL;
> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
> @@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
> pte_unmap_unlock(vmf->pte, vmf->ptl);
> return 0;
> }
> - goto out_map;
> }
>
> - if (nid != NUMA_NO_NODE)
> - task_numa_fault(last_cpupid, nid, nr_pages, flags);
> - return 0;
> out_map:
> /*
> * Make it present again, depending on how arch implements
IMHO, migration success is the normal path, while migration failure is the
error-processing path. If so, it's better to use "goto" for the error
processing rather than for the normal path.
> @@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
> numa_rebuild_single_mapping(vmf, vma, vmf->address,
> vmf->pte,
> writable);
> pte_unmap_unlock(vmf->pte, vmf->ptl);
> +out:
> if (nid != NUMA_NO_NODE)
> task_numa_fault(last_cpupid, nid, nr_pages, flags);
> return 0;
>
>
--
Best Regards,
Huang, Ying
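Ying's remark follows the common kernel idiom: "goto" labels collect the
failure and cleanup work, while the success path reads straight through. A
minimal self-contained illustration (plain C, hypothetical names, not taken
from the kernel):

    #include <stdlib.h>

    /* hypothetical resource helpers, only here to make the example build */
    static void *acquire(void)    { return malloc(16); }
    static int   do_work(void *r) { return r ? 0 : -1; }
    static void  release(void *r) { free(r); }

    static int run(void)
    {
            void *res;
            int err;

            res = acquire();
            if (!res)
                    return -1;              /* nothing to clean up yet */

            err = do_work(res);
            if (err)
                    goto out_release;       /* error path jumps to the cleanup */

            /* success path falls straight through, no jumps */
    out_release:
            release(res);
            return err;
    }

    int main(void)
    {
            return run() ? EXIT_FAILURE : EXIT_SUCCESS;
    }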
* Re: [PATCH 1/2] mm/numa: no task_numa_fault() call if page table is changed
From: Zi Yan @ 2024-08-09 2:05 UTC
To: Huang, Ying
Cc: Kefeng Wang, David Hildenbrand, Andrew Morton, Baolin Wang,
linux-mm, linux-kernel, stable
On 8 Aug 2024, at 21:25, Huang, Ying wrote:
> Kefeng Wang <wangkefeng.wang@huawei.com> writes:
>
>> On 2024/8/8 22:21, Zi Yan wrote:
>>> On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
>>>
>>>> On 08.08.24 16:13, Zi Yan wrote:
>>>>> On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
>>>>>
>>>>>> On 08.08.24 05:19, Baolin Wang wrote:
>>>>>>>
>>>>>>>
>> ...
>>>>>> Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
>>>>>>
>>>>>> Then, just copy the 3 LOC.
>>>>>>
>>>>>> For mm/memory.c that would be:
>>>>>>
>>>>>> diff --git a/mm/memory.c b/mm/memory.c
>>>>>> index 67496dc5064f..410ba50ca746 100644
>>>>>> --- a/mm/memory.c
>>>>>> +++ b/mm/memory.c
>>>>>> @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>>> if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
>>>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>>> - goto out;
>>>>>> + return 0;
>>>>>> }
>>>>>> pte = pte_modify(old_pte, vma->vm_page_prot);
>>>>>> @@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>>> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>>>>>> vmf->address, &vmf->ptl);
>>>>>> if (unlikely(!vmf->pte))
>>>>>> - goto out;
>>>>>> + return 0;
>>>>>> if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
>>>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>>> - goto out;
>>>>>> + return 0;
>>>>>> }
>>>>>> goto out_map;
>>>>>> }
>>>>>> -out:
>>>>>> if (nid != NUMA_NO_NODE)
>>>>>> task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>>>>> return 0;
>>
>> Maybe drop this part too,
>>
>> diff --git a/mm/memory.c b/mm/memory.c
>> index 410ba50ca746..07343c1469e0 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>> if (!migrate_misplaced_folio(folio, vma, target_nid)) {
>> nid = target_nid;
>> flags |= TNF_MIGRATED;
>> + goto out;
>> } else {
>> flags |= TNF_MIGRATE_FAIL;
>> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>> @@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>> return 0;
>> }
>> - goto out_map;
>> }
>>
>> - if (nid != NUMA_NO_NODE)
>> - task_numa_fault(last_cpupid, nid, nr_pages, flags);
>> - return 0;
>> out_map:
>> /*
>> * Make it present again, depending on how arch implements
>
> IMHO, migration success is the normal path, while migration failure is the
> error-processing path. If so, it's better to use "goto" for the error
> processing rather than for the normal path.
>
>> @@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>> numa_rebuild_single_mapping(vmf, vma, vmf->address,
>> vmf->pte,
>> writable);
>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>> +out:
>> if (nid != NUMA_NO_NODE)
>> task_numa_fault(last_cpupid, nid, nr_pages, flags);
>> return 0;
>>
>>
How about calling task_numa_fault() and returning on the successful-migration path?
(target_nid cannot be NUMA_NO_NODE, so the if is not needed.)
diff --git a/mm/memory.c b/mm/memory.c
index 3441f60d54ef..abdb73a68b80 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5526,7 +5526,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (!migrate_misplaced_folio(folio, vma, target_nid)) {
nid = target_nid;
flags |= TNF_MIGRATED;
- goto out;
+ task_numa_fault(last_cpupid, nid, nr_pages, flags);
+ return 0;
} else {
flags |= TNF_MIGRATE_FAIL;
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5550,7 +5551,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
writable);
pte_unmap_unlock(vmf->pte, vmf->ptl);
-out:
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
Or move the make-present code inside the migration-failed branch? This one
does not duplicate code, but other paths can still jump into this branch.
diff --git a/mm/memory.c b/mm/memory.c
index 3441f60d54ef..c9b4e7099815 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5526,7 +5526,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (!migrate_misplaced_folio(folio, vma, target_nid)) {
nid = target_nid;
flags |= TNF_MIGRATED;
- goto out;
} else {
flags |= TNF_MIGRATE_FAIL;
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5537,20 +5536,20 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
pte_unmap_unlock(vmf->pte, vmf->ptl);
return 0;
}
- }
out_map:
- /*
- * Make it present again, depending on how arch implements
- * non-accessible ptes, some can allow access by kernel mode.
- */
- if (folio && folio_test_large(folio))
- numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
- pte_write_upgrade);
- else
- numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
- writable);
- pte_unmap_unlock(vmf->pte, vmf->ptl);
-out:
+ /*
+ * Make it present again, depending on how arch implements
+ * non-accessible ptes, some can allow access by kernel mode.
+ */
+ if (folio && folio_test_large(folio))
+ numa_rebuild_large_mapping(vmf, vma, folio, pte,
+ ignore_writable, pte_write_upgrade);
+ else
+ numa_rebuild_single_mapping(vmf, vma, vmf->address,
+ vmf->pte, writable);
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ }
+
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
Of course, I will need to change mm/huge_memory.c as well.
Best Regards,
Yan, Zi
* Re: [PATCH 1/2] mm/numa: no task_numa_fault() call if page table is changed
From: Huang, Ying @ 2024-08-09 7:52 UTC
To: Zi Yan
Cc: Kefeng Wang, David Hildenbrand, Andrew Morton, Baolin Wang,
linux-mm, linux-kernel, stable
Zi Yan <ziy@nvidia.com> writes:
> On 8 Aug 2024, at 21:25, Huang, Ying wrote:
>
>> Kefeng Wang <wangkefeng.wang@huawei.com> writes:
>>
>>> On 2024/8/8 22:21, Zi Yan wrote:
>>>> On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
>>>>
>>>>> On 08.08.24 16:13, Zi Yan wrote:
>>>>>> On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
>>>>>>
>>>>>>> On 08.08.24 05:19, Baolin Wang wrote:
>>>>>>>>
>>>>>>>>
>>> ...
>>>>>>> Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
>>>>>>>
>>>>>>> Then, just copy the 3 LOC.
>>>>>>>
>>>>>>> For mm/memory.c that would be:
>>>>>>>
>>>>>>> diff --git a/mm/memory.c b/mm/memory.c
>>>>>>> index 67496dc5064f..410ba50ca746 100644
>>>>>>> --- a/mm/memory.c
>>>>>>> +++ b/mm/memory.c
>>>>>>> @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>>>> if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
>>>>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>>>> - goto out;
>>>>>>> + return 0;
>>>>>>> }
>>>>>>> pte = pte_modify(old_pte, vma->vm_page_prot);
>>>>>>> @@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>>>> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>>>>>>> vmf->address, &vmf->ptl);
>>>>>>> if (unlikely(!vmf->pte))
>>>>>>> - goto out;
>>>>>>> + return 0;
>>>>>>> if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
>>>>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>>>> - goto out;
>>>>>>> + return 0;
>>>>>>> }
>>>>>>> goto out_map;
>>>>>>> }
>>>>>>> -out:
>>>>>>> if (nid != NUMA_NO_NODE)
>>>>>>> task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>>>>>> return 0;
>>>
>>> Maybe drop this part too,
>>>
>>> diff --git a/mm/memory.c b/mm/memory.c
>>> index 410ba50ca746..07343c1469e0 100644
>>> --- a/mm/memory.c
>>> +++ b/mm/memory.c
>>> @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>> if (!migrate_misplaced_folio(folio, vma, target_nid)) {
>>> nid = target_nid;
>>> flags |= TNF_MIGRATED;
>>> + goto out;
>>> } else {
>>> flags |= TNF_MIGRATE_FAIL;
>>> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>>> @@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>> return 0;
>>> }
>>> - goto out_map;
>>> }
>>>
>>> - if (nid != NUMA_NO_NODE)
>>> - task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>> - return 0;
>>> out_map:
>>> /*
>>> * Make it present again, depending on how arch implements
>>
>> IMHO, migration success is the normal path, while migration failure is the
>> error-processing path. If so, it's better to use "goto" for the error
>> processing rather than for the normal path.
>>
>>> @@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>> numa_rebuild_single_mapping(vmf, vma, vmf->address,
>>> vmf->pte,
>>> writable);
>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>> +out:
>>> if (nid != NUMA_NO_NODE)
>>> task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>> return 0;
>>>
>>>
>
> How about calling task_numa_fault() and returning on the successful-migration path?
> (target_nid cannot be NUMA_NO_NODE, so the if is not needed.)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 3441f60d54ef..abdb73a68b80 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -5526,7 +5526,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
> if (!migrate_misplaced_folio(folio, vma, target_nid)) {
> nid = target_nid;
> flags |= TNF_MIGRATED;
> - goto out;
> + task_numa_fault(last_cpupid, nid, nr_pages, flags);
> + return 0;
> } else {
> flags |= TNF_MIGRATE_FAIL;
> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
> @@ -5550,7 +5551,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
> numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
> writable);
> pte_unmap_unlock(vmf->pte, vmf->ptl);
> -out:
> if (nid != NUMA_NO_NODE)
> task_numa_fault(last_cpupid, nid, nr_pages, flags);
> return 0;
>
This looks better to me; or change it further:
if (migrate_misplaced_folio(folio, vma, target_nid))
goto out_map_pt;
nid = target_nid;
flags |= TNF_MIGRATED;
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
out_map_pt:
flags |= TNF_MIGRATE_FAIL;
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
...
>
>
> Or move the make-present code inside the migration-failed branch? This one
> does not duplicate code, but other paths can still jump into this branch.
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 3441f60d54ef..c9b4e7099815 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -5526,7 +5526,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
> if (!migrate_misplaced_folio(folio, vma, target_nid)) {
> nid = target_nid;
> flags |= TNF_MIGRATED;
> - goto out;
> } else {
> flags |= TNF_MIGRATE_FAIL;
> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
> @@ -5537,20 +5536,20 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
> pte_unmap_unlock(vmf->pte, vmf->ptl);
> return 0;
> }
> - }
> out_map:
> - /*
> - * Make it present again, depending on how arch implements
> - * non-accessible ptes, some can allow access by kernel mode.
> - */
> - if (folio && folio_test_large(folio))
> - numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable,
> - pte_write_upgrade);
> - else
> - numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
> - writable);
> - pte_unmap_unlock(vmf->pte, vmf->ptl);
> -out:
> + /*
> + * Make it present again, depending on how arch implements
> + * non-accessible ptes, some can allow access by kernel mode.
> + */
> + if (folio && folio_test_large(folio))
> + numa_rebuild_large_mapping(vmf, vma, folio, pte,
> + ignore_writable, pte_write_upgrade);
> + else
> + numa_rebuild_single_mapping(vmf, vma, vmf->address,
> + vmf->pte, writable);
> + pte_unmap_unlock(vmf->pte, vmf->ptl);
> + }
> +
> if (nid != NUMA_NO_NODE)
> task_numa_fault(last_cpupid, nid, nr_pages, flags);
> return 0;
>
>
> Of course, I will need to change mm/huge_memory.c as well.
>
--
Best Regards,
Huang, Ying
* Re: [PATCH 1/2] mm/numa: no task_numa_fault() call if page table is changed
From: Zi Yan @ 2024-08-09 13:28 UTC
To: Huang, Ying, Andrew Morton
Cc: Kefeng Wang, David Hildenbrand, Baolin Wang, linux-mm,
linux-kernel, stable
On 9 Aug 2024, at 3:52, Huang, Ying wrote:
> Zi Yan <ziy@nvidia.com> writes:
>
>> On 8 Aug 2024, at 21:25, Huang, Ying wrote:
>>
>>> Kefeng Wang <wangkefeng.wang@huawei.com> writes:
>>>
>>>> On 2024/8/8 22:21, Zi Yan wrote:
>>>>> On 8 Aug 2024, at 10:14, David Hildenbrand wrote:
>>>>>
>>>>>> On 08.08.24 16:13, Zi Yan wrote:
>>>>>>> On 8 Aug 2024, at 4:22, David Hildenbrand wrote:
>>>>>>>
>>>>>>>> On 08.08.24 05:19, Baolin Wang wrote:
>>>>>>>>>
>>>>>>>>>
>>>> ...
>>>>>>>> Agreed, maybe we should simply handle that right away and replace the "goto out;" users by "return 0;".
>>>>>>>>
>>>>>>>> Then, just copy the 3 LOC.
>>>>>>>>
>>>>>>>> For mm/memory.c that would be:
>>>>>>>>
>>>>>>>> diff --git a/mm/memory.c b/mm/memory.c
>>>>>>>> index 67496dc5064f..410ba50ca746 100644
>>>>>>>> --- a/mm/memory.c
>>>>>>>> +++ b/mm/memory.c
>>>>>>>> @@ -5461,7 +5461,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>>>>> if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
>>>>>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>>>>> - goto out;
>>>>>>>> + return 0;
>>>>>>>> }
>>>>>>>> pte = pte_modify(old_pte, vma->vm_page_prot);
>>>>>>>> @@ -5528,15 +5528,14 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>>>>>> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>>>>>>>> vmf->address, &vmf->ptl);
>>>>>>>> if (unlikely(!vmf->pte))
>>>>>>>> - goto out;
>>>>>>>> + return 0;
>>>>>>>> if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
>>>>>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>>>>>> - goto out;
>>>>>>>> + return 0;
>>>>>>>> }
>>>>>>>> goto out_map;
>>>>>>>> }
>>>>>>>> -out:
>>>>>>>> if (nid != NUMA_NO_NODE)
>>>>>>>> task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>>>>>>> return 0;
>>>>
>>>> Maybe drop this part too,
>>>>
>>>> diff --git a/mm/memory.c b/mm/memory.c
>>>> index 410ba50ca746..07343c1469e0 100644
>>>> --- a/mm/memory.c
>>>> +++ b/mm/memory.c
>>>> @@ -5523,6 +5523,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>> if (!migrate_misplaced_folio(folio, vma, target_nid)) {
>>>> nid = target_nid;
>>>> flags |= TNF_MIGRATED;
>>>> + goto out;
>>>> } else {
>>>> flags |= TNF_MIGRATE_FAIL;
>>>> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>>>> @@ -5533,12 +5534,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>> return 0;
>>>> }
>>>> - goto out_map;
>>>> }
>>>>
>>>> - if (nid != NUMA_NO_NODE)
>>>> - task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>>> - return 0;
>>>> out_map:
>>>> /*
>>>> * Make it present again, depending on how arch implements
>>>
>>> IMHO, migration success is the normal path, while migration failure is the
>>> error-processing path. If so, it's better to use "goto" for the error
>>> processing rather than for the normal path.
>>>
>>>> @@ -5551,6 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>> numa_rebuild_single_mapping(vmf, vma, vmf->address,
>>>> vmf->pte,
>>>> writable);
>>>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>>>> +out:
>>>> if (nid != NUMA_NO_NODE)
>>>> task_numa_fault(last_cpupid, nid, nr_pages, flags);
>>>> return 0;
>>>>
>>>>
>>
>> How about calling task_numa_fault() and returning on the successful-migration path?
>> (target_nid cannot be NUMA_NO_NODE, so the if is not needed.)
>>
>> diff --git a/mm/memory.c b/mm/memory.c
>> index 3441f60d54ef..abdb73a68b80 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -5526,7 +5526,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>> if (!migrate_misplaced_folio(folio, vma, target_nid)) {
>> nid = target_nid;
>> flags |= TNF_MIGRATED;
>> - goto out;
>> + task_numa_fault(last_cpupid, nid, nr_pages, flags);
>> + return 0;
>> } else {
>> flags |= TNF_MIGRATE_FAIL;
>> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
>> @@ -5550,7 +5551,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>> numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
>> writable);
>> pte_unmap_unlock(vmf->pte, vmf->ptl);
>> -out:
>> if (nid != NUMA_NO_NODE)
>> task_numa_fault(last_cpupid, nid, nr_pages, flags);
>> return 0;
>>
>
> This looks better to me; or change it further:
Thanks. Will put a fixup below.
>
> if (migrate_misplaced_folio(folio, vma, target_nid))
> goto out_map_pt;
>
> nid = target_nid;
> flags |= TNF_MIGRATED;
> task_numa_fault(last_cpupid, nid, nr_pages, flags);
>
> return 0;
>
> out_map_pt:
> flags |= TNF_MIGRATE_FAIL;
> vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
> ...
>
I will send a separate patch for this refactoring, since this patch
is meant to fix commit b99a342d4f11 ("NUMA balancing: reduce TLB flush via delaying mapping on hint page fault").
Hi Andrew,
Can you fold the fixup below into this patch? Thanks.
From 645fa83b2eed14494755ed67e5c52a55656287ac Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@nvidia.com>
Date: Thu, 8 Aug 2024 22:06:41 -0400
Subject: [PATCH] fixup! fixup! mm/numa: no task_numa_fault() call if page
table is changed
Based on a suggestion from Ying.
Link: https://lore.kernel.org/linux-mm/87cymizdvc.fsf@yhuang6-desk2.ccr.corp.intel.com/
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
mm/huge_memory.c | 5 +++--
mm/memory.c | 5 +++--
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4e4364a17e6d..0e79ccaaf5e4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1724,7 +1724,8 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
if (!migrate_misplaced_folio(folio, vma, target_nid)) {
flags |= TNF_MIGRATED;
nid = target_nid;
- goto out;
+ task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
+ return 0;
} else {
flags |= TNF_MIGRATE_FAIL;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
@@ -1743,7 +1744,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
spin_unlock(vmf->ptl);
-out:
+
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
return 0;
diff --git a/mm/memory.c b/mm/memory.c
index d9b1dff9dc57..6c3aadf3e5ad 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5523,7 +5523,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (!migrate_misplaced_folio(folio, vma, target_nid)) {
nid = target_nid;
flags |= TNF_MIGRATED;
- goto out;
+ task_numa_fault(last_cpupid, nid, nr_pages, flags);
+ return 0;
} else {
flags |= TNF_MIGRATE_FAIL;
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
@@ -5547,7 +5548,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte,
writable);
pte_unmap_unlock(vmf->pte, vmf->ptl);
-out:
+
if (nid != NUMA_NO_NODE)
task_numa_fault(last_cpupid, nid, nr_pages, flags);
return 0;
--
2.43.0
Best Regards,
Yan, Zi
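Condensed from the mm/memory.c hunk above, the net effect of this final
fixup: a successful migration accounts the fault and returns right away, so
the "out:" label disappears and only the fall-through tail accounts
conditionally:

    if (!migrate_misplaced_folio(folio, vma, target_nid)) {
            nid = target_nid;
            flags |= TNF_MIGRATED;
            task_numa_fault(last_cpupid, nid, nr_pages, flags);
            return 0;                       /* success: account and leave */
    }
    flags |= TNF_MIGRATE_FAIL;
    /* ... retake the lock, recheck the PTE, rebuild the mapping ... */
    if (nid != NUMA_NO_NODE)
            task_numa_fault(last_cpupid, nid, nr_pages, flags);
    return 0;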
* Re: [PATCH 1/2] mm/numa: no task_numa_fault() call if page table is changed
From: David Hildenbrand @ 2024-08-09 13:49 UTC
To: Zi Yan, Huang, Ying, Andrew Morton
Cc: Kefeng Wang, Baolin Wang, linux-mm, linux-kernel, stable
> Hi Andrew,
>
> Can you fold the fixup below into this patch? Thanks.
It might be reasonable to send out a new version. At least I am confused
now about what the latest state is :D
--
Cheers,
David / dhildenb
* Re: [PATCH 1/2] mm/numa: no task_numa_fault() call if page table is changed
From: Zi Yan @ 2024-08-09 14:17 UTC
To: David Hildenbrand
Cc: Huang, Ying, Andrew Morton, Kefeng Wang, Baolin Wang, linux-mm,
linux-kernel, stable
On 9 Aug 2024, at 9:49, David Hildenbrand wrote:
>> Hi Andrew,
>>
>> Can you fold the fixup below into this patch? Thanks.
>
> It might be reasonable to send out a new version. At least I am confused now about what the latest state is :D
Sure. I will send v3 to include all patches I sent recently.
Best Regards,
Yan, Zi