From: Mel Gorman <mgorman@suse.de>
To: Peter Zijlstra <a.p.zijlstra@chello.nl>, Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
	Ingo Molnar <mingo@kernel.org>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Linux-MM <linux-mm@kvack.org>,
	LKML <linux-kernel@vger.kernel.org>, Mel Gorman <mgorman@suse.de>
Subject: [PATCH 05/27] mm, numa: Sanitize task_numa_fault() callsites
Date: Thu,  8 Aug 2013 15:00:17 +0100
Message-ID: <1375970439-5111-6-git-send-email-mgorman@suse.de>
In-Reply-To: <1375970439-5111-1-git-send-email-mgorman@suse.de>

From: Peter Zijlstra <peterz@infradead.org>

There are three callers of task_numa_fault():

 - do_huge_pmd_numa_page():
     Accounts against the current node, not the node where the
     page resides, unless we migrated, in which case it accounts
     against the node we migrated to.

 - do_numa_page():
     Accounts against the current node, not the node where the
     page resides, unless we migrated, in which case it accounts
     against the node we migrated to.

 - do_pmd_numa_page():
     Does not account at all when the page isn't migrated; otherwise
     it accounts against the node we migrated towards.

This seems wrong to me; all three sites should have the same
semantics. Furthermore, we should account against where the page
really is; we already know where the task is.

So modify all three sites to always account; we did, after all,
receive the fault. And always account against the node where the page
is after migration, regardless of whether the migration succeeded.

They all still differ on when they clear the PTE/PMD; ideally that
would get sorted too.
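
For illustration, the shape all three sites converge to is roughly
the following. This is a standalone userspace sketch, not kernel
code: the fake_*() helpers below are stand-ins for page_to_nid(),
mpol_misplaced(), migrate_misplaced_page() and task_numa_fault(), and
the node numbers are made up.

	#include <stdbool.h>
	#include <stdio.h>

	/* Node the page currently resides on. */
	static int fake_page_to_nid(void) { return 1; }

	/* Preferred node per memory policy; -1 if correctly placed. */
	static int fake_mpol_misplaced(void) { return 2; }

	/* Migration is best-effort and may fail. */
	static bool fake_migrate(int target_nid) { return false; }

	static void fake_task_numa_fault(int nid, int pages, bool migrated)
	{
		printf("account %d page(s) to node %d (migrated=%d)\n",
		       pages, nid, migrated);
	}

	static void handle_numa_fault(void)
	{
		int page_nid = fake_page_to_nid();
		int target_nid = fake_mpol_misplaced();
		bool migrated = false;

		if (target_nid != -1) {
			migrated = fake_migrate(target_nid);
			if (migrated)
				page_nid = target_nid; /* page moved; account there */
		}

		/* Always account, and always against where the page now is. */
		if (page_nid != -1)
			fake_task_numa_fault(page_nid, 1, migrated);
	}

	int main(void)
	{
		handle_numa_fault();
		return 0;
	}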

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
---
 mm/huge_memory.c | 21 ++++++++++++---------
 mm/memory.c      | 53 +++++++++++++++++++++--------------------------------
 2 files changed, 33 insertions(+), 41 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4ebe3aa..bf59194 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1292,9 +1292,9 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *page;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
+	int page_nid = -1, this_nid = numa_node_id();
 	int target_nid;
-	int src_nid = -1;
-	bool migrated;
+	bool migrated = false;
 
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_same(pmd, *pmdp)))
@@ -1311,9 +1311,9 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto clear_pmdnuma;
 
 	get_page(page);
-	src_nid = numa_node_id();
+	page_nid = page_to_nid(page);
 	count_vm_numa_event(NUMA_HINT_FAULTS);
-	if (src_nid == page_to_nid(page))
+	if (page_nid == this_nid)
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 
 	target_nid = mpol_misplaced(page, vma, haddr);
@@ -1338,11 +1338,12 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	/* Migrate the THP to the requested node */
 	migrated = migrate_misplaced_transhuge_page(mm, vma,
 				pmdp, pmd, addr, page, target_nid);
-	if (!migrated)
+	if (migrated)
+		page_nid = target_nid;
+	else
 		goto check_same;
 
-	task_numa_fault(target_nid, HPAGE_PMD_NR, true);
-	return 0;
+	goto out;
 
 check_same:
 	spin_lock(&mm->page_table_lock);
@@ -1355,8 +1356,10 @@ clear_pmdnuma:
 	update_mmu_cache_pmd(vma, addr, pmdp);
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
-	if (src_nid != -1)
-		task_numa_fault(src_nid, HPAGE_PMD_NR, false);
+
+out:
+	if (page_nid != -1)
+		task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
 	return 0;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index 871b881..b0307f2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3517,12 +3517,12 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
-				unsigned long addr, int current_nid)
+				unsigned long addr, int page_nid)
 {
 	get_page(page);
 
 	count_vm_numa_event(NUMA_HINT_FAULTS);
-	if (current_nid == numa_node_id())
+	if (page_nid == numa_node_id())
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 
 	return mpol_misplaced(page, vma, addr);
@@ -3533,7 +3533,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *page = NULL;
 	spinlock_t *ptl;
-	int current_nid = -1;
+	int page_nid = -1;
 	int target_nid;
 	bool migrated = false;
 
@@ -3568,15 +3568,10 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return 0;
 	}
 
-	current_nid = page_to_nid(page);
-	target_nid = numa_migrate_prep(page, vma, addr, current_nid);
+	page_nid = page_to_nid(page);
+	target_nid = numa_migrate_prep(page, vma, addr, page_nid);
 	pte_unmap_unlock(ptep, ptl);
 	if (target_nid == -1) {
-		/*
-		 * Account for the fault against the current node if it not
-		 * being replaced regardless of where the page is located.
-		 */
-		current_nid = numa_node_id();
 		put_page(page);
 		goto out;
 	}
@@ -3584,11 +3579,11 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	/* Migrate to the requested node */
 	migrated = migrate_misplaced_page(page, target_nid);
 	if (migrated)
-		current_nid = target_nid;
+		page_nid = target_nid;
 
 out:
-	if (current_nid != -1)
-		task_numa_fault(current_nid, 1, migrated);
+	if (page_nid != -1)
+		task_numa_fault(page_nid, 1, migrated);
 	return 0;
 }
 
@@ -3603,7 +3598,6 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long offset;
 	spinlock_t *ptl;
 	bool numa = false;
-	int local_nid = numa_node_id();
 
 	spin_lock(&mm->page_table_lock);
 	pmd = *pmdp;
@@ -3626,9 +3620,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
 		pte_t pteval = *pte;
 		struct page *page;
-		int curr_nid = local_nid;
+		int page_nid = -1;
 		int target_nid;
-		bool migrated;
+		bool migrated = false;
+
 		if (!pte_present(pteval))
 			continue;
 		if (!pte_numa(pteval))
@@ -3650,25 +3645,19 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (unlikely(page_mapcount(page) != 1))
 			continue;
 
-		/*
-		 * Note that the NUMA fault is later accounted to either
-		 * the node that is currently running or where the page is
-		 * migrated to.
-		 */
-		curr_nid = local_nid;
-		target_nid = numa_migrate_prep(page, vma, addr,
-					       page_to_nid(page));
-		if (target_nid == -1) {
+		page_nid = page_to_nid(page);
+		target_nid = numa_migrate_prep(page, vma, addr, page_nid);
+		pte_unmap_unlock(pte, ptl);
+		if (target_nid != -1) {
+			migrated = migrate_misplaced_page(page, target_nid);
+			if (migrated)
+				page_nid = target_nid;
+		} else {
 			put_page(page);
-			continue;
 		}
 
-		/* Migrate to the requested node */
-		pte_unmap_unlock(pte, ptl);
-		migrated = migrate_misplaced_page(page, target_nid);
-		if (migrated)
-			curr_nid = target_nid;
-		task_numa_fault(curr_nid, 1, migrated);
+		if (page_nid != -1)
+			task_numa_fault(page_nid, 1, migrated);
 
 		pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 	}
-- 
1.8.1.4


Thread overview: 28+ messages
2013-08-08 14:00 [PATCH 0/27] Basic scheduler support for automatic NUMA balancing V6 Mel Gorman
2013-08-08 14:00 ` [PATCH 01/27] mm: numa: Document automatic NUMA balancing sysctls Mel Gorman
2013-08-08 14:00 ` [PATCH 02/27] sched, numa: Comment fixlets Mel Gorman
2013-08-08 14:00 ` [PATCH 03/27] mm: numa: Account for THP numa hinting faults on the correct node Mel Gorman
2013-08-08 14:00 ` [PATCH 04/27] mm: numa: Do not migrate or account for hinting faults on the zero page Mel Gorman
2013-08-08 14:00 ` Mel Gorman [this message]
2013-08-08 14:00 ` [PATCH 06/27] sched, numa: Mitigate chance that same task always updates PTEs Mel Gorman
2013-08-08 14:00 ` [PATCH 07/27] sched, numa: Continue PTE scanning even if migrate rate limited Mel Gorman
2013-08-08 14:00 ` [PATCH 08/27] Revert "mm: sched: numa: Delay PTE scanning until a task is scheduled on a new node" Mel Gorman
2013-08-08 14:00 ` [PATCH 09/27] sched: numa: Initialise numa_next_scan properly Mel Gorman
2013-08-08 14:00 ` [PATCH 10/27] sched: numa: Slow scan rate if no NUMA hinting faults are being recorded Mel Gorman
2013-08-08 14:00 ` [PATCH 11/27] sched: Set the scan rate proportional to the memory usage of the task being scanned Mel Gorman
2013-08-08 14:00 ` [PATCH 12/27] sched: numa: Correct adjustment of numa_scan_period Mel Gorman
2013-08-08 14:00 ` [PATCH 13/27] mm: Only flush TLBs if a transhuge PMD is modified for NUMA pte scanning Mel Gorman
2013-08-08 14:00 ` [PATCH 14/27] mm: Do not flush TLB during protection change if !pte_present && !migration_entry Mel Gorman
2013-08-08 14:00 ` [PATCH 15/27] sched: Track NUMA hinting faults on per-node basis Mel Gorman
2013-08-08 14:00 ` [PATCH 16/27] sched: Select a preferred node with the most numa hinting faults Mel Gorman
2013-08-08 14:00 ` [PATCH 17/27] sched: Update NUMA hinting faults once per scan Mel Gorman
2013-08-08 14:00 ` [PATCH 18/27] sched: Favour moving tasks towards the preferred node Mel Gorman
2013-08-08 14:00 ` [PATCH 19/27] sched: Resist moving tasks towards nodes with fewer hinting faults Mel Gorman
2013-08-08 14:00 ` [PATCH 20/27] sched: Reschedule task on preferred NUMA node once selected Mel Gorman
2013-08-08 14:00 ` [PATCH 21/27] sched: Add infrastructure for split shared/private accounting of NUMA hinting faults Mel Gorman
2013-08-08 14:00 ` [PATCH 22/27] sched: Check current->mm before allocating NUMA faults Mel Gorman
2013-08-08 14:00 ` [PATCH 23/27] mm: numa: Scan pages with elevated page_mapcount Mel Gorman
2013-08-08 14:00 ` [PATCH 24/27] sched: Remove check that skips small VMAs Mel Gorman
2013-08-08 14:00 ` [PATCH 25/27] sched: Set preferred NUMA node based on number of private faults Mel Gorman
2013-08-08 14:00 ` [PATCH 26/27] sched: Avoid overloading CPUs on a preferred NUMA node Mel Gorman
2013-08-08 14:00 ` [PATCH 27/27] sched: Retry migration of tasks to CPU on a preferred node Mel Gorman
