From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Hugh Dickins <hughd@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>,
	Dave Hansen <dave.hansen@intel.com>,
	Vlastimil Babka <vbabka@suse.cz>,
	Johannes Weiner <hannes@cmpxchg.org>,
	Michal Hocko <mhocko@suse.cz>,
	David Rientjes <rientjes@google.com>,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv10 37/36, RFC] thp: allow mlocked THP again
Date: Thu,  3 Sep 2015 18:16:28 +0300	[thread overview]
Message-ID: <1441293388-137552-1-git-send-email-kirill.shutemov@linux.intel.com> (raw)
In-Reply-To: <1441293202-137314-1-git-send-email-kirill.shutemov@linux.intel.com>

This patch brings back mlocked THP. Instead of forbidding mlocked pages
altogether, we just avoid mlocking PTE-mapped THPs and munlock THPs on
split_huge_pmd().

This means PTE-mapped THPs will be on the normal LRU lists and will be
split under memory pressure by vmscan. After the split, vmscan will
detect unevictable small pages and mlock them.

This way we can avoid leaking mlocked pages into non-VM_LOCKED VMAs.
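
For illustration only (not part of the patch), a minimal userspace
sketch of the behaviour this re-enables. MADV_HUGEPAGE is only a hint,
so whether the region really ends up huge-page backed has to be
confirmed via AnonHugePages in /proc/self/smaps:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define LEN (4UL << 20)	/* 4MB: covers at least one 2MB PMD */

int main(void)
{
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	madvise(p, LEN, MADV_HUGEPAGE);	/* a hint, not a guarantee */
	memset(p, 1, LEN);		/* fault the range in */
	if (mlock(p, LEN))		/* used to force small pages */
		return 1;
	getchar();	/* inspect AnonHugePages/Locked in smaps now */
	munlock(p, LEN);
	munmap(p, LEN);
	return 0;
}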

Not-Yet-Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---

I'm not yet 100% certain that this approach is correct. Review would be appreciated.
More testing is required.
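
A sketch of one such test, under assumptions worth double-checking:
that mprotect() of a sub-PMD range reaches split_huge_pmd(), and that
vmscan re-mlocks the small pages only after actually splitting the
compound page (so the Mlocked counter can stay low on an idle system):

#define _GNU_SOURCE
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	size_t len = 4UL << 20;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	madvise(p, len, MADV_HUGEPAGE);
	memset(p, 1, len);
	if (mlock(p, len))
		return 1;
	/*
	 * A sub-PMD mprotect() splits the pmd under the mlocked VMA;
	 * with this patch that munlocks the still-compound page, so
	 * Mlocked in /proc/meminfo should drop by ~2MB until vmscan
	 * splits the page and mlocks the small pages again.
	 */
	mprotect(p + len / 2 + psize, psize, PROT_READ);
	pause();	/* watch Mlocked/Unevictable in /proc/meminfo */
	return 0;
}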

---
 mm/gup.c         |  6 ++++--
 mm/huge_memory.c | 33 +++++++++++++++++++++++-------
 mm/memory.c      |  3 +--
 mm/mlock.c       | 61 +++++++++++++++++++++++++++++++++++++-------------------
 4 files changed, 71 insertions(+), 32 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index 70d65e4015a4..e95b0cb6ed81 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -143,6 +143,10 @@ retry:
 		mark_page_accessed(page);
 	}
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+		/* Do not mlock pte-mapped THP */
+		if (PageTransCompound(page))
+			goto out;
+
 		/*
 		 * The preliminary mapping check is mainly to avoid the
 		 * pointless overhead of lock_page on the ZERO_PAGE
@@ -920,8 +924,6 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
 	if (vma->vm_flags & VM_LOCKONFAULT)
 		gup_flags &= ~FOLL_POPULATE;
-	if (vma->vm_flags & VM_LOCKED)
-		gup_flags |= FOLL_SPLIT;
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2cc99f9096a8..d714de02473b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -846,8 +846,6 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
 		return VM_FAULT_FALLBACK;
-	if (vma->vm_flags & VM_LOCKED)
-		return VM_FAULT_FALLBACK;
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
@@ -1316,7 +1314,16 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 			update_mmu_cache_pmd(vma, addr, pmd);
 	}
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
-		if (page->mapping && trylock_page(page)) {
+		/*
+		 * We don't mlock() pte-mapped THPs. This way we can avoid
+		 * leaking mlocked pages into non-VM_LOCKED VMAs.
+		 * In most cases the pmd is the only mapping of the page: we
+		 * break COW for the mlock(). The only scenario where the page
+		 * can be shared here is a read-only mapping shared over
+		 * fork() being mlocked. We skip mlocking such pages.
+		 */
+		if (compound_mapcount(page) == 1 && !PageDoubleMap(page) &&
+				page->mapping && trylock_page(page)) {
 			lru_add_drain();
 			if (page->mapping)
 				mlock_vma_page(page);
@@ -2194,8 +2201,6 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
 	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
 	    (vma->vm_flags & VM_NOHUGEPAGE))
 		return false;
-	if (vma->vm_flags & VM_LOCKED)
-		return false;
 	if (!vma->anon_vma || vma->vm_ops)
 		return false;
 	if (is_vma_temporary_stack(vma))
@@ -2755,14 +2760,28 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	spinlock_t *ptl;
 	struct mm_struct *mm = vma->vm_mm;
+	struct page *page = NULL;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 
 	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
 	ptl = pmd_lock(mm, pmd);
-	if (likely(pmd_trans_huge(*pmd)))
-		__split_huge_pmd_locked(vma, pmd, haddr, false);
+	if (unlikely(!pmd_trans_huge(*pmd)))
+		goto out;
+	page = pmd_page(*pmd);
+	__split_huge_pmd_locked(vma, pmd, haddr, false);
+	if (PageMlocked(page))
+		get_page(page);
+	else
+		page = NULL;
+out:
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
+	if (page) {
+		lock_page(page);
+		munlock_vma_page(page);
+		unlock_page(page);
+		put_page(page);
+	}
 }
 
 static void split_huge_pmd_address(struct vm_area_struct *vma,
diff --git a/mm/memory.c b/mm/memory.c
index 5a9e9399d935..6356b316ddda 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2155,8 +2155,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	pte_unmap_unlock(page_table, ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-	/* THP pages are never mlocked */
-	if (old_page && !PageTransCompound(old_page)) {
+	if (old_page) {
 		/*
 		 * Don't let another task, with possibly unlocked vma,
 		 * keep the mlocked page.
diff --git a/mm/mlock.c b/mm/mlock.c
index 8314cd3eee87..c37bbc1d3774 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -82,6 +82,9 @@ void mlock_vma_page(struct page *page)
 	/* Serialize with page migration */
 	BUG_ON(!PageLocked(page));
 
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
+
 	if (!TestSetPageMlocked(page)) {
 		mod_zone_page_state(page_zone(page), NR_MLOCK,
 				    hpage_nr_pages(page));
@@ -178,6 +181,8 @@ unsigned int munlock_vma_page(struct page *page)
 	/* For try_to_munlock() and to serialize with page migration */
 	BUG_ON(!PageLocked(page));
 
+	VM_BUG_ON_PAGE(PageTail(page), page);
+
 	/*
 	 * Serialize with any parallel __split_huge_page_refcount() which
 	 * might otherwise copy PageMlocked to part of the tail pages before
@@ -443,29 +448,43 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
 				&page_mask);
 
-		if (page && !IS_ERR(page) && !PageTransCompound(page)) {
-			/*
-			 * Non-huge pages are handled in batches via
-			 * pagevec. The pin from follow_page_mask()
-			 * prevents them from collapsing by THP.
-			 */
-			pagevec_add(&pvec, page);
-			zone = page_zone(page);
-			zoneid = page_zone_id(page);
+		if (page && !IS_ERR(page)) {
+			if (PageTransTail(page)) {
+				VM_BUG_ON_PAGE(PageMlocked(page), page);
+				put_page(page); /* follow_page_mask() */
+			} else if (PageTransHuge(page)) {
+				lock_page(page);
+				/*
+				 * Any THP page found by follow_page_mask() may
+				 * have gotten split before reaching
+				 * munlock_vma_page(), so we need to recompute
+				 * the page_mask here.
+				 */
+				page_mask = munlock_vma_page(page);
+				unlock_page(page);
+				put_page(page); /* follow_page_mask() */
+			} else {
+				/*
+				 * Non-huge pages are handled in batches via
+				 * pagevec. The pin from follow_page_mask()
+				 * prevents them from being collapsed into a THP.
+				 */
+				pagevec_add(&pvec, page);
+				zone = page_zone(page);
+				zoneid = page_zone_id(page);
 
-			/*
-			 * Try to fill the rest of pagevec using fast
-			 * pte walk. This will also update start to
-			 * the next page to process. Then munlock the
-			 * pagevec.
-			 */
-			start = __munlock_pagevec_fill(&pvec, vma,
-					zoneid, start, end);
-			__munlock_pagevec(&pvec, zone);
-			goto next;
+				/*
+				 * Try to fill the rest of pagevec using fast
+				 * pte walk. This will also update start to
+				 * the next page to process. Then munlock the
+				 * pagevec.
+				 */
+				start = __munlock_pagevec_fill(&pvec, vma,
+						zoneid, start, end);
+				__munlock_pagevec(&pvec, zone);
+				goto next;
+			}
 		}
-		/* It's a bug to munlock in the middle of a THP page */
-		VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
 		page_increm = 1 + page_mask;
 		start += page_increm * PAGE_SIZE;
 next:
-- 
2.5.0

Thread overview: 40+ messages
2015-09-03 15:12 [PATCHv10 00/36] THP refcounting redesign Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 01/36] mm, proc: adjust PSS calculation Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 02/36] rmap: add argument to charge compound page Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 03/36] memcg: adjust to support new THP refcounting Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 04/36] mm, thp: adjust conditions when we can reuse the page on WP fault Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 05/36] mm: adjust FOLL_SPLIT for new refcounting Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 06/36] mm: handle PTE-mapped tail pages in generic fast gup implementation Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 07/36] thp, mlock: do not allow huge pages in mlocked area Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 08/36] khugepaged: ignore pmd tables with THP mapped with ptes Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 09/36] thp: rename split_huge_page_pmd() to split_huge_pmd() Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 10/36] mm, vmstats: new THP splitting event Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 11/36] mm: temporarily mark THP broken Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 12/36] thp: drop all split_huge_page()-related code Kirill A. Shutemov
2015-09-03 15:12 ` [PATCHv10 13/36] mm: drop tail page refcounting Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 14/36] futex, thp: remove special case for THP in get_futex_key Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 15/36] ksm: prepare to new THP semantics Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 16/36] mm, thp: remove compound_lock Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 17/36] arm64, thp: remove infrastructure for handling splitting PMDs Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 18/36] arm, " Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 19/36] mips, " Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 20/36] powerpc, " Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 21/36] s390, " Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 22/36] sparc, " Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 23/36] tile, " Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 24/36] x86, " Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 25/36] mm, " Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 26/36] mm: rework mapcount accounting to enable 4k mapping of THPs Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 27/36] mm: differentiate page_mapped() from page_mapcount() for compound pages Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 28/36] mm, numa: skip PTE-mapped THP on numa fault Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 29/36] thp: implement split_huge_pmd() Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 30/36] thp: add option to setup migration entries during PMD split Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 31/36] thp, mm: split_huge_page(): caller need to lock page Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 32/36] thp: reintroduce split_huge_page() Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 33/36] migrate_pages: try to split pages on queueing Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 34/36] thp: introduce deferred_split_huge_page() Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 35/36] mm: re-enable THP Kirill A. Shutemov
2015-09-03 15:13 ` [PATCHv10 36/36] thp: update documentation Kirill A. Shutemov
2015-09-03 15:16 ` Kirill A. Shutemov [this message]
2015-09-11 13:22   ` [PATCHv10 37/36, RFC] thp: allow mlocked THP again Vlastimil Babka
2015-09-14 11:05     ` Kirill A. Shutemov
