From: Zi Yan <zi.yan@sent.com>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: Dave Hansen <dave.hansen@linux.intel.com>,
Michal Hocko <mhocko@kernel.org>,
"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
Andrew Morton <akpm@linux-foundation.org>,
Vlastimil Babka <vbabka@suse.cz>,
Mel Gorman <mgorman@techsingularity.net>,
John Hubbard <jhubbard@nvidia.com>,
Mark Hairgrove <mhairgrove@nvidia.com>,
Nitin Gupta <nigupta@nvidia.com>,
David Nellans <dnellans@nvidia.com>, Zi Yan <ziy@nvidia.com>
Subject: [RFC PATCH 29/31] mm: madvise: add madvise options to split PMD and PUD THPs.
Date: Fri, 15 Feb 2019 14:08:54 -0800 [thread overview]
Message-ID: <20190215220856.29749-30-zi.yan@sent.com> (raw)
In-Reply-To: <20190215220856.29749-1-zi.yan@sent.com>
From: Zi Yan <ziy@nvidia.com>
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
include/uapi/asm-generic/mman-common.h | 12 +++
mm/madvise.c | 106 +++++++++++++++++++++++++
2 files changed, 118 insertions(+)
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index d1ec94a1970d..33db8b6a2ce0 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -69,6 +69,18 @@
#define MADV_MEMDEFRAG 20 /* Worth backing with hugepages */
#define MADV_NOMEMDEFRAG 21 /* Not worth backing with hugepages */
+#define MADV_SPLITHUGEPAGE 24 /* Split huge page in range once */
+#define MADV_PROMOTEHUGEPAGE 25 /* Promote range into huge page */
+
+#define MADV_SPLITHUGEMAP 26 /* Split huge page table entry in range once */
+#define MADV_PROMOTEHUGEMAP 27 /* Promote range into huge page table entry */
+
+#define MADV_SPLITHUGEPUDPAGE 28 /* Split huge PUD page in range once */
+#define MADV_PROMOTEHUGEPUDPAGE 29 /* Promote range into huge PUD page */
+
+#define MADV_SPLITHUGEPUDMAP 30 /* Split huge PUD page table entry in range once */
+#define MADV_PROMOTEHUGEPUDMAP 31 /* Promote range into huge PUD page table entry */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/mm/madvise.c b/mm/madvise.c
index 9cef96d633e8..be3818c06e17 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -624,6 +624,95 @@ static long madvise_memdefrag(struct vm_area_struct *vma,
*prev = vma;
return memdefrag_madvise(vma, &vma->vm_flags, behavior);
}
+
+/*
+ * madvise_split_promote_hugepage() - service the MADV_{SPLIT,PROMOTE}HUGE*
+ * madvise(2) behaviors over [start, end) of @vma.
+ *
+ * Walks the range in HPAGE_PMD_SIZE or HPAGE_PUD_SIZE steps (depending on
+ * @behavior) and, per step, either splits a huge mapping/page or promotes
+ * a region to a huge mapping/page.  *@prev is set to @vma for the caller's
+ * VMA iteration, matching the other madvise_* handlers.
+ *
+ * Returns 0 on success, -EBUSY if a page split fails, -ENODEV if no page
+ * was found (or a promotion region does not fully fit in the range), or
+ * -EINVAL for an unrecognized @behavior.  The walk stops at the first
+ * error.
+ *
+ * NOTE(review): @addr is not rounded to the relevant huge-page boundary
+ * before stepping, so an unaligned @start presumably makes the fixed-size
+ * increments visit unaligned addresses — confirm callers guarantee
+ * alignment or that the callees tolerate it.
+ */
+static long madvise_split_promote_hugepage(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end, int behavior)
+{
+ struct page *page;
+ unsigned long addr = start, haddr;
+ int ret = 0;
+ *prev = vma;
+
+ while (addr < end && !ret) {
+ switch (behavior) {
+ case MADV_SPLITHUGEMAP:
+ /* Split only the PMD page table mapping; the compound page stays intact. */
+ split_huge_pmd_address(vma, addr, false, NULL);
+ addr += HPAGE_PMD_SIZE;
+ break;
+ case MADV_SPLITHUGEPUDMAP:
+ /* Same as above, one level up: split the PUD mapping only. */
+ split_huge_pud_address(vma, addr, false, NULL);
+ addr += HPAGE_PUD_SIZE;
+ break;
+ case MADV_SPLITHUGEPAGE:
+ /* Split the compound page itself; split_huge_page() needs the
+ * page pinned (FOLL_GET) and locked. */
+ page = follow_page(vma, addr, FOLL_GET);
+ if (page) {
+ lock_page(page);
+ if (split_huge_page(page)) {
+ pr_debug("%s: fail to split page\n", __func__);
+ ret = -EBUSY;
+ }
+ unlock_page(page);
+ put_page(page);
+ } else
+ /* NOTE(review): -ENODEV for "no page mapped here" is unusual;
+ * other madvise paths typically skip holes — confirm intent. */
+ ret = -ENODEV;
+ addr += HPAGE_PMD_SIZE;
+ break;
+ case MADV_SPLITHUGEPUDPAGE:
+ /* PUD-sized analogue: split a 1GB compound page into PMD pages. */
+ page = follow_page(vma, addr, FOLL_GET);
+ if (page) {
+ lock_page(page);
+ if (split_huge_pud_page(page)) {
+ pr_debug("%s: fail to split pud page\n", __func__);
+ ret = -EBUSY;
+ }
+ unlock_page(page);
+ put_page(page);
+ } else
+ ret = -ENODEV;
+ addr += HPAGE_PUD_SIZE;
+ break;
+ case MADV_PROMOTEHUGEMAP:
+ /* Promote PTE-mapped pages to a PMD mapping, but only when the
+ * whole aligned PMD-sized region lies inside [start, end). */
+ haddr = addr & HPAGE_PMD_MASK;
+ if (haddr >= start && (haddr + HPAGE_PMD_SIZE) <= end)
+ promote_huge_pmd_address(vma, haddr);
+ else
+ ret = -ENODEV;
+ addr += HPAGE_PMD_SIZE;
+ break;
+ case MADV_PROMOTEHUGEPUDMAP:
+ /* Promote PMD mappings to a PUD mapping under the same
+ * fully-contained-region constraint. */
+ haddr = addr & HPAGE_PUD_MASK;
+ if (haddr >= start && (haddr + HPAGE_PUD_SIZE) <= end)
+ promote_huge_pud_address(vma, haddr);
+ else
+ ret = -ENODEV;
+ addr += HPAGE_PUD_SIZE;
+ break;
+ case MADV_PROMOTEHUGEPAGE:
+ /* Promote the pages themselves into a PMD-sized compound page. */
+ haddr = addr & HPAGE_PMD_MASK;
+ if (haddr >= start && (haddr + HPAGE_PMD_SIZE) <= end)
+ promote_huge_page_address(vma, haddr);
+ else
+ ret = -ENODEV;
+ addr += HPAGE_PMD_SIZE;
+ break;
+ case MADV_PROMOTEHUGEPUDPAGE:
+ /* Promote into a PUD-sized (1GB) compound page. */
+ haddr = addr & HPAGE_PUD_MASK;
+ if (haddr >= start && (haddr + HPAGE_PUD_SIZE) <= end)
+ promote_huge_pud_page_address(vma, haddr);
+ else
+ ret = -ENODEV;
+ addr += HPAGE_PUD_SIZE;
+ break;
+ default:
+ /* Caller passed a behavior this handler does not implement. */
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
#ifdef CONFIG_MEMORY_FAILURE
/*
* Error injection support for memory error handling.
@@ -708,6 +797,15 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
case MADV_MEMDEFRAG:
case MADV_NOMEMDEFRAG:
return madvise_memdefrag(vma, prev, start, end, behavior);
+ case MADV_SPLITHUGEPAGE:
+ case MADV_PROMOTEHUGEPAGE:
+ case MADV_SPLITHUGEMAP:
+ case MADV_PROMOTEHUGEMAP:
+ case MADV_SPLITHUGEPUDPAGE:
+ case MADV_PROMOTEHUGEPUDPAGE:
+ case MADV_SPLITHUGEPUDMAP:
+ case MADV_PROMOTEHUGEPUDMAP:
+ return madvise_split_promote_hugepage(vma, prev, start, end, behavior);
default:
return madvise_behavior(vma, prev, start, end, behavior);
}
@@ -744,6 +842,14 @@ madvise_behavior_valid(int behavior)
#endif
case MADV_MEMDEFRAG:
case MADV_NOMEMDEFRAG:
+ case MADV_SPLITHUGEPAGE:
+ case MADV_PROMOTEHUGEPAGE:
+ case MADV_SPLITHUGEMAP:
+ case MADV_PROMOTEHUGEMAP:
+ case MADV_SPLITHUGEPUDPAGE:
+ case MADV_PROMOTEHUGEPUDPAGE:
+ case MADV_SPLITHUGEPUDMAP:
+ case MADV_PROMOTEHUGEPUDMAP:
return true;
default:
--
2.20.1
next prev parent reply other threads:[~2019-02-15 22:10 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-02-15 22:08 [RFC PATCH 00/31] Generating physically contiguous memory after page allocation Zi Yan
2019-02-15 22:08 ` [RFC PATCH 01/31] mm: migrate: Add exchange_pages to exchange two lists of pages Zi Yan
2019-02-17 11:29 ` Matthew Wilcox
2019-02-18 17:31 ` Zi Yan
2019-02-18 17:42 ` Vlastimil Babka
2019-02-18 17:51 ` Zi Yan
2019-02-18 17:52 ` Matthew Wilcox
2019-02-18 17:59 ` Zi Yan
2019-02-19 7:42 ` Anshuman Khandual
2019-02-19 12:56 ` Matthew Wilcox
2019-02-20 4:38 ` Anshuman Khandual
2019-03-14 2:39 ` Zi Yan
2019-02-21 21:10 ` Jerome Glisse
2019-02-21 21:25 ` Zi Yan
2019-02-15 22:08 ` [RFC PATCH 02/31] mm: migrate: Add THP exchange support Zi Yan
2019-02-15 22:08 ` [RFC PATCH 03/31] mm: migrate: Add tmpfs " Zi Yan
2019-02-15 22:08 ` [RFC PATCH 04/31] mm: add mem_defrag functionality Zi Yan
2019-02-15 22:08 ` [RFC PATCH 05/31] mem_defrag: split a THP if either src or dst is THP only Zi Yan
2019-02-15 22:08 ` [RFC PATCH 06/31] mm: Make MAX_ORDER configurable in Kconfig for buddy allocator Zi Yan
2019-02-15 22:08 ` [RFC PATCH 07/31] mm: deallocate pages with order > MAX_ORDER Zi Yan
2019-02-15 22:08 ` [RFC PATCH 08/31] mm: add pagechain container for storing multiple pages Zi Yan
2019-02-15 22:08 ` [RFC PATCH 09/31] mm: thp: 1GB anonymous page implementation Zi Yan
2019-02-15 22:08 ` [RFC PATCH 10/31] mm: proc: add 1GB THP kpageflag Zi Yan
2019-02-15 22:08 ` [RFC PATCH 11/31] mm: debug: print compound page order in dump_page() Zi Yan
2019-02-15 22:08 ` [RFC PATCH 12/31] mm: stats: Separate PMD THP and PUD THP stats Zi Yan
2019-02-15 22:08 ` [RFC PATCH 13/31] mm: thp: 1GB THP copy on write implementation Zi Yan
2019-02-15 22:08 ` [RFC PATCH 14/31] mm: thp: handling 1GB THP reference bit Zi Yan
2019-02-15 22:08 ` [RFC PATCH 15/31] mm: thp: add 1GB THP split_huge_pud_page() function Zi Yan
2019-02-15 22:08 ` [RFC PATCH 16/31] mm: thp: check compound_mapcount of PMD-mapped PUD THPs at free time Zi Yan
2019-02-15 22:08 ` [RFC PATCH 17/31] mm: thp: split properly PMD-mapped PUD THP to PTE-mapped PUD THP Zi Yan
2019-02-15 22:08 ` [RFC PATCH 18/31] mm: page_vma_walk: teach it about PMD-mapped " Zi Yan
2019-02-15 22:08 ` [RFC PATCH 19/31] mm: thp: 1GB THP support in try_to_unmap() Zi Yan
2019-02-15 22:08 ` [RFC PATCH 20/31] mm: thp: split 1GB THPs at page reclaim Zi Yan
2019-02-15 22:08 ` [RFC PATCH 21/31] mm: thp: 1GB zero page shrinker Zi Yan
2019-02-15 22:08 ` [RFC PATCH 22/31] mm: thp: 1GB THP follow_p*d_page() support Zi Yan
2019-02-15 22:08 ` [RFC PATCH 23/31] mm: support 1GB THP pagemap support Zi Yan
2019-02-15 22:08 ` [RFC PATCH 24/31] sysctl: add an option to only print the head page virtual address Zi Yan
2019-02-15 22:08 ` [RFC PATCH 25/31] mm: thp: add a knob to enable/disable 1GB THPs Zi Yan
2019-02-15 22:08 ` [RFC PATCH 26/31] mm: thp: promote PTE-mapped THP to PMD-mapped THP Zi Yan
2019-02-15 22:08 ` [RFC PATCH 27/31] mm: thp: promote PMD-mapped PUD pages to PUD-mapped PUD pages Zi Yan
2019-02-15 22:08 ` [RFC PATCH 28/31] mm: vmstats: add page promotion stats Zi Yan
2019-02-15 22:08 ` Zi Yan [this message]
2019-02-15 22:08 ` [RFC PATCH 30/31] mm: mem_defrag: thp: PMD THP and PUD THP in-place promotion support Zi Yan
2019-02-15 22:08 ` [RFC PATCH 31/31] sysctl: toggle to promote PUD-mapped 1GB THP or not Zi Yan
2019-02-20 1:42 ` [RFC PATCH 00/31] Generating physically contiguous memory after page allocation Mike Kravetz
2019-02-20 2:33 ` Zi Yan
2019-02-20 3:18 ` Mike Kravetz
2019-02-20 5:19 ` Zi Yan
2019-02-20 5:27 ` Mike Kravetz
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190215220856.29749-30-zi.yan@sent.com \
--to=zi.yan@sent.com \
--cc=akpm@linux-foundation.org \
--cc=dave.hansen@linux.intel.com \
--cc=dnellans@nvidia.com \
--cc=jhubbard@nvidia.com \
--cc=kirill.shutemov@linux.intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=mgorman@techsingularity.net \
--cc=mhairgrove@nvidia.com \
--cc=mhocko@kernel.org \
--cc=nigupta@nvidia.com \
--cc=vbabka@suse.cz \
--cc=ziy@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox