linux-mm.kvack.org archive mirror
From: Minchan Kim <minchan@kernel.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm <linux-mm@kvack.org>,
	Joonsoo Kim <iamjoonsoo.kim@lge.com>,
	Vlastimil Babka <vbabka@suse.cz>, John Dias <joaodias@google.com>,
	Suren Baghdasaryan <surenb@google.com>,
	pullip.cho@samsung.com, Minchan Kim <minchan@kernel.org>
Subject: [RFC 3/7] mm: compaction: deal with upcoming high-order page splitting
Date: Fri, 14 Aug 2020 10:31:27 -0700	[thread overview]
Message-ID: <20200814173131.2803002-4-minchan@kernel.org> (raw)
In-Reply-To: <20200814173131.2803002-1-minchan@kernel.org>

When compaction isolates free pages, it needs to consider the order of
the freed pages and how to split them into sub-pages in order to
support the upcoming high-order page bulk allocation. Since we already
have primitive functions to deal with high-order page splitting, this
patch introduces cc->isolate_order to indicate what order of pages the
API user wants to allocate. Free pages with order greater than or
equal to cc->isolate_order are isolated and then split into sub-pages
of order cc->isolate_order.

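As a rough illustration (a standalone userspace sketch, not kernel
code; the orders and the starting pfn below are made-up values), the
arithmetic split_map_pages() performs with a non-zero split_order
looks like this:

#include <stdio.h>

int main(void)
{
	unsigned int order = 4;		/* e.g. an order-4 (16-page) free chunk */
	unsigned int split_order = 2;	/* caller asked for order-2 sub-pages */
	unsigned long pfn = 0x1000;	/* hypothetical starting pfn */
	unsigned int nr = 1U << (order - split_order);
	unsigned int i;

	/* each sub-page starts 1 << split_order base pages after the last */
	for (i = 0; i < nr; i++) {
		printf("sub-page %u: pfn %#lx, order %u\n", i, pfn, split_order);
		pfn += 1UL << split_order;
	}
	return 0;
}
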
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 mm/compaction.c | 42 ++++++++++++++++++++++++++++--------------
 mm/internal.h   |  1 +
 2 files changed, 29 insertions(+), 14 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index f31799a841f2..76f380cb801d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -68,7 +68,8 @@ static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
 #define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
 #endif
 
-static unsigned long release_freepages(struct list_head *freelist)
+static unsigned long release_freepages(struct list_head *freelist,
+					unsigned int order)
 {
 	struct page *page, *next;
 	unsigned long high_pfn = 0;
@@ -76,7 +77,7 @@ static unsigned long release_freepages(struct list_head *freelist)
 	list_for_each_entry_safe(page, next, freelist, lru) {
 		unsigned long pfn = page_to_pfn(page);
 		list_del(&page->lru);
-		__free_page(page);
+		__free_pages(page, order);
 		if (pfn > high_pfn)
 			high_pfn = pfn;
 	}
@@ -84,7 +85,7 @@ static unsigned long release_freepages(struct list_head *freelist)
 	return high_pfn;
 }
 
-static void split_map_pages(struct list_head *list)
+static void split_map_pages(struct list_head *list, unsigned int split_order)
 {
 	unsigned int i, order, nr_pages;
 	struct page *page, *next;
@@ -94,15 +95,15 @@ static void split_map_pages(struct list_head *list)
 		list_del(&page->lru);
 
 		order = page_private(page);
-		nr_pages = 1 << order;
+		nr_pages = 1 << (order - split_order);
 
 		post_alloc_hook(page, order, __GFP_MOVABLE);
-		if (order)
-			split_page_by_order(page, order, 0);
+		if (order > split_order)
+			split_page_by_order(page, order, split_order);
 
 		for (i = 0; i < nr_pages; i++) {
 			list_add(&page->lru, &tmp_list);
-			page++;
+			page += 1 << split_order;
 		}
 	}
 
@@ -547,8 +548,10 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
 }
 
 /*
- * Isolate free pages onto a private freelist. If @strict is true, will abort
- * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
+ * Isolate free pages onto a private freelist if the order of the page is
+ * greater than or equal to cc->isolate_order. If @strict is true, will
+ * abort returning 0 on any invalid PFNs, pages with order lower than
+ * cc->isolate_order, or non-free pages inside of the pageblock
  * (even though it may still end up isolating some pages).
  */
 static unsigned long isolate_freepages_block(struct compact_control *cc,
@@ -625,8 +628,19 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 				goto isolate_fail;
 		}
 
-		/* Found a free page, will break it into order-0 pages */
+		/*
+		 * Found a free page. Isolate and possibly split it into
+		 * sub-pages of isolate_order if its order is greater than
+		 * or equal to cc->isolate_order. Otherwise, skip this page
+		 * and keep scanning further pages to isolate, unless
+		 * strict is true.
+		 */
 		order = page_order(page);
+		if (order < cc->isolate_order) {
+			blockpfn += (1UL << order) - 1;
+			cursor += (1UL << order) - 1;
+			goto isolate_fail;
+		}
 		isolated = __isolate_free_page(page, order);
 		if (!isolated)
 			break;
@@ -752,11 +766,11 @@ isolate_freepages_range(struct compact_control *cc,
 	}
 
 	/* __isolate_free_page() does not map the pages */
-	split_map_pages(&freelist);
+	split_map_pages(&freelist, cc->isolate_order);
 
 	if (pfn < end_pfn) {
 		/* Loop terminated early, cleanup. */
-		release_freepages(&freelist);
+		release_freepages(&freelist, cc->isolate_order);
 		return 0;
 	}
 
@@ -1564,7 +1578,7 @@ static void isolate_freepages(struct compact_control *cc)
 
 splitmap:
 	/* __isolate_free_page() does not map the pages */
-	split_map_pages(freelist);
+	split_map_pages(freelist, 0);
 }
 
 /*
@@ -2376,7 +2390,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
 	 * so we don't leave any returned pages behind in the next attempt.
 	 */
 	if (cc->nr_freepages > 0) {
-		unsigned long free_pfn = release_freepages(&cc->freepages);
+		unsigned long free_pfn = release_freepages(&cc->freepages, 0);
 
 		cc->nr_freepages = 0;
 		VM_BUG_ON(free_pfn == 0);
diff --git a/mm/internal.h b/mm/internal.h
index 10c677655912..5f1e9d76a623 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -244,6 +244,7 @@ struct compact_control {
 	bool contended;			/* Signal lock or sched contention */
 	bool rescan;			/* Rescanning the same pageblock */
 	bool alloc_contig;		/* alloc_contig_range allocation */
+	int isolate_order;		/* minimum order isolated from buddy */
 };
 
 /*
-- 
2.28.0.220.ged08abb693-goog




Thread overview: 27+ messages
2020-08-14 17:31 [RFC 0/7] Support high-order page bulk allocation Minchan Kim
2020-08-14 17:31 ` [RFC 1/7] mm: page_owner: split page by order Minchan Kim
2020-08-14 17:31 ` [RFC 2/7] mm: introduce split_page_by_order Minchan Kim
2020-08-14 17:31 ` Minchan Kim [this message]
2020-08-14 17:31 ` [RFC 4/7] mm: factor __alloc_contig_range out Minchan Kim
2020-08-14 17:31 ` [RFC 5/7] mm: introduce alloc_pages_bulk API Minchan Kim
2020-08-17 17:40   ` David Hildenbrand
2020-08-14 17:31 ` [RFC 6/7] mm: make alloc_pages_bulk best effort Minchan Kim
2020-08-14 17:31 ` [RFC 7/7] mm/page_isolation: avoid drain_all_pages for alloc_pages_bulk Minchan Kim
2020-08-14 17:40 ` [RFC 0/7] Support high-order page bulk allocation Matthew Wilcox
2020-08-14 20:55   ` Minchan Kim
2020-08-18  2:16     ` Cho KyongHo
2020-08-18  9:22     ` Cho KyongHo
2020-08-16 12:31 ` David Hildenbrand
2020-08-17 15:27   ` Minchan Kim
2020-08-17 15:45     ` David Hildenbrand
2020-08-17 16:30       ` Minchan Kim
2020-08-17 16:44         ` David Hildenbrand
2020-08-17 17:03           ` David Hildenbrand
2020-08-17 23:34           ` Minchan Kim
2020-08-18  7:42             ` Nicholas Piggin
2020-08-18  7:49             ` David Hildenbrand
2020-08-18 15:15               ` Minchan Kim
2020-08-18 15:58                 ` Matthew Wilcox
2020-08-18 16:22                   ` David Hildenbrand
2020-08-18 16:49                     ` Minchan Kim
2020-08-19  0:27                     ` Yang Shi
