From: npiggin@suse.de
To: akpm@linux-foundation.org
Cc: Nishanth Aravamudan <nacc@us.ibm.com>,
linux-mm@kvack.org, Andrew Hastings <abh@cray.com>,
kniht@us.ibm.com, andi@firstfloor.org,
joachim.deguara@amd.com
Subject: [patch 09/21] hugetlb: support larger than MAX_ORDER
Date: Tue, 03 Jun 2008 20:00:05 +1000
Message-ID: <20080603100939.343814851@amd.local0.net>
In-Reply-To: <20080603095956.781009952@amd.local0.net>
This is needed on x86-64 to handle GB pages in hugetlbfs, because it is
not practical to enlarge MAX_ORDER to cover 1GB allocations (with 4kB base
pages a 1GB page is order 18, while MAX_ORDER is normally 11).
Instead, the 1GB pages are only allocated at boot, using the bootmem
allocator and the hugepages=... option.
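For illustration, assuming the hugepagesz= option that a later patch in
this series adds (so this exact syntax is an assumption at this point in
the series), booting with:
	hugepagesz=1G hugepages=4
on the kernel command line reserves four 1GB pages from bootmem.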
These 1GB bootmem pages are never freed. In theory it would be possible
to implement that, with some complications, but since it would be a one-way
street (>= MAX_ORDER pages cannot be allocated again later), I decided not
to for now.
The >= MAX_ORDER code is not ifdef'ed per architecture. It is not very big,
and the ifdef ugliness did not seem worth it.
Known problems: /proc/meminfo and "free" do not include the memory
allocated for GB pages in "Total". This is a little confusing for the
user.
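For illustration (hypothetical numbers), on a 16GB machine booted with two
1GB pages reserved you would see something like:
	$ grep -E 'MemTotal|HugePages_Total' /proc/meminfo
	MemTotal:       14680064 kB    <- does not include the 2GB of gigantic pages
	HugePages_Total:       2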
Acked-by: Andrew Hastings <abh@cray.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
---
mm/hugetlb.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 72 insertions(+), 2 deletions(-)
Index: linux-2.6/mm/hugetlb.c
===================================================================
--- linux-2.6.orig/mm/hugetlb.c 2008-06-03 19:56:45.000000000 +1000
+++ linux-2.6/mm/hugetlb.c 2008-06-03 19:56:50.000000000 +1000
@@ -14,6 +14,7 @@
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
+#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <asm/page.h>
@@ -307,7 +308,7 @@ static void free_huge_page(struct page *
INIT_LIST_HEAD(&page->lru);
spin_lock(&hugetlb_lock);
- if (h->surplus_huge_pages_node[nid]) {
+ if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
update_and_free_page(h, page);
h->surplus_huge_pages--;
h->surplus_huge_pages_node[nid]--;
@@ -368,6 +369,9 @@ static struct page *alloc_fresh_huge_pag
{
struct page *page;
+ if (h->order >= MAX_ORDER)
+ return NULL;
+
page = alloc_pages_node(nid,
htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
__GFP_REPEAT|__GFP_NOWARN,
@@ -434,6 +438,9 @@ static struct page *alloc_buddy_huge_pag
struct page *page;
unsigned int nid;
+ if (h->order >= MAX_ORDER)
+ return NULL;
+
/*
* Assume we will successfully allocate the surplus page to
* prevent racing processes from causing the surplus to exceed
@@ -610,6 +617,10 @@ static void return_unused_surplus_pages(
/* Uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages;
+ /* Cannot return gigantic pages currently */
+ if (h->order >= MAX_ORDER)
+ return;
+
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
while (remaining_iterations-- && nr_pages) {
@@ -838,6 +849,63 @@ static struct page *alloc_huge_page(stru
return page;
}
+static __initdata LIST_HEAD(huge_boot_pages);
+
+struct huge_bootmem_page {
+ struct list_head list;
+ struct hstate *hstate;
+};
+
+static int __init alloc_bootmem_huge_page(struct hstate *h)
+{
+ struct huge_bootmem_page *m;
+ int nr_nodes = nodes_weight(node_online_map);
+
+ while (nr_nodes) {
+ void *addr;
+
+ addr = __alloc_bootmem_node_nopanic(
+ NODE_DATA(h->hugetlb_next_nid),
+ huge_page_size(h), huge_page_size(h), 0);
+
+ if (addr) {
+ /*
+ * Use the beginning of the huge page to store the
+ * huge_bootmem_page struct (until gather_bootmem
+ * puts them into the mem_map).
+ */
+ m = addr;
+ if (m)
+ goto found;
+ }
+ hstate_next_node(h);
+ nr_nodes--;
+ }
+ return 0;
+
+found:
+ BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
+ /* Put them into a private list first because mem_map is not up yet */
+ list_add(&m->list, &huge_boot_pages);
+ m->hstate = h;
+ return 1;
+}
+
+/* Put bootmem huge pages into the standard lists after mem_map is up */
+static void __init gather_bootmem_prealloc(void)
+{
+ struct huge_bootmem_page *m;
+
+ list_for_each_entry(m, &huge_boot_pages, list) {
+ struct page *page = virt_to_page(m);
+ struct hstate *h = m->hstate;
+ __ClearPageReserved(page);
+ WARN_ON(page_count(page) != 1);
+ prep_compound_page(page, h->order);
+ prep_new_huge_page(h, page, page_to_nid(page));
+ }
+}
+
static void __init hugetlb_init_one_hstate(struct hstate *h)
{
unsigned long i;
@@ -848,7 +916,10 @@ static void __init hugetlb_init_one_hsta
h->hugetlb_next_nid = first_node(node_online_map);
for (i = 0; i < h->max_huge_pages; ++i) {
- if (!alloc_fresh_huge_page(h))
+ if (h->order >= MAX_ORDER) {
+ if (!alloc_bootmem_huge_page(h))
+ break;
+ } else if (!alloc_fresh_huge_page(h))
break;
}
h->max_huge_pages = h->free_huge_pages = h->nr_huge_pages = i;
@@ -881,6 +952,9 @@ static void try_to_free_low(struct hstat
{
int i;
+ if (h->order >= MAX_ORDER)
+ return;
+
for (i = 0; i < MAX_NUMNODES; ++i) {
struct page *page, *next;
struct list_head *freel = &h->hugepage_freelists[i];
@@ -907,6 +981,9 @@ static unsigned long set_max_huge_pages(
{
unsigned long min_count, ret;
+ if (h->order >= MAX_ORDER)
+ return h->max_huge_pages;
+
/*
* Increase the pool size
* First take pages out of surplus state. Then make up the
@@ -1129,6 +1206,8 @@ static int __init hugetlb_init(void)
hugetlb_init_hstates();
+ gather_bootmem_prealloc();
+
report_hugepages();
hugetlb_sysfs_init();
--
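A usage note: because set_max_huge_pages() simply returns h->max_huge_pages
for >= MAX_ORDER hstates, the gigantic pool cannot be grown or shrunk after
boot. A hypothetical session (sysfs paths as added by the sysfs interface
patch earlier in this series; exact layout assumed):
	$ cat /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
	2
	$ echo 4 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
	$ cat /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
	2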
Thread overview: 49+ messages
2008-06-03 9:59 [patch 00/21] hugetlb multi size, giant hugetlb support, etc npiggin
2008-06-03 9:59 ` [patch 01/21] hugetlb: factor out prep_new_huge_page npiggin
2008-06-03 9:59 ` [patch 02/21] hugetlb: modular state npiggin
2008-06-03 10:58 ` [patch 02/21] hugetlb: modular state (take 2) Nick Piggin
2008-06-03 9:59 ` [patch 03/21] hugetlb: multiple hstates npiggin
2008-06-03 10:00 ` [patch 04/21] hugetlbfs: per mount hstates npiggin
2008-06-03 10:00 ` [patch 05/21] hugetlb: new sysfs interface npiggin
2008-06-03 10:00 ` [patch 06/21] hugetlb: abstract numa round robin selection npiggin
2008-06-03 10:00 ` [patch 07/21] mm: introduce non panic alloc_bootmem npiggin
2008-06-03 10:00 ` [patch 08/21] mm: export prep_compound_page to mm npiggin
2008-06-03 10:00 ` npiggin [this message]
2008-06-03 10:00 ` [patch 10/21] hugetlb: support boot allocate different sizes npiggin
2008-06-03 10:00 ` [patch 11/21] hugetlb: printk cleanup npiggin
2008-06-03 10:00 ` [patch 12/21] hugetlb: introduce pud_huge npiggin
2008-06-03 10:00 ` [patch 13/21] x86: support GB hugepages on 64-bit npiggin
2008-06-03 10:00 ` [patch 14/21] x86: add hugepagesz option " npiggin
2008-06-03 17:48 ` Dave Hansen
2008-06-03 18:24 ` Andi Kleen
2008-06-03 18:59 ` Dave Hansen
2008-06-03 20:57 ` Andi Kleen
2008-06-03 21:27 ` Dave Hansen
2008-06-04 0:06 ` Andi Kleen
2008-06-04 1:04 ` Nick Piggin
2008-06-04 16:01 ` Dave Hansen
2008-06-06 16:09 ` Dave Hansen
2008-06-05 23:15 ` Nishanth Aravamudan
2008-06-06 0:29 ` Andi Kleen
2008-06-04 1:10 ` Nick Piggin
2008-06-05 23:12 ` Nishanth Aravamudan
2008-06-05 23:23 ` Nishanth Aravamudan
2008-06-03 19:00 ` Dave Hansen
2008-06-03 10:00 ` [patch 15/21] hugetlb: override default huge page size npiggin
2008-06-03 10:00 ` [patch 16/21] hugetlb: allow arch overried hugepage allocation npiggin
2008-06-03 10:00 ` [patch 17/21] powerpc: function to allocate gigantic hugepages npiggin
2008-06-03 10:00 ` [patch 18/21] powerpc: scan device tree for gigantic pages npiggin
2008-06-03 10:00 ` [patch 19/21] powerpc: define support for 16G hugepages npiggin
2008-06-03 10:00 ` [patch 20/21] fs: check for statfs overflow npiggin
2008-06-03 10:00 ` [patch 21/21] powerpc: support multiple hugepage sizes npiggin
2008-06-03 10:29 ` [patch 1/1] x86: get_user_pages_lockless support 1GB hugepages Nick Piggin
2008-06-03 10:57 ` [patch 00/21] hugetlb multi size, giant hugetlb support, etc Nick Piggin
2008-06-06 17:12 ` Andy Whitcroft
2008-06-04 8:29 ` Andrew Morton
2008-06-04 9:35 ` Nick Piggin
2008-06-04 9:46 ` Andrew Morton
2008-06-04 11:04 ` Nick Piggin
2008-06-04 11:33 ` Nick Piggin
2008-06-04 11:57 ` Andi Kleen
2008-06-04 18:39 ` Andrew Morton
2008-06-04 11:29 [patch 00/21] hugetlb patches resend npiggin
2008-06-04 11:29 ` [patch 09/21] hugetlb: support larger than MAX_ORDER npiggin