* [PATCH 1/3] hugetlb: numafy several functions
@ 2008-02-06 23:15 Nishanth Aravamudan
2008-02-06 23:18 ` [PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute Nishanth Aravamudan
` (2 more replies)
0 siblings, 3 replies; 12+ messages in thread
From: Nishanth Aravamudan @ 2008-02-06 23:15 UTC (permalink / raw)
To: wli; +Cc: agl, lee.schermerhorn, linux-mm
Add node-parameterized helpers for dequeue_huge_page,
alloc_fresh_huge_page, adjust_pool_surplus and try_to_free_low. Also
have update_and_free_page() take a nid parameter. These changes are
necessary to add sysfs attributes to specify the number of hugepages on
NUMA nodes.
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d9a3803..d1f6c5a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -70,6 +70,20 @@ static void enqueue_huge_page(struct page *page)
free_huge_pages_node[nid]++;
}
+static struct page *dequeue_huge_page_node(struct vm_area_struct *vma,
+ int nid)
+{
+ struct page *page;
+
+ page = list_entry(hugepage_freelists[nid].next, struct page, lru);
+ list_del(&page->lru);
+ free_huge_pages--;
+ free_huge_pages_node[nid]--;
+ if (vma && vma->vm_flags & VM_MAYSHARE)
+ resv_huge_pages--;
+ return page;
+}
+
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
unsigned long address)
{
@@ -84,13 +98,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
nid = zone_to_nid(*z);
if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
!list_empty(&hugepage_freelists[nid])) {
- page = list_entry(hugepage_freelists[nid].next,
- struct page, lru);
- list_del(&page->lru);
- free_huge_pages--;
- free_huge_pages_node[nid]--;
- if (vma && vma->vm_flags & VM_MAYSHARE)
- resv_huge_pages--;
+ page = dequeue_huge_page_node(vma, nid);
break;
}
}
@@ -98,11 +106,11 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
return page;
}
-static void update_and_free_page(struct page *page)
+static void update_and_free_page(int nid, struct page *page)
{
int i;
nr_huge_pages--;
- nr_huge_pages_node[page_to_nid(page)]--;
+ nr_huge_pages_node[nid]--;
for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
@@ -124,7 +132,7 @@ static void free_huge_page(struct page *page)
spin_lock(&hugetlb_lock);
if (surplus_huge_pages_node[nid]) {
- update_and_free_page(page);
+ update_and_free_page(nid, page);
surplus_huge_pages--;
surplus_huge_pages_node[nid]--;
} else {
@@ -141,6 +149,18 @@ static void free_huge_page(struct page *page)
* balanced by operating on them in a round-robin fashion.
* Returns 1 if an adjustment was made.
*/
+static int adjust_pool_surplus_node(int delta, int nid)
+{
+ if (delta < 0 && !surplus_huge_pages_node[nid])
+ return 0;
+ if (delta > 0 && surplus_huge_pages_node[nid] >=
+ nr_huge_pages_node[nid])
+ return 0;
+ surplus_huge_pages += delta;
+ surplus_huge_pages_node[nid] += delta;
+ return 1;
+}
+
static int adjust_pool_surplus(int delta)
{
static int prev_nid;
@@ -152,19 +172,9 @@ static int adjust_pool_surplus(int delta)
nid = next_node(nid, node_online_map);
if (nid == MAX_NUMNODES)
nid = first_node(node_online_map);
-
- /* To shrink on this node, there must be a surplus page */
- if (delta < 0 && !surplus_huge_pages_node[nid])
- continue;
- /* Surplus cannot exceed the total number of pages */
- if (delta > 0 && surplus_huge_pages_node[nid] >=
- nr_huge_pages_node[nid])
- continue;
-
- surplus_huge_pages += delta;
- surplus_huge_pages_node[nid] += delta;
- ret = 1;
- break;
+ ret = adjust_pool_surplus_node(delta, nid);
+ if (ret == 1)
+ break;
} while (nid != prev_nid);
prev_nid = nid;
@@ -384,7 +394,7 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
page = list_entry(hugepage_freelists[nid].next,
struct page, lru);
list_del(&page->lru);
- update_and_free_page(page);
+ update_and_free_page(nid, page);
free_huge_pages--;
free_huge_pages_node[nid]--;
surplus_huge_pages--;
@@ -489,25 +499,35 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
+static void try_to_free_low_node(unsigned long count, int nid)
+{
+ struct page *page, *next;
+ list_for_each_entry_safe(page, next, &hugepage_freelists[nid], lru) {
+ if (PageHighMem(page))
+ continue;
+ list_del(&page->lru);
+ update_and_free_page(nid, page);
+ free_huge_pages--;
+ free_huge_pages_node[nid]--;
+ if (count >= nr_huge_pages_node[nid])
+ return;
+ }
+}
+
static void try_to_free_low(unsigned long count)
{
int i;
for (i = 0; i < MAX_NUMNODES; ++i) {
- struct page *page, *next;
- list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
- if (count >= nr_huge_pages)
- return;
- if (PageHighMem(page))
- continue;
- list_del(&page->lru);
- update_and_free_page(page);
- free_huge_pages--;
- free_huge_pages_node[page_to_nid(page)]--;
- }
+ try_to_free_low_node(count, i);
+ if (count >= nr_huge_pages)
+ return;
}
}
#else
+static inline void try_to_free_low_node(unsigned long count, int nid)
+{
+}
static inline void try_to_free_low(unsigned long count)
{
}
@@ -572,7 +592,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
struct page *page = dequeue_huge_page(NULL, 0);
if (!page)
break;
- update_and_free_page(page);
+ update_and_free_page(page_to_nid(page), page);
}
while (count < persistent_huge_pages) {
if (!adjust_pool_surplus(1))
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute
2008-02-06 23:15 [PATCH 1/3] hugetlb: numafy several functions Nishanth Aravamudan
@ 2008-02-06 23:18 ` Nishanth Aravamudan
2008-02-06 23:19 ` [PATCH 3/3] hugetlb: interleave dequeuing of huge pages Nishanth Aravamudan
` (2 more replies)
2008-02-07 18:35 ` [PATCH 1/3] hugetlb: numafy several functions Lee Schermerhorn
2008-02-08 16:37 ` Adam Litke
2 siblings, 3 replies; 12+ messages in thread
From: Nishanth Aravamudan @ 2008-02-06 23:18 UTC (permalink / raw)
To: wli; +Cc: agl, lee.schermerhorn, linux-mm, greg
Allow specifying the number of hugepages to allocate on a particular
node. Our current global sysctl will try its best to put hugepages
equally on each node, but that may not always be desired. This allows
the admin to control the layout of hugepage allocation at a finer level
(while not breaking the existing interface). Add callbacks in the sysfs
node registration and unregistration functions into hugetlb to add the
nr_hugepages attribute, which is a no-op if !NUMA or !HUGETLB.
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
---
Greg, do I need to add documentation for this sysfs attribute to
Documentation/ABI? I'm not sure if I should just add a file in testing/
for just this attribute or should defer and create documentation for all
of the /sys/devices/system/node information?
I was hoping to have the per-node nr_overcommit_hugepages done by this
point, however it's a bit trickier. Do folks (Lee?) think it would
actually be needed for their platforms? It adds quite a bit of locking
overhead to the dynamic pool path to keep the counters sane, which might
obviate the relatively high performance of the dynamic path.
diff --git a/drivers/base/node.c b/drivers/base/node.c
index e59861f..daf5b2b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -152,6 +152,7 @@ int register_node(struct node *node, int num, struct node *parent)
sysdev_create_file(&node->sysdev, &attr_meminfo);
sysdev_create_file(&node->sysdev, &attr_numastat);
sysdev_create_file(&node->sysdev, &attr_distance);
+ hugetlb_register_node(node);
}
return error;
}
@@ -169,6 +170,7 @@ void unregister_node(struct node *node)
sysdev_remove_file(&node->sysdev, &attr_meminfo);
sysdev_remove_file(&node->sysdev, &attr_numastat);
sysdev_remove_file(&node->sysdev, &attr_distance);
+ hugetlb_unregister_node(node);
sysdev_unregister(&node->sysdev);
}
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 7ca198b..c85796c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -6,7 +6,9 @@
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
+#include <linux/node.h>
#include <linux/shm.h>
+#include <linux/sysdev.h>
#include <asm/tlbflush.h>
struct ctl_table;
@@ -26,6 +28,13 @@ void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned lon
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
+#ifdef CONFIG_NUMA
+int hugetlb_register_node(struct node *);
+void hugetlb_unregister_node(struct node *);
+#else
+#define hugetlb_register_node(node) do {} while(0)
+#define hugetlb_unregister_node(node) do {} while(0)
+#endif
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access);
@@ -114,6 +123,8 @@ static inline unsigned long hugetlb_total_pages(void)
#define unmap_hugepage_range(vma, start, end) BUG()
#define hugetlb_report_meminfo(buf) 0
#define hugetlb_report_node_meminfo(n, buf) 0
+#define hugetlb_register_node(node) do {} while(0)
+#define hugetlb_unregister_node(node) do {} while(0)
#define follow_huge_pmd(mm, addr, pmd, write) NULL
#define prepare_hugepage_range(addr,len) (-EINVAL)
#define pmd_huge(x) 0
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d1f6c5a..05dac46 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -497,7 +497,6 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
return nr;
}
-#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low_node(unsigned long count, int nid)
{
@@ -513,7 +512,14 @@ static void try_to_free_low_node(unsigned long count, int nid)
return;
}
}
+#else
+static inline void try_to_free_low_node(unsigned long count, int nid)
+{
+}
+#endif
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
int i;
@@ -525,9 +531,6 @@ static void try_to_free_low(unsigned long count)
}
}
#else
-static inline void try_to_free_low_node(unsigned long count, int nid)
-{
-}
static inline void try_to_free_low(unsigned long count)
{
}
@@ -661,6 +664,117 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
nid, free_huge_pages_node[nid]);
}
+#ifdef CONFIG_NUMA
+static ssize_t hugetlb_read_nr_hugepages_node(struct sys_device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", nr_huge_pages_node[dev->id]);
+}
+
+#define persistent_huge_pages_node(nid) \
+ (nr_huge_pages_node[nid] - surplus_huge_pages_node[nid])
+static ssize_t hugetlb_write_nr_hugepages_node(struct sys_device *dev,
+ const char *buf, size_t count)
+{
+ int nid = dev->id;
+ unsigned long target;
+ unsigned long free_on_other_nodes;
+ unsigned long nr_huge_pages_req = simple_strtoul(buf, NULL, 10);
+ ssize_t ret;
+
+ /*
+ * Increase the pool size on the node
+ * First take pages out of surplus state. Then make up the
+ * remaining difference by allocating fresh huge pages.
+ *
+ * We might race with alloc_buddy_huge_page() here and be unable
+ * to convert a surplus huge page to a normal huge page. That is
+ * not critical, though, it just means the overall size of the
+ * pool might be one hugepage larger than it needs to be, but
+ * within all the constraints specified by the sysctls.
+ */
+ spin_lock(&hugetlb_lock);
+ while (surplus_huge_pages_node[nid] &&
+ nr_huge_pages_req > persistent_huge_pages_node(nid)) {
+ if (!adjust_pool_surplus_node(-1, nid))
+ break;
+ }
+
+ while (nr_huge_pages_req > persistent_huge_pages_node(nid)) {
+ struct page *ret;
+ /*
+ * If this allocation races such that we no longer need the
+ * page, free_huge_page will handle it by freeing the page
+ * and reducing the surplus.
+ */
+ spin_unlock(&hugetlb_lock);
+ ret = alloc_fresh_huge_page_node(nid);
+ spin_lock(&hugetlb_lock);
+ if (!ret)
+ goto out;
+
+ }
+
+ if (nr_huge_pages_req >= nr_huge_pages_node[nid])
+ goto out;
+
+ /*
+ * Decrease the pool size
+ * First return free pages to the buddy allocator (being careful
+ * to keep enough around to satisfy reservations). Then place
+ * pages into surplus state as needed so the pool will shrink
+ * to the desired size as pages become free.
+ *
+ * By placing pages into the surplus state independent of the
+ * overcommit value, we are allowing the surplus pool size to
+ * exceed overcommit. There are few sane options here. Since
+ * alloc_buddy_huge_page() is checking the global counter,
+ * though, we'll note that we're not allowed to exceed surplus
+ * and won't grow the pool anywhere else. Not until one of the
+ * sysctls are changed, or the surplus pages go out of use.
+ */
+ free_on_other_nodes = free_huge_pages - free_huge_pages_node[nid];
+ if (free_on_other_nodes >= resv_huge_pages) {
+ /* other nodes can satisfy reserve */
+ target = nr_huge_pages_req;
+ } else {
+ /* this node needs some free to satisfy reserve */
+ target = max((resv_huge_pages - free_on_other_nodes),
+ nr_huge_pages_req);
+ }
+ try_to_free_low_node(nid, target);
+ while (target < persistent_huge_pages_node(nid)) {
+ struct page *page = dequeue_huge_page_node(NULL, nid);
+ if (!page)
+ break;
+ update_and_free_page(nid, page);
+ }
+
+ while (target < persistent_huge_pages_node(nid)) {
+ if (!adjust_pool_surplus_node(1, nid))
+ break;
+ }
+out:
+ ret = persistent_huge_pages_node(nid);
+ spin_unlock(&hugetlb_lock);
+ return ret;
+}
+
+static SYSDEV_ATTR(nr_hugepages, S_IRUGO | S_IWUSR,
+ hugetlb_read_nr_hugepages_node,
+ hugetlb_write_nr_hugepages_node);
+
+int hugetlb_register_node(struct node *node)
+{
+ return sysdev_create_file(&node->sysdev, &attr_nr_hugepages);
+}
+
+void hugetlb_unregister_node(struct node *node)
+{
+ sysdev_remove_file(&node->sysdev, &attr_nr_hugepages);
+}
+#endif
+
/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 3/3] hugetlb: interleave dequeuing of huge pages
2008-02-06 23:18 ` [PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute Nishanth Aravamudan
@ 2008-02-06 23:19 ` Nishanth Aravamudan
2008-02-06 23:23 ` [UPDATED][PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute Nishanth Aravamudan
2008-02-07 0:03 ` [PATCH " Greg KH
2 siblings, 0 replies; 12+ messages in thread
From: Nishanth Aravamudan @ 2008-02-06 23:19 UTC (permalink / raw)
To: wli; +Cc: agl, lee.schermerhorn, linux-mm, greg
Currently, when shrinking the hugetlb pool, we free all of the pages on
node 0, then all the pages on node 1, etc. Instead, we interleave over
the nodes with memory. If some particular node should be cleared
first, the per-node sysfs attribute can be used for finer-grained
control. This also helps with keeping the pool balanced as we change the
pool at run-time.
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 05dac46..f7cd942 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -84,7 +84,38 @@ static struct page *dequeue_huge_page_node(struct vm_area_struct *vma,
return page;
}
-static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+static struct page *dequeue_huge_page(void)
+{
+ struct page *page = NULL;
+ int start_nid;
+ int next_nid;
+
+ start_nid = hugetlb_next_nid;
+
+ do {
+ if (!list_empty(&hugepage_freelists[hugetlb_next_nid]))
+ page = dequeue_huge_page_node(NULL, hugetlb_next_nid);
+ /*
+ * Use a helper variable to find the next node and then
+ * copy it back to hugetlb_next_nid afterwards:
+ * otherwise there's a window in which a racer might
+ * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+ * But we don't need to use a spin_lock here: it really
+ * doesn't matter if occasionally a racer chooses the
+ * same nid as we do. Move nid forward in the mask even
+ * if we just successfully allocated a hugepage so that
+ * the next caller gets hugepages on the next node.
+ */
+ next_nid = next_node(hugetlb_next_nid, node_online_map);
+ if (next_nid == MAX_NUMNODES)
+ next_nid = first_node(node_online_map);
+ hugetlb_next_nid = next_nid;
+ } while (!page && hugetlb_next_nid != start_nid);
+
+ return page;
+}
+
+static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
unsigned long address)
{
int nid;
@@ -411,7 +442,7 @@ static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
struct page *page;
spin_lock(&hugetlb_lock);
- page = dequeue_huge_page(vma, addr);
+ page = dequeue_huge_page_vma(vma, addr);
spin_unlock(&hugetlb_lock);
return page ? page : ERR_PTR(-VM_FAULT_OOM);
}
@@ -426,7 +457,7 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
spin_lock(&hugetlb_lock);
if (free_huge_pages > resv_huge_pages)
- page = dequeue_huge_page(vma, addr);
+ page = dequeue_huge_page_vma(vma, addr);
spin_unlock(&hugetlb_lock);
if (!page) {
page = alloc_buddy_huge_page(vma, addr);
@@ -592,7 +623,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
min_count = max(count, min_count);
try_to_free_low(min_count);
while (min_count < persistent_huge_pages) {
- struct page *page = dequeue_huge_page(NULL, 0);
+ struct page *page = dequeue_huge_page();
if (!page)
break;
update_and_free_page(page_to_nid(page), page);
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
* [UPDATED][PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute
2008-02-06 23:18 ` [PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute Nishanth Aravamudan
2008-02-06 23:19 ` [PATCH 3/3] hugetlb: interleave dequeuing of huge pages Nishanth Aravamudan
@ 2008-02-06 23:23 ` Nishanth Aravamudan
2008-02-07 0:03 ` [PATCH " Greg KH
2 siblings, 0 replies; 12+ messages in thread
From: Nishanth Aravamudan @ 2008-02-06 23:23 UTC (permalink / raw)
To: wli; +Cc: agl, lee.schermerhorn, linux-mm, greg
On 06.02.2008 [15:18:45 -0800], Nishanth Aravamudan wrote:
> hugetlb: add per-node nr_hugepages sysfs attribute
Sorry, a few checkpatch errors slipped through, fixed below.
hugetlb: add per-node nr_hugepages sysfs attribute
Allow specifying the number of hugepages to allocate on a particular
node. Our current global sysctl will try its best to put hugepages
equally on each node, but that may not always be desired. This allows
the admin to control the layout of hugepage allocation at a finer level
(while not breaking the existing interface). Add callbacks in the sysfs
node registration and unregistration functions into hugetlb to add the
nr_hugepages attribute, which is a no-op if !NUMA or !HUGETLB.
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
diff --git a/drivers/base/node.c b/drivers/base/node.c
index e59861f..daf5b2b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -152,6 +152,7 @@ int register_node(struct node *node, int num, struct node *parent)
sysdev_create_file(&node->sysdev, &attr_meminfo);
sysdev_create_file(&node->sysdev, &attr_numastat);
sysdev_create_file(&node->sysdev, &attr_distance);
+ hugetlb_register_node(node);
}
return error;
}
@@ -169,6 +170,7 @@ void unregister_node(struct node *node)
sysdev_remove_file(&node->sysdev, &attr_meminfo);
sysdev_remove_file(&node->sysdev, &attr_numastat);
sysdev_remove_file(&node->sysdev, &attr_distance);
+ hugetlb_unregister_node(node);
sysdev_unregister(&node->sysdev);
}
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 7ca198b..a4f7559 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -6,7 +6,9 @@
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
+#include <linux/node.h>
#include <linux/shm.h>
+#include <linux/sysdev.h>
#include <asm/tlbflush.h>
struct ctl_table;
@@ -26,6 +28,13 @@ void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned lon
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
+#ifdef CONFIG_NUMA
+int hugetlb_register_node(struct node *);
+void hugetlb_unregister_node(struct node *);
+#else
+#define hugetlb_register_node(node) do {} while (0)
+#define hugetlb_unregister_node(node) do {} while (0)
+#endif
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access);
@@ -114,6 +123,8 @@ static inline unsigned long hugetlb_total_pages(void)
#define unmap_hugepage_range(vma, start, end) BUG()
#define hugetlb_report_meminfo(buf) 0
#define hugetlb_report_node_meminfo(n, buf) 0
+#define hugetlb_register_node(node) do {} while (0)
+#define hugetlb_unregister_node(node) do {} while (0)
#define follow_huge_pmd(mm, addr, pmd, write) NULL
#define prepare_hugepage_range(addr,len) (-EINVAL)
#define pmd_huge(x) 0
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d1f6c5a..05dac46 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -497,7 +497,6 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
return nr;
}
-#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low_node(unsigned long count, int nid)
{
@@ -513,7 +512,14 @@ static void try_to_free_low_node(unsigned long count, int nid)
return;
}
}
+#else
+static inline void try_to_free_low_node(unsigned long count, int nid)
+{
+}
+#endif
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
int i;
@@ -525,9 +531,6 @@ static void try_to_free_low(unsigned long count)
}
}
#else
-static inline void try_to_free_low_node(unsigned long count, int nid)
-{
-}
static inline void try_to_free_low(unsigned long count)
{
}
@@ -661,6 +664,117 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
nid, free_huge_pages_node[nid]);
}
+#ifdef CONFIG_NUMA
+static ssize_t hugetlb_read_nr_hugepages_node(struct sys_device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", nr_huge_pages_node[dev->id]);
+}
+
+#define persistent_huge_pages_node(nid) \
+ (nr_huge_pages_node[nid] - surplus_huge_pages_node[nid])
+static ssize_t hugetlb_write_nr_hugepages_node(struct sys_device *dev,
+ const char *buf, size_t count)
+{
+ int nid = dev->id;
+ unsigned long target;
+ unsigned long free_on_other_nodes;
+ unsigned long nr_huge_pages_req = simple_strtoul(buf, NULL, 10);
+ ssize_t ret;
+
+ /*
+ * Increase the pool size on the node
+ * First take pages out of surplus state. Then make up the
+ * remaining difference by allocating fresh huge pages.
+ *
+ * We might race with alloc_buddy_huge_page() here and be unable
+ * to convert a surplus huge page to a normal huge page. That is
+ * not critical, though, it just means the overall size of the
+ * pool might be one hugepage larger than it needs to be, but
+ * within all the constraints specified by the sysctls.
+ */
+ spin_lock(&hugetlb_lock);
+ while (surplus_huge_pages_node[nid] &&
+ nr_huge_pages_req > persistent_huge_pages_node(nid)) {
+ if (!adjust_pool_surplus_node(-1, nid))
+ break;
+ }
+
+ while (nr_huge_pages_req > persistent_huge_pages_node(nid)) {
+ struct page *ret;
+ /*
+ * If this allocation races such that we no longer need the
+ * page, free_huge_page will handle it by freeing the page
+ * and reducing the surplus.
+ */
+ spin_unlock(&hugetlb_lock);
+ ret = alloc_fresh_huge_page_node(nid);
+ spin_lock(&hugetlb_lock);
+ if (!ret)
+ goto out;
+
+ }
+
+ if (nr_huge_pages_req >= nr_huge_pages_node[nid])
+ goto out;
+
+ /*
+ * Decrease the pool size
+ * First return free pages to the buddy allocator (being careful
+ * to keep enough around to satisfy reservations). Then place
+ * pages into surplus state as needed so the pool will shrink
+ * to the desired size as pages become free.
+ *
+ * By placing pages into the surplus state independent of the
+ * overcommit value, we are allowing the surplus pool size to
+ * exceed overcommit. There are few sane options here. Since
+ * alloc_buddy_huge_page() is checking the global counter,
+ * though, we'll note that we're not allowed to exceed surplus
+ * and won't grow the pool anywhere else. Not until one of the
+ * sysctls are changed, or the surplus pages go out of use.
+ */
+ free_on_other_nodes = free_huge_pages - free_huge_pages_node[nid];
+ if (free_on_other_nodes >= resv_huge_pages) {
+ /* other nodes can satisfy reserve */
+ target = nr_huge_pages_req;
+ } else {
+ /* this node needs some free to satisfy reserve */
+ target = max((resv_huge_pages - free_on_other_nodes),
+ nr_huge_pages_req);
+ }
+ try_to_free_low_node(nid, target);
+ while (target < persistent_huge_pages_node(nid)) {
+ struct page *page = dequeue_huge_page_node(NULL, nid);
+ if (!page)
+ break;
+ update_and_free_page(nid, page);
+ }
+
+ while (target < persistent_huge_pages_node(nid)) {
+ if (!adjust_pool_surplus_node(1, nid))
+ break;
+ }
+out:
+ ret = persistent_huge_pages_node(nid);
+ spin_unlock(&hugetlb_lock);
+ return ret;
+}
+
+static SYSDEV_ATTR(nr_hugepages, S_IRUGO | S_IWUSR,
+ hugetlb_read_nr_hugepages_node,
+ hugetlb_write_nr_hugepages_node);
+
+int hugetlb_register_node(struct node *node)
+{
+ return sysdev_create_file(&node->sysdev, &attr_nr_hugepages);
+}
+
+void hugetlb_unregister_node(struct node *node)
+{
+ sysdev_remove_file(&node->sysdev, &attr_nr_hugepages);
+}
+#endif
+
/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute
2008-02-06 23:18 ` [PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute Nishanth Aravamudan
2008-02-06 23:19 ` [PATCH 3/3] hugetlb: interleave dequeuing of huge pages Nishanth Aravamudan
2008-02-06 23:23 ` [UPDATED][PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute Nishanth Aravamudan
@ 2008-02-07 0:03 ` Greg KH
2008-02-07 0:59 ` Nishanth Aravamudan
2 siblings, 1 reply; 12+ messages in thread
From: Greg KH @ 2008-02-07 0:03 UTC (permalink / raw)
To: Nishanth Aravamudan; +Cc: wli, agl, lee.schermerhorn, linux-mm
On Wed, Feb 06, 2008 at 03:18:45PM -0800, Nishanth Aravamudan wrote:
> hugetlb: add per-node nr_hugepages sysfs attribute
>
> Allow specifying the number of hugepages to allocate on a particular
> node. Our current global sysctl will try its best to put hugepages
> equally on each node, but htat may not always be desired. This allows
> the admin to control the layout of hugepage allocation at a finer level
> (while not breaking the existing interface). Add callbacks in the sysfs
> node registration and unregistration functions into hugetlb to add the
> nr_hugepages attribute, which is a no-op if !NUMA or !HUGETLB.
>
> Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
>
> ---
> Greg, do I need to add documentation for this sysfs attribute to
> Documentation/ABI?
Yes, please.
> I'm not sure if I should just add a file in testing/ for just this
> attribute or should defer and create documentation for all of the
> /sys/devices/system/node information?
How about both for this one, and the existing ones? That would be best.
thanks,
greg k-h
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute
2008-02-07 0:03 ` [PATCH " Greg KH
@ 2008-02-07 0:59 ` Nishanth Aravamudan
0 siblings, 0 replies; 12+ messages in thread
From: Nishanth Aravamudan @ 2008-02-07 0:59 UTC (permalink / raw)
To: Greg KH; +Cc: wli, agl, lee.schermerhorn, linux-mm
On 06.02.2008 [16:03:28 -0800], Greg KH wrote:
> On Wed, Feb 06, 2008 at 03:18:45PM -0800, Nishanth Aravamudan wrote:
> > hugetlb: add per-node nr_hugepages sysfs attribute
> >
> > Allow specifying the number of hugepages to allocate on a particular
> > node. Our current global sysctl will try its best to put hugepages
> > equally on each node, but htat may not always be desired. This allows
> > the admin to control the layout of hugepage allocation at a finer level
> > (while not breaking the existing interface). Add callbacks in the sysfs
> > node registration and unregistration functions into hugetlb to add the
> > nr_hugepages attribute, which is a no-op if !NUMA or !HUGETLB.
> >
> > Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
> >
> > ---
> > Greg, do I need to add documentation for this sysfs attribute to
> > Documentation/ABI?
>
> Yes, please.
Ok, thanks. I'll submit a follow-on patch.
> > I'm not sure if I should just add a file in testing/ for just this
> > attribute or should defer and create documentation for all of the
> > /sys/devices/system/node information?
>
> How about both for this one, and the existing ones? That would be
> best.
Sorry, that's what I meant (documenting existing interface and adding
this to that documentation). I'll probably do it in a separate series.
Thanks,
Nish
--
Nishanth Aravamudan <nacc@us.ibm.com>
IBM Linux Technology Center
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/3] hugetlb: numafy several functions
2008-02-06 23:15 [PATCH 1/3] hugetlb: numafy several functions Nishanth Aravamudan
2008-02-06 23:18 ` [PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute Nishanth Aravamudan
@ 2008-02-07 18:35 ` Lee Schermerhorn
2008-02-07 18:52 ` Nishanth Aravamudan
2008-02-08 16:37 ` Adam Litke
2 siblings, 1 reply; 12+ messages in thread
From: Lee Schermerhorn @ 2008-02-07 18:35 UTC (permalink / raw)
To: Nishanth Aravamudan; +Cc: wli, agl, linux-mm, Mel Gorman, Andrew Morton
On Wed, 2008-02-06 at 15:15 -0800, Nishanth Aravamudan wrote:
> hugetlb: numafy several functions
>
<snip>
Nish: glad to see these surface again. I'll add them [back] into my
tree for testing. I'm at 24-mm1. Can't tell from the messages what
release they're against, but I'll sort that out.
Another thing: I've tended to test these atop Mel Gorman's zonelist
rework and a set of mempolicy cleanups that I'm holding pending
acceptance of Mel's patches. I'll probably do that with these. At some
point we need to sort out with Andrew when or whether Mel's patches will
hit -mm. If so, what order vs yours...
Thanks,
Lee
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/3] hugetlb: numafy several functions
2008-02-07 18:35 ` [PATCH 1/3] hugetlb: numafy several functions Lee Schermerhorn
@ 2008-02-07 18:52 ` Nishanth Aravamudan
2008-02-08 16:47 ` Lee Schermerhorn
0 siblings, 1 reply; 12+ messages in thread
From: Nishanth Aravamudan @ 2008-02-07 18:52 UTC (permalink / raw)
To: Lee Schermerhorn; +Cc: wli, agl, linux-mm, Mel Gorman, Andrew Morton
On 07.02.2008 [13:35:15 -0500], Lee Schermerhorn wrote:
> On Wed, 2008-02-06 at 15:15 -0800, Nishanth Aravamudan wrote:
> > hugetlb: numafy several functions
> >
>
> <snip>
>
> Nish: glad to see these surface again. I'll add them [back] into my
> tree for testing. I'm at 24-mm1. Can't tell from the messages what
> release they're against, but I'll sort that out.
They were against -git tip when I rebased ... hrm that would be
551e4fb2465b87de9d4aa1669b27d624435443bb, I believe.
> Another thing: I've tended to test these atop Mel Gorman's zonelist
> rework and a set of mempolicy cleanups that I'm holding pending
> acceptance of Mel's patches. I'll probably do that with these. At
> some point we need to sort out with Andrew when or whether Mel's
> patches will hit -mm. If so, what order vs yours...
I think Mel's patches may be more generally useful than mine (as mine
are all keyed on hugepage support). So I would like to see his go first
then I can rework mine on top, if that is the order that it ends up
happening in.
Thanks,
Nish
--
Nishanth Aravamudan <nacc@us.ibm.com>
IBM Linux Technology Center
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/3] hugetlb: numafy several functions
2008-02-06 23:15 [PATCH 1/3] hugetlb: numafy several functions Nishanth Aravamudan
2008-02-06 23:18 ` [PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute Nishanth Aravamudan
2008-02-07 18:35 ` [PATCH 1/3] hugetlb: numafy several functions Lee Schermerhorn
@ 2008-02-08 16:37 ` Adam Litke
2008-02-08 16:48 ` Nishanth Aravamudan
2 siblings, 1 reply; 12+ messages in thread
From: Adam Litke @ 2008-02-08 16:37 UTC (permalink / raw)
To: Nishanth Aravamudan; +Cc: wli, lee.schermerhorn, linux-mm
On Wed, 2008-02-06 at 15:15 -0800, Nishanth Aravamudan wrote:
> @@ -141,6 +149,18 @@ static void free_huge_page(struct page *page)
> * balanced by operating on them in a round-robin fashion.
> * Returns 1 if an adjustment was made.
> */
> +static int adjust_pool_surplus_node(int delta, int nid)
> +{
> + if (delta < 0 && !surplus_huge_pages_node[nid])
> + return 0;
> + if (delta > 0 && surplus_huge_pages_node[nid] >=
> + nr_huge_pages_node[nid])
> + return 0;
> + surplus_huge_pages += delta;
> + surplus_huge_pages_node[nid] += delta;
> + return 1;
> +}
> +
> static int adjust_pool_surplus(int delta)
> {
> static int prev_nid;
> @@ -152,19 +172,9 @@ static int adjust_pool_surplus(int delta)
> nid = next_node(nid, node_online_map);
> if (nid == MAX_NUMNODES)
> nid = first_node(node_online_map);
> -
> - /* To shrink on this node, there must be a surplus page */
> - if (delta < 0 && !surplus_huge_pages_node[nid])
> - continue;
> - /* Surplus cannot exceed the total number of pages */
> - if (delta > 0 && surplus_huge_pages_node[nid] >=
> - nr_huge_pages_node[nid])
> - continue;
Unless I am misreading the diff, it seems the above comments were lost
in translation. I vote for preserving them :) Otherwise this looks
pretty good to me.
--
Adam Litke - (agl at us.ibm.com)
IBM Linux Technology Center
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/3] hugetlb: numafy several functions
2008-02-07 18:52 ` Nishanth Aravamudan
@ 2008-02-08 16:47 ` Lee Schermerhorn
2008-02-08 17:08 ` Nishanth Aravamudan
0 siblings, 1 reply; 12+ messages in thread
From: Lee Schermerhorn @ 2008-02-08 16:47 UTC (permalink / raw)
To: Nishanth Aravamudan; +Cc: wli, agl, linux-mm, Mel Gorman, Andrew Morton
On Thu, 2008-02-07 at 10:52 -0800, Nishanth Aravamudan wrote:
> On 07.02.2008 [13:35:15 -0500], Lee Schermerhorn wrote:
> > On Wed, 2008-02-06 at 15:15 -0800, Nishanth Aravamudan wrote:
> > > hugetlb: numafy several functions
> > >
> >
> > <snip>
> >
> > Nish: glad to see these surface again. I'll add them [back] into my
> > tree for testing. I'm at 24-mm1. Can't tell from the messages what
> > release they're against, but I'll sort that out.
>
> They were against -git tip when I rebased ... hrm that would be
> 551e4fb2465b87de9d4aa1669b27d624435443bb, I believe.
>
> > Another thing: I've tended to test these atop Mel Gorman's zonelist
> > rework and a set of mempolicy cleanups that I'm holding pending
> > acceptance of Mel's patches. I'll probably do that with these. At
> > some point we need to sort out with Andrew when or whether Mel's
> > patches will hit -mm. If so, what order vs yours...
>
> I think Mel's patches may be more generally useful than mine (as mine
> are all keyed on hugepage support). So I would like to see his go first
> then I can rework mine on top, if that is the order that it ends up
> happening in.
Nish:
Heads up: Your "smarter retry" patch will need some rework in vmscan.c
because of the memory controller changes [currently in 24-mm1]. I have
rebased your patches against 24-mm1, atop Mel's two-zonelist patches and
my mempolicy cleanup series. I can send you the entire stack or place
the tarball on a web site, if you're interested.
Lee
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/3] hugetlb: numafy several functions
2008-02-08 16:37 ` Adam Litke
@ 2008-02-08 16:48 ` Nishanth Aravamudan
0 siblings, 0 replies; 12+ messages in thread
From: Nishanth Aravamudan @ 2008-02-08 16:48 UTC (permalink / raw)
To: Adam Litke; +Cc: wli, lee.schermerhorn, linux-mm
On 08.02.2008 [10:37:24 -0600], Adam Litke wrote:
> On Wed, 2008-02-06 at 15:15 -0800, Nishanth Aravamudan wrote:
> > @@ -141,6 +149,18 @@ static void free_huge_page(struct page *page)
> > * balanced by operating on them in a round-robin fashion.
> > * Returns 1 if an adjustment was made.
> > */
> > +static int adjust_pool_surplus_node(int delta, int nid)
> > +{
> > + if (delta < 0 && !surplus_huge_pages_node[nid])
> > + return 0;
> > + if (delta > 0 && surplus_huge_pages_node[nid] >=
> > + nr_huge_pages_node[nid])
> > + return 0;
> > + surplus_huge_pages += delta;
> > + surplus_huge_pages_node[nid] += delta;
> > + return 1;
> > +}
> > +
> > static int adjust_pool_surplus(int delta)
> > {
> > static int prev_nid;
> > @@ -152,19 +172,9 @@ static int adjust_pool_surplus(int delta)
> > nid = next_node(nid, node_online_map);
> > if (nid == MAX_NUMNODES)
> > nid = first_node(node_online_map);
> > -
> > - /* To shrink on this node, there must be a surplus page */
> > - if (delta < 0 && !surplus_huge_pages_node[nid])
> > - continue;
> > - /* Surplus cannot exceed the total number of pages */
> > - if (delta > 0 && surplus_huge_pages_node[nid] >=
> > - nr_huge_pages_node[nid])
> > - continue;
>
> Unless I am misreading the diff, it seems the above comments were lost
> in translation. I vote for preserving them :) Otherwise this looks
> pretty good to me.
Oops, sorry about that, Adam. I'll update this in my next posting,
pending any other comments/testing feedback.
Thanks,
Nish
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/3] hugetlb: numafy several functions
2008-02-08 16:47 ` Lee Schermerhorn
@ 2008-02-08 17:08 ` Nishanth Aravamudan
0 siblings, 0 replies; 12+ messages in thread
From: Nishanth Aravamudan @ 2008-02-08 17:08 UTC (permalink / raw)
To: Lee Schermerhorn; +Cc: wli, agl, linux-mm, Mel Gorman, Andrew Morton
On 08.02.2008 [11:47:17 -0500], Lee Schermerhorn wrote:
> On Thu, 2008-02-07 at 10:52 -0800, Nishanth Aravamudan wrote:
> > On 07.02.2008 [13:35:15 -0500], Lee Schermerhorn wrote:
> > > On Wed, 2008-02-06 at 15:15 -0800, Nishanth Aravamudan wrote:
> > > > hugetlb: numafy several functions
> > > >
> > >
> > > <snip>
> > >
> > > Nish: glad to see these surface again. I'll add them [back] into my
> > > tree for testing. I'm at 24-mm1. Can't tell from the messages what
> > > release they're against, but I'll sort that out.
> >
> > They were against -git tip when I rebased ... hrm that would be
> > 551e4fb2465b87de9d4aa1669b27d624435443bb, I believe.
> >
> > > Another thing: I've tended to test these atop Mel Gorman's zonelist
> > > rework and a set of mempolicy cleanups that I'm holding pending
> > > acceptance of Mel's patches. I'll probably do that with these. At
> > > some point we need to sort out with Andrew when or whether Mel's
> > > patches will hit -mm. If so, what order vs yours...
> >
> > I think Mel's patches may be more generally useful than mine (as mine
> > are all keyed on hugepage support). So I would like to see his go first
> > then I can rework mine on top, if that is the order that it ends up
> > happening in.
>
> Nish:
>
> Heads up: Your "smarter retry" patch will need some rework in vmscan.c
> because of the memory controller changes [currently in 24-mm1]. I have
> rebased your patches against 24-mm1, atop Mel's two-zonelist patches and
> my mempolicy cleanup series. I can send you the entire stack or place
> the tarball on a web site, if you're interested.
I can refresh the patchset for -mm, I was just posting based upon
mainline as that was traditionally how I sent patches to Andrew. But in
this case you're right, there are enough collisions with existing -mm
bits that I should rebase.
Thanks for the heads up.
-Nish
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>
^ permalink raw reply [flat|nested] 12+ messages in thread
end of thread, other threads:[~2008-02-08 17:08 UTC | newest]
Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2008-02-06 23:15 [PATCH 1/3] hugetlb: numafy several functions Nishanth Aravamudan
2008-02-06 23:18 ` [PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute Nishanth Aravamudan
2008-02-06 23:19 ` [PATCH 3/3] hugetlb: interleave dequeing of huge pages Nishanth Aravamudan
2008-02-06 23:23 ` [UPDATED][PATCH 2/3] hugetlb: add per-node nr_hugepages sysfs attribute Nishanth Aravamudan
2008-02-07 0:03 ` [PATCH " Greg KH
2008-02-07 0:59 ` Nishanth Aravamudan
2008-02-07 18:35 ` [PATCH 1/3] hugetlb: numafy several functions Lee Schermerhorn
2008-02-07 18:52 ` Nishanth Aravamudan
2008-02-08 16:47 ` Lee Schermerhorn
2008-02-08 17:08 ` Nishanth Aravamudan
2008-02-08 16:37 ` Adam Litke
2008-02-08 16:48 ` Nishanth Aravamudan
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox