* [patch] first bit of vm balancing fixes for 2.3.52-1
@ 2000-03-13 22:50 Ben LaHaise
2000-03-13 22:55 ` Linus Torvalds
2000-03-17 12:35 ` Stephen C. Tweedie
0 siblings, 2 replies; 10+ messages in thread
From: Ben LaHaise @ 2000-03-13 22:50 UTC (permalink / raw)
To: linux-mm; +Cc: torvalds
This is the first little bit of a few vm balancing patches I've been
working on. It does two main things: moves the lru_cache list into the
per-zone structure, and slightly reworks the kswapd wakeup logic so the
zone_wake_kswapd flag is cleared in free_pages_ok. Moving the lru_cache
list into the zone structure means we can make much better progress when
trying to free a specific type of memory. Moving the clearing of the
zone_wake_kswapd flag into the free_pages routine stops kswapd from
continuing to swap out ad nauseam: without the patch, my box discards the
entire page cache when it hits low memory during a simple sequential read.
With this patch in place it hovers around 3MB free, as it should.
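The net effect on the wakeup logic is a simple hysteresis; as a rough
sketch of the intent (not the literal patch, which follows below):

	/* in __alloc_pages(): wake kswapd once a zone class drops
	 * to pages_high or below... */
	if (free <= z->pages_high) {
		z->zone_wake_kswapd = 1;
		wake_up_interruptible(&kswapd_wait);
	}

	/* ...and in free_pages_ok(): only freeing back past
	 * pages_high shuts it up again. */
	if (classfree(zone) > zone->pages_high)
		zone->zone_wake_kswapd = 0;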
-ben
diff -ur 2.3.52-1/include/linux/mmzone.h linux/include/linux/mmzone.h
--- 2.3.52-1/include/linux/mmzone.h Mon Mar 13 15:16:25 2000
+++ linux/include/linux/mmzone.h Mon Mar 13 16:08:21 2000
@@ -15,8 +15,8 @@
#define MAX_ORDER 10
typedef struct free_area_struct {
- struct list_head free_list;
- unsigned int * map;
+ struct list_head free_list;
+ unsigned int *map;
} free_area_t;
struct pglist_data;
@@ -25,30 +25,31 @@
/*
* Commonly accessed fields:
*/
- spinlock_t lock;
- unsigned long offset;
- unsigned long free_pages;
- char low_on_memory;
- char zone_wake_kswapd;
- unsigned long pages_min, pages_low, pages_high;
+ spinlock_t lock;
+ unsigned long offset;
+ unsigned long free_pages;
+ char low_on_memory;
+ char zone_wake_kswapd;
+ unsigned long pages_min, pages_low, pages_high;
+ struct list_head lru_cache;
/*
* free areas of different sizes
*/
- free_area_t free_area[MAX_ORDER];
+ free_area_t free_area[MAX_ORDER];
/*
* rarely used fields:
*/
- char * name;
- unsigned long size;
+ char *name;
+ unsigned long size;
/*
* Discontig memory support fields.
*/
- struct pglist_data *zone_pgdat;
- unsigned long zone_start_paddr;
- unsigned long zone_start_mapnr;
- struct page * zone_mem_map;
+ struct pglist_data *zone_pgdat;
+ unsigned long zone_start_paddr;
+ unsigned long zone_start_mapnr;
+ struct page *zone_mem_map;
} zone_t;
#define ZONE_DMA 0
diff -ur 2.3.52-1/include/linux/swap.h linux/include/linux/swap.h
--- 2.3.52-1/include/linux/swap.h Mon Mar 13 15:16:26 2000
+++ linux/include/linux/swap.h Mon Mar 13 16:38:31 2000
@@ -67,7 +67,6 @@
FASTCALL(unsigned int nr_free_buffer_pages(void));
FASTCALL(unsigned int nr_free_highpages(void));
extern int nr_lru_pages;
-extern struct list_head lru_cache;
extern atomic_t nr_async_pages;
extern struct address_space swapper_space;
extern atomic_t page_cache_size;
@@ -167,7 +166,7 @@
#define lru_cache_add(page) \
do { \
spin_lock(&pagemap_lru_lock); \
- list_add(&(page)->lru, &lru_cache); \
+ list_add(&(page)->lru, &page->zone->lru_cache); \
nr_lru_pages++; \
spin_unlock(&pagemap_lru_lock); \
} while (0)
diff -ur 2.3.52-1/mm/filemap.c linux/mm/filemap.c
--- 2.3.52-1/mm/filemap.c Sun Mar 12 18:03:02 2000
+++ linux/mm/filemap.c Mon Mar 13 16:40:04 2000
@@ -220,15 +220,18 @@
struct list_head * page_lru, * dispose;
struct page * page;
+ if (!zone)
+ BUG();
+
count = nr_lru_pages / (priority+1);
spin_lock(&pagemap_lru_lock);
- while (count > 0 && (page_lru = lru_cache.prev) != &lru_cache) {
+ while (count > 0 && (page_lru = zone->lru_cache.prev) != &zone->lru_cache) {
page = list_entry(page_lru, struct page, lru);
list_del(page_lru);
- dispose = &lru_cache;
+ dispose = &zone->lru_cache;
if (test_and_clear_bit(PG_referenced, &page->flags))
/* Roll the page at the top of the lru list,
* we could also be more aggressive putting
@@ -355,8 +358,8 @@
nr_lru_pages--;
out:
- list_splice(&young, &lru_cache);
- list_splice(&old, lru_cache.prev);
+ list_splice(&young, &zone->lru_cache);
+ list_splice(&old, zone->lru_cache.prev);
spin_unlock(&pagemap_lru_lock);
diff -ur 2.3.52-1/mm/page_alloc.c linux/mm/page_alloc.c
--- 2.3.52-1/mm/page_alloc.c Fri Mar 10 16:11:22 2000
+++ linux/mm/page_alloc.c Mon Mar 13 17:17:53 2000
@@ -26,7 +26,6 @@
int nr_swap_pages = 0;
int nr_lru_pages;
-LIST_HEAD(lru_cache);
pg_data_t *pgdat_list = (pg_data_t *)0;
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
@@ -59,6 +58,19 @@
*/
#define BAD_RANGE(zone,x) (((zone) != (x)->zone) || (((x)-mem_map) < (zone)->offset) || (((x)-mem_map) >= (zone)->offset+(zone)->size))
+static inline unsigned long classfree(zone_t *zone)
+{
+ unsigned long free = 0;
+ zone_t *z = zone->zone_pgdat->node_zones;
+
+ while (z != zone) {
+ free += z->free_pages;
+ z++;
+ }
+ free += zone->free_pages;
+ return(free);
+}
+
/*
* Buddy system. Hairy. You really aren't expected to understand this
*
@@ -135,6 +147,9 @@
memlist_add_head(&(base + page_idx)->list, &area->free_list);
spin_unlock_irqrestore(&zone->lock, flags);
+
+ if (classfree(zone) > zone->pages_high)
+ zone->zone_wake_kswapd = 0;
}
#define MARK_USED(index, order, area) \
@@ -201,19 +216,6 @@
return NULL;
}
-static inline unsigned long classfree(zone_t *zone)
-{
- unsigned long free = 0;
- zone_t *z = zone->zone_pgdat->node_zones;
-
- while (z != zone) {
- free += z->free_pages;
- z++;
- }
- free += zone->free_pages;
- return(free);
-}
-
static inline int zone_balance_memory (zone_t *zone, int gfp_mask)
{
int freed;
@@ -263,21 +265,12 @@
{
unsigned long free = classfree(z);
- if (free > z->pages_high)
- {
- if (z->low_on_memory)
- z->low_on_memory = 0;
- z->zone_wake_kswapd = 0;
- }
- else
+ if (free <= z->pages_high)
{
extern wait_queue_head_t kswapd_wait;
- if (free <= z->pages_low) {
- z->zone_wake_kswapd = 1;
- wake_up_interruptible(&kswapd_wait);
- } else
- z->zone_wake_kswapd = 0;
+ z->zone_wake_kswapd = 1;
+ wake_up_interruptible(&kswapd_wait);
if (free <= z->pages_min)
z->low_on_memory = 1;
@@ -585,6 +578,7 @@
unsigned long bitmap_size;
memlist_init(&zone->free_area[i].free_list);
+ memlist_init(&zone->lru_cache);
mask += mask;
size = (size + ~mask) & mask;
bitmap_size = size >> i;
diff -ur 2.3.52-1/mm/vmscan.c linux/mm/vmscan.c
--- 2.3.52-1/mm/vmscan.c Mon Feb 28 10:44:22 2000
+++ linux/mm/vmscan.c Mon Mar 13 17:07:23 2000
@@ -504,8 +504,7 @@
while (pgdat) {
for (i = 0; i < MAX_NR_ZONES; i++) {
zone = pgdat->node_zones + i;
- if ((!zone->size) ||
- (!zone->zone_wake_kswapd))
+ if ((!zone->size) || (!zone->zone_wake_kswapd))
continue;
do_try_to_free_pages(GFP_KSWAPD, zone);
}
* Re: [patch] first bit of vm balancing fixes for 2.3.52-1
2000-03-13 22:50 [patch] first bit of vm balancing fixes for 2.3.52-1 Ben LaHaise
@ 2000-03-13 22:55 ` Linus Torvalds
2000-03-13 23:28 ` Kanoj Sarcar
2000-03-17 12:35 ` Stephen C. Tweedie
1 sibling, 1 reply; 10+ messages in thread
From: Linus Torvalds @ 2000-03-13 22:55 UTC (permalink / raw)
To: Ben LaHaise; +Cc: linux-mm
On Mon, 13 Mar 2000, Ben LaHaise wrote:
>
> This is the first little bit of a few vm balancing patches I've been
> working on. It does two main things: moves the lru_cache list into the
> per-zone structure, and slightly reworks the kswapd wakeup logic so the
> zone_wake_kswapd flag is cleared in free_pages_ok. Moving the lru_cache
> list into the zone structure means we can make much better progress when
> trying to free a specific type of memory. Moving the clearing of the
> zone_wake_kswapd flag into the free_pages routine stops kswapd from
> continuing to swap out ad nauseam: without the patch, my box discards the
> entire page cache when it hits low memory during a simple sequential read.
> With this patch in place it hovers around 3MB free, as it should.
Looks sane to me.
Linus
* Re: [patch] first bit of vm balancing fixes for 2.3.52-1
2000-03-13 22:55 ` Linus Torvalds
@ 2000-03-13 23:28 ` Kanoj Sarcar
2000-03-13 23:31 ` Linus Torvalds
0 siblings, 1 reply; 10+ messages in thread
From: Kanoj Sarcar @ 2000-03-13 23:28 UTC (permalink / raw)
To: Linus Torvalds; +Cc: Ben LaHaise, linux-mm
>
>
> On Mon, 13 Mar 2000, Ben LaHaise wrote:
> >
> > This is the first little bit of a few vm balancing patches I've been
> > working on. It does two main things: moves the lru_cache list into the
> > per-zone structure, and slightly reworks the kswapd wakeup logic so the
> > zone_wake_kswapd flag is cleared in free_pages_ok. Moving the lru_cache
> > list into the zone structure means we can make much better progress when
> > trying to free a specific type of memory. Moving the clearing of the
> > zone_wake_kswapd flag into the free_pages routine stops kswapd from
> > continuing to swap out ad nauseam: without the patch, my box discards the
> > entire page cache when it hits low memory during a simple sequential read.
> > With this patch in place it hovers around 3MB free, as it should.
>
> Looks sane to me.
>
> Linus
I am not sure about the zone lru_cache, since any claims without extensive
performance testing are meaningless ... but it does look cleaner
theoretically.
About the zone_wake_kswapd clearing in free_pages, yes, it is the right
thing to do ... looking at 2.2, a similar thing was done (i.e., nr_free_pages
was updated, which was a signal to the balancing code). Unfortunately,
this leads to a classfree() call in both pagefree and pagealloc, but I guess
that's the cost of fixing the memory class bugs present in 2.2.
I do have a problem though with the way the zone_wake_kswapd flag is
otherwise being manipulated in the patch. The rules by which low_on_memory,
zone_wake_kswapd and kswapd poking are handled are in Documentation/vm/balance.
I think free_pages_ok also needs to clear the low_on_memory flag (it's never
being cleared in your code). I think the code in the (free <= z->pages_high)
branch in __alloc_pages() should stay the way it is, unless you can come up
with a rationale for changing it.
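Concretely, the free_pages_ok side of that would amount to something like
this (an untested sketch of the idea, reusing the classfree() from the
patch):

	if (classfree(zone) > zone->pages_high) {
		zone->zone_wake_kswapd = 0;
		zone->low_on_memory = 0;	/* never cleared in the patch as posted */
	}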
Kanoj
* Re: [patch] first bit of vm balancing fixes for 2.3.52-1
2000-03-13 23:28 ` Kanoj Sarcar
@ 2000-03-13 23:31 ` Linus Torvalds
2000-03-14 0:23 ` Kanoj Sarcar
0 siblings, 1 reply; 10+ messages in thread
From: Linus Torvalds @ 2000-03-13 23:31 UTC (permalink / raw)
To: Kanoj Sarcar; +Cc: Ben LaHaise, linux-mm
On Mon, 13 Mar 2000, Kanoj Sarcar wrote:
>
> I am not sure about the zone lru_cache, since any claims without extensive
> performance testing are meaningless ... but it does look cleaner
> theoretically.
It's certainly "different", and the LRU list itself will obviously not be as
"least recently used" as a global LRU. However, considering that we're
only using the LRU on a per-zone basis anyway, I think it should give the
same basic behaviour, no?
And it definitely looked cleaner ;)
Linus
* Re: [patch] first bit of vm balancing fixes for 2.3.52-1
2000-03-13 23:31 ` Linus Torvalds
@ 2000-03-14 0:23 ` Kanoj Sarcar
2000-03-14 0:32 ` Linus Torvalds
0 siblings, 1 reply; 10+ messages in thread
From: Kanoj Sarcar @ 2000-03-14 0:23 UTC (permalink / raw)
To: Linus Torvalds; +Cc: Ben LaHaise, linux-mm
>
>
> On Mon, 13 Mar 2000, Kanoj Sarcar wrote:
> >
> > I am not sure about the zone lru_cache, since any claims without extensive
> > performance testing are meaningless ... but it does look cleaner
> > theoretically.
>
> It's certainly "different", and the LRU list itself will obviously not be as
> "least recently used" as a global LRU. However, considering that we're
> only using the LRU on a per-zone basis anyway, I think it should give the
> same basic behaviour, no?
Hmm, true ... I would like Ben to look over the low_on_memory and
zone_wake_kswapd setting/clearing part though.
Kanoj
* Re: [patch] first bit of vm balancing fixes for 2.3.52-1
2000-03-14 0:23 ` Kanoj Sarcar
@ 2000-03-14 0:32 ` Linus Torvalds
0 siblings, 0 replies; 10+ messages in thread
From: Linus Torvalds @ 2000-03-14 0:32 UTC (permalink / raw)
To: Kanoj Sarcar; +Cc: Ben LaHaise, linux-mm
On Mon, 13 Mar 2000, Kanoj Sarcar wrote:
> > It's certainly "different", and the LRU list itself will obviously not be as
> > "least recently used" as a global LRU. However, considering that we're
> > only using the LRU on a per-zone basis anyway, I think it should give the
> > same basic behaviour, no?
>
> Hmm, true ... I would like Ben to look over the low_on_memory and
> zone_wake_kswapd setting/clearing part though.
The more the merrier. Before new releases we've always had a "tuning"
period with tons of sometimes arbitrary patches going in to make
performance smooth on different classes of machines (16MB - 4GB is quite
the range to try to cover ;).
Linus
* Re: [patch] first bit of vm balancing fixes for 2.3.52-1
2000-03-13 22:50 [patch] first bit of vm balancing fixes for 2.3.52-1 Ben LaHaise
2000-03-13 22:55 ` Linus Torvalds
@ 2000-03-17 12:35 ` Stephen C. Tweedie
2000-03-20 20:58 ` Kanoj Sarcar
1 sibling, 1 reply; 10+ messages in thread
From: Stephen C. Tweedie @ 2000-03-17 12:35 UTC (permalink / raw)
To: Ben LaHaise; +Cc: linux-mm, torvalds, Stephen Tweedie
Hi,
On Mon, 13 Mar 2000 17:50:50 -0500 (EST), Ben LaHaise <bcrl@redhat.com>
said:
> This is the first little bit of a few vm balancing patches I've been
> working on.
Just out of interest, is anyone working on fixing the zone balancing?
The current behaviour is highly suboptimal: if you have two zones to
pick from for a given alloc_page(), and the first zone is at its
pages_min threshold, then we will always allocate from that first zone
and push it into kswapd activation no matter how much free space there is
in the next zone.
The net effect of this is that we may not _ever_ end up using the next
zone for allocations if the requests trickle in slowly enough; and that
either way, the memory use between the two zones is unbalanced. On an
8GB box it may be reasonable to keep the lomem zone for non-himem
allocations, but on 2GB we probably want to allocate page cache and user
pages as fairly as possible above and below 1GB.
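To illustrate what I mean, the allocation walk is first-fit over the
zonelist, roughly like this (a simplified sketch, not the exact code):

	zone_t **zone = zonelist->zones;
	for (;;) {
		zone_t *z = *(zone++);
		if (!z)
			break;
		/* the first zone with room always wins, so it gets driven
		 * down to its thresholds before the next zone is touched */
		if (z->free_pages > z->pages_low)
			return rmqueue(z, order);
	}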
--Stephen
* Re: [patch] first bit of vm balancing fixes for 2.3.52-1
2000-03-17 12:35 ` Stephen C. Tweedie
@ 2000-03-20 20:58 ` Kanoj Sarcar
2000-03-20 21:31 ` Linus Torvalds
0 siblings, 1 reply; 10+ messages in thread
From: Kanoj Sarcar @ 2000-03-20 20:58 UTC (permalink / raw)
To: Stephen C. Tweedie; +Cc: Ben LaHaise, linux-mm, torvalds
>
> Hi,
>
> On Mon, 13 Mar 2000 17:50:50 -0500 (EST), Ben LaHaise <bcrl@redhat.com>
> said:
>
> > This is the first little bit of a few vm balancing patches I've been
> > working on.
>
> Just out of interest, is anyone working on fixing the zone balancing?
>
> The current behaviour is highly suboptimal: if you have two zones to
> pick from for a given alloc_page(), and the first zone is at its
> pages_min threshold, then we will always allocate from that first zone
> and push it into kswapd activation no matter how much free space there is
> in the next zone.
Hmm, I disagree for 2.3.50 and pre1 ... note that the low-memory decision
is based on _cumulative_ free and total page counts, so whether you
allocate from the regular or dma zone, you should be
stealing and poking kswapd roughly the same number of times. As far as
I can see, spreading the allocation over lower class zones does not seem
to have advantages in this case.
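A worked example with made-up numbers: say DMA has 4MB free and Normal has
12MB free. For a normal-class allocation the balancing check reads
classfree(Normal) = 4 + 12 = 16MB, and a page handed out from either zone
decrements that same cumulative total, so the low-memory thresholds are
crossed -- and kswapd poked -- at the same rate no matter which zone the
pages actually came from.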
With Linus' change to the page alloc code in pre2, yes, spreading
the allocation is an option, but I would be real careful before
putting that in 2.4.
Kanoj
>
> The net effect of this is that we may not _ever_ end up using the next
> zone for allocations if the requests trickle in slowly enough; and that
> either way, the memory use between the two zones is unbalanced. On an
> 8GB box it may be reasonable to keep the lomem zone for non-himem
> allocations, but on 2GB we probably want to allocate page cache and user
> pages as fairly as possible above and below 1GB.
>
> --Stephen
* Re: [patch] first bit of vm balancing fixes for 2.3.52-1
2000-03-20 20:58 ` Kanoj Sarcar
@ 2000-03-20 21:31 ` Linus Torvalds
2000-03-20 22:06 ` Linus Torvalds
0 siblings, 1 reply; 10+ messages in thread
From: Linus Torvalds @ 2000-03-20 21:31 UTC (permalink / raw)
To: Kanoj Sarcar; +Cc: Stephen C. Tweedie, Ben LaHaise, linux-mm
On Mon, 20 Mar 2000, Kanoj Sarcar wrote:
> >
> > The current behaviour is highly suboptimal: if you have two zones to
> > pick from for a given alloc_page(), and the first zone is at its
> > pages_min threshold, then we will always allocate from that first zone
> > and push it into kswapd activation no matter how much free space there is
> > in the next zone.
>
> With Linus' change to the page alloc code in pre2, yes, spreading
> the allocation is an option, but I would be real careful before
> putting that in 2.4.
It's not an option: it is how things work.
My code explicitly says: ok, walk the list of zones, if any of them have
plenty of memory free just allocate it.
Only if none of the zones is an obvious target for allocation do we
balance, and then we mark all the appropriate zones for balancing at once.
Linus
* Re: [patch] first bit of vm balancing fixes for 2.3.52-1
2000-03-20 21:31 ` Linus Torvalds
@ 2000-03-20 22:06 ` Linus Torvalds
0 siblings, 0 replies; 10+ messages in thread
From: Linus Torvalds @ 2000-03-20 22:06 UTC (permalink / raw)
To: Kanoj Sarcar; +Cc: Stephen C. Tweedie, Ben LaHaise, linux-mm
On Mon, 20 Mar 2000, Linus Torvalds wrote:
>
> My code explicitly says: ok, walk the list of zones, if any of them have
> plenty of memory free just allocate it.
Ugh. The "plenty" test should take "zone->low_on_memory" into account too.
This should fix that, and get the PF_MEMALLOC case right too.
This way we explicitly try to avoid any zones that are being balanced
(we'll still allocate from such a zone, it's just that we'll go through
the balancing motions first - think of it as a way of saying "we want to
get OUT of the 'low_on_memory' state quickly, not make it worse").
Linus
-----
--- v2.3.99-pre2/linux/mm/page_alloc.c Sun Mar 19 18:35:31 2000
+++ linux/mm/page_alloc.c Mon Mar 20 14:03:34 2000
@@ -271,6 +271,14 @@
zone_t **zone = zonelist->zones;
/*
+ * If this is a recursive call, we'd better
+ * do our best to just allocate things without
+ * further thought.
+ */
+ if (current->flags & PF_MEMALLOC)
+ goto allocate_ok;
+
+ /*
* (If anyone calls gfp from interrupts nonatomically then it
* will sooner or later tripped up by a schedule().)
*
@@ -283,32 +291,22 @@
break;
if (!z->size)
BUG();
- /*
- * If this is a recursive call, we'd better
- * do our best to just allocate things without
- * further thought.
- */
- if (!(current->flags & PF_MEMALLOC)) {
- /* Are we low on memory? */
- if (z->free_pages <= z->pages_low)
- continue;
- }
- /*
- * This is an optimization for the 'higher order zone
- * is empty' case - it can happen even in well-behaved
- * systems, think the page-cache filling up all RAM.
- * We skip over empty zones. (this is not exact because
- * we do not take the spinlock and it's not exact for
- * the higher order case, but will do it for most things.)
- */
- if (z->free_pages) {
+
+ /* Are we low on memory? Don't make it worse.. */
+ if (!z->low_on_memory && z->free_pages > z->pages_low) {
struct page *page = rmqueue(z, order);
if (page)
return page;
}
}
+
+ /*
+ * Ok, no obvious zones were available, start
+ * balancing things a bit..
+ */
if (zone_balance_memory(zonelist)) {
zone = zonelist->zones;
+allocate_ok:
for (;;) {
zone_t *z = *(zone++);
if (!z)