* [patch] pae-2.4.3-C3
From: Ingo Molnar @ 2001-03-28 9:57 UTC (permalink / raw)
To: Linus Torvalds; +Cc: Alan Cox, Linux Kernel List, linux-mm
[-- Attachment #1: Type: TEXT/PLAIN, Size: 857 bytes --]
The attached pae-2.4.3-C3 patch fixes the PAE code to work with SLAB
FORCED_DEBUG (which enables redzoning) as well.
The problem is that redzoning is enabled unconditionally, and SLAB has no
information about how crucial alignment is for any particular SLAB
cache. In PAE mode, the CPU generates a general protection fault if a
non-16-byte-aligned pgd is loaded into %cr3.
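To illustrate the failure mode, here is a minimal sketch (hypothetical
code, not part of the patch; the helper name and the explicit check are
illustrative):

/*
 * Illustration only: in PAE mode, %cr3 points at the pgd (the 4-entry
 * page-directory-pointer table), and the CPU faults on an unaligned one.
 */
static inline void load_pae_cr3_sketch(pgd_t *pgd)
{
	if ((unsigned long)pgd & 15)
		BUG();	/* the mov to %cr3 below would take a #GP */
	asm volatile("movl %0, %%cr3" : : "r" (__pa(pgd)) : "memory");
}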
Redzoning is very useful, and most caches use SLAB_HWCACHE_ALIGN, so I
didn't want to disable redzoning whenever SLAB_HWCACHE_ALIGN is set. The
patch adds SLAB_MUST_HWCACHE_ALIGN, which makes the distinction: caches
that merely prefer alignment keep their red zones, while caches that
truly require alignment keep it at the expense of redzoning.
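For reference, a minimal usage sketch (this mirrors the pae_pgd cache
created by the patch below; it is not additional patch code):

/*
 * SLAB_HWCACHE_ALIGN requests alignment; SLAB_MUST_HWCACHE_ALIGN tells
 * the debug code that alignment may not be sacrificed for redzoning.
 */
kmem_cache_t *cachep;

cachep = kmem_cache_create("pae_pgd",	/* cache name */
		32,			/* object size: 4 pgd entries * 8 bytes */
		0,			/* offset */
		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
		NULL, NULL);		/* no constructor/destructor */
if (!cachep)
	panic("cannot create pae_pgd SLAB cache");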
The patch is against 2.4.3-pre8 (it applies cleanly to ac26 as well), and
it compiles & boots cleanly in PAE mode, with or without
CONFIG_DEBUG_SLAB. The kernel also compiles cleanly in non-PAE mode.
(The patch also introduces the pae_pgd SLAB cache, which is a tiny
speedup.)
Ingo
[-- Attachment #2: Type: TEXT/PLAIN, Size: 4322 bytes --]
--- linux/init/main.c.orig Wed Mar 28 14:41:59 2001
+++ linux/init/main.c Wed Mar 28 14:42:02 2001
@@ -570,6 +570,9 @@
#endif
mem_init();
kmem_cache_sizes_init();
+#if CONFIG_X86_PAE
+ init_pae_pgd_cache();
+#endif
#ifdef CONFIG_3215_CONSOLE
con3215_activate();
#endif
--- linux/mm/slab.c.orig Wed Mar 28 14:41:59 2001
+++ linux/mm/slab.c Wed Mar 28 14:42:02 2001
@@ -102,9 +102,11 @@
#if DEBUG
# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_HWCACHE_ALIGN | \
- SLAB_NO_REAP | SLAB_CACHE_DMA)
+ SLAB_NO_REAP | SLAB_CACHE_DMA | \
+ SLAB_MUST_HWCACHE_ALIGN)
#else
-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | SLAB_CACHE_DMA)
+# define CREATE_MASK (SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
+ SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN)
#endif
/*
@@ -641,7 +643,7 @@
flags &= ~SLAB_POISON;
}
#if FORCED_DEBUG
- if (size < (PAGE_SIZE>>3))
+ if ((size < (PAGE_SIZE>>3)) && !(flags & SLAB_MUST_HWCACHE_ALIGN))
/*
* do not red zone large object, causes severe
* fragmentation.
--- linux/include/linux/slab.h.orig Sun Dec 31 20:10:17 2000
+++ linux/include/linux/slab.h Wed Mar 28 14:44:25 2001
@@ -36,6 +36,7 @@
#define SLAB_NO_REAP 0x00001000UL /* never reap from the cache */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on a h/w cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */
+#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */
/* flags passed to a constructor func */
#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* if not set, then deconstructor */
--- linux/include/asm-i386/pgalloc.h.orig Wed Mar 28 14:41:59 2001
+++ linux/include/asm-i386/pgalloc.h Wed Mar 28 14:44:25 2001
@@ -20,13 +20,19 @@
#if CONFIG_X86_PAE
-extern void *kmalloc(size_t, int);
-extern void kfree(const void *);
+struct kmem_cache_s;
+
+extern struct kmem_cache_s *pae_pgd_cachep;
+
+extern void *kmem_cache_alloc(struct kmem_cache_s *, int);
+extern void kmem_cache_free(struct kmem_cache_s *, void *);
+
+extern void init_pae_pgd_cache(void);
extern __inline__ pgd_t *get_pgd_slow(void)
{
int i;
- pgd_t *pgd = kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+ pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
if (pgd) {
for (i = 0; i < USER_PTRS_PER_PGD; i++) {
@@ -42,7 +48,7 @@
out_oom:
for (i--; i >= 0; i--)
free_page((unsigned long)__va(pgd_val(pgd[i])-1));
- kfree(pgd);
+ kmem_cache_free(pae_pgd_cachep, pgd);
return NULL;
}
@@ -88,7 +94,7 @@
for (i = 0; i < USER_PTRS_PER_PGD; i++)
free_page((unsigned long)__va(pgd_val(pgd[i])-1));
- kfree(pgd);
+ kmem_cache_free(pae_pgd_cachep, pgd);
#else
free_page((unsigned long)pgd);
#endif
--- linux/arch/i386/mm/init.c.orig Wed Mar 28 14:41:56 2001
+++ linux/arch/i386/mm/init.c Wed Mar 28 14:42:02 2001
@@ -15,6 +15,9 @@
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
@@ -131,15 +134,11 @@
pte_t *pte;
pgd = swapper_pg_dir + __pgd_offset(vaddr);
- if (pgd_none(*pgd)) {
- printk("PAE BUG #00!\n");
- return;
- }
+ if (pgd_none(*pgd))
+ BUG();
pmd = pmd_offset(pgd, vaddr);
- if (pmd_none(*pmd)) {
- printk("PAE BUG #01!\n");
- return;
- }
+ if (pmd_none(*pmd))
+ BUG();
pte = pte_offset(pmd, vaddr);
if (pte_val(*pte))
pte_ERROR(*pte);
@@ -183,7 +182,7 @@
pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
if (pmd != pmd_offset(pgd, 0))
- printk("PAE BUG #02!\n");
+ BUG();
}
pmd = pmd_offset(pgd, vaddr);
#else
@@ -581,3 +580,23 @@
val->mem_unit = PAGE_SIZE;
return;
}
+
+#if CONFIG_X86_PAE
+
+#include <linux/slab.h>
+
+struct kmem_cache_s *pae_pgd_cachep;
+
+void __init init_pae_pgd_cache(void)
+{
+ /*
+ * PAE pgds must be 16-byte aligned:
+ */
+ pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
+ SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
+ if (!pae_pgd_cachep)
+ panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
+}
+
+#endif
+
* Re: [patch] pae-2.4.3-C3
From: Albert D. Cahalan @ 2001-03-29 6:10 UTC (permalink / raw)
To: mingo; +Cc: Linux Kernel List, linux-mm
Ingo Molnar writes:
> The attached pae-2.4.3-C3 patch fixes the PAE code to work with SLAB
> FORCED_DEBUG (which enables redzoning) as well.
>
> The problem is that redzoning is enabled unconditionally, and SLAB has no
> information about how crucial alignment is for any particular SLAB
> cache. In PAE mode, the CPU generates a general protection fault if a
> non-16-byte-aligned pgd is loaded into %cr3.
How about just fixing the debug code to align things? Sure, it wastes
a bit of memory, but debug code is like that.
Sane alignment might be: the largest power-of-two factor of the size,
or 4 bytes, whichever is larger (adjust the "4" per-arch as needed).
For an SMP config, set a minimum alignment equal to the cache line size.
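A minimal sketch of that rule (the function name and the CONFIG_SMP
guard are hypothetical, not code from this thread):

/*
 * Align to the largest power-of-two factor of the object size, with a
 * 4-byte floor, and a full cache line as the floor on SMP.
 */
static unsigned long debug_align_sketch(unsigned long size)
{
	unsigned long align = size & -size;	/* largest power-of-two divisor */

	if (align < 4)				/* per-arch minimum, "4" here */
		align = 4;
#ifdef CONFIG_SMP
	if (align < SMP_CACHE_BYTES)		/* one cache line on SMP */
		align = SMP_CACHE_BYTES;
#endif
	return align;
}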
* Re: [patch] pae-2.4.3-C3
From: Ingo Molnar @ 2001-03-29 7:14 UTC (permalink / raw)
To: Albert D. Cahalan; +Cc: Linux Kernel List, linux-mm
On Thu, 29 Mar 2001, Albert D. Cahalan wrote:
> > The problem is that redzoning is enabled unconditionally, and SLAB has no
> > information about how crucial alignment is for any particular SLAB
> > cache. In PAE mode, the CPU generates a general protection fault if a
> > non-16-byte-aligned pgd is loaded into %cr3.
>
> How about just fixing the debug code to align things? Sure, it wastes a
> bit of memory, but debug code is like that.
Redzoning also catches code that assumes a specific alignment; if the
debug code simply aligned everything, those bugs would stay hidden.
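A hypothetical illustration of the point (not the real slab internals):
redzoning places a guard word in front of each object, so the returned
pointer loses the slab's natural alignment, and callers that silently
assumed that alignment now fault instead of working by accident:

/*
 * Illustration only: the guard word shifts the object by one word, so
 * any 16-byte alignment of the underlying memory is lost.
 */
static void *redzone_alloc_sketch(void *slab_mem)
{
	unsigned long *guard = slab_mem;

	*guard = 0x5A2CF071UL;	/* arbitrary red-zone pattern */
	return guard + 1;	/* object starts one word in */
}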
Ingo