RFC/Patch Make Page Tables Relocatable Part 1/2 Page Table Migration Code
@ 2008-01-04 15:59 Ross Biro
From: Ross Biro @ 2008-01-04 15:59 UTC (permalink / raw)
To: linux-mm
[-- Attachment #1: Type: text/plain, Size: 185 bytes --]
This patch adds a flag marking that a particular mm needs a TLB flush, so
the flush can be deferred until the remaining changes have been made and
then done once. It is similar to lazy TLB mode, but not quite the same.
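
As a rough sketch of how the two helpers added below are meant to pair up
(this is not part of the patch; migrate_one_pt_page() is a made-up stand-in
for the part 2 migration code):

#include <linux/sched.h>
#include <asm/tlb.h>

static void migrate_page_tables(struct mm_struct *mm,
                                struct page **pages, int nr)
{
        int i, deferred = 0;

        for (i = 0; i < nr; i++) {
                /* Move one page table page; hypothetical part 2 helper. */
                migrate_one_pt_page(mm, pages[i]);
                /* Flag the mm rather than flushing after every page. */
                deferred |= maybe_need_flush_mm(mm);
        }

        /*
         * One flush covers all the deferred changes; it is a no-op if an
         * intervening flush_tlb_mm() already cleared MMF_NEED_FLUSH.
         */
        if (deferred)
                maybe_flush_tlb_mm(mm);
}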
Ross
[-- Attachment #2: maybeflush.patch --]
[-- Type: application/octet-stream, Size: 8364 bytes --]
diff -urwNbB 2.6.23/arch/alpha/kernel/smp.c 2.6.23a/arch/alpha/kernel/smp.c
--- 2.6.23/arch/alpha/kernel/smp.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/alpha/kernel/smp.c 2007-10-29 13:50:06.000000000 -0700
@@ -850,6 +850,8 @@
{
preempt_disable();
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
if (mm == current->active_mm) {
flush_tlb_current(mm);
if (atomic_read(&mm->mm_users) <= 1) {
diff -urwNbB 2.6.23/arch/arm/kernel/smp.c 2.6.23a/arch/arm/kernel/smp.c
--- 2.6.23/arch/arm/kernel/smp.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/arm/kernel/smp.c 2007-10-29 13:50:21.000000000 -0700
@@ -713,6 +713,8 @@
{
cpumask_t mask = mm->cpu_vm_mask;
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
}
diff -urwNbB 2.6.23/arch/avr32/mm/tlb.c 2.6.23a/arch/avr32/mm/tlb.c
--- 2.6.23/arch/avr32/mm/tlb.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/avr32/mm/tlb.c 2007-10-29 13:50:39.000000000 -0700
@@ -249,6 +249,8 @@
void flush_tlb_mm(struct mm_struct *mm)
{
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
/* Invalidate all TLB entries of this process by getting a new ASID */
if (mm->context != NO_CONTEXT) {
unsigned long flags;
diff -urwNbB 2.6.23/arch/cris/arch-v10/mm/tlb.c 2.6.23a/arch/cris/arch-v10/mm/tlb.c
--- 2.6.23/arch/cris/arch-v10/mm/tlb.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/cris/arch-v10/mm/tlb.c 2007-10-29 13:50:55.000000000 -0700
@@ -69,6 +69,8 @@
D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
if(page_id == NO_CONTEXT)
return;
diff -urwNbB 2.6.23/arch/cris/arch-v32/kernel/smp.c 2.6.23a/arch/cris/arch-v32/kernel/smp.c
--- 2.6.23/arch/cris/arch-v32/kernel/smp.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/cris/arch-v32/kernel/smp.c 2007-10-29 13:51:06.000000000 -0700
@@ -237,6 +237,7 @@
void flush_tlb_mm(struct mm_struct *mm)
{
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
__flush_tlb_mm(mm);
flush_tlb_common(mm, FLUSH_ALL, 0);
/* No more mappings in other CPUs */
diff -urwNbB 2.6.23/arch/i386/kernel/smp.c 2.6.23a/arch/i386/kernel/smp.c
--- 2.6.23/arch/i386/kernel/smp.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/i386/kernel/smp.c 2007-10-29 13:51:47.000000000 -0700
@@ -410,6 +410,8 @@
{
cpumask_t cpu_mask;
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
preempt_disable();
cpu_mask = mm->cpu_vm_mask;
cpu_clear(smp_processor_id(), cpu_mask);
diff -urwNbB 2.6.23/arch/i386/mach-voyager/voyager_smp.c 2.6.23a/arch/i386/mach-voyager/voyager_smp.c
--- 2.6.23/arch/i386/mach-voyager/voyager_smp.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/i386/mach-voyager/voyager_smp.c 2007-10-29 13:51:55.000000000 -0700
@@ -924,6 +924,8 @@
{
unsigned long cpu_mask;
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
preempt_disable();
cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
diff -urwNbB 2.6.23/arch/ia64/kernel/smp.c 2.6.23a/arch/ia64/kernel/smp.c
--- 2.6.23/arch/ia64/kernel/smp.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/ia64/kernel/smp.c 2007-10-29 13:52:03.000000000 -0700
@@ -325,6 +325,8 @@
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
preempt_disable();
/* this happens for the common case of a single-threaded fork(): */
if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
diff -urwNbB 2.6.23/arch/m32r/kernel/smp.c 2.6.23a/arch/m32r/kernel/smp.c
--- 2.6.23/arch/m32r/kernel/smp.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/m32r/kernel/smp.c 2007-10-29 13:52:49.000000000 -0700
@@ -280,6 +280,8 @@
unsigned long *mmc;
unsigned long flags;
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
preempt_disable();
cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id];
diff -urwNbB 2.6.23/arch/mips/kernel/smp.c 2.6.23a/arch/mips/kernel/smp.c
--- 2.6.23/arch/mips/kernel/smp.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/mips/kernel/smp.c 2007-10-29 13:53:21.000000000 -0700
@@ -387,6 +387,8 @@
void flush_tlb_mm(struct mm_struct *mm)
{
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
diff -urwNbB 2.6.23/arch/powerpc/mm/tlb_32.c 2.6.23a/arch/powerpc/mm/tlb_32.c
--- 2.6.23/arch/powerpc/mm/tlb_32.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/powerpc/mm/tlb_32.c 2007-10-29 13:54:06.000000000 -0700
@@ -144,6 +144,8 @@
{
struct vm_area_struct *mp;
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
if (Hash == 0) {
_tlbia();
return;
diff -urwNbB 2.6.23/arch/ppc/mm/tlb.c 2.6.23a/arch/ppc/mm/tlb.c
--- 2.6.23/arch/ppc/mm/tlb.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/ppc/mm/tlb.c 2007-10-29 13:54:21.000000000 -0700
@@ -144,6 +144,8 @@
{
struct vm_area_struct *mp;
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
if (Hash == 0) {
_tlbia();
return;
diff -urwNbB 2.6.23/arch/sh64/mm/fault.c 2.6.23a/arch/sh64/mm/fault.c
--- 2.6.23/arch/sh64/mm/fault.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/sh64/mm/fault.c 2007-10-29 13:55:03.000000000 -0700
@@ -517,6 +517,8 @@
++calls_to_flush_tlb_mm;
#endif
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
if (mm->context == NO_CONTEXT)
return;
diff -urwNbB 2.6.23/arch/sparc/kernel/smp.c 2.6.23a/arch/sparc/kernel/smp.c
--- 2.6.23/arch/sparc/kernel/smp.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/sparc/kernel/smp.c 2007-10-29 13:55:22.000000000 -0700
@@ -163,6 +163,8 @@
void smp_flush_tlb_mm(struct mm_struct *mm)
{
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
if(mm->context != NO_CONTEXT) {
cpumask_t cpu_mask = mm->cpu_vm_mask;
cpu_clear(smp_processor_id(), cpu_mask);
diff -urwNbB 2.6.23/arch/sparc64/kernel/smp.c 2.6.23a/arch/sparc64/kernel/smp.c
--- 2.6.23/arch/sparc64/kernel/smp.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/sparc64/kernel/smp.c 2007-10-29 13:56:32.000000000 -0700
@@ -1112,6 +1112,8 @@
u32 ctx = CTX_HWBITS(mm->context);
int cpu = get_cpu();
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
if (atomic_read(&mm->mm_users) == 1) {
mm->cpu_vm_mask = cpumask_of_cpu(cpu);
goto local_flush_and_out;
diff -urwNbB 2.6.23/arch/um/kernel/tlb.c 2.6.23a/arch/um/kernel/tlb.c
--- 2.6.23/arch/um/kernel/tlb.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/um/kernel/tlb.c 2007-10-29 13:57:05.000000000 -0700
@@ -402,6 +402,7 @@
void flush_tlb_mm(struct mm_struct *mm)
{
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}
diff -urwNbB 2.6.23/arch/xtensa/mm/tlb.c 2.6.23a/arch/xtensa/mm/tlb.c
--- 2.6.23/arch/xtensa/mm/tlb.c 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/arch/xtensa/mm/tlb.c 2007-10-29 13:57:26.000000000 -0700
@@ -63,6 +63,8 @@
void flush_tlb_mm(struct mm_struct *mm)
{
+ clear_bit(MMF_NEED_FLUSH, &mm->flags);
+
if (mm == current->active_mm) {
int flags;
local_save_flags(flags);
diff -urwNbB 2.6.23/include/asm-generic/tlb.h 2.6.23a/include/asm-generic/tlb.h
--- 2.6.23/include/asm-generic/tlb.h 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/include/asm-generic/tlb.h 2007-11-30 08:04:09.000000000 -0800
@@ -145,4 +145,23 @@
#define tlb_migrate_finish(mm) do {} while (0)
+/* Flush an mm we modified earlier but whose flush we deferred
+ on the assumption that more changes were coming. */
+static inline void maybe_flush_tlb_mm(struct mm_struct *mm)
+{
+ if (test_and_clear_bit(MMF_NEED_FLUSH, &mm->flags))
+ flush_tlb_mm(mm);
+}
+
+/* Flag an mm as needing a flush if any CPU may still hold stale TLB entries. */
+static inline int maybe_need_flush_mm(struct mm_struct *mm)
+{
+ if (!cpus_empty(mm->cpu_vm_mask)) {
+ set_bit(MMF_NEED_FLUSH, &mm->flags);
+ return 1;
+ }
+ return 0;
+}
+
+
#endif /* _ASM_GENERIC__TLB_H */
diff -urwNbB 2.6.23/include/linux/sched.h 2.6.23a/include/linux/sched.h
--- 2.6.23/include/linux/sched.h 2007-10-09 13:31:38.000000000 -0700
+++ 2.6.23a/include/linux/sched.h 2008-01-02 08:49:40.000000000 -0800
@@ -366,6 +366,10 @@
#define MMF_DUMP_FILTER_DEFAULT \
((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
+/* Misc MM flags. */
+#define MMF_NEED_FLUSH 6
+#define MMF_NEED_RELOAD 7 /* Only meaningful on some archs. */
+
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;