 mm/memory.c |   18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

Index: linux-2.6/mm/memory.c
===================================================================
--- linux-2.6.orig/mm/memory.c	2006-05-06 16:51:01.000000000 +0200
+++ linux-2.6/mm/memory.c	2006-05-06 17:15:16.000000000 +0200
@@ -50,6 +50,7 @@
 #include <linux/rmap.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/writeback.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -2078,6 +2079,7 @@ static int do_no_page(struct mm_struct *
 	unsigned int sequence = 0;
 	int ret = VM_FAULT_MINOR;
 	int anon = 0;
+	int dirty = 0;
 
 	pte_unmap(page_table);
 	BUG_ON(vma->vm_flags & VM_PFNMAP);
@@ -2165,8 +2167,10 @@ retry:
 		} else {
 			inc_mm_counter(mm, file_rss);
 			page_add_file_rmap(new_page);
-			if (write_access)
+			if (write_access) {
 				set_page_dirty(new_page);
+				dirty++;
+			}
 		}
 	} else {
 		/* One of our sibling threads was faster, back out. */
@@ -2179,6 +2183,8 @@ retry:
 	lazy_mmu_prot_update(entry);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
+	if (dirty)
+		balance_dirty_pages_ratelimited_nr(mapping, dirty);
 	return ret;
 oom:
 	page_cache_release(new_page);
@@ -2243,6 +2249,8 @@ static inline int handle_pte_fault(struc
 	pte_t entry;
 	pte_t old_entry;
 	spinlock_t *ptl;
+	struct address_space *mapping;
+	int dirty = 0;
 
 	old_entry = entry = *pte;
 	if (!pte_present(entry)) {
@@ -2273,8 +2281,11 @@ static inline int handle_pte_fault(struc
 			struct page *page;
 			entry = pte_mkwrite(entry);
 			page = vm_normal_page(vma, address, entry);
-			if (page)
+			if (page) {
 				set_page_dirty(page);
+				mapping = page_mapping(page);
+				dirty++;
+			}
 		}
 	}
 	entry = pte_mkdirty(entry);
@@ -2297,6 +2308,9 @@ static inline int handle_pte_fault(struc
 	}
 unlock:
 	pte_unmap_unlock(pte, ptl);
+	if (dirty && mapping)
+		balance_dirty_pages_ratelimited_nr(mapping, dirty);
+
 	return VM_FAULT_MINOR;
 }