[PATCH 2.5.66-mm2] Optimization for object-based rmap
From: Dave McCracken @ 2003-04-02 17:18 UTC
To: Andrew Morton; +Cc: Linux Memory Management, Linux Kernel
[-- Attachment #1: Type: text/plain, Size: 550 bytes --]
It occurred to me that a simple way to improve objrmap performance would be
to sort the vma chains hanging off the address_space by file offset.  Here's
a patch that does it.  With the chains sorted by vm_pgoff, the scans in
rmap.c can stop as soon as they reach a vma that starts beyond the page
they're looking for.  Tests show no measurable cost, and it could
significantly reduce the impact of the worst-case scenario (100 mappings *
100 processes) we've all worried about.
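For anyone who wants to see the idea in isolation, here is a minimal
userspace sketch of the same technique (the struct and function names are
hypothetical stand-ins, not the kernel's): mappings are inserted into a
chain kept sorted by starting offset, so a per-page scan can stop at the
first mapping that starts past the page.

	#include <stdio.h>

	/* Hypothetical stand-in for a vma on an address_space chain. */
	struct mapping_node {
		unsigned long pgoff;	/* first page this mapping covers */
		unsigned long npages;	/* length of the mapping in pages */
		struct mapping_node *next;
	};

	/* Insert before the first node with an equal or higher offset,
	 * keeping the chain sorted by pgoff (what the mmap.c hunk does). */
	static void insert_sorted(struct mapping_node **head,
				  struct mapping_node *new)
	{
		struct mapping_node **p = head;

		while (*p && (*p)->pgoff < new->pgoff)
			p = &(*p)->next;
		new->next = *p;
		*p = new;
	}

	/* Count mappings covering @index.  Because the chain is sorted we
	 * can stop at the first mapping that starts past the page -- the
	 * same early exit the rmap.c hunks add. */
	static int count_mappers(struct mapping_node *head, unsigned long index)
	{
		int count = 0;

		for (struct mapping_node *m = head; m; m = m->next) {
			if (m->pgoff > index)
				break;	/* everything after starts later too */
			if (index < m->pgoff + m->npages)
				count++;
		}
		return count;
	}

	int main(void)
	{
		struct mapping_node nodes[] = {
			{ 40, 10, NULL }, { 0, 4, NULL }, { 8, 16, NULL },
		};
		struct mapping_node *head = NULL;

		for (int i = 0; i < 3; i++)
			insert_sorted(&head, &nodes[i]);

		/* Page 10 is covered only by the mapping at pgoff 8. */
		printf("page 10 mapped by %d vma(s)\n", count_mappers(head, 10));
		return 0;
	}

The early break is what turns the worst case from "walk every vma on the
chain" into "walk only the vmas that could possibly map the page."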
Dave McCracken
======================================================================
Dave McCracken IBM Linux Base Kernel Team 1-512-838-3059
dmccr@us.ibm.com T/L 678-3059
[-- Attachment #2: objsort-2.5.66-mm2-1.diff --]
[-- Type: text/plain, Size: 2893 bytes --]
--- 2.5.66-mm2-test/./mm/mmap.c	2003-04-01 11:23:35.000000000 -0600
+++ 2.5.66-mm2-fix/./mm/mmap.c	2003-04-01 11:25:31.000000000 -0600
@@ -311,14 +311,26 @@ static inline void __vma_link_file(struc
 	if (file) {
 		struct inode * inode = file->f_dentry->d_inode;
 		struct address_space *mapping = inode->i_mapping;
+		struct list_head *vmlist, *vmhead;
 
 		if (vma->vm_flags & VM_DENYWRITE)
			atomic_dec(&inode->i_writecount);
 
 		if (vma->vm_flags & VM_SHARED)
-			list_add_tail(&vma->shared, &mapping->i_mmap_shared);
+			vmhead = &mapping->i_mmap_shared;
 		else
-			list_add_tail(&vma->shared, &mapping->i_mmap);
+			vmhead = &mapping->i_mmap;
+
+		/* Find the first vma with an equal or higher offset. */
+		list_for_each(vmlist, vmhead) {
+			struct vm_area_struct *vmtemp;
+			vmtemp = list_entry(vmlist, struct vm_area_struct, shared);
+			if (vmtemp->vm_pgoff >= vma->vm_pgoff)
+				break;
+		}
+		/* Insert before it (at the tail if the whole chain was
+		 * walked), keeping the chain sorted by vm_pgoff. */
+		list_add_tail(&vma->shared, vmlist);
 	}
 }
 
--- 2.5.66-mm2-test/./mm/rmap.c	2003-04-01 14:09:26.000000000 -0600
+++ 2.5.66-mm2-fix/./mm/rmap.c	2003-04-01 11:30:21.000000000 -0600
@@ -207,12 +207,16 @@ page_referenced_obj(struct page *page)
 	if (down_trylock(&mapping->i_shared_sem))
 		return 1;
 
-	list_for_each_entry(vma, &mapping->i_mmap, shared)
+	list_for_each_entry(vma, &mapping->i_mmap, shared) {
+		if (vma->vm_pgoff > (page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)))
+			break;
 		referenced += page_referenced_obj_one(vma, page);
-
-	list_for_each_entry(vma, &mapping->i_mmap_shared, shared)
+	}
+	list_for_each_entry(vma, &mapping->i_mmap_shared, shared) {
+		if (vma->vm_pgoff > (page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)))
+			break;
 		referenced += page_referenced_obj_one(vma, page);
-
+	}
 	up(&mapping->i_shared_sem);
 
 	return referenced;
@@ -572,12 +576,16 @@ try_to_unmap_obj(struct page *page)
 	if (down_trylock(&mapping->i_shared_sem))
 		return ret;
 
 	list_for_each_entry(vma, &mapping->i_mmap, shared) {
+		if (vma->vm_pgoff > (page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)))
+			break;
 		ret = try_to_unmap_obj_one(vma, page);
 		if (ret == SWAP_FAIL || !page->pte.mapcount)
 			goto out;
 	}
 	list_for_each_entry(vma, &mapping->i_mmap_shared, shared) {
+		if (vma->vm_pgoff > (page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)))
+			break;
 		ret = try_to_unmap_obj_one(vma, page);
 		if (ret == SWAP_FAIL || !page->pte.mapcount)
 			goto out;
@@ -868,6 +876,8 @@ again:
 		index = NRPTE - index;
 
 	list_for_each_entry(vma, &mapping->i_mmap, shared) {
+		if (vma->vm_pgoff > (page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)))
+			break;
 		pte = find_pte(vma, page, NULL);
 		if (pte) {
 			ptecount++;
@@ -886,6 +896,8 @@ again:
 		}
 	}
 	list_for_each_entry(vma, &mapping->i_mmap_shared, shared) {
+		if (vma->vm_pgoff > (page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT)))
+			break;
 		pte = find_pte(vma, page, NULL);
 		if (pte) {
 			ptecount++;