* [PATCH 2.5.41-mm3] Proactively share page tables for shared memory
@ 2002-10-11 19:31 Dave McCracken
From: Dave McCracken @ 2002-10-11 19:31 UTC (permalink / raw)
To: Andrew Morton; +Cc: Linux Memory Management, Linux Kernel
[-- Attachment #1: Type: text/plain, Size: 464 bytes --]
This is the other part of shared page tables. It actively attempts to
find and share a pte page for newly mapped shared memory.

This patch is intended to be applied on top of 2.5.41-mm3 plus the bugfix
patch I submitted this morning.
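
For context, here is a minimal userspace sketch (not part of the patch; the
file name and sizes are made up) of the kind of mapping this targets. Note
the patch only shares a pte page when the vmas in the different processes
have identical vm_start, vm_end and vm_pgoff, so the processes must end up
with the mapping at the same virtual address, as typically happens when the
same program issues the same mmap calls.

/*
 * Illustration only, not part of the patch.  Each process that maps the
 * same file MAP_SHARED over the same range, covering at least one full
 * pte page (PMD_SIZE, 4 MB on i386 without PAE), is a candidate for
 * sharing that pte page instead of populating its own.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 8 * 1024 * 1024;	/* made-up size, spans whole pte pages */
	int fd = open("/tmp/bigshared", O_RDWR);	/* made-up file */
	void *p;

	if (fd < 0)
		return 1;
	/*
	 * With this patch, later processes doing this same mapping can
	 * reuse the pte pages already set up by an earlier mapper.
	 */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* ... touch the mapping ... */
	munmap(p, len);
	close(fd);
	return 0;
}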
Dave McCracken
======================================================================
Dave McCracken IBM Linux Base Kernel Team 1-512-838-3059
dmccr@us.ibm.com T/L 678-3059
[-- Attachment #2: shmmap-2.5.41-mm3-1.diff --]
[-- Type: text/plain, Size: 2734 bytes --]
--- 2.5.41-mm3-shpte/./mm/memory.c 2002-10-11 10:59:14.000000000 -0500
+++ 2.5.41-mm3-shmmap/./mm/memory.c 2002-10-11 13:36:34.000000000 -0500
@@ -365,6 +365,77 @@
 out_map:
 	return pte_offset_map(pmd, address);
 }
+
+static pte_t *pte_try_to_share(struct mm_struct *mm, struct vm_area_struct *vma,
+			       pmd_t *pmd, unsigned long address)
+{
+	struct address_space *as;
+	struct vm_area_struct *lvma;
+	struct page *ptepage;
+	unsigned long base;
+
+	/*
+	 * It already has a pte page.  No point in checking further.
+	 * We can go ahead and return it now, since we know it's there.
+	 */
+	if (pmd_present(*pmd)) {
+		ptepage = pmd_page(*pmd);
+		pte_page_lock(ptepage);
+		return pte_page_map(ptepage, address);
+	}
+
+	/* It's not even shared memory.  We definitely can't share the page. */
+	if (!(vma->vm_flags & VM_SHARED))
+		return NULL;
+
+	/* We can only share if the entire pte page fits inside the vma */
+	base = address & ~((PTRS_PER_PTE * PAGE_SIZE) - 1);
+	if ((base < vma->vm_start) || (vma->vm_end < (base + PMD_SIZE)))
+		return NULL;
+
+	as = vma->vm_file->f_dentry->d_inode->i_mapping;
+
+	list_for_each_entry(lvma, &as->i_mmap_shared, shared) {
+		pgd_t *lpgd;
+		pmd_t *lpmd;
+		pmd_t pmdval;
+
+		/* Skip the one we're working on */
+		if (lvma == vma)
+			continue;
+
+		/* It has to be mapping to the same address */
+		if ((lvma->vm_start != vma->vm_start) ||
+		    (lvma->vm_end != vma->vm_end) ||
+		    (lvma->vm_pgoff != vma->vm_pgoff))
+			continue;
+
+		lpgd = pgd_offset(lvma->vm_mm, address);
+		lpmd = pmd_offset(lpgd, address);
+
+		/* This page table doesn't have a pte page either, so skip it. */
+		if (!pmd_present(*lpmd))
+			continue;
+
+		/* Ok, we can share it. */
+
+		ptepage = pmd_page(*lpmd);
+		pte_page_lock(ptepage);
+		get_page(ptepage);
+		/*
+		 * If this vma is only mapping it read-only, set the
+		 * pmd entry read-only to protect it from writes.
+		 * Otherwise set it writeable.
+		 */
+		if (vma->vm_flags & VM_MAYWRITE)
+			pmdval = pmd_mkwrite(*lpmd);
+		else
+			pmdval = pmd_wrprotect(*lpmd);
+		set_pmd(pmd, pmdval);
+		return pte_page_map(ptepage, address);
+	}
+	return NULL;
+}
 #endif
 
 pte_t * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
@@ -1966,8 +2037,11 @@
 			pte_page_lock(pmd_page(*pmd));
 			pte = pte_unshare(mm, pmd, address);
 		} else {
-			pte = pte_alloc_map(mm, pmd, address);
-			pte_page_lock(pmd_page(*pmd));
+			pte = pte_try_to_share(mm, vma, pmd, address);
+			if (!pte) {
+				pte = pte_alloc_map(mm, pmd, address);
+				pte_page_lock(pmd_page(*pmd));
+			}
 		}
 #else
 		pte = pte_alloc_map(mm, pmd, address);
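
A side note on the coverage test in pte_try_to_share(): the expression
base = address & ~((PTRS_PER_PTE * PAGE_SIZE) - 1) rounds the faulting
address down to the start of the region mapped by one pte page, and sharing
is only attempted when the vma spans that whole region. Below is a
standalone sketch of the same arithmetic; the constants assume i386 without
PAE (4096-byte pages, 1024 ptes per pte page, so 4 MB per pte page), and the
helper name and addresses are purely illustrative, not taken from the patch.

/* Standalone illustration of the pte-page coverage test. */
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define EX_PTRS_PER_PTE	1024UL
#define EX_PMD_SIZE	(EX_PTRS_PER_PTE * EX_PAGE_SIZE)	/* 4 MB */

/* 1 if the pte page containing 'address' lies entirely inside the vma */
static int pte_page_covered(unsigned long address,
			    unsigned long vm_start, unsigned long vm_end)
{
	unsigned long base = address & ~(EX_PMD_SIZE - 1);

	return base >= vm_start && base + EX_PMD_SIZE <= vm_end;
}

int main(void)
{
	/* vma [0x40000000, 0x40800000): base is 0x40000000, fully covered -> 1 */
	printf("%d\n", pte_page_covered(0x40123000UL, 0x40000000UL, 0x40800000UL));

	/* vma [0x40100000, 0x40300000): the pte page spills past the vma -> 0 */
	printf("%d\n", pte_page_covered(0x40123000UL, 0x40100000UL, 0x40300000UL));
	return 0;
}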