From: Dave McCracken <dmccr@us.ibm.com>
To: Andrew Morton <akpm@digeo.com>
Cc: Linux Kernel <linux-kernel@vger.kernel.org>,
Linux Memory Management <linux-mm@kvack.org>
Subject: [PATCH 2.5.42-mm3] Fix unmap_page_range again for shared page tables
Date: Tue, 15 Oct 2002 17:22:13 -0500
Message-ID: <274410000.1034720533@baldur.austin.ibm.com>
My previous attempt at getting unmap_page_range right had a bad race
condition. Here's a patch that should resolve it correctly; it applies
on top of the patch I sent earlier today.
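
For anyone not reading the whole diff: the unsharing work that used to be
a separate unmap_shared_range() pass, run before the mmu_gather was even
started, is now folded into zap_pmd_range() and done under the pte page
lock while each pmd is zapped. Because the gather shouldn't be left
pending across pte_unshare(), it is finished and restarted around that
call, which is why unmap_page_range() and zap_pmd_range() now take
mmu_gather_t **. A rough sketch of the new per-pmd flow, simplified from
the attached diff (the gotos are flattened here; the helpers are the
shared-pte primitives from the earlier patch, not anything new):

    ptepage = pmd_page(*pmd);
    pte_page_lock(ptepage);
    if (page_count(ptepage) > 1 &&
        address <= ptepage->index &&
        end >= ptepage->index + PMD_SIZE) {
        /* Whole shared pte page falls in the range: just drop our reference. */
        pmd_clear(pmd);
        pgtable_remove_rmap_locked(ptepage, mm);
        mm->rss -= ptepage->private;
        put_page(ptepage);
    } else {
        if (page_count(ptepage) > 1) {
            /* Partially covered: flush the gather, unshare, restart the gather. */
            tlb_finish_mmu(*tlb, address, end);
            pte_unshare(mm, pmd, address);
            *tlb = tlb_gather_mmu(mm, 0);
            ptepage = pmd_page(*pmd);
        }
        zap_pte_range(*tlb, pmd, address, end - address);
    }
    pte_page_unlock(ptepage);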
Dave McCracken
======================================================================
Dave McCracken IBM Linux Base Kernel Team 1-512-838-3059
dmccr@us.ibm.com T/L 678-3059
[-- Attachment #2: shpte-2.5.42-mm3-2.diff --]
--- 2.5.42-mm3-shsent/./mm/mmap.c 2002-10-15 09:59:37.000000000 -0500
+++ 2.5.42-mm3-shpte/./mm/mmap.c 2002-10-15 14:02:17.000000000 -0500
@@ -23,7 +23,7 @@
#include <asm/pgalloc.h>
#include <asm/tlb.h>
-extern void unmap_page_range(mmu_gather_t *,struct vm_area_struct *vma, unsigned long address, unsigned long size);
+extern void unmap_page_range(mmu_gather_t **,struct vm_area_struct *vma, unsigned long address, unsigned long size);
#ifdef CONFIG_SHAREPTE
extern void unmap_shared_range(struct mm_struct *mm, unsigned long address, unsigned long end);
#endif
@@ -994,10 +994,6 @@
{
mmu_gather_t *tlb;
-#ifdef CONFIG_SHAREPTE
- /* Make sure all the pte pages in the range are unshared if necessary */
- unmap_shared_range(mm, start, end);
-#endif
tlb = tlb_gather_mmu(mm, 0);
do {
@@ -1006,7 +1002,7 @@
from = start < mpnt->vm_start ? mpnt->vm_start : start;
to = end > mpnt->vm_end ? mpnt->vm_end : end;
- unmap_page_range(tlb, mpnt, from, to);
+ unmap_page_range(&tlb, mpnt, from, to);
if (mpnt->vm_flags & VM_ACCOUNT) {
len = to - from;
--- 2.5.42-mm3-shsent/./mm/memory.c 2002-10-15 14:11:54.000000000 -0500
+++ 2.5.42-mm3-shpte/./mm/memory.c 2002-10-15 14:01:46.000000000 -0500
@@ -720,82 +720,11 @@
}
#endif
-#ifdef CONFIG_SHAREPTE
-static inline void unmap_shared_pmd(struct mm_struct *mm, pgd_t *pgd,
- unsigned long address, unsigned long end)
-{
- struct page *ptepage;
- pmd_t * pmd;
-
- if (pgd_none(*pgd))
- return;
- if (pgd_bad(*pgd)) {
- pgd_ERROR(*pgd);
- pgd_clear(pgd);
- return;
- }
- pmd = pmd_offset(pgd, address);
- if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
- end = ((address + PGDIR_SIZE) & PGDIR_MASK);
- do {
- if (pmd_none(*pmd))
- goto skip_pmd;
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- goto skip_pmd;
- }
-
- ptepage = pmd_page(*pmd);
- pte_page_lock(ptepage);
-
- if (page_count(ptepage) > 1) {
- if ((address <= ptepage->index) &&
- (end >= (ptepage->index + PMD_SIZE))) {
- pmd_clear(pmd);
- pgtable_remove_rmap_locked(ptepage, mm);
- mm->rss -= ptepage->private;
- put_page(ptepage);
- } else {
- pte_unshare(mm, pmd, address);
- ptepage = pmd_page(*pmd);
- }
- }
- pte_page_unlock(ptepage);
-skip_pmd:
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
-}
-
-void unmap_shared_range(struct mm_struct *mm, unsigned long address, unsigned long end)
-{
- pgd_t * pgd;
-
- if (address >= end)
- BUG();
- pgd = pgd_offset(mm, address);
- do {
- unmap_shared_pmd(mm, pgd, address, end - address);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- pgd++;
- } while (address && (address < end));
-}
-#endif
-
static void zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
{
unsigned long offset;
pte_t *ptep;
- if (pmd_none(*pmd))
- return;
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return;
- }
-
offset = address & ~PMD_MASK;
if (offset + size > PMD_SIZE)
size = PMD_SIZE - offset;
@@ -833,9 +762,10 @@
pte_unmap(ptep-1);
}
-static void zap_pmd_range(mmu_gather_t *tlb, pgd_t * dir, unsigned long address, unsigned long size)
+static void zap_pmd_range(mmu_gather_t **tlb, pgd_t * dir, unsigned long address, unsigned long size)
{
struct page *ptepage;
+ struct mm_struct *mm = (*tlb)->mm;
pmd_t * pmd;
unsigned long end;
@@ -851,35 +781,56 @@
if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
end = ((address + PGDIR_SIZE) & PGDIR_MASK);
do {
- if (pmd_present(*pmd)) {
- ptepage = pmd_page(*pmd);
- pte_page_lock(ptepage);
+ if (pmd_none(*pmd))
+ goto skip_pmd;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ goto skip_pmd;
+ }
+
+ ptepage = pmd_page(*pmd);
+ pte_page_lock(ptepage);
#ifdef CONFIG_SHAREPTE
- if (page_count(ptepage) > 1)
- BUG();
-#endif
- zap_pte_range(tlb, pmd, address, end - address);
- pte_page_unlock(ptepage);
+ if (page_count(ptepage) > 1) {
+ if ((address <= ptepage->index) &&
+ (end >= (ptepage->index + PMD_SIZE))) {
+ pmd_clear(pmd);
+ pgtable_remove_rmap_locked(ptepage, mm);
+ mm->rss -= ptepage->private;
+ put_page(ptepage);
+ goto unlock_pte;
+ } else {
+ tlb_finish_mmu(*tlb, address, end);
+ pte_unshare(mm, pmd, address);
+ *tlb = tlb_gather_mmu(mm, 0);
+ ptepage = pmd_page(*pmd);
+ }
}
+#endif
+ zap_pte_range(*tlb, pmd, address, end - address);
+unlock_pte:
+ pte_page_unlock(ptepage);
+skip_pmd:
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address < end);
}
-void unmap_page_range(mmu_gather_t *tlb, struct vm_area_struct *vma, unsigned long address, unsigned long end)
+void unmap_page_range(mmu_gather_t **tlb, struct vm_area_struct *vma, unsigned long address, unsigned long end)
{
pgd_t * dir;
BUG_ON(address >= end);
dir = pgd_offset(vma->vm_mm, address);
- tlb_start_vma(tlb, vma);
+ tlb_start_vma(*tlb, vma);
do {
zap_pmd_range(tlb, dir, address, end - address);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (address && (address < end));
- tlb_end_vma(tlb, vma);
+ tlb_end_vma(*tlb, vma);
}
/* Dispose of an entire mmu_gather_t per rescheduling point */
@@ -911,9 +862,6 @@
spin_lock(&mm->page_table_lock);
-#ifdef CONFIG_SHAREPTE
- unmap_shared_range(mm, address, address + size);
-#endif
/*
* This was once a long-held spinlock. Now we break the
* work up into ZAP_BLOCK_SIZE units and relinquish the
@@ -926,7 +874,7 @@
flush_cache_range(vma, address, end);
tlb = tlb_gather_mmu(mm, 0);
- unmap_page_range(tlb, vma, address, end);
+ unmap_page_range(&tlb, vma, address, end);
tlb_finish_mmu(tlb, address, end);
cond_resched_lock(&mm->page_table_lock);