From: Andrea Arcangeli <aarcange@redhat.com>
To: linux-mm@kvack.org
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Subject: [PATCH 11 of 25] add pmd mangling functions to x86
Date: Sat, 14 Nov 2009 17:38:29 -0000 [thread overview]
Message-ID: <8b66f7de1c453d7a2184.1258220309@v2.random> (raw)
In-Reply-To: <patchbomb.1258220298@v2.random>
From: Andrea Arcangeli <aarcange@redhat.com>
Add the needed pmd mangling functions, in symmetry with their pte counterparts.
pmdp_freeze_flush is the only exception: it exists only on the pmd side and is
needed to serialize the VM against split_huge_page. It atomically clears the
present bit, in the same way pmdp_clear_flush_young atomically clears the
accessed bit (both must flush the TLB to take effect, and for
pmdp_freeze_flush the flush must happen synchronously).
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
---
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -93,11 +93,21 @@ static inline int pte_young(pte_t pte)
return pte_flags(pte) & _PAGE_ACCESSED;
}
+static inline int pmd_young(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_ACCESSED;
+}
+
static inline int pte_write(pte_t pte)
{
return pte_flags(pte) & _PAGE_RW;
}
+static inline int pmd_write(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_RW;
+}
+
static inline int pte_file(pte_t pte)
{
return pte_flags(pte) & _PAGE_FILE;
@@ -148,6 +158,13 @@ static inline pte_t pte_set_flags(pte_t
return native_make_pte(v | set);
}
+static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
+{
+ pmdval_t v = native_pmd_val(pmd);
+
+ return native_make_pmd(v | set);
+}
+
static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
pteval_t v = native_pte_val(pte);
@@ -155,6 +172,13 @@ static inline pte_t pte_clear_flags(pte_
return native_make_pte(v & ~clear);
}
+static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
+{
+ pmdval_t v = native_pmd_val(pmd);
+
+ return native_make_pmd(v & ~clear);
+}
+
static inline pte_t pte_mkclean(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_DIRTY);
@@ -165,11 +189,21 @@ static inline pte_t pte_mkold(pte_t pte)
return pte_clear_flags(pte, _PAGE_ACCESSED);
}
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ return pmd_clear_flags(pmd, _PAGE_ACCESSED);
+}
+
static inline pte_t pte_wrprotect(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_RW);
}
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ return pmd_clear_flags(pmd, _PAGE_RW);
+}
+
static inline pte_t pte_mkexec(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_NX);
@@ -180,16 +214,41 @@ static inline pte_t pte_mkdirty(pte_t pt
return pte_set_flags(pte, _PAGE_DIRTY);
}
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_DIRTY);
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_PSE);
+}
+
static inline pte_t pte_mkyoung(pte_t pte)
{
return pte_set_flags(pte, _PAGE_ACCESSED);
}
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_ACCESSED);
+}
+
+static inline pmd_t pmd_mkfreeze(pmd_t pmd)
+{
+ return pmd_clear_flags(pmd, _PAGE_PRESENT);
+}
+
static inline pte_t pte_mkwrite(pte_t pte)
{
return pte_set_flags(pte, _PAGE_RW);
}
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+ return pmd_set_flags(pmd, _PAGE_RW);
+}
+
static inline pte_t pte_mkhuge(pte_t pte)
{
return pte_set_flags(pte, _PAGE_PSE);
@@ -318,6 +377,11 @@ static inline int pte_same(pte_t a, pte_
return a.pte == b.pte;
}
+static inline int pmd_same(pmd_t a, pmd_t b)
+{
+ return a.pmd == b.pmd;
+}
+
static inline int pte_present(pte_t a)
{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
@@ -349,7 +413,7 @@ static inline unsigned long pmd_page_vad
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
-#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
/*
* the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
@@ -370,6 +434,7 @@ static inline unsigned long pmd_index(un
* to linux/mm.h:page_to_nid())
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
/*
* the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
@@ -566,14 +631,21 @@ struct vm_area_struct;
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty);
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp);
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep);
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
@@ -584,6 +656,14 @@ static inline pte_t ptep_get_and_clear(s
return pte;
}
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = native_pmdp_get_and_clear(pmdp);
+ pmd_update(mm, addr, pmdp);
+ return pmd;
+}
+
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
@@ -610,6 +690,16 @@ static inline void ptep_set_wrprotect(st
pte_update(mm, addr, ptep);
}
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp)
+{
+ /* Atomically clear RW on the pmd, mirroring ptep_set_wrprotect(). */
+ clear_bit(_PAGE_BIT_RW, (unsigned long *)&pmdp->pmd);
+ pmd_update(mm, addr, pmdp);
+}
+
+extern void pmdp_freeze_flush(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp);
+
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
*
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -71,6 +71,18 @@ static inline pte_t native_ptep_get_and_
return ret;
#endif
}
+static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+{
+#ifdef CONFIG_SMP
+ return native_make_pmd(xchg(&xp->pmd, 0));
+#else
+ /* native_local_pmdp_get_and_clear,
+ but duplicated because of cyclic dependency */
+ pmd_t ret = *xp;
+ native_pmd_clear(NULL, 0, xp);
+ return ret;
+#endif
+}
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -288,6 +288,23 @@ int ptep_set_access_flags(struct vm_area
return changed;
}
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ int changed = !pmd_same(*pmdp, entry);
+
+ VM_BUG_ON(address & ~HPAGE_MASK);
+
+ if (changed && dirty) {
+ *pmdp = entry;
+ pmd_update_defer(vma->vm_mm, address, pmdp);
+ flush_tlb_range(vma, address, address + HPAGE_SIZE);
+ }
+
+ return changed;
+}
+
int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
@@ -303,6 +320,21 @@ int ptep_test_and_clear_young(struct vm_
return ret;
}
+int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ int ret = 0;
+
+ if (pmd_young(*pmdp))
+ ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
+ (unsigned long *) &pmdp->pmd);
+
+ if (ret)
+ pmd_update(vma->vm_mm, addr, pmdp);
+
+ return ret;
+}
+
int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
@@ -315,6 +347,33 @@ int ptep_clear_flush_young(struct vm_are
return young;
}
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ int young;
+
+ VM_BUG_ON(address & ~HPAGE_MASK);
+
+ young = pmdp_test_and_clear_young(vma, address, pmdp);
+ if (young)
+ flush_tlb_range(vma, address, address + HPAGE_SIZE);
+
+ return young;
+}
+
+void pmdp_freeze_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ int cleared;
+ VM_BUG_ON(address & ~HPAGE_MASK);
+ cleared = test_and_clear_bit(_PAGE_BIT_PRESENT,
+ (unsigned long *)&pmdp->pmd);
+ if (cleared) {
+ pmd_update(vma->vm_mm, address, pmdp);
+ flush_tlb_range(vma, address, address + HPAGE_SIZE);
+ }
+}
+
/**
* reserve_top_address - reserves a hole in the top of kernel address space
* @reserve - size of hole to reserve
--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org. For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>
next prev parent reply other threads:[~2009-11-14 18:10 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2009-11-14 17:38 [PATCH 00 of 25] Transparent Hugepage support #1 Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 01 of 25] bit_lock smp memory barriers Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 02 of 25] compound_lock Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 03 of 25] alter compound get_page/put_page Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 04 of 25] clear compound mapping Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 05 of 25] add native_set_pmd_at Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 06 of 25] add pmd paravirt ops Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 07 of 25] no paravirt version of pmd ops Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 08 of 25] export maybe_mkwrite Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 09 of 25] comment reminder in destroy_compound_page Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 10 of 25] config_transparent_hugepage Andrea Arcangeli
2009-11-14 17:38 ` Andrea Arcangeli [this message]
2009-11-14 17:38 ` [PATCH 12 of 25] add pmd mangling generic functions Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 13 of 25] special pmd_trans_* functions Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 14 of 25] bail out gup_fast on freezed pmd Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 15 of 25] pte alloc trans frozen Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 16 of 25] add pmd mmu_notifier helpers Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 17 of 25] clear page compound Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 18 of 25] add pmd_huge_pte to mm_struct Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 19 of 25] ensure mapcount is taken on head pages Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 20 of 25] add page_check_address_pmd to find the pmd mapping a transparent hugepage Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 21 of 25] split_huge_page_mm/vma Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 22 of 25] split_huge_page paging Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 23 of 25] pmd_trans_huge migrate bugcheck Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 24 of 25] transparent hugepage core Andrea Arcangeli
2009-11-14 17:38 ` [PATCH 25 of 25] kvm mmu transparent hugepage support Andrea Arcangeli
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=8b66f7de1c453d7a2184.1258220309@v2.random \
--to=aarcange@redhat.com \
--cc=linux-mm@kvack.org \
--cc=mtosatti@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox