From: npiggin@nick.local0.net
To: Linus Torvalds <torvalds@linux-foundation.org>
Cc: akpm@linux-foundation.org, Jared Hulbert <jaredeh@gmail.com>,
Carsten Otte <cotte@de.ibm.com>,
Martin Schwidefsky <schwidefsky@de.ibm.com>,
Heiko Carstens <heiko.carstens@de.ibm.com>,
linux-mm@kvack.org
Subject: [patch 3/7] mm: add vm_insert_mixed
Date: Tue, 11 Mar 2008 21:46:56 +1100
Message-ID: <20080311105125.873658000@nick.local0.net>
In-Reply-To: <20080311104653.995564000@nick.local0.net>
[-- Attachment #1: mm-insert_mixed.patch --]
[-- Type: text/plain, Size: 5169 bytes --]
vm_insert_mixed will insert either a raw pfn or a refcounted struct page
into the page tables, depending on whether vm_normal_page() will return
the page or not. With the introduction of the new pte_special bit, this is now
too tricky for drivers to be doing themselves.
filemap_xip uses this in a subsequent patch.
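As an illustration (hypothetical driver code, not part of this patch), a
.fault handler could use the new call as sketched below. mydrv_fault and
mydrv_offset_to_pfn() are assumed names; the latter stands in for whatever
translates a file offset to the device's pfn:

	/* Minimal sketch: with VM_MIXEDMAP set on the vma, the driver need
	 * not know whether a given pfn is backed by a struct page;
	 * vm_insert_mixed() chooses insert_page() or insert_pfn() itself.
	 */
	static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		unsigned long pfn = mydrv_offset_to_pfn(vma, vmf->pgoff);
		int err;

		err = vm_insert_mixed(vma,
				(unsigned long)vmf->virtual_address, pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		if (err < 0 && err != -EBUSY)	/* -EBUSY: pte already present */
			return VM_FAULT_SIGBUS;
		return VM_FAULT_NOPAGE;
	}

The vma would have had VM_MIXEDMAP set at mmap time (patch 1/7 of this
series); vm_insert_mixed BUG()s on vmas without it, since vm_normal_page()
only applies the rules it relies on for VM_MIXEDMAP mappings.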
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Jared Hulbert <jaredeh@gmail.com>
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux-mm@kvack.org
---
 include/linux/mm.h |    2 +
 mm/memory.c        |   79 ++++++++++++++++++++++++++++++++++++-----------------
 2 files changed, 57 insertions(+), 24 deletions(-)
Index: linux-2.6/include/linux/mm.h
===================================================================
--- linux-2.6.orig/include/linux/mm.h
+++ linux-2.6/include/linux/mm.h
@@ -1144,6 +1144,8 @@ int remap_pfn_range(struct vm_area_struc
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
struct page *follow_page(struct vm_area_struct *, unsigned long address,
unsigned int foll_flags);
Index: linux-2.6/mm/memory.c
===================================================================
--- linux-2.6.orig/mm/memory.c
+++ linux-2.6/mm/memory.c
@@ -1176,8 +1176,9 @@ pte_t *get_locked_pte(struct mm_struct *
* old drivers should use this, and they needed to mark their
* pages reserved for the old functions anyway.
*/
-static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+static int insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot)
{
+ struct mm_struct *mm = vma->vm_mm;
int retval;
pte_t *pte;
spinlock_t *ptl;
@@ -1244,10 +1245,37 @@ int vm_insert_page(struct vm_area_struct
if (!page_count(page))
return -EINVAL;
vma->vm_flags |= VM_INSERTPAGE;
- return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+ return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
+static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t prot)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int retval;
+ pte_t *pte, entry;
+ spinlock_t *ptl;
+
+ retval = -ENOMEM;
+ pte = get_locked_pte(mm, addr, &ptl);
+ if (!pte)
+ goto out;
+ retval = -EBUSY;
+ if (!pte_none(*pte))
+ goto out_unlock;
+
+ /* Ok, finally just insert the thing.. */
+ entry = pte_mkspecial(pfn_pte(pfn, prot));
+ set_pte_at(mm, addr, pte, entry);
+ update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+
+ retval = 0;
+out_unlock:
+ pte_unmap_unlock(pte, ptl);
+out:
+ return retval;
+}
+
/**
* vm_insert_pfn - insert single pfn into user vma
* @vma: user vma to map to
@@ -1263,11 +1291,6 @@ EXPORT_SYMBOL(vm_insert_page);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
- struct mm_struct *mm = vma->vm_mm;
- int retval;
- pte_t *pte, entry;
- spinlock_t *ptl;
-
/*
* Technically, architectures with pte_special can avoid all these
* restrictions (same for remap_pfn_range). However we would like
@@ -1280,27 +1303,35 @@ int vm_insert_pfn(struct vm_area_struct
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
- retval = -ENOMEM;
- pte = get_locked_pte(mm, addr, &ptl);
- if (!pte)
- goto out;
- retval = -EBUSY;
- if (!pte_none(*pte))
- goto out_unlock;
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+ return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_pfn);
- /* Ok, finally just insert the thing.. */
- entry = pte_mkspecial(pfn_pte(pfn, vma->vm_page_prot));
- set_pte_at(mm, addr, pte, entry);
- update_mmu_cache(vma, addr, entry);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn)
+{
+ BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
- retval = 0;
-out_unlock:
- pte_unmap_unlock(pte, ptl);
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
-out:
- return retval;
+ /*
+ * If we don't have pte special, then we have to use the pfn_valid()
+ * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+ * refcount the page if pfn_valid is true (hence insert_page rather
+ * than insert_pfn).
+ */
+ if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
+ struct page *page;
+
+ page = pfn_to_page(pfn);
+ return insert_page(vma, addr, page, vma->vm_page_prot);
+ }
+ return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
-EXPORT_SYMBOL(vm_insert_pfn);
+EXPORT_SYMBOL(vm_insert_mixed);
/*
* maps a range of physical memory into the requested pages. the old
--