From: Hugh Dickins <hugh.dickins@tiscali.co.uk>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>,
    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>,
    Linus Torvalds <torvalds@linux-foundation.org>,
    Nick Piggin <npiggin@suse.de>, Rik van Riel <riel@redhat.com>,
    linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 8/8] mm: FOLL flags for GUP flags
Date: Mon, 7 Sep 2009 22:40:49 +0100 (BST)
Message-ID: <Pine.LNX.4.64.0909072239390.15430@sister.anvils>
In-Reply-To: <Pine.LNX.4.64.0909072222070.15424@sister.anvils>

__get_user_pages() has been taking its own GUP flags, then processing
them into FOLL flags for follow_page(). Though oddly named, the FOLL
flags are more widely used, so pass them to __get_user_pages() now.
Sorry, VM flags, VM_FAULT flags and FAULT_FLAGs are still distinct.

(The patch to __get_user_pages() looks peculiar, with both gup_flags
and foll_flags: the gup_flags remain constant; but as before there's
an exceptional case, out of scope of the patch, in which foll_flags
per page have FOLL_WRITE masked off.)

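[Editorial aside, not part of the patch: the per-call translation that
__get_user_pages() used to do is now done once by the get_user_pages()
wrapper. As a sketch, with gup_translate_flags() a hypothetical name
used only for this illustration, the mapping is:

	/* Hypothetical helper mirroring the new wrapper logic in
	 * mm/memory.c: compose FOLL flags up front, then pass them
	 * straight through to __get_user_pages(). */
	static int gup_translate_flags(int write, int force,
				       struct page **pages)
	{
		int flags = FOLL_TOUCH;		/* always mark pages accessed */

		if (pages)
			flags |= FOLL_GET;	/* take a reference on each page */
		if (write)
			flags |= FOLL_WRITE;	/* require write access */
		if (force)
			flags |= FOLL_FORCE;	/* "MAY" permissions suffice */
		return flags;
	}

A worked example of the permission mask this implies follows the diff.]
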
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
---
 include/linux/mm.h |    1 +
 mm/internal.h      |    6 +-----
 mm/memory.c        |   44 +++++++++++++++++++-------------------------
 mm/mlock.c         |    4 ++--
 mm/nommu.c         |   16 ++++++++--------
 5 files changed, 31 insertions(+), 40 deletions(-)
--- mm7/include/linux/mm.h	2009-09-07 13:16:39.000000000 +0100
+++ mm8/include/linux/mm.h	2009-09-07 13:17:07.000000000 +0100
@@ -1248,6 +1248,7 @@ struct page *follow_page(struct vm_area_
 #define FOLL_TOUCH	0x02	/* mark page accessed */
 #define FOLL_GET	0x04	/* do get_page on page */
 #define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
+#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
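
[For reference, not visible in the diff context above: FOLL_WRITE is
already defined as 0x01 just before these lines, so the full set after
this patch is FOLL_WRITE 0x01, FOLL_TOUCH 0x02, FOLL_GET 0x04,
FOLL_DUMP 0x08, FOLL_FORCE 0x10.]
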
--- mm7/mm/internal.h	2009-09-07 13:16:39.000000000 +0100
+++ mm8/mm/internal.h	2009-09-07 13:17:07.000000000 +0100
@@ -250,12 +250,8 @@ static inline void mminit_validate_memmo
 }
 #endif /* CONFIG_SPARSEMEM */
 
-#define GUP_FLAGS_WRITE		0x01
-#define GUP_FLAGS_FORCE		0x02
-#define GUP_FLAGS_DUMP		0x04
-
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
+		     unsigned long start, int len, unsigned int foll_flags,
		     struct page **pages, struct vm_area_struct **vmas);
 
 #define ZONE_RECLAIM_NOSCAN	-2
--- mm7/mm/memory.c	2009-09-07 13:17:01.000000000 +0100
+++ mm8/mm/memory.c	2009-09-07 13:17:07.000000000 +0100
@@ -1209,27 +1209,29 @@ no_page_table:
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int nr_pages, int flags,
+		     unsigned long start, int nr_pages, unsigned int gup_flags,
		     struct page **pages, struct vm_area_struct **vmas)
 {
	int i;
-	unsigned int vm_flags = 0;
-	int write = !!(flags & GUP_FLAGS_WRITE);
-	int force = !!(flags & GUP_FLAGS_FORCE);
+	unsigned long vm_flags;
 
	if (nr_pages <= 0)
		return 0;
+
+	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
	/*
	 * Require read or write permissions.
-	 * If 'force' is set, we only require the "MAY" flags.
+	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
-	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+	vm_flags  = (gup_flags & FOLL_WRITE) ?
+			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= (gup_flags & FOLL_FORCE) ?
+			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;
 
	do {
		struct vm_area_struct *vma;
-		unsigned int foll_flags;
 
		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
@@ -1241,7 +1243,7 @@ int __get_user_pages(struct task_struct
			pte_t *pte;
 
			/* user gate pages are read-only */
-			if (write)
+			if (gup_flags & FOLL_WRITE)
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
@@ -1278,22 +1280,15 @@ int __get_user_pages(struct task_struct
		    !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;
 
-		foll_flags = FOLL_TOUCH;
-		if (pages)
-			foll_flags |= FOLL_GET;
-		if (flags & GUP_FLAGS_DUMP)
-			foll_flags |= FOLL_DUMP;
-		if (write)
-			foll_flags |= FOLL_WRITE;
-
		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
-					&start, &nr_pages, i, foll_flags);
+					&start, &nr_pages, i, gup_flags);
			continue;
		}
 
		do {
			struct page *page;
+			unsigned int foll_flags = gup_flags;
 
			/*
			 * If we have a pending SIGKILL, don't keep faulting
@@ -1302,9 +1297,6 @@ int __get_user_pages(struct task_struct
			if (unlikely(fatal_signal_pending(current)))
				return i ? i : -ERESTARTSYS;
 
-			if (write)
-				foll_flags |= FOLL_WRITE;
-
			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;
@@ -1416,12 +1408,14 @@ int get_user_pages(struct task_struct *t
		unsigned long start, int nr_pages, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
 {
-	int flags = 0;
+	int flags = FOLL_TOUCH;
 
+	if (pages)
+		flags |= FOLL_GET;
	if (write)
-		flags |= GUP_FLAGS_WRITE;
+		flags |= FOLL_WRITE;
	if (force)
-		flags |= GUP_FLAGS_FORCE;
+		flags |= FOLL_FORCE;
 
	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
@@ -1448,7 +1442,7 @@ struct page *get_dump_page(unsigned long
	struct page *page;
 
	if (__get_user_pages(current, current->mm, addr, 1,
-			GUP_FLAGS_FORCE | GUP_FLAGS_DUMP, &page, &vma) < 1)
+			FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
		return NULL;
	if (page == ZERO_PAGE(0)) {
		page_cache_release(page);
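
[Editorial note, illustration only: the new VM_BUG_ON above pins down
the invariant that a pages array and FOLL_GET must come together,

	/* a caller must take references iff it collects the pages */
	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

which is why get_dump_page() now passes FOLL_GET explicitly, and why
__mlock_vma_pages_range() below starts from FOLL_TOUCH | FOLL_GET.]
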
--- mm7/mm/mlock.c	2009-09-07 13:16:15.000000000 +0100
+++ mm8/mm/mlock.c	2009-09-07 13:17:07.000000000 +0100
@@ -166,9 +166,9 @@ static long __mlock_vma_pages_range(stru
	VM_BUG_ON(end > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	gup_flags = 0;
+	gup_flags = FOLL_TOUCH | FOLL_GET;
	if (vma->vm_flags & VM_WRITE)
-		gup_flags = GUP_FLAGS_WRITE;
+		gup_flags |= FOLL_WRITE;
 
	while (nr_pages > 0) {
		int i;
--- mm7/mm/nommu.c	2009-09-07 13:16:22.000000000 +0100
+++ mm8/mm/nommu.c	2009-09-07 13:17:07.000000000 +0100
@@ -128,20 +128,20 @@ unsigned int kobjsize(const void *objp)
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int nr_pages, int flags,
+		     unsigned long start, int nr_pages, unsigned int foll_flags,
		     struct page **pages, struct vm_area_struct **vmas)
 {
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;
-	int write = !!(flags & GUP_FLAGS_WRITE);
-	int force = !!(flags & GUP_FLAGS_FORCE);
 
	/* calculate required read or write permissions.
-	 * - if 'force' is set, we only require the "MAY" flags.
+	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
-	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+	vm_flags  = (foll_flags & FOLL_WRITE) ?
+			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= (foll_flags & FOLL_FORCE) ?
+			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
@@ -183,9 +183,9 @@ int get_user_pages(struct task_struct *t
	int flags = 0;
 
	if (write)
-		flags |= GUP_FLAGS_WRITE;
+		flags |= FOLL_WRITE;
	if (force)
-		flags |= GUP_FLAGS_FORCE;
+		flags |= FOLL_FORCE;
 
	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
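
[Finally, a worked illustration (again not part of the patch) of the
permission mask now computed at the top of both __get_user_pages()
implementations; gup_required_vm_flags() is a hypothetical name used
only for this sketch:

	/*
	 * Hypothetical standalone helper, mirroring the two-step mask
	 * in the patch: choose the flags a VMA must have, then let
	 * FOLL_FORCE relax the requirement to the "MAY" bits alone.
	 */
	static unsigned long gup_required_vm_flags(unsigned int gup_flags)
	{
		unsigned long vm_flags;

		vm_flags  = (gup_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
		vm_flags &= (gup_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
		return vm_flags;
	}

So FOLL_WRITE alone requires VM_WRITE (the VMA must be writable right
now), while FOLL_WRITE | FOLL_FORCE requires only VM_MAYWRITE: a
currently read-only mapping qualifies provided it could be made
writable, which is what lets ptrace write a read-only private mapping.]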