From: Dan Williams <dan.j.williams@intel.com>
To: akpm@linux-foundation.org
Cc: Michal Hocko <mhocko@suse.com>,
linux-nvdimm@lists.01.org,
Dave Hansen <dave.hansen@linux.intel.com>,
linux-kernel@vger.kernel.org, linux-xfs@vger.kernel.org,
linux-mm@kvack.org, linux-fsdevel@vger.kernel.org, hch@lst.de,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH v3 10/13] mm: disable get_user_pages_fast() for dax
Date: Thu, 19 Oct 2017 19:39:51 -0700
Message-ID: <150846719161.24336.5799047274707349501.stgit@dwillia2-desk3.amr.corp.intel.com>
In-Reply-To: <150846713528.24336.4459262264611579791.stgit@dwillia2-desk3.amr.corp.intel.com>
In preparation for solving the dax-dma vs truncate race, disable
get_user_pages_fast() for dax mappings. The race fix relies on the vma
being available, and the gup-fast path walks page tables without it. We
can still support get_user_pages_fast() for 1GB (pud) 'devmap' mappings
since those are only implemented for device-dax; everything else needs
the vma and the gup slow path in case it might be a filesystem-dax
mapping.
Cc: Michal Hocko <mhocko@suse.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
mm/gup.c | 48 +++++++++++++-----------------------------------
1 file changed, 13 insertions(+), 35 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index b2b4d4263768..308be897d22a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1290,22 +1290,12 @@ static inline pte_t gup_get_pte(pte_t *ptep)
}
#endif
-static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
-{
- while ((*nr) - nr_start) {
- struct page *page = pages[--(*nr)];
-
- ClearPageReferenced(page);
- put_page(page);
- }
-}
-
#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
struct dev_pagemap *pgmap = NULL;
- int nr_start = *nr, ret = 0;
+ int ret = 0;
pte_t *ptep, *ptem;
ptem = ptep = pte_offset_map(&pmd, addr);
@@ -1323,13 +1313,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
if (!pte_access_permitted(pte, write))
goto pte_unmap;
- if (pte_devmap(pte)) {
- pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
- if (unlikely(!pgmap)) {
- undo_dev_pagemap(nr, nr_start, pages);
- goto pte_unmap;
- }
- } else if (pte_special(pte))
+ if (pte_devmap(pte) || (pte_special(pte)))
goto pte_unmap;
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
@@ -1378,6 +1362,16 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
#endif /* __HAVE_ARCH_PTE_SPECIAL */
#if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
+{
+ while ((*nr) - nr_start) {
+ struct page *page = pages[--(*nr)];
+
+ ClearPageReferenced(page);
+ put_page(page);
+ }
+}
+
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
@@ -1402,15 +1396,6 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
return 1;
}
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
- unsigned long end, struct page **pages, int *nr)
-{
- unsigned long fault_pfn;
-
- fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- return __gup_device_huge(fault_pfn, addr, end, pages, nr);
-}
-
static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
@@ -1420,13 +1405,6 @@ static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
return __gup_device_huge(fault_pfn, addr, end, pages, nr);
}
#else
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
- unsigned long end, struct page **pages, int *nr)
-{
- BUILD_BUG();
- return 0;
-}
-
static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
unsigned long end, struct page **pages, int *nr)
{
@@ -1445,7 +1423,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
return 0;
if (pmd_devmap(orig))
- return __gup_device_huge_pmd(orig, addr, end, pages, nr);
+ return 0;
refs = 0;
page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
--