From: Dan Williams <dan.j.williams@intel.com>
To: linux-nvdimm@lists.01.org
Cc: Jan Kara <jack@suse.cz>, Matthew Wilcox <mawilcox@microsoft.com>,
	linux-kernel@vger.kernel.org, linux-xfs@vger.kernel.org,
	linux-mm@kvack.org, Jeff Moyer <jmoyer@redhat.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	linux-fsdevel@vger.kernel.org, akpm@linux-foundation.org,
	hch@lst.de
Subject: [PATCH 14/15] dax: associate mappings with inodes, and warn if dma collides with truncate
Date: Tue, 31 Oct 2017 16:22:51 -0700
Message-ID: <150949217152.24061.9869502311102659784.stgit@dwillia2-desk3.amr.corp.intel.com>
In-Reply-To: <150949209290.24061.6283157778959640151.stgit@dwillia2-desk3.amr.corp.intel.com>

Catch cases where truncate encounters pages that are still under active
dma. This warning is a canary for potential data corruption, as truncated
blocks could be allocated to a new file while the device is still
performing i/o.

Cc: Jan Kara <jack@suse.cz>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 fs/dax.c                 |   56 ++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/mm_types.h |   20 ++++++++++++----
 kernel/memremap.c        |   10 ++++----
 3 files changed, 76 insertions(+), 10 deletions(-)
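
As a note on the struct page change below: the new ->inode back pointer is
what lets a page-idle path get from a pinned fsdax page back to the file
that owns it. A minimal sketch of such a lookup, using a hypothetical
helper name (not part of this patch):

	static struct inode *dax_page_owner(struct page *page)
	{
		if (!is_zone_device_page(page))
			return NULL;
		/* set by dax_associate_entry(), cleared by dax_disassociate_entry() */
		return page->inode;
	}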

diff --git a/fs/dax.c b/fs/dax.c
index ac6497dcfebd..fd5d385988d1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -297,6 +297,55 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
 	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 }
 
+static unsigned long dax_entry_size(void *entry)
+{
+	if (dax_is_zero_entry(entry))
+		return 0;
+	else if (dax_is_empty_entry(entry))
+		return 0;
+	else if (dax_is_pmd_entry(entry))
+		return HPAGE_SIZE;
+	else
+		return PAGE_SIZE;
+}
+
+#define for_each_entry_pfn(entry, pfn, end_pfn) \
+	for (pfn = dax_radix_pfn(entry), \
+			end_pfn = pfn + dax_entry_size(entry) / PAGE_SIZE; \
+			pfn < end_pfn; \
+			pfn++)
+
+static void dax_associate_entry(void *entry, struct inode *inode)
+{
+	unsigned long pfn, end_pfn;
+
+	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
+		return;
+
+	for_each_entry_pfn(entry, pfn, end_pfn) {
+		struct page *page = pfn_to_page(pfn);
+
+		WARN_ON_ONCE(page->inode);
+		page->inode = inode;
+	}
+}
+
+static void dax_disassociate_entry(void *entry, struct inode *inode, bool trunc)
+{
+	unsigned long pfn, end_pfn;
+
+	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
+		return;
+
+	for_each_entry_pfn(entry, pfn, end_pfn) {
+		struct page *page = pfn_to_page(pfn);
+
+		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
+		WARN_ON_ONCE(page->inode && page->inode != inode);
+		page->inode = NULL;
+	}
+}
+
 /*
  * Find radix tree entry at given index. If it points to an exceptional entry,
  * return it with the radix tree entry locked. If the radix tree doesn't
@@ -403,6 +452,7 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 		}
 
 		if (pmd_downgrade) {
+			dax_disassociate_entry(entry, mapping->host, false);
 			radix_tree_delete(&mapping->page_tree, index);
 			mapping->nrexceptional--;
 			dax_wake_mapping_entry_waiter(mapping, index, entry,
@@ -452,6 +502,7 @@ static int __dax_invalidate_mapping_entry(struct address_space *mapping,
 	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
 	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
 		goto out;
+	dax_disassociate_entry(entry, mapping->host, trunc);
 	radix_tree_delete(page_tree, index);
 	mapping->nrexceptional--;
 	ret = 1;
@@ -529,6 +580,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 {
 	struct radix_tree_root *page_tree = &mapping->page_tree;
 	unsigned long pfn = pfn_t_to_pfn(pfn_t);
+	struct inode *inode = mapping->host;
 	pgoff_t index = vmf->pgoff;
 	void *new_entry;
 
@@ -548,6 +600,10 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 
 	spin_lock_irq(&mapping->tree_lock);
 	new_entry = dax_radix_locked_entry(pfn, flags);
+	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
+		dax_disassociate_entry(entry, inode, false);
+		dax_associate_entry(new_entry, inode);
+	}
 
 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 		/*
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 46f4ecf5479a..dd976851e8d8 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -118,11 +118,21 @@ struct page {
 					 * Can be used as a generic list
 					 * by the page owner.
 					 */
-		struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
-					    * lru or handled by a slab
-					    * allocator, this points to the
-					    * hosting device page map.
-					    */
+		struct {
+			/*
+			 * ZONE_DEVICE pages are never on an lru or handled by
+			 * a slab allocator, this points to the hosting device
+			 * page map.
+			 */
+			struct dev_pagemap *pgmap;
+			/*
+			 * inode association for MEMORY_DEVICE_FS_DAX page-idle
+			 * callbacks. Note that we don't use ->mapping since
+			 * that has hard coded page-cache assumptions in
+			 * several paths.
+			 */
+			struct inode *inode;
+		};
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
 #ifdef CONFIG_64BIT
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 8a4ebfe9db4e..f9a2929fc310 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -441,13 +441,13 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 		struct page *page = pfn_to_page(pfn);
 
 		/*
-		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
-		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
-		 * freed or placed on a driver-private list.  Seed the
-		 * storage with LIST_POISON* values.
+		 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
+		 * and ->inode (for the MEMORY_DEVICE_FS_DAX case) association.
+		 * It is a bug if a ZONE_DEVICE page is ever freed or placed on
+		 * a driver-private list.
 		 */
-		list_del(&page->lru);
 		page->pgmap = pgmap;
+		page->inode = NULL;
 		percpu_ref_get(ref);
 		if (!(++i % 1024))
 			cond_resched();
