From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Andrea Arcangeli <aarcange@redhat.com>,
Andrew Morton <akpm@linux-foundation.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>,
Hugh Dickins <hughd@google.com>,
Wu Fengguang <fengguang.wu@intel.com>, Jan Kara <jack@suse.cz>,
Mel Gorman <mgorman@suse.de>,
linux-mm@kvack.org, Andi Kleen <ak@linux.intel.com>,
Matthew Wilcox <willy@linux.intel.com>,
"Kirill A. Shutemov" <kirill@shutemov.name>,
Hillf Danton <dhillf@gmail.com>, Dave Hansen <dave@sr71.net>,
Ning Qu <quning@google.com>,
linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH 19/23] truncate: support huge pages
Date: Sun, 4 Aug 2013 05:17:21 +0300
Message-ID: <1375582645-29274-20-git-send-email-kirill.shutemov@linux.intel.com>
In-Reply-To: <1375582645-29274-1-git-send-email-kirill.shutemov@linux.intel.com>
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
truncate_inode_pages_range() drops a whole huge page at once if it is
fully inside the range.

If a huge page is only partly in the range, we zero out that part,
exactly as we do for partial small pages.

invalidate_mapping_pages() simply skips huge pages if they are not
fully in the range.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
mm/truncate.c | 108 +++++++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 84 insertions(+), 24 deletions(-)
diff --git a/mm/truncate.c b/mm/truncate.c
index 353b683..fcef7cb 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -205,8 +205,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
{
pgoff_t start; /* inclusive */
pgoff_t end; /* exclusive */
- unsigned int partial_start; /* inclusive */
- unsigned int partial_end; /* exclusive */
+ bool partial_thp_start = false, partial_thp_end = false;
struct pagevec pvec;
pgoff_t index;
int i;
@@ -215,15 +214,9 @@ void truncate_inode_pages_range(struct address_space *mapping,
if (mapping->nrpages == 0)
return;
- /* Offsets within partial pages */
- partial_start = lstart & (PAGE_CACHE_SIZE - 1);
- partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
-
/*
* 'start' and 'end' always covers the range of pages to be fully
- * truncated. Partial pages are covered with 'partial_start' at the
- * start of the range and 'partial_end' at the end of the range.
- * Note that 'end' is exclusive while 'lend' is inclusive.
+ * truncated. Note that 'end' is exclusive while 'lend' is inclusive.
*/
start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (lend == -1)
@@ -249,6 +242,23 @@ void truncate_inode_pages_range(struct address_space *mapping,
if (index >= end)
break;
+ if (PageTransTailCache(page)) {
+ /* part of already handled huge page */
+ if (!page->mapping)
+ continue;
+ /* the range starts in middle of huge page */
+ partial_thp_start = true;
+ start = index & ~HPAGE_CACHE_INDEX_MASK;
+ continue;
+ }
+ /* the range ends on huge page */
+ if (PageTransHugeCache(page) &&
+ index == (end & ~HPAGE_CACHE_INDEX_MASK)) {
+ partial_thp_end = true;
+ end = index;
+ break;
+ }
+
if (!trylock_page(page))
continue;
WARN_ON(page->index != index);
@@ -265,34 +275,74 @@ void truncate_inode_pages_range(struct address_space *mapping,
index++;
}
- if (partial_start) {
- struct page *page = find_lock_page(mapping, start - 1);
+ if (partial_thp_start || lstart & ~PAGE_CACHE_MASK) {
+ pgoff_t off;
+ struct page *page;
+ unsigned pstart, pend;
+ void (*zero_segment)(struct page *page,
+ unsigned start, unsigned len);
+retry_partial_start:
+ if (partial_thp_start) {
+ zero_segment = zero_huge_user_segment;
+ off = (start - 1) & ~HPAGE_CACHE_INDEX_MASK;
+ pstart = lstart & ~HPAGE_PMD_MASK;
+ if ((end & ~HPAGE_CACHE_INDEX_MASK) == off)
+ pend = (lend - 1) & ~HPAGE_PMD_MASK;
+ else
+ pend = HPAGE_PMD_SIZE;
+ } else {
+ zero_segment = zero_user_segment;
+ off = start - 1;
+ pstart = lstart & ~PAGE_CACHE_MASK;
+ if (start > end)
+ pend = (lend - 1) & ~PAGE_CACHE_MASK;
+ else
+ pend = PAGE_CACHE_SIZE;
+ }
+
+ page = find_get_page(mapping, off);
if (page) {
- unsigned int top = PAGE_CACHE_SIZE;
- if (start > end) {
- /* Truncation within a single page */
- top = partial_end;
- partial_end = 0;
+ /* the last tail page */
+ if (PageTransTailCache(page)) {
+ partial_thp_start = true;
+ page_cache_release(page);
+ goto retry_partial_start;
}
+
+ lock_page(page);
wait_on_page_writeback(page);
- zero_user_segment(page, partial_start, top);
+ zero_segment(page, pstart, pend);
cleancache_invalidate_page(mapping, page);
if (page_has_private(page))
- do_invalidatepage(page, partial_start,
- top - partial_start);
+ do_invalidatepage(page, pstart,
+ pend - pstart);
unlock_page(page);
page_cache_release(page);
}
}
- if (partial_end) {
- struct page *page = find_lock_page(mapping, end);
+ if (partial_thp_end || (lend + 1) & ~PAGE_CACHE_MASK) {
+ pgoff_t off;
+ struct page *page;
+ unsigned pend;
+ void (*zero_segment)(struct page *page,
+ unsigned start, unsigned len);
+ if (partial_thp_end) {
+ zero_segment = zero_huge_user_segment;
+ off = end & ~HPAGE_CACHE_INDEX_MASK;
+ pend = (lend - 1) & ~HPAGE_PMD_MASK;
+ } else {
+ zero_segment = zero_user_segment;
+ off = end;
+ pend = (lend - 1) & ~PAGE_CACHE_MASK;
+ }
+
+ page = find_lock_page(mapping, end);
if (page) {
wait_on_page_writeback(page);
- zero_user_segment(page, 0, partial_end);
+ zero_segment(page, 0, pend);
cleancache_invalidate_page(mapping, page);
if (page_has_private(page))
- do_invalidatepage(page, 0,
- partial_end);
+ do_invalidatepage(page, 0, pend);
unlock_page(page);
page_cache_release(page);
}
@@ -327,6 +377,9 @@ void truncate_inode_pages_range(struct address_space *mapping,
if (index >= end)
break;
+ if (PageTransTailCache(page))
+ continue;
+
lock_page(page);
WARN_ON(page->index != index);
wait_on_page_writeback(page);
@@ -401,6 +454,13 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
if (index > end)
break;
+ /* skip huge page if it's not fully in the range */
+ if (PageTransHugeCache(page) &&
+ index + HPAGE_CACHE_NR - 1 > end)
+ continue;
+ if (PageTransTailCache(page))
+ continue;
+
if (!trylock_page(page))
continue;
WARN_ON(page->index != index);
--
1.8.3.2