From: Alex Sierra <alex.sierra@amd.com>
To: <jgg@nvidia.com>
Cc: <david@redhat.com>, <Felix.Kuehling@amd.com>,
<linux-mm@kvack.org>, <rcampbell@nvidia.com>,
<linux-ext4@vger.kernel.org>, <linux-xfs@vger.kernel.org>,
<amd-gfx@lists.freedesktop.org>,
<dri-devel@lists.freedesktop.org>, <hch@lst.de>,
<jglisse@redhat.com>, <apopple@nvidia.com>, <willy@infradead.org>,
<akpm@linux-foundation.org>
Subject: [PATCH v2 2/3] tools: add more gup configs to hmm_gup selftests
Date: Wed, 30 Mar 2022 16:25:36 -0500 [thread overview]
Message-ID: <20220330212537.12186-3-alex.sierra@amd.com> (raw)
In-Reply-To: <20220330212537.12186-1-alex.sierra@amd.com>
Test device pages with get_user_pages and get_user_pages_fast.
The motivation is to test device coherent type pages in the gup and
gup fast paths, after vm_normal_pages was split into LRU and non-LRU
handling.
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
---
tools/testing/selftests/vm/hmm-tests.c | 65 +++++++++++++++++---------
1 file changed, 44 insertions(+), 21 deletions(-)
diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c
index 11b83a8084fe..65e30ab6494c 100644
--- a/tools/testing/selftests/vm/hmm-tests.c
+++ b/tools/testing/selftests/vm/hmm-tests.c
@@ -1769,6 +1769,24 @@ TEST_F(hmm, exclusive_cow)
hmm_buffer_free(buffer);
}
+static int gup_test_exec(int gup_fd, unsigned long addr,
+ int cmd, int npages, int size)
+{
+ struct gup_test gup = {
+ .nr_pages_per_call = npages,
+ .addr = addr,
+ .gup_flags = FOLL_WRITE,
+ .size = size,
+ };
+
+ if (ioctl(gup_fd, cmd, &gup)) {
+ perror("ioctl on error\n");
+ return errno;
+ }
+
+ return 0;
+}
+
/*
* Test get user device pages through gup_test. Setting PIN_LONGTERM flag.
* This should trigger a migration back to system memory for both, private
@@ -1779,7 +1797,6 @@ TEST_F(hmm, exclusive_cow)
TEST_F(hmm, hmm_gup_test)
{
struct hmm_buffer *buffer;
- struct gup_test gup;
int gup_fd;
unsigned long npages;
unsigned long size;
@@ -1792,8 +1809,7 @@ TEST_F(hmm, hmm_gup_test)
if (gup_fd == -1)
SKIP(return, "Skipping test, could not find gup_test driver");
- npages = 4;
- ASSERT_NE(npages, 0);
+ npages = 3;
size = npages << self->page_shift;
buffer = malloc(sizeof(*buffer));
@@ -1822,28 +1838,35 @@ TEST_F(hmm, hmm_gup_test)
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
- gup.nr_pages_per_call = npages;
- gup.addr = (unsigned long)buffer->ptr;
- gup.gup_flags = FOLL_WRITE;
- gup.size = size;
- /*
- * Calling gup_test ioctl. It will try to PIN_LONGTERM these device pages
- * causing a migration back to system memory for both, private and coherent
- * type pages.
- */
- if (ioctl(gup_fd, PIN_LONGTERM_BENCHMARK, &gup)) {
- perror("ioctl on PIN_LONGTERM_BENCHMARK\n");
- goto out_test;
- }
-
- /* Take snapshot to make sure pages have been migrated to sys memory */
+ ASSERT_EQ(gup_test_exec(gup_fd,
+ (unsigned long)buffer->ptr,
+ GUP_BASIC_TEST, 1, self->page_size), 0);
+ ASSERT_EQ(gup_test_exec(gup_fd,
+ (unsigned long)buffer->ptr + 1 * self->page_size,
+ GUP_FAST_BENCHMARK, 1, self->page_size), 0);
+ ASSERT_EQ(gup_test_exec(gup_fd,
+ (unsigned long)buffer->ptr + 2 * self->page_size,
+ PIN_LONGTERM_BENCHMARK, 1, self->page_size), 0);
+
+ /* Take snapshot to CPU pagetables */
ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
m = buffer->mirror;
- for (i = 0; i < npages; i++)
- ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE);
-out_test:
+ if (hmm_is_coherent_type(variant->device_number)) {
+ ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]);
+ ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]);
+ } else {
+ ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]);
+ ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]);
+ }
+ ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]);
+ /* Check again the content on the pages. Make sure there's no
+ * corrupted data.
+ */
+ for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
+ ASSERT_EQ(ptr[i], i);
+
close(gup_fd);
hmm_buffer_free(buffer);
}
--
2.32.0
next prev parent reply other threads:[~2022-03-30 21:26 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-03-30 21:25 [PATCH v2 0/3] split vm_normal_pages for LRU and non-LRU handling Alex Sierra
2022-03-30 21:25 ` [PATCH v2 1/3] mm: add vm_normal_lru_pages for LRU handled pages only Alex Sierra
2022-03-31 8:53 ` Christoph Hellwig
2022-03-31 8:55 ` David Hildenbrand
2022-03-31 8:57 ` Christoph Hellwig
2022-04-01 20:08 ` Felix Kuehling
2022-04-04 17:38 ` Jason Gunthorpe
2022-04-04 19:22 ` Sierra Guiza, Alejandro (Alex)
2022-03-30 21:25 ` Alex Sierra [this message]
2022-03-30 21:25 ` [PATCH v2 3/3] tools: add selftests to hmm for COW in device memory Alex Sierra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220330212537.12186-3-alex.sierra@amd.com \
--to=alex.sierra@amd.com \
--cc=Felix.Kuehling@amd.com \
--cc=akpm@linux-foundation.org \
--cc=amd-gfx@lists.freedesktop.org \
--cc=apopple@nvidia.com \
--cc=david@redhat.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=hch@lst.de \
--cc=jgg@nvidia.com \
--cc=jglisse@redhat.com \
--cc=linux-ext4@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-xfs@vger.kernel.org \
--cc=rcampbell@nvidia.com \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox