Hi Mike
From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
migration tests presume that both THP and HugeTLB huge pages are 2MB.
Add dynamic detection of huge page size with read_pmd_pagesize() for THP
and with default_huge_page_size() for HugeTLB.
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
tools/testing/selftests/mm/migration.c | 44 +++++++++++++++++++-------
1 file changed, 32 insertions(+), 12 deletions(-)
diff --git a/tools/testing/selftests/mm/migration.c b/tools/testing/selftests/mm/migration.c
index 60e78bbfc0e3..e3c2f831e15a 100644
--- a/tools/testing/selftests/mm/migration.c
+++ b/tools/testing/selftests/mm/migration.c
@@ -184,22 +184,27 @@ TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
*/
TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
{
+ unsigned long pmdsize;
uint64_t *ptr;
int i;
if (!thp_is_enabled())
SKIP(return, "Transparent Hugepages not available");
+ pmdsize = read_pmd_pagesize();
+ if (!pmdsize)
+ SKIP(return, "Reading PMD pagesize failed");
+
if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
SKIP(return, "Not enough threads or NUMA nodes available");
- ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
+ ptr = mmap(NULL, 2 * pmdsize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(ptr, MAP_FAILED);
- ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
- ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
- memset(ptr, 0xde, TWOMEG);
+ ptr = (uint64_t *) ALIGN((uintptr_t) ptr, pmdsize);
+ ASSERT_EQ(madvise(ptr, pmdsize, MADV_HUGEPAGE), 0);
+ memset(ptr, 0xde, pmdsize);
for (i = 0; i < self->nthreads - 1; i++)
if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
perror("Couldn't create thread");
@@ -215,6 +220,7 @@ TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
TEST_F_TIMEOUT(migration, shared_anon_thp, 2*RUNTIME)
{
+ unsigned long pmdsize;
pid_t pid;
uint64_t *ptr;
int i;
@@ -222,17 +228,21 @@ TEST_F_TIMEOUT(migration, shared_anon_thp, 2*RUNTIME)
if (!thp_is_enabled())
SKIP(return, "Transparent Hugepages not available");
+ pmdsize = read_pmd_pagesize();
+ if (!pmdsize)
+ SKIP(return, "Reading PMD pagesize failed");
+
if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
SKIP(return, "Not enough threads or NUMA nodes available");
- ptr = mmap(NULL, 2 * TWOMEG, PROT_READ | PROT_WRITE,
+ ptr = mmap(NULL, 2 * pmdsize, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(ptr, MAP_FAILED);
- ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
- ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
+ ptr = (uint64_t *) ALIGN((uintptr_t) ptr, pmdsize);
+ ASSERT_EQ(madvise(ptr, pmdsize, MADV_HUGEPAGE), 0);
- memset(ptr, 0xde, TWOMEG);
+ memset(ptr, 0xde, pmdsize);
for (i = 0; i < self->nthreads - 1; i++) {
pid = fork();
if (!pid) {
@@ -256,17 +266,22 @@ TEST_F_TIMEOUT(migration, shared_anon_thp, 2*RUNTIME)
*/
TEST_F_TIMEOUT(migration, private_anon_htlb, 2*RUNTIME)
{
+ unsigned long hugepage_size;
uint64_t *ptr;
int i;
if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
SKIP(return, "Not enough threads or NUMA nodes available");
- ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
+ hugepage_size = default_huge_page_size();
+ if (!hugepage_size)
+ SKIP(return, "Reading HugeTLB pagesize failed\n");
+
+ ptr = mmap(NULL, hugepage_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
ASSERT_NE(ptr, MAP_FAILED);
- memset(ptr, 0xde, TWOMEG);
+ memset(ptr, 0xde, hugepage_size);
for (i = 0; i < self->nthreads - 1; i++)
if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
perror("Couldn't create thread");
@@ -281,6 +296,7 @@ TEST_F_TIMEOUT(migration, private_anon_htlb, 2*RUNTIME)
*/
TEST_F_TIMEOUT(migration, shared_anon_htlb, 2*RUNTIME)
{
+ unsigned long hugepage_size;
pid_t pid;
uint64_t *ptr;
int i;
@@ -288,11 +304,15 @@ TEST_F_TIMEOUT(migration, shared_anon_htlb, 2*RUNTIME)
if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
SKIP(return, "Not enough threads or NUMA nodes available");
- ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
+ hugepage_size = default_huge_page_size();
+ if (!hugepage_size)
+ SKIP(return, "Reading HugeTLB pagesize failed\n");
+
+ ptr = mmap(NULL, hugepage_size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
ASSERT_NE(ptr, MAP_FAILED);
- memset(ptr, 0xde, TWOMEG);
+ memset(ptr, 0xde, hugepage_size);
for (i = 0; i < self->nthreads - 1; i++) {
pid = fork();
if (!pid) {
This patch looks good to me.
Reviewed-by: Donet Tom <donettom@linux.ibm.com>
-Donet