From: Jason Gunthorpe <jgg@nvidia.com>
Cc: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>,
	Lu Baolu <baolu.lu@linux.intel.com>,
	David Hildenbrand <david@redhat.com>,
	Christoph Hellwig <hch@lst.de>,
	iommu@lists.linux.dev, Joao Martins <joao.m.martins@oracle.com>,
	Kevin Tian <kevin.tian@intel.com>,
	kvm@vger.kernel.org, linux-mm@kvack.org,
	Pasha Tatashin <pasha.tatashin@soleen.com>,
	Peter Xu <peterx@redhat.com>, Ryan Roberts <ryan.roberts@arm.com>,
	Sean Christopherson <seanjc@google.com>,
	Tina Zhang <tina.zhang@intel.com>
Subject: [PATCH 10/16] iommupt: Add a kunit test to compare against iopt
Date: Thu, 15 Aug 2024 12:11:26 -0300	[thread overview]
Message-ID: <10-v1-01fa10580981+1d-iommu_pt_jgg@nvidia.com> (raw)
In-Reply-To: <0-v1-01fa10580981+1d-iommu_pt_jgg@nvidia.com>

The comparison test checks the memory layout of the page table against
the memory layout created by the io-pgtable version to ensure they are the
same. This gives high confidence that aspects of the formats are working
correctly.
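
In outline, for every page size and prot combination the test maps the
same VA/PA range through both implementations and then diffs the two
trees. A simplified sketch only (do_cmp_map() and compare_tables() are
the helpers added below; priv and cmp_priv come from the test fixture):

  /* Map identically through the generic PT ops and the io-pgtable ops */
  ops->map_pages(priv->iommu, va, pa, len, prot, GFP_KERNEL, &mapped, NULL);
  cmp_priv->pgtbl_ops->map_pages(cmp_priv->pgtbl_ops, va, pa, len, 1,
                                 prot, GFP_KERNEL, &mapped);

  /* Walk both page tables in lock-step and require identical (masked)
   * entries at every level and index.
   */
  compare_tables(test);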

Most likely this would never be merged to the kernel; it is a useful
development tool for building the formats.

The compare tests for AMDv1, x86PAE and VTD SS require a bunch of hacky
patches to those drivers and this kunit command:

./tools/testing/kunit/kunit.py run --build_dir build_kunit_x86_64 --arch x86_64 --kunitconfig ./drivers/iommu/generic_pt/.kunitconfig --kconfig_add CONFIG_PCI=y --kconfig_add CONFIG_AMD_IOMMU=y --kconfig_add CONFIG_INTEL_IOMMU=y --kconfig_add CONFIG_IOMMU_IO_PGTABLE_VTD=y

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/generic_pt/.kunitconfig         |  10 +
 drivers/iommu/generic_pt/Kconfig              |   1 +
 drivers/iommu/generic_pt/fmt/iommu_template.h |   3 +
 drivers/iommu/generic_pt/kunit_iommu_cmp.h    | 272 ++++++++++++++++++
 4 files changed, 286 insertions(+)
 create mode 100644 drivers/iommu/generic_pt/kunit_iommu_cmp.h

diff --git a/drivers/iommu/generic_pt/.kunitconfig b/drivers/iommu/generic_pt/.kunitconfig
index f428cae8ce584c..a16ca5f72a7c5b 100644
--- a/drivers/iommu/generic_pt/.kunitconfig
+++ b/drivers/iommu/generic_pt/.kunitconfig
@@ -11,3 +11,13 @@ CONFIG_IOMMU_PT_DART=y
 CONFIG_IOMMU_PT_VTDSS=y
 CONFIG_IOMMU_PT_X86PAE=y
 CONFIG_IOMMUT_PT_KUNIT_TEST=y
+
+CONFIG_COMPILE_TEST=y
+CONFIG_IOMMU_IO_PGTABLE_LPAE=y
+CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y
+CONFIG_IOMMU_IO_PGTABLE_DART=y
+# These are x86 specific and can't be turned on generally
+# Turn them on to compare test x86pae and vtdss
+#CONFIG_AMD_IOMMU=y
+#CONFIG_INTEL_IOMMU=y
+#CONFIG_IOMMU_IO_PGTABLE_VTD=y
diff --git a/drivers/iommu/generic_pt/Kconfig b/drivers/iommu/generic_pt/Kconfig
index 2c5c2bc59bf8ea..3ac9b2324ebd98 100644
--- a/drivers/iommu/generic_pt/Kconfig
+++ b/drivers/iommu/generic_pt/Kconfig
@@ -31,6 +31,7 @@ config IOMMU_PT
 if IOMMU_PT
 config IOMMUT_PT_KUNIT_TEST
 	tristate "IOMMU Page Table KUnit Test" if !KUNIT_ALL_TESTS
+	select IOMMU_IO_PGTABLE
 	depends on KUNIT
 	default KUNIT_ALL_TESTS
 endif
diff --git a/drivers/iommu/generic_pt/fmt/iommu_template.h b/drivers/iommu/generic_pt/fmt/iommu_template.h
index 809f4ce6874591..8d113cc68ec485 100644
--- a/drivers/iommu/generic_pt/fmt/iommu_template.h
+++ b/drivers/iommu/generic_pt/fmt/iommu_template.h
@@ -43,4 +43,7 @@
  */
 #include "../kunit_generic_pt.h"
 #include "../kunit_iommu_pt.h"
+#ifdef pt_iommu_alloc_io_pgtable
+#include "../kunit_iommu_cmp.h"
+#endif
 #endif
diff --git a/drivers/iommu/generic_pt/kunit_iommu_cmp.h b/drivers/iommu/generic_pt/kunit_iommu_cmp.h
new file mode 100644
index 00000000000000..283b3f2b07425e
--- /dev/null
+++ b/drivers/iommu/generic_pt/kunit_iommu_cmp.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
+ */
+#include "kunit_iommu.h"
+#include "pt_iter.h"
+#include <linux/iommu.h>
+#include <linux/io-pgtable.h>
+
+struct kunit_iommu_cmp_priv {
+	/* Generic PT version */
+	struct kunit_iommu_priv fmt;
+
+	/* IO pagetable version */
+	struct io_pgtable_ops *pgtbl_ops;
+	struct io_pgtable_cfg *fmt_memory;
+	struct pt_iommu_table ref_table;
+};
+
+struct compare_tables {
+	struct kunit *test;
+	struct pt_range ref_range;
+	struct pt_table_p *ref_table;
+};
+
+static int __compare_tables(struct pt_range *range, void *arg,
+			    unsigned int level, struct pt_table_p *table)
+{
+	struct pt_state pts = pt_init(range, level, table);
+	struct compare_tables *cmp = arg;
+	struct pt_state ref_pts =
+		pt_init(&cmp->ref_range, level, cmp->ref_table);
+	struct kunit *test = cmp->test;
+	int ret;
+
+	for_each_pt_level_item(&pts) {
+		u64 entry, ref_entry;
+
+		cmp->ref_range.va = range->va;
+		ref_pts.index = pts.index;
+		pt_load_entry(&ref_pts);
+
+		entry = pt_kunit_cmp_mask_entry(&pts);
+		ref_entry = pt_kunit_cmp_mask_entry(&ref_pts);
+
+		/*if (entry != 0 || ref_entry != 0)
+			printk("Check %llx Level %u index %u ptr %px refptr %px: %llx (%llx) %llx (%llx)\n",
+			       pts.range->va, pts.level, pts.index,
+			       pts.table,
+			       ref_pts.table,
+			       pts.entry, entry,
+			       ref_pts.entry, ref_entry);*/
+
+		KUNIT_ASSERT_EQ(test, pts.type, ref_pts.type);
+		KUNIT_ASSERT_EQ(test, entry, ref_entry);
+		if (entry != ref_entry)
+			return 0;
+
+		if (pts.type == PT_ENTRY_TABLE) {
+			cmp->ref_table = ref_pts.table_lower;
+			ret = pt_descend(&pts, arg, __compare_tables);
+			if (ret)
+				return ret;
+		}
+
+		/* Defeat contiguous entry aggregation */
+		pts.type = PT_ENTRY_EMPTY;
+	}
+
+	return 0;
+}
+
+static void compare_tables(struct kunit *test)
+{
+	struct kunit_iommu_cmp_priv *cmp_priv = test->priv;
+	struct kunit_iommu_priv *priv = &cmp_priv->fmt;
+	struct pt_range range = pt_top_range(priv->common);
+	struct compare_tables cmp = {
+		.test = test,
+	};
+	struct pt_state pts = pt_init_top(&range);
+	struct pt_state ref_pts;
+
+	pt_iommu_setup_ref_table(&cmp_priv->ref_table, cmp_priv->pgtbl_ops);
+	cmp.ref_range =
+		pt_top_range(common_from_iommu(&cmp_priv->ref_table.iommu));
+	ref_pts = pt_init_top(&cmp.ref_range);
+	KUNIT_ASSERT_EQ(test, pts.level, ref_pts.level);
+
+	cmp.ref_table = ref_pts.table;
+	KUNIT_ASSERT_EQ(test, pt_walk_range(&range, __compare_tables, &cmp), 0);
+}
+
+static void test_cmp_init(struct kunit *test)
+{
+	struct kunit_iommu_cmp_priv *cmp_priv = test->priv;
+	struct kunit_iommu_priv *priv = &cmp_priv->fmt;
+	struct io_pgtable_cfg *pgtbl_cfg =
+		&io_pgtable_ops_to_pgtable(cmp_priv->pgtbl_ops)->cfg;
+
+	/* Fixture does the setup */
+	KUNIT_EXPECT_NE(test, priv->info.pgsize_bitmap, 0);
+
+	/* pt_iommu has a superset of page sizes (ARM supports contiguous) */
+	KUNIT_EXPECT_EQ(test,
+			priv->info.pgsize_bitmap & pgtbl_cfg->pgsize_bitmap,
+			pgtbl_cfg->pgsize_bitmap);
+
+	/* Empty compare works */
+	compare_tables(test);
+}
+
+static void do_cmp_map(struct kunit *test, pt_vaddr_t va, pt_oaddr_t pa,
+		       pt_oaddr_t len, unsigned int prot)
+{
+	struct kunit_iommu_cmp_priv *cmp_priv = test->priv;
+	struct kunit_iommu_priv *priv = &cmp_priv->fmt;
+	const struct pt_iommu_ops *ops = priv->iommu->ops;
+	size_t mapped;
+	int ret;
+
+	/* This lacks pagination, must call with perfectly aligned everything */
+	if (sizeof(unsigned long) == 8) {
+		KUNIT_EXPECT_EQ(test, va % len, 0);
+		KUNIT_EXPECT_EQ(test, pa % len, 0);
+	}
+
+	mapped = 0;
+	ret = ops->map_pages(priv->iommu, va, pa, len, prot, GFP_KERNEL,
+			     &mapped, NULL);
+	KUNIT_EXPECT_EQ(test, ret, 0);
+	KUNIT_EXPECT_EQ(test, mapped, len);
+
+	mapped = 0;
+	ret = cmp_priv->pgtbl_ops->map_pages(cmp_priv->pgtbl_ops, va, pa, len,
+					     1, prot, GFP_KERNEL, &mapped);
+	KUNIT_EXPECT_EQ(test, ret, 0);
+	KUNIT_EXPECT_EQ(test, mapped, len);
+}
+
+static void do_cmp_unmap(struct kunit *test, pt_vaddr_t va, pt_vaddr_t len)
+{
+	struct kunit_iommu_cmp_priv *cmp_priv = test->priv;
+	struct kunit_iommu_priv *priv = &cmp_priv->fmt;
+	const struct pt_iommu_ops *ops = priv->iommu->ops;
+	size_t ret;
+
+	KUNIT_EXPECT_EQ(test, va % len, 0);
+
+	ret = ops->unmap_pages(priv->iommu, va, len, NULL);
+	KUNIT_EXPECT_EQ(test, ret, len);
+	ret = cmp_priv->pgtbl_ops->unmap_pages(cmp_priv->pgtbl_ops, va, len, 1,
+					       NULL);
+	KUNIT_EXPECT_EQ(test, ret, len);
+}
+
+static void test_cmp_one_map(struct kunit *test)
+{
+	struct kunit_iommu_cmp_priv *cmp_priv = test->priv;
+	struct kunit_iommu_priv *priv = &cmp_priv->fmt;
+	struct io_pgtable_cfg *pgtbl_cfg =
+		&io_pgtable_ops_to_pgtable(cmp_priv->pgtbl_ops)->cfg;
+	const pt_oaddr_t addr =
+		oalog2_mod(0x74a71445deadbeef, priv->common->max_oasz_lg2);
+	pt_vaddr_t pgsize_bitmap = priv->safe_pgsize_bitmap &
+				   pgtbl_cfg->pgsize_bitmap;
+	pt_vaddr_t cur_va;
+	unsigned int prot = 0;
+	unsigned int pgsz_lg2;
+
+	/*
+	 * Check that every prot combination at every page size level generates
+	 * the same data in page table.
+	 */
+	for (prot = 0; prot <= (IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE |
+				IOMMU_NOEXEC | IOMMU_MMIO);
+	     prot++) {
+		/* Page tables usually cannot represent inaccessible memory */
+		if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+			continue;
+
+		/* Try every supported page size */
+		cur_va = priv->smallest_pgsz * 256;
+		for (pgsz_lg2 = 0; pgsz_lg2 != PT_VADDR_MAX_LG2; pgsz_lg2++) {
+			pt_vaddr_t len = log2_to_int(pgsz_lg2);
+
+			if (!(pgsize_bitmap & len))
+				continue;
+
+			cur_va = ALIGN(cur_va, len);
+			do_cmp_map(test, cur_va,
+				   oalog2_set_mod(addr, 0, pgsz_lg2), len,
+				   prot);
+			compare_tables(test);
+			cur_va += len;
+		}
+
+		cur_va = priv->smallest_pgsz * 256;
+		for (pgsz_lg2 = 0; pgsz_lg2 != PT_VADDR_MAX_LG2; pgsz_lg2++) {
+			pt_vaddr_t len = log2_to_int(pgsz_lg2);
+
+			if (!(pgsize_bitmap & len))
+				continue;
+
+			cur_va = ALIGN(cur_va, len);
+			do_cmp_unmap(test, cur_va, len);
+			compare_tables(test);
+			cur_va += len;
+		}
+	}
+}
+
+static int pt_kunit_iommu_cmp_init(struct kunit *test)
+{
+	struct kunit_iommu_cmp_priv *cmp_priv;
+	struct kunit_iommu_priv *priv;
+	int ret;
+
+	test->priv = cmp_priv = kzalloc(sizeof(*cmp_priv), GFP_KERNEL);
+	if (!cmp_priv)
+		return -ENOMEM;
+	priv = &cmp_priv->fmt;
+
+	ret = pt_kunit_priv_init(priv);
+	if (ret)
+		goto err_priv;
+
+	cmp_priv->pgtbl_ops = pt_iommu_alloc_io_pgtable(
+		&priv->cfg, &priv->dummy_dev, &cmp_priv->fmt_memory);
+	if (!cmp_priv->pgtbl_ops) {
+		ret = -ENOMEM;
+		goto err_fmt_table;
+	}
+
+	cmp_priv->ref_table = priv->fmt_table;
+	return 0;
+
+err_fmt_table:
+	pt_iommu_deinit(priv->iommu);
+err_priv:
+	kfree(test->priv);
+	test->priv = NULL;
+	return ret;
+}
+
+static void pt_kunit_iommu_cmp_exit(struct kunit *test)
+{
+	struct kunit_iommu_cmp_priv *cmp_priv = test->priv;
+	struct kunit_iommu_priv *priv = &cmp_priv->fmt;
+
+	if (!test->priv)
+		return;
+
+	pt_iommu_deinit(priv->iommu);
+	free_io_pgtable_ops(cmp_priv->pgtbl_ops);
+	pt_iommu_free_pgtbl_cfg(cmp_priv->fmt_memory);
+	kfree(test->priv);
+}
+
+static struct kunit_case cmp_test_cases[] = {
+	KUNIT_CASE(test_cmp_init),
+	KUNIT_CASE(test_cmp_one_map),
+	{},
+};
+
+static struct kunit_suite NS(cmp_suite) = {
+	.name = __stringify(NS(iommu_cmp_test)),
+	.init = pt_kunit_iommu_cmp_init,
+	.exit = pt_kunit_iommu_cmp_exit,
+	.test_cases = cmp_test_cases,
+};
+kunit_test_suites(&NS(cmp_suite));
-- 
2.46.0




Thread overview: 25+ messages
2024-08-15 15:11 [PATCH 00/16] Consolidate iommu page table implementations Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 01/16] genpt: Generic Page Table base API Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 02/16] genpt: Add a specialized allocator for page table levels Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 03/16] iommupt: Add the basic structure of the iommu implementation Jason Gunthorpe
2024-08-16 17:58   ` Jeff Johnson
2024-08-15 15:11 ` [PATCH 04/16] iommupt: Add iova_to_phys op Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 05/16] iommupt: Add unmap_pages op Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 06/16] iommupt: Add map_pages op Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 07/16] iommupt: Add cut_mapping op Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 08/16] iommupt: Add read_and_clear_dirty op Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 09/16] iommupt: Add a kunit test for Generic Page Table and the IOMMU implementation Jason Gunthorpe
2024-08-16 17:55   ` Jeff Johnson
2024-08-19 14:16     ` Jason Gunthorpe
2024-08-15 15:11 ` Jason Gunthorpe [this message]
2024-08-15 15:11 ` [PATCH 11/16] iommupt: Add the 64 bit ARMv8 page table format Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 12/16] iommupt: Add the AMD IOMMU v1 " Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 13/16] iommupt: Add the x86 PAE " Jason Gunthorpe
2024-08-16 19:21   ` Sean Christopherson
2024-08-17  0:36     ` Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 14/16] iommupt: Add the DART v1/v2 " Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 15/16] iommupt: Add the 32 bit ARMv7s " Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 16/16] iommupt: Add the Intel VT-D second stage " Jason Gunthorpe
2024-08-19  2:51   ` Zhang, Tina
2024-08-19 15:53     ` Jason Gunthorpe
2024-08-20  8:22       ` Yi Liu
