From: Jason Gunthorpe <jgg@nvidia.com>
Cc: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>,
Lu Baolu <baolu.lu@linux.intel.com>,
David Hildenbrand <david@redhat.com>,
Christoph Hellwig <hch@lst.de>,
iommu@lists.linux.dev, Joao Martins <joao.m.martins@oracle.com>,
Kevin Tian <kevin.tian@intel.com>,
kvm@vger.kernel.org, linux-mm@kvack.org,
Pasha Tatashin <pasha.tatashin@soleen.com>,
Peter Xu <peterx@redhat.com>, Ryan Roberts <ryan.roberts@arm.com>,
Sean Christopherson <seanjc@google.com>,
Tina Zhang <tina.zhang@intel.com>
Subject: [PATCH 03/16] iommupt: Add the basic structure of the iommu implementation
Date: Thu, 15 Aug 2024 12:11:19 -0300
Message-ID: <3-v1-01fa10580981+1d-iommu_pt_jgg@nvidia.com>
In-Reply-To: <0-v1-01fa10580981+1d-iommu_pt_jgg@nvidia.com>
The iommu implementation is a single version of the iommu domain
operations: iova_to_phys, map, unmap, read_and_clear_dirty and
flushing. It is intended to be a near drop-in replacement for existing
iopt users.
By using the Generic Page Table mechanism it is a single algorithmic
implementation that operates on all the different page table formats
with consistent characteristics.
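As an illustration of how a format is wired in (the "examplefmt" name and
the exact feature bits below are hypothetical; the real formats arrive in
later patches of this series), a per-format compilation unit defines the
macros described by iommu_template.h and then includes the template:

  /* Illustration only, not part of this patch */
  #define PT_FMT examplefmt
  #define PT_SUPPORTED_FEATURES \
          (BIT(PT_FEAT_DYNAMIC_TOP) | BIT(PT_FEAT_DMA_INCOHERENT))

  #include "iommu_template.h"

The template stringifies PT_FMT to pull in defs_examplefmt.h and
examplefmt.h alongside the shared pt_defs.h/pt_common.h headers, and the
generated symbols are prefixed with examplefmt_ via PTPFX.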
Implement the basic starting point: alloc(), get_info() and deinit().
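A minimal sketch of the resulting driver-facing flow (the struct and
function names are inferred from the CONCATENATE() naming in iommu_pt.h
for the hypothetical examplefmt format; only the cfg fields consumed by
pt_iommu_init() in this patch are shown):

  static int example_init_table(struct pt_iommu_examplefmt *table,
                                struct device *iommu_dev)
  {
          struct pt_iommu_examplefmt_cfg cfg = {
                  .features = BIT(PT_FEAT_DYNAMIC_TOP),
                  .iommu_device = iommu_dev,
          };
          struct pt_iommu_info info;
          int ret;

          ret = pt_iommu_examplefmt_init(table, &cfg, GFP_KERNEL);
          if (ret)
                  return ret;

          /* Page sizes natively supported, eg for a domain's pgsize_bitmap */
          table->iommu.ops->get_info(&table->iommu, &info);
          return 0;
  }

Teardown is pt_iommu_deinit(&table->iommu), once the caller has already
removed the table from all HW access and flushed caches.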
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
drivers/iommu/generic_pt/fmt/iommu_template.h | 37 ++++
drivers/iommu/generic_pt/iommu_pt.h | 166 ++++++++++++++++++
include/linux/generic_pt/iommu.h | 87 +++++++++
3 files changed, 290 insertions(+)
create mode 100644 drivers/iommu/generic_pt/fmt/iommu_template.h
create mode 100644 drivers/iommu/generic_pt/iommu_pt.h
create mode 100644 include/linux/generic_pt/iommu.h
diff --git a/drivers/iommu/generic_pt/fmt/iommu_template.h b/drivers/iommu/generic_pt/fmt/iommu_template.h
new file mode 100644
index 00000000000000..d6ca1582e11ca4
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/iommu_template.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
+ *
+ * Template to build the iommu module and kunit from the format and
+ * implementation headers.
+ *
+ * The format should have:
+ * #define PT_FMT <name>
+ * #define PT_SUPPORTED_FEATURES (BIT(PT_FEAT_xx) | BIT(PT_FEAT_yy))
+ * And optionally:
+ * #define PT_FORCE_ENABLED_FEATURES ..
+ * #define PT_FMT_VARIANT <suffix>
+ */
+#include <linux/args.h>
+#include <linux/stringify.h>
+
+#ifdef PT_FMT_VARIANT
+#define PTPFX \
+ CONCATENATE(CONCATENATE(PT_FMT, _), CONCATENATE(PT_FMT_VARIANT, _))
+#else
+#define PTPFX CONCATENATE(PT_FMT, _)
+#endif
+
+#define _PT_FMT_H PT_FMT.h
+#define PT_FMT_H __stringify(_PT_FMT_H)
+
+#define _PT_DEFS_H CONCATENATE(defs_, _PT_FMT_H)
+#define PT_DEFS_H __stringify(_PT_DEFS_H)
+
+#include <linux/generic_pt/common.h>
+#include PT_DEFS_H
+#include "../pt_defs.h"
+#include PT_FMT_H
+#include "../pt_common.h"
+
+#include "../iommu_pt.h"
diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h
new file mode 100644
index 00000000000000..708beaf5d812f7
--- /dev/null
+++ b/drivers/iommu/generic_pt/iommu_pt.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
+ *
+ * "Templated C code" for implementing the iommu operations for page tables.
+ * This is compiled multiple times, over all the page table formats to pick up
+ * the per-format definitions.
+ */
+#ifndef __GENERIC_PT_IOMMU_PT_H
+#define __GENERIC_PT_IOMMU_PT_H
+
+#include "pt_iter.h"
+#include "pt_alloc.h"
+
+#include <linux/iommu.h>
+#include <linux/export.h>
+
+struct pt_iommu_collect_args {
+ struct pt_radix_list_head free_list;
+ u8 ignore_mapped : 1;
+};
+
+static int __collect_tables(struct pt_range *range, void *arg,
+ unsigned int level, struct pt_table_p *table)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ struct pt_iommu_collect_args *collect = arg;
+ int ret;
+
+ if (collect->ignore_mapped && !pt_can_have_table(&pts))
+ return 0;
+
+ for_each_pt_level_item(&pts) {
+ if (pts.type == PT_ENTRY_TABLE) {
+ pt_radix_add_list(&collect->free_list, pts.table_lower);
+ ret = pt_descend(&pts, arg, __collect_tables);
+ if (ret)
+ return ret;
+ continue;
+ }
+ if (pts.type == PT_ENTRY_OA && !collect->ignore_mapped)
+ return -EADDRINUSE;
+ }
+ return 0;
+}
+
+static void NS(get_info)(struct pt_iommu *iommu_table,
+ struct pt_iommu_info *info)
+{
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct pt_range range = pt_top_range(common);
+ struct pt_state pts = pt_init_top(&range);
+ pt_vaddr_t pgsize_bitmap = 0;
+
+ if (pt_feature(common, PT_FEAT_DYNAMIC_TOP)) {
+ for (pts.level = 0; pts.level <= PT_MAX_TOP_LEVEL;
+ pts.level++) {
+ if (pt_table_item_lg2sz(&pts) >= common->max_vasz_lg2)
+ break;
+ pgsize_bitmap |= pt_possible_sizes(&pts);
+ }
+ } else {
+ for (pts.level = 0; pts.level <= range.top_level; pts.level++)
+ pgsize_bitmap |= pt_possible_sizes(&pts);
+ }
+
+ /* Hide page sizes larger than the maximum OA */
+ info->pgsize_bitmap = oalog2_mod(pgsize_bitmap, common->max_oasz_lg2);
+}
+
+static void NS(deinit)(struct pt_iommu *iommu_table)
+{
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct pt_range range = pt_top_range(common);
+ struct pt_iommu_collect_args collect = {
+ .ignore_mapped = true,
+ };
+
+ pt_radix_add_list(&collect.free_list, range.top_table);
+ pt_walk_range(&range, __collect_tables, &collect);
+ if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
+ pt_radix_stop_incoherent_list(&collect.free_list,
+ iommu_table->iommu_device);
+ pt_radix_free_list(&collect.free_list);
+}
+
+static const struct pt_iommu_ops NS(ops) = {
+ .iova_to_phys = NS(iova_to_phys),
+ .get_info = NS(get_info),
+ .deinit = NS(deinit),
+};
+
+static int pt_init_common(struct pt_common *common)
+{
+ struct pt_range top_range = pt_top_range(common);
+
+ if (PT_WARN_ON(top_range.top_level > PT_MAX_TOP_LEVEL))
+ return -EINVAL;
+
+ if (top_range.top_level == PT_MAX_TOP_LEVEL ||
+ common->max_vasz_lg2 == top_range.max_vasz_lg2)
+ common->features &= ~BIT(PT_FEAT_DYNAMIC_TOP);
+
+ if (!pt_feature(common, PT_FEAT_DYNAMIC_TOP))
+ common->max_vasz_lg2 = top_range.max_vasz_lg2;
+
+ if (top_range.max_vasz_lg2 == PT_VADDR_MAX_LG2)
+ common->features |= BIT(PT_FEAT_FULL_VA);
+
+ /* Requested features must match features compiled into this format */
+ if ((common->features & ~(unsigned int)PT_SUPPORTED_FEATURES) ||
+ (common->features & PT_FORCE_ENABLED_FEATURES) !=
+ PT_FORCE_ENABLED_FEATURES)
+ return -EOPNOTSUPP;
+
+ /* FIXME generalize the oa/va maximums from HW better in the cfg */
+ if (common->max_oasz_lg2 == 0)
+ common->max_oasz_lg2 = pt_max_output_address_lg2(common);
+ else
+ common->max_oasz_lg2 = min(common->max_oasz_lg2,
+ pt_max_output_address_lg2(common));
+ return 0;
+}
+
+#define pt_iommu_table_cfg CONCATENATE(pt_iommu_table, _cfg)
+#define pt_iommu_init CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), init)
+int pt_iommu_init(struct pt_iommu_table *fmt_table,
+ struct pt_iommu_table_cfg *cfg, gfp_t gfp)
+{
+ struct pt_iommu *iommu_table = &fmt_table->iommu;
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct pt_table_p *table_mem;
+ int ret;
+
+ memset(fmt_table, 0, sizeof(*fmt_table));
+ spin_lock_init(&iommu_table->table_lock);
+ common->features = cfg->features;
+ common->max_vasz_lg2 = PT_MAX_VA_ADDRESS_LG2;
+ iommu_table->iommu_device = cfg->iommu_device;
+ iommu_table->nid = dev_to_node(cfg->iommu_device);
+
+ ret = pt_iommu_fmt_init(fmt_table, cfg);
+ if (ret)
+ return ret;
+
+ ret = pt_init_common(common);
+ if (ret)
+ return ret;
+
+ table_mem = table_alloc_top(common, common->top_of_table, gfp, false);
+ if (IS_ERR(table_mem))
+ return PTR_ERR(table_mem);
+#ifdef PT_FIXED_TOP_LEVEL
+ pt_top_set(common, table_mem, PT_FIXED_TOP_LEVEL);
+#else
+ pt_top_set(common, table_mem, pt_top_get_level(common));
+#endif
+ iommu_table->ops = &NS(ops);
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(pt_iommu_init, GENERIC_PT_IOMMU);
+
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(GENERIC_PT);
+
+#endif
diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h
new file mode 100644
index 00000000000000..d9d3da49dc0fe2
--- /dev/null
+++ b/include/linux/generic_pt/iommu.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __GENERIC_PT_IOMMU_H
+#define __GENERIC_PT_IOMMU_H
+
+#include <linux/generic_pt/common.h>
+#include <linux/mm_types.h>
+
+struct pt_iommu_ops;
+
+/**
+ * DOC: IOMMU Radix Page Table
+ *
+ * The iommu implementation of the Generic Page Table provides an ops struct
+ * that is intended to be used with an iommu_domain to serve the DMA API,
+ * IOMMUFD and the generic map/unmap interface.
+ *
+ * This interface uses a caller provided locking approach. The caller must have
+ * a VA range lock concept that prevents concurrent threads from calling ops on
+ * the same VA. Generally the range lock must be at least as large as a single
+ * map call.
+ */
+
+/**
+ * struct pt_iommu - Base structure for iommu page tables
+ *
+ * The format-specific struct will include this as the first member.
+ */
+struct pt_iommu {
+ /**
+ * @ops: Function pointers to access the API
+ */
+ const struct pt_iommu_ops *ops;
+ /**
+ * @nid: Node ID to use for table memory allocations. This defaults to
+ * dev_to_node(iommu_device). The iommu driver may want to set the NID
+ * to the device's NID, if there are multiple table walkers.
+ */
+ int nid;
+ /* private: */
+ /* Write lock for pt_common top_of_table */
+ spinlock_t table_lock;
+ struct device *iommu_device;
+};
+
+/**
+ * struct pt_iommu_info - Details about the iommu page table
+ *
+ * Returned from pt_iommu_ops->get_info()
+ */
+struct pt_iommu_info {
+ /**
+ * @pgsize_bitmap: A bitmask where each set bit indicates
+ * a page size that can be natively stored in the page table.
+ */
+ u64 pgsize_bitmap;
+};
+
+/* See the function comments in iommu_pt.c for kdocs */
+struct pt_iommu_ops {
+ /**
+ * get_info() - Return the pt_iommu_info structure
+ * @iommu_table: Table to query
+ *
+ * Return some basic static information about the page table.
+ */
+ void (*get_info)(struct pt_iommu *iommu_table,
+ struct pt_iommu_info *info);
+
+ /**
+ * deinit() - Undo a format specific init operation
+ * @iommu_table: Table to destroy
+ *
+ * Release all of the memory. The caller must have already removed the
+ * table from all HW access and all caches.
+ */
+ void (*deinit)(struct pt_iommu *iommu_table);
+};
+
+static inline void pt_iommu_deinit(struct pt_iommu *iommu_table)
+{
+ iommu_table->ops->deinit(iommu_table);
+}
+
+#endif
--
2.46.0
Thread overview: 25+ messages
2024-08-15 15:11 [PATCH 00/16] Consolidate iommu page table implementations Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 01/16] genpt: Generic Page Table base API Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 02/16] genpt: Add a specialized allocator for page table levels Jason Gunthorpe
2024-08-15 15:11 ` Jason Gunthorpe [this message]
2024-08-16 17:58 ` [PATCH 03/16] iommupt: Add the basic structure of the iommu implementation Jeff Johnson
2024-08-15 15:11 ` [PATCH 04/16] iommupt: Add iova_to_phys op Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 05/16] iommupt: Add unmap_pages op Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 06/16] iommupt: Add map_pages op Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 07/16] iommupt: Add cut_mapping op Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 08/16] iommupt: Add read_and_clear_dirty op Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 09/16] iommupt: Add a kunit test for Generic Page Table and the IOMMU implementation Jason Gunthorpe
2024-08-16 17:55 ` Jeff Johnson
2024-08-19 14:16 ` Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 10/16] iommupt: Add a kunit test to compare against iopt Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 11/16] iommupt: Add the 64 bit ARMv8 page table format Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 12/16] iommupt: Add the AMD IOMMU v1 " Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 13/16] iommupt: Add the x86 PAE " Jason Gunthorpe
2024-08-16 19:21 ` Sean Christopherson
2024-08-17 0:36 ` Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 14/16] iommupt: Add the DART v1/v2 " Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 15/16] iommupt: Add the 32 bit ARMv7s " Jason Gunthorpe
2024-08-15 15:11 ` [PATCH 16/16] iommupt: Add the Intel VT-D second stage " Jason Gunthorpe
2024-08-19 2:51 ` Zhang, Tina
2024-08-19 15:53 ` Jason Gunthorpe
2024-08-20 8:22 ` Yi Liu