From: Dan Williams <dan.j.williams@intel.com>
To: linux-cxl@vger.kernel.org
Cc: Fan Ni <fan.ni@samsung.com>,
vishal.l.verma@intel.com, dave.hansen@linux.intel.com,
linux-mm@kvack.org, linux-acpi@vger.kernel.org
Subject: [PATCH v2 18/20] dax/hmem: Move hmem device registration to dax_hmem.ko
Date: Fri, 10 Feb 2023 01:07:07 -0800
Message-ID: <167602002771.1924368.5653558226424530127.stgit@dwillia2-xfh.jf.intel.com>
In-Reply-To: <167601992097.1924368.18291887895351917895.stgit@dwillia2-xfh.jf.intel.com>

In preparation for the CXL region driver to take over the responsibility
of registering device-dax instances for CXL regions, move the
registration of "hmem" devices to dax_hmem.ko.

Previously, the builtin component of this enabling
(drivers/dax/hmem/device.o) would register platform devices for each
address range and trigger the dax_hmem.ko module to load and attach
device-dax instances to those devices. Now, the ranges are still
collected from walking the HMAT and the EFI memory map, but device
creation is deferred: a single "hmem_platform" device is registered
instead, which triggers dax_hmem.ko to load and register the platform
devices itself.
Tested-by: Fan Ni <fan.ni@samsung.com>
Link: https://lore.kernel.org/r/167564543923.847146.9030380223622044744.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
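For reference, below the fold: a minimal sketch of the split flow this
patch introduces, using the hmem_register_resource() /
walk_hmem_resources() interfaces from the diff. The example_* names and
the callback body are illustrative only, not part of the patch.

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/dax.h>

/* Builtin side (e.g. HMAT / EFI memory map walking): only record the range. */
static void example_enumerate(int target_nid, struct resource *res)
{
	/* Defers device creation; the first call also registers "hmem_platform". */
	hmem_register_resource(target_nid, res);
}

/* Module side: dax_hmem.ko walks the recorded ranges at probe time. */
static int example_walk_cb(struct device *host, int target_nid,
			   const struct resource *res)
{
	dev_info(host, "would register an \"hmem\" device for %pr (node %d)\n",
		 res, target_nid);
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	return walk_hmem_resources(&pdev->dev, example_walk_cb);
}

The split keeps the builtin side down to range bookkeeping plus a single
trigger device, while all "hmem" platform devices are created from module
probe context.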
 drivers/acpi/numa/hmat.c  |   2 -
 drivers/dax/Kconfig       |   2 -
 drivers/dax/hmem/device.c |  91 +++++++++++++++++++--------------------
 drivers/dax/hmem/hmem.c   | 105 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dax.h       |   7 ++-
 5 files changed, 155 insertions(+), 52 deletions(-)
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index ff24282301ab..bba268ecd802 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -718,7 +718,7 @@ static void hmat_register_target_devices(struct memory_target *target)
for (res = target->memregions.child; res; res = res->sibling) {
int target_nid = pxm_to_node(target->memory_pxm);
- hmem_register_device(target_nid, res);
+ hmem_register_resource(target_nid, res);
}
}
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 5fdf269a822e..d13c889c2a64 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -46,7 +46,7 @@ config DEV_DAX_HMEM
Say M if unsure.
config DEV_DAX_HMEM_DEVICES
- depends on DEV_DAX_HMEM && DAX=y
+ depends on DEV_DAX_HMEM && DAX
def_bool y
config DEV_DAX_KMEM
diff --git a/drivers/dax/hmem/device.c b/drivers/dax/hmem/device.c
index b1b339bccfe5..f9e1a76a04a9 100644
--- a/drivers/dax/hmem/device.c
+++ b/drivers/dax/hmem/device.c
@@ -8,6 +8,8 @@
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);
+static bool platform_initialized;
+static DEFINE_MUTEX(hmem_resource_lock);
static struct resource hmem_active = {
.name = "HMEM devices",
.start = 0,
@@ -15,71 +17,66 @@ static struct resource hmem_active = {
.flags = IORESOURCE_MEM,
};
-void hmem_register_device(int target_nid, struct resource *res)
+int walk_hmem_resources(struct device *host, walk_hmem_fn fn)
+{
+ struct resource *res;
+ int rc = 0;
+
+ mutex_lock(&hmem_resource_lock);
+ for (res = hmem_active.child; res; res = res->sibling) {
+ rc = fn(host, (int) res->desc, res);
+ if (rc)
+ break;
+ }
+ mutex_unlock(&hmem_resource_lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(walk_hmem_resources);
+
+static void __hmem_register_resource(int target_nid, struct resource *res)
{
struct platform_device *pdev;
- struct memregion_info info;
- int rc, id;
+ struct resource *new;
+ int rc;
- if (nohmem)
+ new = __request_region(&hmem_active, res->start, resource_size(res), "",
+ 0);
+ if (!new) {
+ pr_debug("hmem range %pr already active\n", res);
return;
+ }
- rc = region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
- IORES_DESC_SOFT_RESERVED);
- if (rc != REGION_INTERSECTS)
- return;
+ new->desc = target_nid;
- id = memregion_alloc(GFP_KERNEL);
- if (id < 0) {
- pr_err("memregion allocation failure for %pr\n", res);
+ if (platform_initialized)
return;
- }
- pdev = platform_device_alloc("hmem", id);
+ pdev = platform_device_alloc("hmem_platform", 0);
if (!pdev) {
- pr_err("hmem device allocation failure for %pr\n", res);
- goto out_pdev;
- }
-
- if (!__request_region(&hmem_active, res->start, resource_size(res),
- dev_name(&pdev->dev), 0)) {
- dev_dbg(&pdev->dev, "hmem range %pr already active\n", res);
- goto out_active;
- }
-
- pdev->dev.numa_node = numa_map_to_online_node(target_nid);
- info = (struct memregion_info) {
- .target_node = target_nid,
- .range = {
- .start = res->start,
- .end = res->end,
- },
- };
- rc = platform_device_add_data(pdev, &info, sizeof(info));
- if (rc < 0) {
- pr_err("hmem memregion_info allocation failure for %pr\n", res);
- goto out_resource;
+ pr_err_once("failed to register device-dax hmem_platform device\n");
+ return;
}
rc = platform_device_add(pdev);
- if (rc < 0) {
- dev_err(&pdev->dev, "device add failed for %pr\n", res);
- goto out_resource;
- }
+ if (rc)
+ platform_device_put(pdev);
+ else
+ platform_initialized = true;
+}
- return;
+void hmem_register_resource(int target_nid, struct resource *res)
+{
+ if (nohmem)
+ return;
-out_resource:
- __release_region(&hmem_active, res->start, resource_size(res));
-out_active:
- platform_device_put(pdev);
-out_pdev:
- memregion_free(id);
+ mutex_lock(&hmem_resource_lock);
+ __hmem_register_resource(target_nid, res);
+ mutex_unlock(&hmem_resource_lock);
}
static __init int hmem_register_one(struct resource *res, void *data)
{
- hmem_register_device(phys_to_target_node(res->start), res);
+ hmem_register_resource(phys_to_target_node(res->start), res);
return 0;
}
diff --git a/drivers/dax/hmem/hmem.c b/drivers/dax/hmem/hmem.c
index 5025a8c9850b..e7bdff3132fa 100644
--- a/drivers/dax/hmem/hmem.c
+++ b/drivers/dax/hmem/hmem.c
@@ -3,6 +3,7 @@
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
+#include <linux/dax.h>
#include "../bus.h"
static bool region_idle;
@@ -43,8 +44,110 @@ static struct platform_driver dax_hmem_driver = {
},
};
-module_platform_driver(dax_hmem_driver);
+static void release_memregion(void *data)
+{
+ memregion_free((long) data);
+}
+
+static void release_hmem(void *pdev)
+{
+ platform_device_unregister(pdev);
+}
+
+static int hmem_register_device(struct device *host, int target_nid,
+ const struct resource *res)
+{
+ struct platform_device *pdev;
+ struct memregion_info info;
+ long id;
+ int rc;
+
+ rc = region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
+ IORES_DESC_SOFT_RESERVED);
+ if (rc != REGION_INTERSECTS)
+ return 0;
+
+ id = memregion_alloc(GFP_KERNEL);
+ if (id < 0) {
+ dev_err(host, "memregion allocation failure for %pr\n", res);
+ return -ENOMEM;
+ }
+ rc = devm_add_action_or_reset(host, release_memregion, (void *) id);
+ if (rc)
+ return rc;
+
+ pdev = platform_device_alloc("hmem", id);
+ if (!pdev) {
+ dev_err(host, "device allocation failure for %pr\n", res);
+ return -ENOMEM;
+ }
+
+ pdev->dev.numa_node = numa_map_to_online_node(target_nid);
+ info = (struct memregion_info) {
+ .target_node = target_nid,
+ .range = {
+ .start = res->start,
+ .end = res->end,
+ },
+ };
+ rc = platform_device_add_data(pdev, &info, sizeof(info));
+ if (rc < 0) {
+ dev_err(host, "memregion_info allocation failure for %pr\n",
+ res);
+ goto out_put;
+ }
+
+ rc = platform_device_add(pdev);
+ if (rc < 0) {
+ dev_err(host, "%s add failed for %pr\n", dev_name(&pdev->dev),
+ res);
+ goto out_put;
+ }
+
+ return devm_add_action_or_reset(host, release_hmem, pdev);
+
+out_put:
+ platform_device_put(pdev);
+ return rc;
+}
+
+static int dax_hmem_platform_probe(struct platform_device *pdev)
+{
+ return walk_hmem_resources(&pdev->dev, hmem_register_device);
+}
+
+static struct platform_driver dax_hmem_platform_driver = {
+ .probe = dax_hmem_platform_probe,
+ .driver = {
+ .name = "hmem_platform",
+ },
+};
+
+static __init int dax_hmem_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&dax_hmem_platform_driver);
+ if (rc)
+ return rc;
+
+ rc = platform_driver_register(&dax_hmem_driver);
+ if (rc)
+ platform_driver_unregister(&dax_hmem_platform_driver);
+
+ return rc;
+}
+
+static __exit void dax_hmem_exit(void)
+{
+ platform_driver_unregister(&dax_hmem_driver);
+ platform_driver_unregister(&dax_hmem_platform_driver);
+}
+
+module_init(dax_hmem_init);
+module_exit(dax_hmem_exit);
MODULE_ALIAS("platform:hmem*");
+MODULE_ALIAS("platform:hmem_platform*");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 2b5ecb591059..bf6258472e49 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -262,11 +262,14 @@ static inline bool dax_mapping(struct address_space *mapping)
}
#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
-void hmem_register_device(int target_nid, struct resource *r);
+void hmem_register_resource(int target_nid, struct resource *r);
#else
-static inline void hmem_register_device(int target_nid, struct resource *r)
+static inline void hmem_register_resource(int target_nid, struct resource *r)
{
}
#endif
+typedef int (*walk_hmem_fn)(struct device *dev, int target_nid,
+ const struct resource *res);
+int walk_hmem_resources(struct device *dev, walk_hmem_fn fn);
#endif