1// SPDX-License-Identifier: GPL-2.0
2#include <linux/platform_device.h>
3#include <linux/memregion.h>
4#include <linux/module.h>
5#include <linux/dax.h>
6#include <linux/mm.h>
7
/*
 * Module parameter "disable" (dax_hmem.disable=1): when set, skip
 * registering Soft Reserved ranges as hmem devices.  0444: readable
 * via sysfs, not writable after load.
 */
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);
10
11static struct resource hmem_active = {
12 .name = "HMEM devices",
13 .start = 0,
14 .end = -1,
15 .flags = IORESOURCE_MEM,
16};
17
/**
 * hmem_register_device() - register a Soft Reserved range as an "hmem"
 * platform device for the device-dax hmem driver to claim.
 * @target_nid: NUMA node id associated with the range (may be offline;
 *              mapped to an online node below)
 * @r: physical address range to register
 *
 * Failures are logged and the range is silently skipped; there is no
 * return value for the caller to act on.
 */
void hmem_register_device(int target_nid, struct resource *r)
{
	/* define a clean / non-busy resource for the platform device */
	struct resource res = {
		.start = r->start,
		.end = r->end,
		.flags = IORESOURCE_MEM,
		.desc = IORES_DESC_SOFT_RESERVED,
	};
	struct platform_device *pdev;
	struct memregion_info info;
	int rc, id;

	if (nohmem)
		return;

	/* only proceed if the whole range is still marked Soft Reserved */
	rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
			       IORES_DESC_SOFT_RESERVED);
	if (rc != REGION_INTERSECTS)
		return;

	/* allocate a unique device id for the "hmem.%d" platform device */
	id = memregion_alloc(GFP_KERNEL);
	if (id < 0) {
		pr_err("memregion allocation failure for %pr\n", &res);
		return;
	}

	pdev = platform_device_alloc("hmem", id);
	if (!pdev) {
		pr_err("hmem device allocation failure for %pr\n", &res);
		goto out_pdev;
	}

	/*
	 * Track the claim under hmem_active; failure means another hmem
	 * device already owns this range, which is only debug-worthy.
	 */
	if (!__request_region(&hmem_active, res.start, resource_size(&res),
			      dev_name(&pdev->dev), 0)) {
		dev_dbg(&pdev->dev, "hmem range %pr already active\n", &res);
		goto out_active;
	}

	/* fall back to a nearby online node if target_nid is offline */
	pdev->dev.numa_node = numa_map_to_online_node(target_nid);
	info = (struct memregion_info) {
		.target_node = target_nid,
	};
	rc = platform_device_add_data(pdev, &info, sizeof(info));
	if (rc < 0) {
		pr_err("hmem memregion_info allocation failure for %pr\n", &res);
		goto out_resource;
	}

	rc = platform_device_add_resources(pdev, &res, 1);
	if (rc < 0) {
		pr_err("hmem resource allocation failure for %pr\n", &res);
		goto out_resource;
	}

	rc = platform_device_add(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "device add failed for %pr\n", &res);
		goto out_resource;
	}

	return;

	/* unwind in reverse order of acquisition */
out_resource:
	__release_region(&hmem_active, res.start, resource_size(&res));
out_active:
	platform_device_put(pdev);
out_pdev:
	memregion_free(id);
}
88
89static __init int hmem_register_one(struct resource *res, void *data)
90{
91 hmem_register_device(phys_to_target_node(res->start), res);
92
93 return 0;
94}
95
96static __init int hmem_init(void)
97{
98 walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
99 IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);
100 return 0;
101}
102
/*
 * As this is a fallback for address ranges unclaimed by the ACPI HMAT
 * parsing it must be at an initcall level greater than hmat_init().
 * NOTE(review): confirm hmat_init()'s initcall level still precedes
 * late_initcall if either side is ever changed.
 */
late_initcall(hmem_init);
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/platform_device.h>
3#include <linux/memregion.h>
4#include <linux/module.h>
5#include <linux/dax.h>
6#include <linux/mm.h>
7
/*
 * Module parameter "disable" (dax_hmem.disable=1): when set, Soft Reserved
 * ranges are not recorded for hmem consumption.  0444: read-only via sysfs.
 */
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);
10
/* Set once the singleton "hmem_platform" device has been registered. */
static bool platform_initialized;
/* Serializes hmem_active's child list and platform_initialized. */
static DEFINE_MUTEX(hmem_resource_lock);
/*
 * Parent resource for all claimed HMEM ranges; spans the whole physical
 * address space so any range can be requested beneath it.
 */
static struct resource hmem_active = {
	.name = "HMEM devices",
	.start = 0,
	.end = -1,
	.flags = IORESOURCE_MEM,
};
19
20int walk_hmem_resources(struct device *host, walk_hmem_fn fn)
21{
22 struct resource *res;
23 int rc = 0;
24
25 mutex_lock(&hmem_resource_lock);
26 for (res = hmem_active.child; res; res = res->sibling) {
27 rc = fn(host, (int) res->desc, res);
28 if (rc)
29 break;
30 }
31 mutex_unlock(&hmem_resource_lock);
32 return rc;
33}
34EXPORT_SYMBOL_GPL(walk_hmem_resources);
35
/*
 * Claim @res under hmem_active and stash @target_nid in the claimed
 * resource's ->desc field for later retrieval by walk_hmem_resources().
 * On the first successful claim also register the singleton
 * "hmem_platform" device that consumes the recorded ranges.
 *
 * Caller must hold hmem_resource_lock.
 */
static void __hmem_register_resource(int target_nid, struct resource *res)
{
	struct platform_device *pdev;
	struct resource *new;
	int rc;

	new = __request_region(&hmem_active, res->start, resource_size(res), "",
			       0);
	if (!new) {
		pr_debug("hmem range %pr already active\n", res);
		return;
	}

	/* repurpose the descriptor field to remember the target node */
	new->desc = target_nid;

	/* the "hmem_platform" device only needs to be registered once */
	if (platform_initialized)
		return;

	pdev = platform_device_alloc("hmem_platform", 0);
	if (!pdev) {
		pr_err_once("failed to register device-dax hmem_platform device\n");
		return;
	}

	rc = platform_device_add(pdev);
	if (rc)
		platform_device_put(pdev);
	else
		platform_initialized = true;
}
66
/*
 * Record a Soft Reserved range for later consumption via
 * walk_hmem_resources().  No-op when loaded with dax_hmem.disable=1;
 * the nohmem check is deliberately outside the lock since the flag is
 * fixed at module load (0444).
 */
void hmem_register_resource(int target_nid, struct resource *res)
{
	if (nohmem)
		return;

	mutex_lock(&hmem_resource_lock);
	__hmem_register_resource(target_nid, res);
	mutex_unlock(&hmem_resource_lock);
}
76
77static __init int hmem_register_one(struct resource *res, void *data)
78{
79 hmem_register_resource(phys_to_target_node(res->start), res);
80
81 return 0;
82}
83
84static __init int hmem_init(void)
85{
86 walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
87 IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);
88 return 0;
89}
90
/*
 * As this is a fallback for address ranges unclaimed by the ACPI HMAT
 * parsing it must be at an initcall level greater than hmat_init().
 * NOTE(review): confirm hmat_init() still registers at a level earlier
 * than device_initcall if either side is ever changed.
 */
device_initcall(hmem_init);