Loading...
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/platform_device.h>
3#include <linux/memregion.h>
4#include <linux/module.h>
5#include <linux/dax.h>
6#include <linux/mm.h>
7
/* Module parameter "disable": when set, skip all hmem device registration. */
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);
10
/*
 * Local resource tree used only for duplicate-range detection: each
 * registered range is claimed here via __request_region(), so a second
 * registration attempt for the same span fails and is skipped.
 */
static struct resource hmem_active = {
	.name = "HMEM devices",
	.start = 0,
	.end = -1,
	.flags = IORESOURCE_MEM,
};
17
/*
 * Register a platform "hmem" device for a Soft Reserved memory range.
 *
 * @target_nid: target node the range is associated with (may be offline;
 *              mapped to the nearest online node below)
 * @r: physical address range to publish via the platform device
 *
 * Returns nothing: registration is best effort, failures are logged and
 * the range is simply not registered.
 */
void hmem_register_device(int target_nid, struct resource *r)
{
	/* define a clean / non-busy resource for the platform device */
	struct resource res = {
		.start = r->start,
		.end = r->end,
		.flags = IORESOURCE_MEM,
		.desc = IORES_DESC_SOFT_RESERVED,
	};
	struct platform_device *pdev;
	struct memregion_info info;
	int rc, id;

	/* honor the "disable" module parameter */
	if (nohmem)
		return;

	/* only act on ranges that are still entirely Soft Reserved */
	rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
			IORES_DESC_SOFT_RESERVED);
	if (rc != REGION_INTERSECTS)
		return;

	/* unique id for the "hmem.<id>" platform device */
	id = memregion_alloc(GFP_KERNEL);
	if (id < 0) {
		pr_err("memregion allocation failure for %pr\n", &res);
		return;
	}

	pdev = platform_device_alloc("hmem", id);
	if (!pdev) {
		pr_err("hmem device allocation failure for %pr\n", &res);
		goto out_pdev;
	}

	/*
	 * Claim the range in the local hmem_active tree; failure means
	 * the same span was already registered, so bail out quietly.
	 */
	if (!__request_region(&hmem_active, res.start, resource_size(&res),
			      dev_name(&pdev->dev), 0)) {
		dev_dbg(&pdev->dev, "hmem range %pr already active\n", &res);
		goto out_active;
	}

	/* bind the device to an online node for driver-core allocations */
	pdev->dev.numa_node = numa_map_to_online_node(target_nid);
	info = (struct memregion_info) {
		.target_node = target_nid,
	};
	rc = platform_device_add_data(pdev, &info, sizeof(info));
	if (rc < 0) {
		pr_err("hmem memregion_info allocation failure for %pr\n", &res);
		goto out_resource;
	}

	rc = platform_device_add_resources(pdev, &res, 1);
	if (rc < 0) {
		pr_err("hmem resource allocation failure for %pr\n", &res);
		goto out_resource;
	}

	rc = platform_device_add(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "device add failed for %pr\n", &res);
		goto out_resource;
	}

	return;

	/* unwind in reverse acquisition order (kernel goto-cleanup idiom) */
out_resource:
	__release_region(&hmem_active, res.start, resource_size(&res));
out_active:
	platform_device_put(pdev);
out_pdev:
	memregion_free(id);
}
88
89static __init int hmem_register_one(struct resource *res, void *data)
90{
91 hmem_register_device(phys_to_target_node(res->start), res);
92
93 return 0;
94}
95
96static __init int hmem_init(void)
97{
98 walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
99 IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);
100 return 0;
101}
102
/*
 * As this is a fallback for address ranges unclaimed by the ACPI HMAT
 * parsing it must run at an initcall level greater than hmat_init();
 * late_initcall satisfies that ordering.
 */
late_initcall(hmem_init);
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/platform_device.h>
3#include <linux/memregion.h>
4#include <linux/module.h>
5#include <linux/dax.h>
6#include <linux/mm.h>
7
/* Module parameter "disable": when set, skip all hmem device registration. */
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);
10
11void hmem_register_device(int target_nid, struct resource *r)
12{
13 /* define a clean / non-busy resource for the platform device */
14 struct resource res = {
15 .start = r->start,
16 .end = r->end,
17 .flags = IORESOURCE_MEM,
18 };
19 struct platform_device *pdev;
20 struct memregion_info info;
21 int rc, id;
22
23 if (nohmem)
24 return;
25
26 rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
27 IORES_DESC_SOFT_RESERVED);
28 if (rc != REGION_INTERSECTS)
29 return;
30
31 id = memregion_alloc(GFP_KERNEL);
32 if (id < 0) {
33 pr_err("memregion allocation failure for %pr\n", &res);
34 return;
35 }
36
37 pdev = platform_device_alloc("hmem", id);
38 if (!pdev) {
39 pr_err("hmem device allocation failure for %pr\n", &res);
40 goto out_pdev;
41 }
42
43 pdev->dev.numa_node = numa_map_to_online_node(target_nid);
44 info = (struct memregion_info) {
45 .target_node = target_nid,
46 };
47 rc = platform_device_add_data(pdev, &info, sizeof(info));
48 if (rc < 0) {
49 pr_err("hmem memregion_info allocation failure for %pr\n", &res);
50 goto out_pdev;
51 }
52
53 rc = platform_device_add_resources(pdev, &res, 1);
54 if (rc < 0) {
55 pr_err("hmem resource allocation failure for %pr\n", &res);
56 goto out_resource;
57 }
58
59 rc = platform_device_add(pdev);
60 if (rc < 0) {
61 dev_err(&pdev->dev, "device add failed for %pr\n", &res);
62 goto out_resource;
63 }
64
65 return;
66
67out_resource:
68 put_device(&pdev->dev);
69out_pdev:
70 memregion_free(id);
71}
72
73static __init int hmem_register_one(struct resource *res, void *data)
74{
75 /*
76 * If the resource is not a top-level resource it was already
77 * assigned to a device by the HMAT parsing.
78 */
79 if (res->parent != &iomem_resource) {
80 pr_info("HMEM: skip %pr, already claimed\n", res);
81 return 0;
82 }
83
84 hmem_register_device(phys_to_target_node(res->start), res);
85
86 return 0;
87}
88
89static __init int hmem_init(void)
90{
91 walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
92 IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);
93 return 0;
94}
95
/*
 * As this is a fallback for address ranges unclaimed by the ACPI HMAT
 * parsing it must run at an initcall level greater than hmat_init();
 * late_initcall satisfies that ordering.
 */
late_initcall(hmem_init);