// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

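/*
 * Region 'probe' flow, summarizing the code below: activate the region,
 * publish a 'badblocks' attribute for persistent-memory regions, register
 * the namespaces discovered within the region, and create the btt/pfn/dax
 * "seed" devices that userspace can later configure.
 */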
static int nd_region_probe(struct device *dev)
{
	int err, rc;
	static unsigned long once;
	struct nd_region_data *ndrd;
	struct nd_region *nd_region = to_nd_region(dev);

	if (nd_region->num_lanes > num_online_cpus()
			&& nd_region->num_lanes < num_possible_cpus()
			&& !test_and_set_bit(0, &once)) {
		dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
				num_online_cpus(), nd_region->num_lanes,
				num_possible_cpus());
		dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
				nd_region->num_lanes);
	}

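	/*
	 * nd_region_activate() sets up the per-region driver data that is
	 * retrieved below via dev_get_drvdata().
	 */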
	rc = nd_region_activate(nd_region);
	if (rc)
		return rc;

	rc = nd_blk_region_init(nd_region);
	if (rc)
		return rc;

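	/*
	 * For persistent-memory regions, export media errors through the
	 * region's 'badblocks' sysfs attribute, populated for the region's
	 * physical address range.
	 */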
	if (is_memory(&nd_region->dev)) {
		struct resource ndr_res;

		if (devm_init_badblocks(dev, &nd_region->bb))
			return -ENODEV;
		nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd,
						       "badblocks");
		if (!nd_region->bb_state)
			dev_warn(&nd_region->dev,
					"'badblocks' notification disabled\n");
		ndr_res.start = nd_region->ndr_start;
		ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
		nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
	}

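	/*
	 * nd_region_register_namespaces() returns the number of namespaces
	 * registered asynchronously in 'rc' and the number that failed in
	 * 'err'; both counts are recorded in the driver data backing the
	 * <regionX>/namespaces attribute.
	 */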
	rc = nd_region_register_namespaces(nd_region, &err);
	if (rc < 0)
		return rc;

	ndrd = dev_get_drvdata(dev);
	ndrd->ns_active = rc;
	ndrd->ns_count = rc + err;

	if (rc && err && rc == err)
		return -ENODEV;

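	/* Pre-create "seed" devices for userspace to claim as btt/pfn/dax */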
	nd_region->btt_seed = nd_btt_create(nd_region);
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	nd_region->dax_seed = nd_dax_create(nd_region);
	if (err == 0)
		return 0;

	/*
	 * Given multiple namespaces per region, we do not want to
	 * disable all the successfully registered peer namespaces upon
	 * a single registration failure. If userspace is missing a
	 * namespace that it expects it can disable/re-enable the region
	 * to retry discovery after correcting the failure.
	 * <regionX>/namespaces returns the current
	 * "<async-registered>/<total>" namespace count.
	 */
	dev_err(dev, "failed to register %d namespace%s, continuing...\n",
			err, err == 1 ? "" : "s");
	return 0;
}

static int child_unregister(struct device *dev, void *data)
{
	nd_device_unregister(dev, ND_SYNC);
	return 0;
}

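/*
 * Tear down in the reverse order of probe: synchronously unregister all
 * child devices, then clear the seed pointers and driver data under the
 * bus lock so that attribute readers see a disabled region.
 */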
static int nd_region_remove(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	device_for_each_child(dev, NULL, child_unregister);

	/* flush attribute readers and disable */
	nvdimm_bus_lock(dev);
	nd_region->ns_seed = NULL;
	nd_region->btt_seed = NULL;
	nd_region->pfn_seed = NULL;
	nd_region->dax_seed = NULL;
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);

	/*
	 * Note, this assumes nd_device_lock() context to not race
	 * nd_region_notify()
	 */
	sysfs_put(nd_region->bb_state);
	nd_region->bb_state = NULL;

	return 0;
}

static int child_notify(struct device *dev, void *data)
{
	nd_device_notify(dev, *(enum nvdimm_event *) data);
	return 0;
}

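/*
 * On an NVDIMM_REVALIDATE_POISON event, refresh the region's badblocks
 * list for its physical address range, notify the 'badblocks' sysfs
 * dirent so pollers re-read it, and forward the event to all children.
 */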
static void nd_region_notify(struct device *dev, enum nvdimm_event event)
{
	if (event == NVDIMM_REVALIDATE_POISON) {
		struct nd_region *nd_region = to_nd_region(dev);
		struct resource res;

		if (is_memory(&nd_region->dev)) {
			res.start = nd_region->ndr_start;
			res.end = nd_region->ndr_start +
				nd_region->ndr_size - 1;
			nvdimm_badblocks_populate(nd_region,
					&nd_region->bb, &res);
			if (nd_region->bb_state)
				sysfs_notify_dirent(nd_region->bb_state);
		}
	}
	device_for_each_child(dev, &event, child_notify);
}

static struct nd_device_driver nd_region_driver = {
	.probe = nd_region_probe,
	.remove = nd_region_remove,
	.notify = nd_region_notify,
	.drv = {
		.name = "nd_region",
	},
	.type = ND_DRIVER_REGION_BLK | ND_DRIVER_REGION_PMEM,
};

int __init nd_region_init(void)
{
	return nd_driver_register(&nd_region_driver);
}

void nd_region_exit(void)
{
	driver_unregister(&nd_region_driver.drv);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_BLK);
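/*
 * A later revision of the same driver follows. Relative to the version
 * above it includes <linux/memregion.h>, describes the region with a
 * 'struct range' for badblocks population, drops nd_blk_region_init(),
 * makes the remove callback return void, and flushes CPU caches on
 * remove via cpu_cache_invalidate_memregion().
 */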
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/memregion.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static int nd_region_probe(struct device *dev)
{
	int err, rc;
	static unsigned long once;
	struct nd_region_data *ndrd;
	struct nd_region *nd_region = to_nd_region(dev);
	struct range range = {
		.start = nd_region->ndr_start,
		.end = nd_region->ndr_start + nd_region->ndr_size - 1,
	};

	if (nd_region->num_lanes > num_online_cpus()
			&& nd_region->num_lanes < num_possible_cpus()
			&& !test_and_set_bit(0, &once)) {
		dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
				num_online_cpus(), nd_region->num_lanes,
				num_possible_cpus());
		dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
				nd_region->num_lanes);
	}

	rc = nd_region_activate(nd_region);
	if (rc)
		return rc;

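	/*
	 * Register the 'badblocks' attribute unconditionally and populate
	 * it for the physical address range described by 'range'.
	 */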
	if (devm_init_badblocks(dev, &nd_region->bb))
		return -ENODEV;
	nd_region->bb_state =
		sysfs_get_dirent(nd_region->dev.kobj.sd, "badblocks");
	if (!nd_region->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");
	nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);

	rc = nd_region_register_namespaces(nd_region, &err);
	if (rc < 0)
		return rc;

	ndrd = dev_get_drvdata(dev);
	ndrd->ns_active = rc;
	ndrd->ns_count = rc + err;

	if (rc && err && rc == err)
		return -ENODEV;

	nd_region->btt_seed = nd_btt_create(nd_region);
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	nd_region->dax_seed = nd_dax_create(nd_region);
	if (err == 0)
		return 0;

	/*
	 * Given multiple namespaces per region, we do not want to
	 * disable all the successfully registered peer namespaces upon
	 * a single registration failure. If userspace is missing a
	 * namespace that it expects it can disable/re-enable the region
	 * to retry discovery after correcting the failure.
	 * <regionX>/namespaces returns the current
	 * "<async-registered>/<total>" namespace count.
	 */
	dev_err(dev, "failed to register %d namespace%s, continuing...\n",
			err, err == 1 ? "" : "s");
	return 0;
}

static int child_unregister(struct device *dev, void *data)
{
	nd_device_unregister(dev, ND_SYNC);
	return 0;
}

static void nd_region_remove(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	device_for_each_child(dev, NULL, child_unregister);

	/* flush attribute readers and disable */
	nvdimm_bus_lock(dev);
	nd_region->ns_seed = NULL;
	nd_region->btt_seed = NULL;
	nd_region->pfn_seed = NULL;
	nd_region->dax_seed = NULL;
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);

	/*
	 * Note, this assumes device_lock() context to not race
	 * nd_region_notify()
	 */
	sysfs_put(nd_region->bb_state);
	nd_region->bb_state = NULL;

	/*
	 * Try to flush caches here since a disabled region may be subject to
	 * secure erase while disabled, and previous dirty data should not be
	 * written back to a new instance of the region. This only matters on
	 * bare metal where security commands are available, so silent failure
	 * here is ok.
	 */
	if (cpu_cache_has_invalidate_memregion())
		cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
}

static int child_notify(struct device *dev, void *data)
{
	nd_device_notify(dev, *(enum nvdimm_event *) data);
	return 0;
}

static void nd_region_notify(struct device *dev, enum nvdimm_event event)
{
	if (event == NVDIMM_REVALIDATE_POISON) {
		struct nd_region *nd_region = to_nd_region(dev);

		if (is_memory(&nd_region->dev)) {
			struct range range = {
				.start = nd_region->ndr_start,
				.end = nd_region->ndr_start +
					nd_region->ndr_size - 1,
			};

			nvdimm_badblocks_populate(nd_region,
					&nd_region->bb, &range);
			if (nd_region->bb_state)
				sysfs_notify_dirent(nd_region->bb_state);
		}
	}
	device_for_each_child(dev, &event, child_notify);
}

static struct nd_device_driver nd_region_driver = {
	.probe = nd_region_probe,
	.remove = nd_region_remove,
	.notify = nd_region_notify,
	.drv = {
		.name = "nd_region",
	},
	.type = ND_DRIVER_REGION_BLK | ND_DRIVER_REGION_PMEM,
};

int __init nd_region_init(void)
{
	return nd_driver_register(&nd_region_driver);
}

void nd_region_exit(void)
{
	driver_unregister(&nd_region_driver.drv);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);