Linux Audio

Check our new training course

Loading...
Note: File does not exist in v5.9.
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright(c) 2020 Intel Corporation. */
  3#include <linux/device.h>
  4#include <linux/slab.h>
  5#include <linux/idr.h>
  6#include <cxlmem.h>
  7#include <cxl.h>
  8#include "core.h"
  9
 10/**
 11 * DOC: cxl pmem
 12 *
 13 * The core CXL PMEM infrastructure supports persistent memory
 14 * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
 15 * 'bridge' device is added at the root of a CXL device topology if
 16 * platform firmware advertises at least one persistent memory capable
 17 * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
 18 * device. Then for each cxl_memdev in the CXL device topology a bridge
 19 * device is added to host a LIBNVDIMM dimm object. When these bridges
 20 * are registered native LIBNVDIMM uapis are translated to CXL
 21 * operations, for example, namespace label access commands.
 22 */
 23
/* Allocator for the unique id embedded in each "nvdimm-bridge%d" device name */
static DEFINE_IDA(cxl_nvdimm_bridge_ida);
 25
/*
 * Device-core release callback for a bridge: runs on the final put_device(),
 * returning the id to the ida and freeing the cxl_nvdimm_bridge_alloc()
 * allocation.
 */
static void cxl_nvdimm_bridge_release(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id);
	kfree(cxl_nvb);
}
 33
/* Sysfs groups for bridge devices; cxl_base_attribute_group comes from core.h */
static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};
 38
/*
 * Device type for bridges; also serves as the identity check used by
 * is_cxl_nvdimm_bridge() / to_cxl_nvdimm_bridge().
 */
const struct device_type cxl_nvdimm_bridge_type = {
	.name = "cxl_nvdimm_bridge",
	.release = cxl_nvdimm_bridge_release,
	.groups = cxl_nvdimm_bridge_attribute_groups,
};
 44
 45struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
 46{
 47	if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
 48			  "not a cxl_nvdimm_bridge device\n"))
 49		return NULL;
 50	return container_of(dev, struct cxl_nvdimm_bridge, dev);
 51}
 52EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);
 53
 54bool is_cxl_nvdimm_bridge(struct device *dev)
 55{
 56	return dev->type == &cxl_nvdimm_bridge_type;
 57}
 58EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);
 59
 60static int match_nvdimm_bridge(struct device *dev, void *data)
 61{
 62	return is_cxl_nvdimm_bridge(dev);
 63}
 64
/*
 * Find the nvdimm-bridge hosted at the root of @cxlmd's CXL topology.
 *
 * Return: the bridge, or NULL if no root port or no bridge child exists.
 * NOTE(review): device_find_child() takes a reference on the match, so the
 * returned bridge carries an elevated refcount the caller must drop with
 * put_device() — confirm against callers.
 */
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
{
	/*
	 * __free(put_cxl_root) drops the root-port reference taken by
	 * find_cxl_root() automatically when cxl_root leaves scope.
	 */
	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(cxlmd->endpoint);
	struct device *dev;

	if (!cxl_root)
		return NULL;

	dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge);

	if (!dev)
		return NULL;

	return to_cxl_nvdimm_bridge(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
 82
/* Distinct lockdep class for the device mutex of nvdimm-bridge devices */
static struct lock_class_key cxl_nvdimm_bridge_key;
 84
 85static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port)
 86{
 87	struct cxl_nvdimm_bridge *cxl_nvb;
 88	struct device *dev;
 89	int rc;
 90
 91	cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
 92	if (!cxl_nvb)
 93		return ERR_PTR(-ENOMEM);
 94
 95	rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL);
 96	if (rc < 0)
 97		goto err;
 98	cxl_nvb->id = rc;
 99
100	dev = &cxl_nvb->dev;
101	cxl_nvb->port = port;
102	device_initialize(dev);
103	lockdep_set_class(&dev->mutex, &cxl_nvdimm_bridge_key);
104	device_set_pm_not_required(dev);
105	dev->parent = &port->dev;
106	dev->bus = &cxl_bus_type;
107	dev->type = &cxl_nvdimm_bridge_type;
108
109	return cxl_nvb;
110
111err:
112	kfree(cxl_nvb);
113	return ERR_PTR(rc);
114}
115
116static void unregister_nvb(void *_cxl_nvb)
117{
118	struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
119
120	device_unregister(&cxl_nvb->dev);
121}
122
/**
 * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
 * @host: platform firmware root device
 * @port: CXL port at the root of a CXL topology
 *
 * Return: bridge device that can host cxl_nvdimm objects
 */
struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
						     struct cxl_port *port)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	/* No PMEM driver built in: nothing to bridge to */
	if (!IS_ENABLED(CONFIG_CXL_PMEM))
		return ERR_PTR(-ENXIO);

	cxl_nvb = cxl_nvdimm_bridge_alloc(port);
	if (IS_ERR(cxl_nvb))
		return cxl_nvb;

	dev = &cxl_nvb->dev;
	rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	/*
	 * On failure devm_add_action_or_reset() runs unregister_nvb()
	 * itself, so no goto err (put_device after device_add would be
	 * the wrong teardown).
	 */
	rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
	if (rc)
		return ERR_PTR(rc);

	return cxl_nvb;

err:
	/* final put: frees cxl_nvb via cxl_nvdimm_bridge_release() */
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);
164
/* Final put_device() callback: free the cxl_nvdimm_alloc() allocation */
static void cxl_nvdimm_release(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);

	kfree(cxl_nvd);
}
171
/* Sysfs groups for cxl_nvdimm devices; shares the common CXL base group */
static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};
176
/* Device type identifying cxl_nvdimm devices (see is_cxl_nvdimm()) */
const struct device_type cxl_nvdimm_type = {
	.name = "cxl_nvdimm",
	.release = cxl_nvdimm_release,
	.groups = cxl_nvdimm_attribute_groups,
};
182
183bool is_cxl_nvdimm(struct device *dev)
184{
185	return dev->type == &cxl_nvdimm_type;
186}
187EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);
188
189struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
190{
191	if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
192			  "not a cxl_nvdimm device\n"))
193		return NULL;
194	return container_of(dev, struct cxl_nvdimm, dev);
195}
196EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
197
/* Distinct lockdep class for the device mutex of cxl_nvdimm devices */
static struct lock_class_key cxl_nvdimm_key;
199
/*
 * Allocate and initialize (but do not add) a cxl_nvdimm for @cxlmd, wiring
 * up the cxlmd <-> cxl_nvd back-pointers. Cleanup after device_initialize()
 * is via put_device() -> cxl_nvdimm_release().
 *
 * NOTE(review): @cxl_nvb is currently unused in this body; presumably kept
 * for signature symmetry with the caller — confirm before removing.
 */
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_nvdimm_bridge *cxl_nvb,
					   struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm *cxl_nvd;
	struct device *dev;

	cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
	if (!cxl_nvd)
		return ERR_PTR(-ENOMEM);

	dev = &cxl_nvd->dev;
	cxl_nvd->cxlmd = cxlmd;
	cxlmd->cxl_nvd = cxl_nvd;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_nvdimm_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlmd->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_nvdimm_type;
	/*
	 * A "%llx" string is 17-bytes vs dimm_id that is max
	 * NVDIMM_KEY_DESC_LEN
	 */
	BUILD_BUG_ON(sizeof(cxl_nvd->dev_id) < 17 ||
		     sizeof(cxl_nvd->dev_id) > NVDIMM_KEY_DESC_LEN);
	/* dev_id is bounded by the BUILD_BUG_ON above, so sprintf is safe */
	sprintf(cxl_nvd->dev_id, "%llx", cxlmd->cxlds->serial);

	return cxl_nvd;
}
229
/*
 * devm action for devm_cxl_add_nvdimm(): sever the cxlmd <-> cxl_nvd
 * back-pointers before unregistering, then drop the bridge reference
 * that devm_cxl_add_nvdimm() left held on @cxlmd's behalf.
 */
static void cxlmd_release_nvdimm(void *_cxlmd)
{
	struct cxl_memdev *cxlmd = _cxlmd;
	struct cxl_nvdimm *cxl_nvd = cxlmd->cxl_nvd;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlmd->cxl_nvb;

	/* clear cross-references first so nothing observes a dying device */
	cxl_nvd->cxlmd = NULL;
	cxlmd->cxl_nvd = NULL;
	cxlmd->cxl_nvb = NULL;
	device_unregister(&cxl_nvd->dev);
	put_device(&cxl_nvb->dev);
}
242
/**
 * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
 * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
 *
 * Return: 0 on success negative error code on failure.
 */
int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct cxl_nvdimm *cxl_nvd;
	struct device *dev;
	int rc;

	/*
	 * cxl_find_nvdimm_bridge() returns the bridge with an elevated
	 * refcount; on success that reference is held until
	 * cxlmd_release_nvdimm() runs.
	 */
	cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
	if (!cxl_nvb)
		return -ENODEV;

	cxl_nvd = cxl_nvdimm_alloc(cxl_nvb, cxlmd);
	if (IS_ERR(cxl_nvd)) {
		rc = PTR_ERR(cxl_nvd);
		goto err_alloc;
	}
	cxlmd->cxl_nvb = cxl_nvb;

	dev = &cxl_nvd->dev;
	rc = dev_set_name(dev, "pmem%d", cxlmd->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlmd->dev, "register %s\n", dev_name(dev));

	/* @cxlmd carries a reference on @cxl_nvb until cxlmd_release_nvdimm */
	return devm_add_action_or_reset(&cxlmd->dev, cxlmd_release_nvdimm, cxlmd);

err:
	/* final put frees cxl_nvd via cxl_nvdimm_release() */
	put_device(dev);
err_alloc:
	/* undo the back-pointer wiring done by cxl_nvdimm_alloc() */
	cxlmd->cxl_nvb = NULL;
	cxlmd->cxl_nvd = NULL;
	put_device(&cxl_nvb->dev);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);