// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <cxlmem.h>
#include "core.h"

static DECLARE_RWSEM(cxl_memdev_rwsem);

/*
 * An entire PCI topology full of devices should be enough for any
 * config
 */
#define CXL_MEM_MAX_DEVS 65536

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);

static void cxl_memdev_release(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	ida_free(&cxl_memdev_ida, cxlmd->id);
	kfree(cxlmd);
}

static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
				kgid_t *gid)
{
	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t payload_max_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
}
static DEVICE_ATTR_RO(payload_max);

static ssize_t label_storage_size_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);

static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	unsigned long long len = resource_size(&cxlds->ram_res);

	return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_ram_size =
	__ATTR(size, 0444, ram_size_show, NULL);

static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	unsigned long long len = resource_size(&cxlds->pmem_res);

	return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =
	__ATTR(size, 0444, pmem_size_show, NULL);

static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;

	return sysfs_emit(buf, "%#llx\n", cxlds->serial);
}
static DEVICE_ATTR_RO(serial);

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static struct attribute *cxl_memdev_attributes[] = {
	&dev_attr_serial.attr,
	&dev_attr_firmware_version.attr,
	&dev_attr_payload_max.attr,
	&dev_attr_label_storage_size.attr,
	&dev_attr_numa_node.attr,
	NULL,
};

static struct attribute *cxl_memdev_pmem_attributes[] = {
	&dev_attr_pmem_size.attr,
	NULL,
};

static struct attribute *cxl_memdev_ram_attributes[] = {
	&dev_attr_ram_size.attr,
	NULL,
};

static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
		return 0;
	return a->mode;
}

static struct attribute_group cxl_memdev_attribute_group = {
	.attrs = cxl_memdev_attributes,
	.is_visible = cxl_memdev_visible,
};

static struct attribute_group cxl_memdev_ram_attribute_group = {
	.name = "ram",
	.attrs = cxl_memdev_ram_attributes,
};

static struct attribute_group cxl_memdev_pmem_attribute_group = {
	.name = "pmem",
	.attrs = cxl_memdev_pmem_attributes,
};

static const struct attribute_group *cxl_memdev_attribute_groups[] = {
	&cxl_memdev_attribute_group,
	&cxl_memdev_ram_attribute_group,
	&cxl_memdev_pmem_attribute_group,
	NULL,
};

static const struct device_type cxl_memdev_type = {
	.name = "cxl_memdev",
	.release = cxl_memdev_release,
	.devnode = cxl_memdev_devnode,
	.groups = cxl_memdev_attribute_groups,
};

bool is_cxl_memdev(struct device *dev)
{
	return dev->type == &cxl_memdev_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL);

/**
 * set_exclusive_cxl_commands() - atomically disable user cxl commands
 * @cxlds: The device state to operate on
 * @cmds: bitmap of commands to mark exclusive
 *
 * Grab the cxl_memdev_rwsem in write mode to flush in-flight
 * invocations of the ioctl path and then disable future execution of
 * commands with the command ids set in @cmds.
 */
void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
	down_write(&cxl_memdev_rwsem);
	bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
		  CXL_MEM_COMMAND_ID_MAX);
	up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);

/**
 * clear_exclusive_cxl_commands() - atomically enable user cxl commands
 * @cxlds: The device state to modify
 * @cmds: bitmap of commands to mark available for userspace
 */
void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
	down_write(&cxl_memdev_rwsem);
	bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
		      CXL_MEM_COMMAND_ID_MAX);
	up_write(&cxl_memdev_rwsem);
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
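
/*
 * Example (not part of this file): a minimal sketch of how a kernel-side
 * consumer might use the exclusive-command helpers above. It assumes a
 * driver that takes ownership of the label storage area and therefore
 * wants to block the userspace SET_LSA command while it is bound; the
 * example_driver_bind()/example_driver_unbind() names are hypothetical.
 */
#if 0	/* illustrative only */
static DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

static int example_driver_bind(struct cxl_dev_state *cxlds)
{
	/* Claim SET_LSA for the kernel while this driver owns the labels */
	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
	set_exclusive_cxl_commands(cxlds, exclusive_cmds);
	return 0;
}

static void example_driver_unbind(struct cxl_dev_state *cxlds)
{
	/* Hand SET_LSA back to the ioctl path */
	clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
}
#endif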

static void cxl_memdev_shutdown(struct device *dev)
{
	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

	down_write(&cxl_memdev_rwsem);
	cxlmd->cxlds = NULL;
	up_write(&cxl_memdev_rwsem);
}

static void cxl_memdev_unregister(void *_cxlmd)
{
	struct cxl_memdev *cxlmd = _cxlmd;
	struct device *dev = &cxlmd->dev;

	cxl_memdev_shutdown(dev);
	cdev_device_del(&cxlmd->cdev, dev);
	put_device(dev);
}

static void detach_memdev(struct work_struct *work)
{
	struct cxl_memdev *cxlmd;

	cxlmd = container_of(work, typeof(*cxlmd), detach_work);
	device_release_driver(&cxlmd->dev);
	put_device(&cxlmd->dev);
}

static struct lock_class_key cxl_memdev_key;

static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
					   const struct file_operations *fops)
{
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
	if (!cxlmd)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
	if (rc < 0)
		goto err;
	cxlmd->id = rc;

	dev = &cxlmd->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_memdev_key);
	dev->parent = cxlds->dev;
	dev->bus = &cxl_bus_type;
	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
	dev->type = &cxl_memdev_type;
	device_set_pm_not_required(dev);
	INIT_WORK(&cxlmd->detach_work, detach_memdev);

	cdev = &cxlmd->cdev;
	cdev_init(cdev, fops);
	return cxlmd;

err:
	kfree(cxlmd);
	return ERR_PTR(rc);
}

static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
			       unsigned long arg)
{
	switch (cmd) {
	case CXL_MEM_QUERY_COMMANDS:
		return cxl_query_cmd(cxlmd, (void __user *)arg);
	case CXL_MEM_SEND_COMMAND:
		return cxl_send_cmd(cxlmd, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct cxl_memdev *cxlmd = file->private_data;
	int rc = -ENXIO;

	down_read(&cxl_memdev_rwsem);
	if (cxlmd->cxlds)
		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
	up_read(&cxl_memdev_rwsem);

	return rc;
}

static int cxl_memdev_open(struct inode *inode, struct file *file)
{
	struct cxl_memdev *cxlmd =
		container_of(inode->i_cdev, typeof(*cxlmd), cdev);

	get_device(&cxlmd->dev);
	file->private_data = cxlmd;

	return 0;
}

static int cxl_memdev_release_file(struct inode *inode, struct file *file)
{
	struct cxl_memdev *cxlmd =
		container_of(inode->i_cdev, typeof(*cxlmd), cdev);

	put_device(&cxlmd->dev);

	return 0;
}

static const struct file_operations cxl_memdev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = cxl_memdev_ioctl,
	.open = cxl_memdev_open,
	.release = cxl_memdev_release_file,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek = noop_llseek,
};
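
/*
 * Example (not part of this file): a hedged userspace sketch of driving the
 * ioctl path above through the /dev/cxl/memN node created by
 * cxl_memdev_devnode(). It assumes the UAPI definitions from
 * <linux/cxl_mem.h> (struct cxl_mem_query_commands and the
 * CXL_MEM_QUERY_COMMANDS ioctl) and the two-pass convention of calling
 * first with n_commands == 0 to learn how many command records to allocate.
 */
#if 0	/* illustrative only, builds as a standalone userspace program */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cxl_mem.h>

int main(void)
{
	struct cxl_mem_query_commands *query;
	unsigned int i, n;
	int fd;

	fd = open("/dev/cxl/mem0", O_RDWR);
	if (fd < 0)
		return 1;

	/* First pass: n_commands == 0 asks only for the supported count */
	query = calloc(1, sizeof(*query));
	if (!query || ioctl(fd, CXL_MEM_QUERY_COMMANDS, query) < 0)
		return 1;
	n = query->n_commands;

	/* Second pass: fetch the per-command info array */
	query = realloc(query, sizeof(*query) + n * sizeof(query->commands[0]));
	if (!query)
		return 1;
	query->n_commands = n;
	if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, query) == 0)
		for (i = 0; i < n; i++)
			printf("command id %u\n", query->commands[i].id);

	free(query);
	close(fd);
	return 0;
}
#endif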

struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
{
	struct cxl_memdev *cxlmd;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
	if (IS_ERR(cxlmd))
		return cxlmd;

	dev = &cxlmd->dev;
	rc = dev_set_name(dev, "mem%d", cxlmd->id);
	if (rc)
		goto err;

	/*
	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
	 * needed as this is ordered with cdev_add() publishing the device.
	 */
	cxlmd->cxlds = cxlds;
	cxlds->cxlmd = cxlmd;

	cdev = &cxlmd->cdev;
	rc = cdev_device_add(cdev, dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
	if (rc)
		return ERR_PTR(rc);
	return cxlmd;

err:
	/*
	 * The cdev was briefly live, shutdown any ioctl operations that
	 * saw that state.
	 */
	cxl_memdev_shutdown(dev);
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
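
/*
 * Example (not part of this file): a minimal sketch of the expected caller
 * pattern for devm_cxl_add_memdev(). It assumes a PCI probe path that has
 * already populated a struct cxl_dev_state; example_cxl_setup() is a
 * hypothetical stand-in for that setup work. Teardown is automatic via the
 * devm action registered above, so no explicit unregister call is shown.
 */
#if 0	/* illustrative only */
static int example_cxl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_dev_state *cxlds;
	struct cxl_memdev *cxlmd;

	cxlds = example_cxl_setup(pdev);	/* hypothetical helper */
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);

	/* Publish /dev/cxl/memN and the sysfs attributes defined above */
	cxlmd = devm_cxl_add_memdev(cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	return 0;
}
#endif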

__init int cxl_memdev_init(void)
{
	dev_t devt;
	int rc;

	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
	if (rc)
		return rc;

	cxl_mem_major = MAJOR(devt);

	return 0;
}

void cxl_memdev_exit(void)
{
	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}
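
/*
 * Example (not part of this file): a hedged sketch of how the init/exit
 * helpers above would be wired into a module lifecycle. The
 * example_core_init()/example_core_exit() names are illustrative stand-ins
 * for whatever module init path owns this chrdev registration.
 */
#if 0	/* illustrative only */
static __init int example_core_init(void)
{
	return cxl_memdev_init();	/* reserve the "cxl" chrdev region */
}

static void example_core_exit(void)
{
	cxl_memdev_exit();		/* release the chrdev region */
}

module_init(example_core_init);
module_exit(example_core_exit);
#endif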