Linux Audio

Check our new training course

Loading...
v3.1
  1/*
  2 * linux/drivers/char/raw.c
  3 *
  4 * Front-end raw character devices.  These can be bound to any block
  5 * devices to provide genuine Unix raw character device semantics.
  6 *
  7 * We reserve minor number 0 for a control interface.  ioctl()s on this
  8 * device are used to bind the other minor numbers to block devices.
  9 */
 10
 11#include <linux/init.h>
 12#include <linux/fs.h>
 13#include <linux/major.h>
 14#include <linux/blkdev.h>
 
 15#include <linux/module.h>
 16#include <linux/raw.h>
 17#include <linux/capability.h>
 18#include <linux/uio.h>
 19#include <linux/cdev.h>
 20#include <linux/device.h>
 21#include <linux/mutex.h>
 22#include <linux/gfp.h>
 23#include <linux/compat.h>
 24#include <linux/vmalloc.h>
 25
 26#include <asm/uaccess.h>
 27
/* Per-minor state: the bound block device (NULL while unbound) and the
 * count of open file descriptors currently using that binding. */
struct raw_device_data {
	struct block_device *binding;
	int inuse;
};

static struct class *raw_class;
/* vmalloc'ed array of max_raw_minors entries, indexed by raw minor;
 * entry 0 is reserved for the control device and never bound */
static struct raw_device_data *raw_devices;
/* serializes bind/unbind against open/release on all raw minors */
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */

/* runtime limit on raw minors, settable as a module parameter */
static int max_raw_minors = MAX_RAW_MINORS;

module_param(max_raw_minors, int, 0);
MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
 42
 43/*
 44 * Open/close code for raw IO.
 45 *
 46 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 47 * point at the blockdev's address_space and set the file handle to use
 48 * O_DIRECT.
 49 *
 50 * Set the device's soft blocksize to the minimum possible.  This gives the
 51 * finest possible alignment and has no adverse impact on performance.
 52 */
 53static int raw_open(struct inode *inode, struct file *filp)
 54{
 55	const int minor = iminor(inode);
 56	struct block_device *bdev;
 57	int err;
 58
 59	if (minor == 0) {	/* It is the control device */
 60		filp->f_op = &raw_ctl_fops;
 61		return 0;
 62	}
 63
 64	mutex_lock(&raw_mutex);
 65
 66	/*
 67	 * All we need to do on open is check that the device is bound.
 68	 */
 69	bdev = raw_devices[minor].binding;
 70	err = -ENODEV;
 71	if (!bdev)
 72		goto out;
 73	igrab(bdev->bd_inode);
 74	err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
 75	if (err)
 76		goto out;
 77	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
 78	if (err)
 79		goto out1;
 80	filp->f_flags |= O_DIRECT;
 81	filp->f_mapping = bdev->bd_inode->i_mapping;
 82	if (++raw_devices[minor].inuse == 1)
 83		filp->f_path.dentry->d_inode->i_mapping =
 84			bdev->bd_inode->i_mapping;
 85	filp->private_data = bdev;
 86	mutex_unlock(&raw_mutex);
 87	return 0;
 88
 89out1:
 90	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
 91out:
 92	mutex_unlock(&raw_mutex);
 93	return err;
 94}
 95
 96/*
 97 * When the final fd which refers to this character-special node is closed, we
 98 * make its ->mapping point back at its own i_data.
 99 */
static int raw_release(struct inode *inode, struct file *filp)
{
	const int minor= iminor(inode);
	struct block_device *bdev;

	mutex_lock(&raw_mutex);
	bdev = raw_devices[minor].binding;
	if (--raw_devices[minor].inuse == 0) {
		/* Here  inode->i_mapping == bdev->bd_inode->i_mapping  */
		inode->i_mapping = &inode->i_data;
		/* last closer: detach from the blockdev's BDI as well */
		inode->i_mapping->backing_dev_info = &default_backing_dev_info;
	}
	mutex_unlock(&raw_mutex);

	/* drop the reference taken via blkdev_get() in raw_open() */
	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
	return 0;
}
117
118/*
119 * Forward ioctls to the underlying block device.
120 */
121static long
122raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
123{
124	struct block_device *bdev = filp->private_data;
125	return blkdev_ioctl(bdev, 0, command, arg);
126}
127
/*
 * Bind raw minor @number to the block device MKDEV(major, minor), or
 * unbind it when the requested dev_t is 0.  Invoked with user-supplied
 * values from the RAW_SETBIND ioctl; returns 0 or a negative errno.
 */
static int bind_set(int number, u64 major, u64 minor)
{
	dev_t dev = MKDEV(major, minor);
	struct raw_device_data *rawdev;
	int err = 0;

	if (number <= 0 || number >= max_raw_minors)
		return -EINVAL;

	/* reject major/minor values that do not survive dev_t packing */
	if (MAJOR(dev) != major || MINOR(dev) != minor)
		return -EINVAL;

	rawdev = &raw_devices[number];

	/*
	 * This is like making block devices, so demand the
	 * same capability
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * For now, we don't need to check that the underlying
	 * block device is present or not: we can do that when
	 * the raw device is opened.  Just check that the
	 * major/minor numbers make sense.
	 */

	if (MAJOR(dev) == 0 && dev != 0)
		return -EINVAL;

	mutex_lock(&raw_mutex);
	if (rawdev->inuse) {
		/* refuse to rebind while some fd has this raw minor open */
		mutex_unlock(&raw_mutex);
		return -EBUSY;
	}
	if (rawdev->binding) {
		/* release the old binding's bdev and module references */
		bdput(rawdev->binding);
		module_put(THIS_MODULE);
	}
	if (!dev) {
		/* unbind */
		rawdev->binding = NULL;
		device_destroy(raw_class, MKDEV(RAW_MAJOR, number));
	} else {
		rawdev->binding = bdget(dev);
		if (rawdev->binding == NULL) {
			err = -ENOMEM;
		} else {
			dev_t raw = MKDEV(RAW_MAJOR, number);
			/* each active binding pins this module */
			__module_get(THIS_MODULE);
			/* destroy any stale node before creating the new one */
			device_destroy(raw_class, raw);
			device_create(raw_class, NULL, raw, NULL,
				      "raw%d", number);
		}
	}
	mutex_unlock(&raw_mutex);
	return err;
}
187
188static int bind_get(int number, dev_t *dev)
189{
190	struct raw_device_data *rawdev;
191	struct block_device *bdev;
192
193	if (number <= 0 || number >= MAX_RAW_MINORS)
194		return -EINVAL;
195
196	rawdev = &raw_devices[number];
197
198	mutex_lock(&raw_mutex);
199	bdev = rawdev->binding;
200	*dev = bdev ? bdev->bd_dev : 0;
201	mutex_unlock(&raw_mutex);
202	return 0;
203}
204
/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static long raw_ctl_ioctl(struct file *filp, unsigned int command,
			  unsigned long arg)
{
	struct raw_config_request rq;
	dev_t dev;
	int err;

	switch (command) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;

		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);

	case RAW_GETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;

		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;

		/* report the currently-bound device back to userspace */
		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);

		if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
			return -EFAULT;

		return 0;
	}

	return -EINVAL;
}
242
#ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct raw_config_request */
struct raw32_config_request {
	compat_int_t	raw_minor;
	compat_u64	block_major;
	compat_u64	block_minor;
};

/*
 * Compat (32-bit userspace on a 64-bit kernel) counterpart of
 * raw_ctl_ioctl(): identical RAW_SETBIND/RAW_GETBIND handling, but
 * using the 32-bit request layout above.
 */
static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct raw32_config_request __user *user_req = compat_ptr(arg);
	struct raw32_config_request rq;
	dev_t dev;
	int err = 0;

	switch (cmd) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;

		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);

	case RAW_GETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;

		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;

		/* report the currently-bound device back to userspace */
		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);

		if (copy_to_user(user_req, &rq, sizeof(rq)))
			return -EFAULT;

		return 0;
	}

	return -EINVAL;
}
#endif
285
/* Operations for bound raw devices (minors >= 1).  Reads and writes go
 * through the generic blockdev paths; raw_open() has already pointed
 * f_mapping at the blockdev's mapping and forced O_DIRECT. */
static const struct file_operations raw_fops = {
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= blkdev_aio_write,
	.fsync		= blkdev_fsync,
	.open		= raw_open,
	.release	= raw_release,
	.unlocked_ioctl = raw_ioctl,
	.llseek		= default_llseek,
	.owner		= THIS_MODULE,
};

/* Operations for the control device (minor 0); raw_open() swaps a
 * file's f_op to this table when minor 0 is opened. */
static const struct file_operations raw_ctl_fops = {
	.unlocked_ioctl = raw_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= raw_ctl_compat_ioctl,
#endif
	.open		= raw_open,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};
308
static struct cdev raw_cdev;

/* Place raw device nodes under a raw/ subdirectory of /dev
 * (e.g. /dev/raw/raw1). */
static char *raw_devnode(struct device *dev, mode_t *mode)
{
	return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
}
315
316static int __init raw_init(void)
317{
318	dev_t dev = MKDEV(RAW_MAJOR, 0);
319	int ret;
320
321	if (max_raw_minors < 1 || max_raw_minors > 65536) {
322		printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
323			" between 1 and 65536), using %d\n", MAX_RAW_MINORS);
324		max_raw_minors = MAX_RAW_MINORS;
325	}
326
327	raw_devices = vmalloc(sizeof(struct raw_device_data) * max_raw_minors);
328	if (!raw_devices) {
329		printk(KERN_ERR "Not enough memory for raw device structures\n");
330		ret = -ENOMEM;
331		goto error;
332	}
333	memset(raw_devices, 0, sizeof(struct raw_device_data) * max_raw_minors);
334
335	ret = register_chrdev_region(dev, max_raw_minors, "raw");
336	if (ret)
337		goto error;
338
339	cdev_init(&raw_cdev, &raw_fops);
340	ret = cdev_add(&raw_cdev, dev, max_raw_minors);
341	if (ret) {
342		goto error_region;
343	}
344
345	raw_class = class_create(THIS_MODULE, "raw");
346	if (IS_ERR(raw_class)) {
347		printk(KERN_ERR "Error creating raw class.\n");
348		cdev_del(&raw_cdev);
349		ret = PTR_ERR(raw_class);
350		goto error_region;
351	}
352	raw_class->devnode = raw_devnode;
353	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");
354
355	return 0;
356
357error_region:
358	unregister_chrdev_region(dev, max_raw_minors);
359error:
360	vfree(raw_devices);
361	return ret;
362}
363
364static void __exit raw_exit(void)
365{
366	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
367	class_destroy(raw_class);
368	cdev_del(&raw_cdev);
369	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
370}
371
372module_init(raw_init);
373module_exit(raw_exit);
374MODULE_LICENSE("GPL");
v4.17
  1/*
  2 * linux/drivers/char/raw.c
  3 *
  4 * Front-end raw character devices.  These can be bound to any block
  5 * devices to provide genuine Unix raw character device semantics.
  6 *
  7 * We reserve minor number 0 for a control interface.  ioctl()s on this
  8 * device are used to bind the other minor numbers to block devices.
  9 */
 10
 11#include <linux/init.h>
 12#include <linux/fs.h>
 13#include <linux/major.h>
 14#include <linux/blkdev.h>
 15#include <linux/backing-dev.h>
 16#include <linux/module.h>
 17#include <linux/raw.h>
 18#include <linux/capability.h>
 19#include <linux/uio.h>
 20#include <linux/cdev.h>
 21#include <linux/device.h>
 22#include <linux/mutex.h>
 23#include <linux/gfp.h>
 24#include <linux/compat.h>
 25#include <linux/vmalloc.h>
 26
 27#include <linux/uaccess.h>
 28
/* Per-minor state: the bound block device (NULL while unbound) and the
 * count of open file descriptors currently using that binding. */
struct raw_device_data {
	struct block_device *binding;
	int inuse;
};

static struct class *raw_class;
/* vzalloc'ed array of max_raw_minors entries, indexed by raw minor;
 * entry 0 is reserved for the control device and never bound */
static struct raw_device_data *raw_devices;
/* serializes bind/unbind against open/release on all raw minors */
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */

/* runtime limit on raw minors, settable as a module parameter */
static int max_raw_minors = MAX_RAW_MINORS;

module_param(max_raw_minors, int, 0);
MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
 43
 44/*
 45 * Open/close code for raw IO.
 46 *
 47 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 48 * point at the blockdev's address_space and set the file handle to use
 49 * O_DIRECT.
 50 *
 51 * Set the device's soft blocksize to the minimum possible.  This gives the
 52 * finest possible alignment and has no adverse impact on performance.
 53 */
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	mutex_lock(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;
	/* take a bdev reference for blkdev_get() to consume; it drops
	 * the reference itself on failure, hence no bdput at "out" */
	bdgrab(bdev);
	err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
	if (err)
		goto out;
	/* finest possible soft blocksize: best alignment, no perf cost */
	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
	if (err)
		goto out1;
	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	if (++raw_devices[minor].inuse == 1)
		/* first opener: alias the chardev inode's mapping too */
		file_inode(filp)->i_mapping =
			bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	mutex_unlock(&raw_mutex);
	return 0;

out1:
	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
out:
	mutex_unlock(&raw_mutex);
	return err;
}
 96
 97/*
 98 * When the final fd which refers to this character-special node is closed, we
 99 * make its ->mapping point back at its own i_data.
100 */
static int raw_release(struct inode *inode, struct file *filp)
{
	const int minor= iminor(inode);
	struct block_device *bdev;

	mutex_lock(&raw_mutex);
	bdev = raw_devices[minor].binding;
	if (--raw_devices[minor].inuse == 0)
		/* Here  inode->i_mapping == bdev->bd_inode->i_mapping  */
		inode->i_mapping = &inode->i_data;
	mutex_unlock(&raw_mutex);

	/* drop the reference taken via blkdev_get() in raw_open() */
	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
	return 0;
}
116
117/*
118 * Forward ioctls to the underlying block device.
119 */
120static long
121raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
122{
123	struct block_device *bdev = filp->private_data;
124	return blkdev_ioctl(bdev, 0, command, arg);
125}
126
/*
 * Bind raw minor @number to the block device MKDEV(major, minor), or
 * unbind it when the requested dev_t is 0.  Invoked with user-supplied
 * values from the RAW_SETBIND ioctl; returns 0 or a negative errno.
 */
static int bind_set(int number, u64 major, u64 minor)
{
	dev_t dev = MKDEV(major, minor);
	struct raw_device_data *rawdev;
	int err = 0;

	if (number <= 0 || number >= max_raw_minors)
		return -EINVAL;

	/* reject major/minor values that do not survive dev_t packing */
	if (MAJOR(dev) != major || MINOR(dev) != minor)
		return -EINVAL;

	rawdev = &raw_devices[number];

	/*
	 * This is like making block devices, so demand the
	 * same capability
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * For now, we don't need to check that the underlying
	 * block device is present or not: we can do that when
	 * the raw device is opened.  Just check that the
	 * major/minor numbers make sense.
	 */

	if (MAJOR(dev) == 0 && dev != 0)
		return -EINVAL;

	mutex_lock(&raw_mutex);
	if (rawdev->inuse) {
		/* refuse to rebind while some fd has this raw minor open */
		mutex_unlock(&raw_mutex);
		return -EBUSY;
	}
	if (rawdev->binding) {
		/* release the old binding's bdev and module references */
		bdput(rawdev->binding);
		module_put(THIS_MODULE);
	}
	if (!dev) {
		/* unbind */
		rawdev->binding = NULL;
		device_destroy(raw_class, MKDEV(RAW_MAJOR, number));
	} else {
		rawdev->binding = bdget(dev);
		if (rawdev->binding == NULL) {
			err = -ENOMEM;
		} else {
			dev_t raw = MKDEV(RAW_MAJOR, number);
			/* each active binding pins this module */
			__module_get(THIS_MODULE);
			/* destroy any stale node before creating the new one */
			device_destroy(raw_class, raw);
			device_create(raw_class, NULL, raw, NULL,
				      "raw%d", number);
		}
	}
	mutex_unlock(&raw_mutex);
	return err;
}
186
187static int bind_get(int number, dev_t *dev)
188{
189	struct raw_device_data *rawdev;
190	struct block_device *bdev;
191
192	if (number <= 0 || number >= max_raw_minors)
193		return -EINVAL;
194
195	rawdev = &raw_devices[number];
196
197	mutex_lock(&raw_mutex);
198	bdev = rawdev->binding;
199	*dev = bdev ? bdev->bd_dev : 0;
200	mutex_unlock(&raw_mutex);
201	return 0;
202}
203
/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static long raw_ctl_ioctl(struct file *filp, unsigned int command,
			  unsigned long arg)
{
	struct raw_config_request rq;
	dev_t dev;
	int err;

	switch (command) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;

		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);

	case RAW_GETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;

		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;

		/* report the currently-bound device back to userspace */
		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);

		if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
			return -EFAULT;

		return 0;
	}

	return -EINVAL;
}
241
#ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct raw_config_request */
struct raw32_config_request {
	compat_int_t	raw_minor;
	compat_u64	block_major;
	compat_u64	block_minor;
};

/*
 * Compat (32-bit userspace on a 64-bit kernel) counterpart of
 * raw_ctl_ioctl(): identical RAW_SETBIND/RAW_GETBIND handling, but
 * using the 32-bit request layout above.
 */
static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct raw32_config_request __user *user_req = compat_ptr(arg);
	struct raw32_config_request rq;
	dev_t dev;
	int err = 0;

	switch (cmd) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;

		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);

	case RAW_GETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;

		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;

		/* report the currently-bound device back to userspace */
		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);

		if (copy_to_user(user_req, &rq, sizeof(rq)))
			return -EFAULT;

		return 0;
	}

	return -EINVAL;
}
#endif
284
/* Operations for bound raw devices (minors >= 1).  Reads and writes go
 * through the blockdev iter paths; raw_open() has already pointed
 * f_mapping at the blockdev's mapping and forced O_DIRECT. */
static const struct file_operations raw_fops = {
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.fsync		= blkdev_fsync,
	.open		= raw_open,
	.release	= raw_release,
	.unlocked_ioctl = raw_ioctl,
	.llseek		= default_llseek,
	.owner		= THIS_MODULE,
};

/* Operations for the control device (minor 0); raw_open() swaps a
 * file's f_op to this table when minor 0 is opened. */
static const struct file_operations raw_ctl_fops = {
	.unlocked_ioctl = raw_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= raw_ctl_compat_ioctl,
#endif
	.open		= raw_open,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};
305
static struct cdev raw_cdev;

/* Place raw device nodes under a raw/ subdirectory of /dev
 * (e.g. /dev/raw/raw1). */
static char *raw_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
}
312
/*
 * Module init: validate the max_raw_minors parameter, allocate the
 * per-minor state array, register the chardev region and cdev, and
 * create the raw class plus the /dev/rawctl control node.
 */
static int __init raw_init(void)
{
	dev_t dev = MKDEV(RAW_MAJOR, 0);
	int ret;

	/* clamp a bogus module parameter back to the built-in default */
	if (max_raw_minors < 1 || max_raw_minors > 65536) {
		printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
			" between 1 and 65536), using %d\n", MAX_RAW_MINORS);
		max_raw_minors = MAX_RAW_MINORS;
	}

	/* zeroed per-minor state: all minors start unbound */
	raw_devices = vzalloc(sizeof(struct raw_device_data) * max_raw_minors);
	if (!raw_devices) {
		printk(KERN_ERR "Not enough memory for raw device structures\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = register_chrdev_region(dev, max_raw_minors, "raw");
	if (ret)
		goto error;

	cdev_init(&raw_cdev, &raw_fops);
	ret = cdev_add(&raw_cdev, dev, max_raw_minors);
	if (ret)
		goto error_region;
	raw_class = class_create(THIS_MODULE, "raw");
	if (IS_ERR(raw_class)) {
		printk(KERN_ERR "Error creating raw class.\n");
		cdev_del(&raw_cdev);
		ret = PTR_ERR(raw_class);
		goto error_region;
	}
	raw_class->devnode = raw_devnode;
	/* create the /dev/rawctl control node (minor 0) */
	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");

	return 0;

error_region:
	unregister_chrdev_region(dev, max_raw_minors);
error:
	/* vfree(NULL) is a no-op, so this is safe on the vzalloc path */
	vfree(raw_devices);
	return ret;
}
357
358static void __exit raw_exit(void)
359{
360	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
361	class_destroy(raw_class);
362	cdev_del(&raw_cdev);
363	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
364}
365
366module_init(raw_init);
367module_exit(raw_exit);
368MODULE_LICENSE("GPL");