/*
 * USB Skeleton driver - 2.2
 *
 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
 * but has been rewritten to be easier to read and use.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>


/* Define these values to match your devices */
#define USB_SKEL_VENDOR_ID	0xfff0
#define USB_SKEL_PRODUCT_ID	0xfff0

/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
	{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
	{ }					/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);
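
/*
 * The table above matches one fixed vendor/product pair. As a sketch of an
 * alternative (not part of this driver as shipped), a driver that binds by
 * interface class can list USB_INTERFACE_INFO(class, subclass, protocol)
 * entries in the same table; the empty terminating entry is still required.
 */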


/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE	192

/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER		(PAGE_SIZE - 512)
/* MAX_TRANSFER is chosen so that the VM is not stressed by
   allocations > PAGE_SIZE and the number of packets in a page
   is an integer; 512 is the largest possible packet on EHCI */
#define WRITES_IN_FLIGHT	8
/* arbitrarily chosen */

/* Structure to hold all of our device specific stuff */
struct usb_skel {
	struct usb_device	*udev;			/* the usb device for this device */
	struct usb_interface	*interface;		/* the interface for this device */
	struct semaphore	limit_sem;		/* limiting the number of writes in progress */
	struct usb_anchor	submitted;		/* in case we need to retract our submissions */
	struct urb		*bulk_in_urb;		/* the urb to read data with */
	unsigned char		*bulk_in_buffer;	/* the buffer to receive data */
	size_t			bulk_in_size;		/* the size of the receive buffer */
	size_t			bulk_in_filled;		/* number of bytes in the buffer */
	size_t			bulk_in_copied;		/* already copied to user space */
	__u8			bulk_in_endpointAddr;	/* the address of the bulk in endpoint */
	__u8			bulk_out_endpointAddr;	/* the address of the bulk out endpoint */
	int			errors;			/* the last request tanked */
	bool			ongoing_read;		/* a read is going on */
	spinlock_t		err_lock;		/* lock for errors */
	struct kref		kref;
	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
	wait_queue_head_t	bulk_in_wait;		/* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)
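
/*
 * Locking and lifetime overview: io_mutex keeps read(), write() submission,
 * flush() and release() from racing with disconnect(), and is held from
 * pre_reset() to post_reset(); err_lock protects ->errors and ->ongoing_read,
 * which are also written from URB completion context; limit_sem allows at
 * most WRITES_IN_FLIGHT outstanding write URBs; kref holds one reference per
 * open file plus one taken in probe(), and skel_delete() runs when the last
 * reference is dropped.
 */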

static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);

static void skel_delete(struct kref *kref)
{
	struct usb_skel *dev = to_skel_dev(kref);

	usb_free_urb(dev->bulk_in_urb);
	usb_put_dev(dev->udev);
	kfree(dev->bulk_in_buffer);
	kfree(dev);
}

static int skel_open(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;
	struct usb_interface *interface;
	int subminor;
	int retval = 0;

	subminor = iminor(inode);

	interface = usb_find_interface(&skel_driver, subminor);
	if (!interface) {
		pr_err("%s - error, can't find device for minor %d\n",
			__func__, subminor);
		retval = -ENODEV;
		goto exit;
	}

	dev = usb_get_intfdata(interface);
	if (!dev) {
		retval = -ENODEV;
		goto exit;
	}

	retval = usb_autopm_get_interface(interface);
	if (retval)
		goto exit;

	/* increment our usage count for the device */
	kref_get(&dev->kref);

	/* save our object in the file's private structure */
	file->private_data = dev;

exit:
	return retval;
}
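
/*
 * User-space usage sketch (illustrative only; the actual node name depends
 * on udev rules and minor assignment, typically /dev/skel0 for the first
 * device):
 *
 *	int fd = open("/dev/skel0", O_RDWR);
 *	if (fd < 0)
 *		err(1, "open");
 *	if (write(fd, "hello", 5) < 0)
 *		err(1, "write");
 *	close(fd);
 */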

static int skel_release(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* allow the device to be autosuspended */
	mutex_lock(&dev->io_mutex);
	if (dev->interface)
		usb_autopm_put_interface(dev->interface);
	mutex_unlock(&dev->io_mutex);

	/* decrement the count on our device */
	kref_put(&dev->kref, skel_delete);
	return 0;
}

static int skel_flush(struct file *file, fl_owner_t id)
{
	struct usb_skel *dev;
	int res;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* wait for io to stop */
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	/* read out errors, leave subsequent opens a clean slate */
	spin_lock_irq(&dev->err_lock);
	res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0;
	dev->errors = 0;
	spin_unlock_irq(&dev->err_lock);

	mutex_unlock(&dev->io_mutex);

	return res;
}

static void skel_read_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	spin_lock(&dev->err_lock);
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero read bulk status received: %d\n",
				__func__, urb->status);

		dev->errors = urb->status;
	} else {
		dev->bulk_in_filled = urb->actual_length;
	}
	dev->ongoing_read = 0;
	spin_unlock(&dev->err_lock);

	wake_up_interruptible(&dev->bulk_in_wait);
}
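
/*
 * Note on context: the completion handler above runs in non-sleeping
 * (interrupt) context, so it only records the URB status, clears
 * ongoing_read and wakes the waiting reader; the copy to user space happens
 * later in skel_read(). Process-context users take err_lock with
 * spin_lock_irq() so this handler cannot interrupt them on the same CPU
 * while they hold the lock.
 */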

static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
	int rv;

	/* prepare a read */
	usb_fill_bulk_urb(dev->bulk_in_urb,
			dev->udev,
			usb_rcvbulkpipe(dev->udev,
				dev->bulk_in_endpointAddr),
			dev->bulk_in_buffer,
			min(dev->bulk_in_size, count),
			skel_read_bulk_callback,
			dev);
	/* tell everybody to leave the URB alone */
	spin_lock_irq(&dev->err_lock);
	dev->ongoing_read = 1;
	spin_unlock_irq(&dev->err_lock);

	/* submit bulk in urb, which means no data to deliver */
	dev->bulk_in_filled = 0;
	dev->bulk_in_copied = 0;

	/* do it */
	rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
	if (rv < 0) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting read urb, error %d\n",
			__func__, rv);
		rv = (rv == -ENOMEM) ? rv : -EIO;
		spin_lock_irq(&dev->err_lock);
		dev->ongoing_read = 0;
		spin_unlock_irq(&dev->err_lock);
	}

	return rv;
}
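
/*
 * skel_do_read_io() is only called from skel_read() in process context with
 * io_mutex held, which is why GFP_KERNEL (a potentially sleeping allocation)
 * is acceptable for usb_submit_urb() here.
 */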

static ssize_t skel_read(struct file *file, char __user *buffer, size_t count,
			 loff_t *ppos)
{
	struct usb_skel *dev;
	int rv;
	bool ongoing_io;

	dev = file->private_data;

	/* if we cannot read at all, return EOF */
	if (!dev->bulk_in_urb || !count)
		return 0;

	/* no concurrent readers */
	rv = mutex_lock_interruptible(&dev->io_mutex);
	if (rv < 0)
		return rv;

	if (!dev->interface) {		/* disconnect() was called */
		rv = -ENODEV;
		goto exit;
	}

	/* if IO is under way, we must not touch things */
retry:
	spin_lock_irq(&dev->err_lock);
	ongoing_io = dev->ongoing_read;
	spin_unlock_irq(&dev->err_lock);

	if (ongoing_io) {
		/* nonblocking IO shall not wait */
		if (file->f_flags & O_NONBLOCK) {
			rv = -EAGAIN;
			goto exit;
		}
		/*
		 * IO may take forever
		 * hence wait in an interruptible state
		 */
		rv = wait_event_interruptible(dev->bulk_in_wait, (!dev->ongoing_read));
		if (rv < 0)
			goto exit;
	}

	/* errors must be reported */
	rv = dev->errors;
	if (rv < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		rv = (rv == -EPIPE) ? rv : -EIO;
		/* report it */
		goto exit;
	}

	/*
	 * if the buffer is filled we may satisfy the read
	 * else we need to start IO
	 */

	if (dev->bulk_in_filled) {
		/* we had read data */
		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
		size_t chunk = min(available, count);

		if (!available) {
			/*
			 * all data has been used
			 * actual IO needs to be done
			 */
			rv = skel_do_read_io(dev, count);
			if (rv < 0)
				goto exit;
			else
				goto retry;
		}
		/*
		 * data is available
		 * chunk tells us how much shall be copied
		 */

		if (copy_to_user(buffer,
				 dev->bulk_in_buffer + dev->bulk_in_copied,
				 chunk))
			rv = -EFAULT;
		else
			rv = chunk;

		dev->bulk_in_copied += chunk;

		/*
		 * if we are asked for more than we have,
		 * we start IO but don't wait
		 */
		if (available < count)
			skel_do_read_io(dev, count - chunk);
	} else {
		/* no data in the buffer */
		rv = skel_do_read_io(dev, count);
		if (rv < 0)
			goto exit;
		else
			goto retry;
	}
exit:
	mutex_unlock(&dev->io_mutex);
	return rv;
}
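
/*
 * Summary of the read path: if a previous read URB is still in flight, the
 * caller either waits for it (blocking) or gets -EAGAIN (O_NONBLOCK). Once
 * the buffer holds data, at most one maxpacket-sized buffer's worth is
 * copied out; when it has been fully consumed, or when the caller asked for
 * more than is buffered, the next read URB is started, in the latter case
 * without waiting for it to complete.
 */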

static void skel_write_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);

		spin_lock(&dev->err_lock);
		dev->errors = urb->status;
		spin_unlock(&dev->err_lock);
	}

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
	up(&dev->limit_sem);
}

static ssize_t skel_write(struct file *file, const char __user *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min(count, (size_t)MAX_TRANSFER);

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/*
	 * limit the number of URBs in flight to stop a user from using up all
	 * RAM
	 */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}

	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;

	/* create a urb, and a buffer for it, and copy the data to the urb */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}

	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}

	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}

	/* this lock makes sure we don't submit URBs to gone devices */
	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	/* initialize the urb properly */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->submitted);

	/* send the data out the bulk port */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting write urb, error %d\n",
			__func__, retval);
		goto error_unanchor;
	}

	/*
	 * release our reference to this urb, the USB core will eventually free
	 * it entirely
	 */
	usb_free_urb(urb);


	return writesize;

error_unanchor:
	usb_unanchor_urb(urb);
error:
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);

exit:
	return retval;
}
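
/*
 * Write semantics: skel_write() returns as soon as the URB has been
 * submitted, so a successful return does not mean the data reached the
 * device; completion errors are stored in dev->errors and reported by the
 * next write(), read() or flush(). limit_sem caps the number of outstanding
 * write URBs at WRITES_IN_FLIGHT; with O_NONBLOCK the caller gets -EAGAIN
 * instead of sleeping on that limit.
 */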

static const struct file_operations skel_fops = {
	.owner =	THIS_MODULE,
	.read =		skel_read,
	.write =	skel_write,
	.open =		skel_open,
	.release =	skel_release,
	.flush =	skel_flush,
	.llseek =	noop_llseek,
};

/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 */
static struct usb_class_driver skel_class = {
	.name =		"skel%d",
	.fops =		&skel_fops,
	.minor_base =	USB_SKEL_MINOR_BASE,
};
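
/*
 * minor_base is only a starting point: if the kernel is built with
 * CONFIG_USB_DYNAMIC_MINORS, usb_register_dev() ignores it and hands out
 * the lowest free minor instead.
 */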

static int skel_probe(struct usb_interface *interface,
		      const struct usb_device_id *id)
{
	struct usb_skel *dev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	size_t buffer_size;
	int i;
	int retval = -ENOMEM;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&interface->dev, "Out of memory\n");
		goto error;
	}
	kref_init(&dev->kref);
	sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->err_lock);
	init_usb_anchor(&dev->submitted);
	init_waitqueue_head(&dev->bulk_in_wait);

	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* set up the endpoint information */
	/* use only the first bulk-in and bulk-out endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

		if (!dev->bulk_in_endpointAddr &&
		    usb_endpoint_is_bulk_in(endpoint)) {
			/* we found a bulk in endpoint */
			buffer_size = usb_endpoint_maxp(endpoint);
			dev->bulk_in_size = buffer_size;
			dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
			dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
			if (!dev->bulk_in_buffer) {
				dev_err(&interface->dev,
					"Could not allocate bulk_in_buffer\n");
				goto error;
			}
			dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!dev->bulk_in_urb) {
				dev_err(&interface->dev,
					"Could not allocate bulk_in_urb\n");
				goto error;
			}
		}

		if (!dev->bulk_out_endpointAddr &&
		    usb_endpoint_is_bulk_out(endpoint)) {
			/* we found a bulk out endpoint */
			dev->bulk_out_endpointAddr = endpoint->bEndpointAddress;
		}
	}
	if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) {
		dev_err(&interface->dev,
			"Could not find both bulk-in and bulk-out endpoints\n");
		goto error;
	}

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &skel_class);
	if (retval) {
		/* something prevented us from registering this driver */
		dev_err(&interface->dev,
			"Not able to get a minor for this device.\n");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev,
		 "USB Skeleton device now attached to USBSkel-%d",
		 interface->minor);
	return 0;

error:
	if (dev)
		/* this frees allocated memory */
		kref_put(&dev->kref, skel_delete);
	return retval;
}
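
/*
 * On kernels that provide it (v4.12 and later), the endpoint loop above can
 * be replaced by the helper used in the current upstream skeleton, roughly:
 *
 *	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
 *
 *	retval = usb_find_common_endpoints(interface->cur_altsetting,
 *					   &bulk_in, &bulk_out, NULL, NULL);
 *	if (retval) {
 *		dev_err(&interface->dev,
 *			"Could not find both bulk-in and bulk-out endpoints\n");
 *		goto error;
 *	}
 */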

static void skel_disconnect(struct usb_interface *interface)
{
	struct usb_skel *dev;
	int minor = interface->minor;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);

	/* give back our minor */
	usb_deregister_dev(interface, &skel_class);

	/* prevent more I/O from starting */
	mutex_lock(&dev->io_mutex);
	dev->interface = NULL;
	mutex_unlock(&dev->io_mutex);

	usb_kill_anchored_urbs(&dev->submitted);

	/* decrement our usage count */
	kref_put(&dev->kref, skel_delete);

	dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}

static void skel_draw_down(struct usb_skel *dev)
{
	int time;

	time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
	if (!time)
		usb_kill_anchored_urbs(&dev->submitted);
	usb_kill_urb(dev->bulk_in_urb);
}

static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	if (!dev)
		return 0;
	skel_draw_down(dev);
	return 0;
}

static int skel_resume(struct usb_interface *intf)
{
	return 0;
}

static int skel_pre_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	return 0;
}

static int skel_post_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	/* we are sure no URBs are active - no locking needed */
	dev->errors = -EPIPE;
	mutex_unlock(&dev->io_mutex);

	return 0;
}
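
/*
 * Note the asymmetric locking: skel_pre_reset() takes io_mutex and
 * skel_post_reset() releases it, so no new I/O can be started while the
 * device is being reset. Setting dev->errors to -EPIPE afterwards makes the
 * next read(), write() or flush() report that a reset happened.
 */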

static struct usb_driver skel_driver = {
	.name =		"skeleton",
	.probe =	skel_probe,
	.disconnect =	skel_disconnect,
	.suspend =	skel_suspend,
	.resume =	skel_resume,
	.pre_reset =	skel_pre_reset,
	.post_reset =	skel_post_reset,
	.id_table =	skel_table,
	.supports_autosuspend = 1,
};

module_usb_driver(skel_driver);

MODULE_LICENSE("GPL");