/*
 * Released under the GPLv2 only.
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function.  Only use this if you allocate the
 * space for a struct urb on your own.  If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);
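
/*
 * Example: a hedged sketch of using usb_init_urb() on a URB embedded in
 * driver-owned storage rather than one from usb_alloc_urb().  The
 * "example_state" type is hypothetical, not part of this file.
 */
struct example_state {
	struct urb urb;			/* storage owned by the driver */
};

static void example_init_embedded_urb(struct example_state *st)
{
	usb_init_urb(&st->urb);
	/* st->urb is now usable; the driver must not release the
	 * containing memory while the USB core still uses the URB.
	 */
}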

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 *
 * Return: A pointer to the new urb, or %NULL if no memory is available.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(sizeof(struct urb) +
		iso_packets * sizeof(struct usb_iso_packet_descriptor),
		mem_flags);
	if (!urb)
		return NULL;
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it.  When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);
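
/*
 * Example: a sketch of the allocation side of the URB lifecycle.
 * Setting URB_FREE_BUFFER hands the transfer buffer to the URB, so
 * urb_destroy() above will kfree() it together with the URB once the
 * last reference is dropped via usb_free_urb().  Names are illustrative.
 */
static struct urb *example_alloc_urb_with_buffer(size_t len, gfp_t mem_flags)
{
	struct urb *urb = usb_alloc_urb(0, mem_flags);	/* 0: no iso packets */

	if (!urb)
		return NULL;
	urb->transfer_buffer = kmalloc(len, mem_flags);
	if (!urb->transfer_buffer) {
		usb_free_urb(urb);
		return NULL;
	}
	urb->transfer_flags |= URB_FREE_BUFFER;	/* freed by urb_destroy() */
	return urb;
}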

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver
 * to a host controller driver.  This allows proper reference counting to
 * happen for urbs.
 *
 * Return: A pointer to the urb with the incremented reference counter.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them individually.
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned))
		atomic_inc(&urb->reject);

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
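
/*
 * Example: a sketch of the usual anchoring pattern.  The anchor is
 * assumed to have been set up with init_usb_anchor() (e.g. in probe);
 * anchoring before submission lets the whole group of in-flight URBs be
 * cancelled later without per-URB bookkeeping.
 */
static int example_submit_anchored(struct urb *urb, struct usb_anchor *anchor)
{
	int ret;

	usb_anchor_urb(urb, anchor);
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_unanchor_urb(urb);	/* failed submission: take it back out */
	return ret;
}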

static int usb_anchor_check_wakeup(struct usb_anchor *anchor)
{
	return atomic_read(&anchor->suspend_wakeups) == 0 &&
		list_empty(&anchor->urb_list);
}

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	/*
	 * At this point, we could be competing with another thread which
	 * has the same intention. To protect the urb from being unanchored
	 * twice, only the winner of the race gets the job.
	 */
	if (likely(anchor == urb->anchor))
		__usb_unanchor_urb(urb, anchor);
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);

/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * If the submission is successful, the complete() callback from the URB
 * will be called exactly once, when the USB core and Host Controller Driver
 * (HCD) are finished with the URB.  When the completion function is called,
 * control of the URB is returned to the device driver which issued the
 * request.  The completion handler may then immediately free or reuse that
 * URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling.  For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units).  And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.
 *
 * Not all isochronous transfer scheduling policies will work, but most
 * host controller drivers should easily handle ISO queues going from now
 * until 10-200 msec into the future.  Drivers should try to keep at
 * least one or two msec of data in the queue; many controllers require
 * that new transfers start at least 1 msec in the future when they are
 * added.  If the driver is unable to keep up and the queue empties out,
 * the behavior for new submissions is governed by the URB_ISO_ASAP flag.
 * If the flag is set, or if the queue is idle, then the URB is always
 * assigned to the first available (and not yet expired) slot in the
 * endpoint's schedule.  If the flag is not set and the queue is active
 * then the URB is always assigned to the next slot in the schedule
 * following the end of the endpoint's previous URB, even if that slot is
 * in the past.  When a packet is assigned in this way to a slot that has
 * already expired, the packet is not transmitted and the corresponding
 * usb_iso_packet_descriptor's status field will return -EXDEV.  If this
 * would happen to all the packets in the URB, submission fails with a
 * -EXDEV error code.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Return:
 * 0 on successful submissions. A negative error number otherwise.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  With that queuing policy, an endpoint's queue would never
 * be empty.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one.  This was previously an HCD-specific behavior, except for ISO
 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected.  If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail.  Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep the
 * bandwidth they need, by reinitializing and resubmitting the
 * just-completed urb until the driver no longer needs that periodic
 * bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is never used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply.
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	static int			pipetypes[4] = {
		PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
	};
	int				xfertype, max;
	struct usb_device		*dev;
	struct usb_host_endpoint	*ep;
	int				is_out;
	unsigned int			allowed;

	if (!urb || !urb->complete)
		return -EINVAL;
	if (urb->hcpriv) {
		WARN_ONCE(1, "URB %p submitted while active\n", urb);
		return -EBUSY;
	}

	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return -ENODEV;

	/* For now, get the endpoint from the pipe.  Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = usb_pipe_endpoint(dev, urb->pipe);
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Clear the internal flags and cache the direction for later use */
	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
			URB_DMA_SG_COMBINED);
	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
			dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = usb_endpoint_maxp(&ep->desc);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int	n, len;

		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
		 * 3 packets each
		 */
		if (dev->speed >= USB_SPEED_SUPER) {
			int     burst = 1 + ep->ss_ep_comp.bMaxBurst;
			int     mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
			max *= burst;
			max *= mult;
		}

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH)
			max *= usb_endpoint_maxp_mult(&ep->desc);

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	} else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
			dev->speed != USB_SPEED_WIRELESS) {
		struct scatterlist *sg;
		int i;

		for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
			if (sg->length % max)
				return -EINVAL;
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length > INT_MAX)
		return -EMSGSIZE;

	/*
	 * stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */

	/* Check that the pipe's type matches the endpoint's type */
	if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
		dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
			usb_pipetype(urb->pipe), pipetypes[xfertype]);

	/* Check against a simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
			URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	allowed &= urb->transfer_flags;

	/* warn if submitter gave bogus flags */
	if (allowed != urb->transfer_flags)
		dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
			urb->transfer_flags, allowed);

	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		switch (dev->speed) {
		case USB_SPEED_WIRELESS:
			if ((urb->interval < 6)
				&& (xfertype == USB_ENDPOINT_XFER_INT))
				return -EINVAL;
		default:
			if (urb->interval <= 0)
				return -EINVAL;
			break;
		}
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_SUPER_PLUS:
		case USB_SPEED_SUPER:	/* units are 125us */
			/* Handle up to 2^(16-1) microframes */
			if (urb->interval > (1 << 15))
				return -EINVAL;
			max = 1 << 15;
			break;
		case USB_SPEED_WIRELESS:
			if (urb->interval > 16)
				return -EINVAL;
			break;
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		if (dev->speed != USB_SPEED_WIRELESS) {
			/* Round down to a power of 2, no more than max */
			urb->interval = min(max, 1 << ilog2(urb->interval));
		}
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
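
/*
 * Example: a sketch of a bulk IN submission whose completion handler
 * resubmits the URB.  The endpoint address (0x81), buffer, and names
 * are hypothetical; per the Memory Flags rules above, the resubmission
 * from the handler must use GFP_ATOMIC.
 */
static void example_bulk_complete(struct urb *urb)
{
	if (urb->status) {
		dev_dbg(&urb->dev->dev, "bulk urb done: %d\n", urb->status);
		return;
	}
	/* ... consume urb->actual_length bytes of urb->transfer_buffer ... */
	if (usb_submit_urb(urb, GFP_ATOMIC))	/* interrupt context */
		dev_err(&urb->dev->dev, "resubmit failed\n");
}

static int example_start_bulk_in(struct usb_device *udev, void *buf, int len)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	int ret;

	if (!urb)
		return -ENOMEM;
	usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, 0x81),
			  buf, len, example_bulk_complete, NULL);
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_free_urb(urb);	/* submission failed: drop our ref */
	return ret;
}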

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned.  The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is asynchronous, however the HCD might call the ->complete()
 * callback during unlink. Therefore when drivers call usb_unlink_urb(), they
 * must not hold any locks that may be taken by the completion function.
 * Success is indicated by returning -EINPROGRESS, at which time the URB will
 * probably not yet have been given back to the device driver. When it is
 * eventually called, the completion function will see @urb->status ==
 * -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * Return: -EINPROGRESS on success. See description for other values on
 * failure.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue.  Normally the queue advances as the controller
 * hardware processes each request.  But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns.  It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns.  The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO.  Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates.  Such queues do not stop when an URB
 * encounters an error or is unlinked.  An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);
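
/*
 * Example: a sketch of asynchronous cancellation from a context that
 * cannot sleep (e.g. a timer), where usb_kill_urb() is not allowed.
 * Cleanup is finished by the completion handler, which will run with
 * status -ECONNRESET.
 */
static void example_cancel_async(struct urb *urb)
{
	int ret = usb_unlink_urb(urb);

	if (ret != -EINPROGRESS)
		pr_debug("urb was not linked: %d\n", ret);
}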

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
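
/*
 * Example: a hedged sketch of synchronous cancellation in a disconnect()
 * path.  "example_priv" is hypothetical driver state; once usb_kill_urb()
 * returns, the completion handler has finished and the URB may be freed.
 */
struct example_priv {
	struct urb *int_urb;
};

static void example_stop_io(struct example_priv *priv)
{
	usb_kill_urb(priv->int_urb);	/* may sleep; NULL-safe */
	usb_free_urb(priv->int_urb);
	priv->int_urb = NULL;
}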

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused.  These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!urb)
		return;
	atomic_inc(&urb->reject);

	if (!urb->dev || !urb->ep)
		return;

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);
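
/*
 * Example: a sketch of pairing usb_poison_urb() with usb_unpoison_urb(),
 * e.g. around a period where I/O must stay quiescent.  While poisoned,
 * any resubmission attempt fails with -EPERM.
 */
static void example_quiesce_urb(struct urb *urb)
{
	usb_poison_urb(urb);		/* sleeps until the urb is idle */
}

static void example_restart_urb(struct urb *urb)
{
	usb_unpoison_urb(urb);		/* the urb may be submitted again */
}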

/**
 * usb_block_urb - reliably prevent further use of an URB
 * @urb: pointer to URB to be blocked, may be NULL
 *
 * After the routine has run, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 */
void usb_block_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_inc(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_block_urb);
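
/*
 * Example: a sketch of using usb_block_urb() where usb_poison_urb() is
 * ruled out because the caller cannot sleep.  The block only bumps the
 * reject count; a later usb_unpoison_urb() lifts it.
 */
static void example_block_in_atomic_context(struct urb *urb)
{
	usb_block_urb(urb);	/* resubmissions now fail with -EPERM */
}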

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be killed starting
 * from the back of the queue.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
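
/*
 * Example: a sketch of disconnect-time teardown for anchored I/O.  One
 * call kills every URB on the hypothetical "submitted" anchor, newest
 * first, and waits until all of them are idle.
 */
static void example_disconnect_teardown(struct usb_anchor *submitted)
{
	usb_kill_anchored_urbs(submitted);	/* sleeps; not for atomic context */
}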

/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be poisoned starting
 * from the back of the queue.  Newly added URBs will also be
 * poisoned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	anchor->poisoned = 1;
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs();
 * the anchor can be used normally after it returns.
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be unlinked starting
 * from the back of the queue.  This function is asynchronous.
 * The unlinking is just triggered.  It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_anchor_suspend_wakeups
 * @anchor: the anchor you want to suspend wakeups on
 *
 * Call this to stop the last urb being unanchored from waking up any
 * usb_wait_anchor_empty_timeout waiters. This is used in the hcd urb give-
 * back path to delay waking up until after the completion handler has run.
 */
void usb_anchor_suspend_wakeups(struct usb_anchor *anchor)
{
	if (anchor)
		atomic_inc(&anchor->suspend_wakeups);
}
EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups);

/**
 * usb_anchor_resume_wakeups
 * @anchor: the anchor you want to resume wakeups on
 *
 * Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and
 * wake up any current waiters if the anchor is empty.
 */
void usb_anchor_resume_wakeups(struct usb_anchor *anchor)
{
	if (!anchor)
		return;

	atomic_dec(&anchor->suspend_wakeups);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all of an anchor's
 * URBs have finished.
 *
 * Return: Non-zero if the anchor became unused. Zero on timeout.
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait,
				  usb_anchor_check_wakeup(anchor),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
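
/*
 * Example: a sketch of a suspend path that first lets in-flight URBs
 * drain and only kills the stragglers.  The 1000 ms budget is an
 * arbitrary choice for illustration.
 */
static int example_suspend_drain(struct usb_anchor *submitted)
{
	if (!usb_wait_anchor_empty_timeout(submitted, 1000))
		usb_kill_anchored_urbs(submitted);	/* timed out: force it */
	return 0;
}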

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * This will take the oldest urb from an anchor,
 * unanchor it, and return it.
 *
 * Return: The oldest urb from @anchor, or %NULL if @anchor has no
 * urbs associated with it.
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}

EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * Use this to get rid of all an anchor's urbs.
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		__usb_unanchor_urb(victim, anchor);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}

EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * Return: 1 if the anchor has no urbs associated with it.
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}

EXPORT_SYMBOL_GPL(usb_anchor_empty);
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Released under the GPLv2 only.
 
  4 */
  5
  6#include <linux/module.h>
  7#include <linux/string.h>
  8#include <linux/bitops.h>
  9#include <linux/slab.h>
 10#include <linux/log2.h>
 11#include <linux/usb.h>
 12#include <linux/wait.h>
 13#include <linux/usb/hcd.h>
 14#include <linux/scatterlist.h>
 15
 16#define to_urb(d) container_of(d, struct urb, kref)
 17
 18
 19static void urb_destroy(struct kref *kref)
 20{
 21	struct urb *urb = to_urb(kref);
 22
 23	if (urb->transfer_flags & URB_FREE_BUFFER)
 24		kfree(urb->transfer_buffer);
 25
 26	kfree(urb);
 27}
 28
 29/**
 30 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 31 * @urb: pointer to the urb to initialize
 32 *
 33 * Initializes a urb so that the USB subsystem can use it properly.
 34 *
 35 * If a urb is created with a call to usb_alloc_urb() it is not
 36 * necessary to call this function.  Only use this if you allocate the
 37 * space for a struct urb on your own.  If you call this function, be
 38 * careful when freeing the memory for your urb that it is no longer in
 39 * use by the USB core.
 40 *
 41 * Only use this function if you _really_ understand what you are doing.
 42 */
 43void usb_init_urb(struct urb *urb)
 44{
 45	if (urb) {
 46		memset(urb, 0, sizeof(*urb));
 47		kref_init(&urb->kref);
 48		INIT_LIST_HEAD(&urb->anchor_list);
 49	}
 50}
 51EXPORT_SYMBOL_GPL(usb_init_urb);
 52
 53/**
 54 * usb_alloc_urb - creates a new urb for a USB driver to use
 55 * @iso_packets: number of iso packets for this urb
 56 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 57 *	valid options for this.
 58 *
 59 * Creates an urb for the USB driver to use, initializes a few internal
 60 * structures, increments the usage counter, and returns a pointer to it.
 61 *
 62 * If the driver want to use this urb for interrupt, control, or bulk
 63 * endpoints, pass '0' as the number of iso packets.
 64 *
 65 * The driver must call usb_free_urb() when it is finished with the urb.
 66 *
 67 * Return: A pointer to the new urb, or %NULL if no memory is available.
 68 */
 69struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
 70{
 71	struct urb *urb;
 72
 73	urb = kmalloc(sizeof(struct urb) +
 74		iso_packets * sizeof(struct usb_iso_packet_descriptor),
 75		mem_flags);
 76	if (!urb)
 77		return NULL;
 78	usb_init_urb(urb);
 79	return urb;
 80}
 81EXPORT_SYMBOL_GPL(usb_alloc_urb);
 82
 83/**
 84 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 85 * @urb: pointer to the urb to free, may be NULL
 86 *
 87 * Must be called when a user of a urb is finished with it.  When the last user
 88 * of the urb calls this function, the memory of the urb is freed.
 89 *
 90 * Note: The transfer buffer associated with the urb is not freed unless the
 91 * URB_FREE_BUFFER transfer flag is set.
 92 */
 93void usb_free_urb(struct urb *urb)
 94{
 95	if (urb)
 96		kref_put(&urb->kref, urb_destroy);
 97}
 98EXPORT_SYMBOL_GPL(usb_free_urb);
 99
100/**
101 * usb_get_urb - increments the reference count of the urb
102 * @urb: pointer to the urb to modify, may be NULL
103 *
104 * This must be  called whenever a urb is transferred from a device driver to a
105 * host controller driver.  This allows proper reference counting to happen
106 * for urbs.
107 *
108 * Return: A pointer to the urb with the incremented reference counter.
109 */
110struct urb *usb_get_urb(struct urb *urb)
111{
112	if (urb)
113		kref_get(&urb->kref);
114	return urb;
115}
116EXPORT_SYMBOL_GPL(usb_get_urb);
117
118/**
119 * usb_anchor_urb - anchors an URB while it is processed
120 * @urb: pointer to the urb to anchor
121 * @anchor: pointer to the anchor
122 *
123 * This can be called to have access to URBs which are to be executed
124 * without bothering to track them
125 */
126void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
127{
128	unsigned long flags;
129
130	spin_lock_irqsave(&anchor->lock, flags);
131	usb_get_urb(urb);
132	list_add_tail(&urb->anchor_list, &anchor->urb_list);
133	urb->anchor = anchor;
134
135	if (unlikely(anchor->poisoned))
136		atomic_inc(&urb->reject);
137
138	spin_unlock_irqrestore(&anchor->lock, flags);
139}
140EXPORT_SYMBOL_GPL(usb_anchor_urb);
141
142static int usb_anchor_check_wakeup(struct usb_anchor *anchor)
143{
144	return atomic_read(&anchor->suspend_wakeups) == 0 &&
145		list_empty(&anchor->urb_list);
146}
147
148/* Callers must hold anchor->lock */
149static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
150{
151	urb->anchor = NULL;
152	list_del(&urb->anchor_list);
153	usb_put_urb(urb);
154	if (usb_anchor_check_wakeup(anchor))
155		wake_up(&anchor->wait);
156}
157
158/**
159 * usb_unanchor_urb - unanchors an URB
160 * @urb: pointer to the urb to anchor
161 *
162 * Call this to stop the system keeping track of this URB
163 */
164void usb_unanchor_urb(struct urb *urb)
165{
166	unsigned long flags;
167	struct usb_anchor *anchor;
168
169	if (!urb)
170		return;
171
172	anchor = urb->anchor;
173	if (!anchor)
174		return;
175
176	spin_lock_irqsave(&anchor->lock, flags);
177	/*
178	 * At this point, we could be competing with another thread which
179	 * has the same intention. To protect the urb from being unanchored
180	 * twice, only the winner of the race gets the job.
181	 */
182	if (likely(anchor == urb->anchor))
183		__usb_unanchor_urb(urb, anchor);
184	spin_unlock_irqrestore(&anchor->lock, flags);
185}
186EXPORT_SYMBOL_GPL(usb_unanchor_urb);
187
188/*-------------------------------------------------------------------*/
189
190static const int pipetypes[4] = {
191	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
192};
193
194/**
195 * usb_urb_ep_type_check - sanity check of endpoint in the given urb
196 * @urb: urb to be checked
197 *
198 * This performs a light-weight sanity check for the endpoint in the
199 * given urb.  It returns 0 if the urb contains a valid endpoint, otherwise
200 * a negative error code.
201 */
202int usb_urb_ep_type_check(const struct urb *urb)
203{
204	const struct usb_host_endpoint *ep;
205
206	ep = usb_pipe_endpoint(urb->dev, urb->pipe);
207	if (!ep)
208		return -EINVAL;
209	if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
210		return -EINVAL;
211	return 0;
212}
213EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);
214
215/**
216 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
217 * @urb: pointer to the urb describing the request
218 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
219 *	of valid options for this.
220 *
221 * This submits a transfer request, and transfers control of the URB
222 * describing that request to the USB subsystem.  Request completion will
223 * be indicated later, asynchronously, by calling the completion handler.
224 * The three types of completion are success, error, and unlink
225 * (a software-induced fault, also called "request cancellation").
226 *
227 * URBs may be submitted in interrupt context.
228 *
229 * The caller must have correctly initialized the URB before submitting
230 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
231 * available to ensure that most fields are correctly initialized, for
232 * the particular kind of transfer, although they will not initialize
233 * any transfer flags.
234 *
235 * If the submission is successful, the complete() callback from the URB
236 * will be called exactly once, when the USB core and Host Controller Driver
237 * (HCD) are finished with the URB.  When the completion function is called,
238 * control of the URB is returned to the device driver which issued the
239 * request.  The completion handler may then immediately free or reuse that
240 * URB.
241 *
242 * With few exceptions, USB device drivers should never access URB fields
243 * provided by usbcore or the HCD until its complete() is called.
244 * The exceptions relate to periodic transfer scheduling.  For both
245 * interrupt and isochronous urbs, as part of successful URB submission
246 * urb->interval is modified to reflect the actual transfer period used
247 * (normally some power of two units).  And for isochronous urbs,
248 * urb->start_frame is modified to reflect when the URB's transfers were
249 * scheduled to start.
250 *
251 * Not all isochronous transfer scheduling policies will work, but most
252 * host controller drivers should easily handle ISO queues going from now
253 * until 10-200 msec into the future.  Drivers should try to keep at
254 * least one or two msec of data in the queue; many controllers require
255 * that new transfers start at least 1 msec in the future when they are
256 * added.  If the driver is unable to keep up and the queue empties out,
257 * the behavior for new submissions is governed by the URB_ISO_ASAP flag.
258 * If the flag is set, or if the queue is idle, then the URB is always
259 * assigned to the first available (and not yet expired) slot in the
260 * endpoint's schedule.  If the flag is not set and the queue is active
261 * then the URB is always assigned to the next slot in the schedule
262 * following the end of the endpoint's previous URB, even if that slot is
263 * in the past.  When a packet is assigned in this way to a slot that has
264 * already expired, the packet is not transmitted and the corresponding
265 * usb_iso_packet_descriptor's status field will return -EXDEV.  If this
266 * would happen to all the packets in the URB, submission fails with a
267 * -EXDEV error code.
268 *
269 * For control endpoints, the synchronous usb_control_msg() call is
270 * often used (in non-interrupt context) instead of this call.
271 * That is often used through convenience wrappers, for the requests
272 * that are standardized in the USB 2.0 specification.  For bulk
273 * endpoints, a synchronous usb_bulk_msg() call is available.
274 *
275 * Return:
276 * 0 on successful submissions. A negative error number otherwise.
277 *
278 * Request Queuing:
279 *
280 * URBs may be submitted to endpoints before previous ones complete, to
281 * minimize the impact of interrupt latencies and system overhead on data
282 * throughput.  With that queuing policy, an endpoint's queue would never
283 * be empty.  This is required for continuous isochronous data streams,
284 * and may also be required for some kinds of interrupt transfers. Such
285 * queuing also maximizes bandwidth utilization by letting USB controllers
286 * start work on later requests before driver software has finished the
287 * completion processing for earlier (successful) requests.
288 *
289 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
290 * than one.  This was previously a HCD-specific behavior, except for ISO
291 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
292 * after faults (transfer errors or cancellation).
293 *
294 * Reserved Bandwidth Transfers:
295 *
296 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
297 * using the interval specified in the urb.  Submitting the first urb to
298 * the endpoint reserves the bandwidth necessary to make those transfers.
299 * If the USB subsystem can't allocate sufficient bandwidth to perform
300 * the periodic request, submitting such a periodic request should fail.
301 *
302 * For devices under xHCI, the bandwidth is reserved at configuration time, or
303 * when the alt setting is selected.  If there is not enough bus bandwidth, the
304 * configuration/alt setting request will fail.  Therefore, submissions to
305 * periodic endpoints on devices under xHCI should never fail due to bandwidth
306 * constraints.
307 *
308 * Device drivers must explicitly request that repetition, by ensuring that
309 * some URB is always on the endpoint's queue (except possibly for short
310 * periods during completion callbacks).  When there is no longer an urb
311 * queued, the endpoint's bandwidth reservation is canceled.  This means
312 * drivers can use their completion handlers to ensure they keep bandwidth
313 * they need, by reinitializing and resubmitting the just-completed urb
314 * until the driver longer needs that periodic bandwidth.
315 *
316 * Memory Flags:
317 *
318 * The general rules for how to decide which mem_flags to use
319 * are the same as for kmalloc.  There are four
320 * different possible values; GFP_KERNEL, GFP_NOFS, GFP_NOIO and
321 * GFP_ATOMIC.
322 *
323 * GFP_NOFS is not ever used, as it has not been implemented yet.
324 *
325 * GFP_ATOMIC is used when
326 *   (a) you are inside a completion handler, an interrupt, bottom half,
327 *       tasklet or timer, or
328 *   (b) you are holding a spinlock or rwlock (does not apply to
329 *       semaphores), or
330 *   (c) current->state != TASK_RUNNING, this is the case only after
331 *       you've changed it.
332 *
333 * GFP_NOIO is used in the block io path and error handling of storage
334 * devices.
335 *
336 * All other situations use GFP_KERNEL.
337 *
338 * Some more specific rules for mem_flags can be inferred, such as
339 *  (1) start_xmit, timeout, and receive methods of network drivers must
340 *      use GFP_ATOMIC (they are called with a spinlock held);
341 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
342 *      called with a spinlock held);
343 *  (3) If you use a kernel thread with a network driver you must use
344 *      GFP_NOIO, unless (b) or (c) apply;
345 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
346 *      apply or your are in a storage driver's block io path;
347 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
348 *  (6) changing firmware on a running storage or net device uses
349 *      GFP_NOIO, unless b) or c) apply
350 *
351 */
352int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
353{
 
 
 
354	int				xfertype, max;
355	struct usb_device		*dev;
356	struct usb_host_endpoint	*ep;
357	int				is_out;
358	unsigned int			allowed;
359
360	if (!urb || !urb->complete)
361		return -EINVAL;
362	if (urb->hcpriv) {
363		WARN_ONCE(1, "URB %pK submitted while active\n", urb);
364		return -EBUSY;
365	}
366
367	dev = urb->dev;
368	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
369		return -ENODEV;
370
371	/* For now, get the endpoint from the pipe.  Eventually drivers
372	 * will be required to set urb->ep directly and we will eliminate
373	 * urb->pipe.
374	 */
375	ep = usb_pipe_endpoint(dev, urb->pipe);
376	if (!ep)
377		return -ENOENT;
378
379	urb->ep = ep;
380	urb->status = -EINPROGRESS;
381	urb->actual_length = 0;
382
383	/* Lots of sanity checks, so HCDs can rely on clean data
384	 * and don't need to duplicate tests
385	 */
386	xfertype = usb_endpoint_type(&ep->desc);
387	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
388		struct usb_ctrlrequest *setup =
389				(struct usb_ctrlrequest *) urb->setup_packet;
390
391		if (!setup)
392			return -ENOEXEC;
393		is_out = !(setup->bRequestType & USB_DIR_IN) ||
394				!setup->wLength;
395	} else {
396		is_out = usb_endpoint_dir_out(&ep->desc);
397	}
398
399	/* Clear the internal flags and cache the direction for later use */
400	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
401			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
402			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
403			URB_DMA_SG_COMBINED);
404	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
405
406	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
407			dev->state < USB_STATE_CONFIGURED)
408		return -ENODEV;
409
410	max = usb_endpoint_maxp(&ep->desc);
411	if (max <= 0) {
412		dev_dbg(&dev->dev,
413			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
414			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
415			__func__, max);
416		return -EMSGSIZE;
417	}
418
419	/* periodic transfers limit size per frame/uframe,
420	 * but drivers only control those sizes for ISO.
421	 * while we're checking, initialize return status.
422	 */
423	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
424		int	n, len;
425
426		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
427		 * 3 packets each
428		 */
429		if (dev->speed >= USB_SPEED_SUPER) {
430			int     burst = 1 + ep->ss_ep_comp.bMaxBurst;
431			int     mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
432			max *= burst;
433			max *= mult;
434		}
435
436		if (dev->speed == USB_SPEED_SUPER_PLUS &&
437		    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes)) {
438			struct usb_ssp_isoc_ep_comp_descriptor *isoc_ep_comp;
439
440			isoc_ep_comp = &ep->ssp_isoc_ep_comp;
441			max = le32_to_cpu(isoc_ep_comp->dwBytesPerInterval);
442		}
443
444		/* "high bandwidth" mode, 1-3 packets/uframe? */
445		if (dev->speed == USB_SPEED_HIGH)
446			max *= usb_endpoint_maxp_mult(&ep->desc);
447
448		if (urb->number_of_packets <= 0)
449			return -EINVAL;
450		for (n = 0; n < urb->number_of_packets; n++) {
451			len = urb->iso_frame_desc[n].length;
452			if (len < 0 || len > max)
453				return -EMSGSIZE;
454			urb->iso_frame_desc[n].status = -EXDEV;
455			urb->iso_frame_desc[n].actual_length = 0;
456		}
457	} else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
458			dev->speed != USB_SPEED_WIRELESS) {
459		struct scatterlist *sg;
460		int i;
461
462		for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
463			if (sg->length % max)
464				return -EINVAL;
465	}
466
467	/* the I/O buffer must be mapped/unmapped, except when length=0 */
468	if (urb->transfer_buffer_length > INT_MAX)
469		return -EMSGSIZE;
470
471	/*
472	 * stuff that drivers shouldn't do, but which shouldn't
473	 * cause problems in HCDs if they get it wrong.
474	 */
475
476	/* Check that the pipe's type matches the endpoint's type */
477	if (usb_urb_ep_type_check(urb))
478		dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
479			usb_pipetype(urb->pipe), pipetypes[xfertype]);
480
481	/* Check against a simple/standard policy */
482	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
483			URB_FREE_BUFFER);
484	switch (xfertype) {
485	case USB_ENDPOINT_XFER_BULK:
486	case USB_ENDPOINT_XFER_INT:
487		if (is_out)
488			allowed |= URB_ZERO_PACKET;
489		/* FALLTHROUGH */
 
 
 
490	default:			/* all non-iso endpoints */
491		if (!is_out)
492			allowed |= URB_SHORT_NOT_OK;
493		break;
494	case USB_ENDPOINT_XFER_ISOC:
495		allowed |= URB_ISO_ASAP;
496		break;
497	}
498	allowed &= urb->transfer_flags;
499
500	/* warn if submitter gave bogus flags */
501	if (allowed != urb->transfer_flags)
502		dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
503			urb->transfer_flags, allowed);
504
505	/*
506	 * Force periodic transfer intervals to be legal values that are
507	 * a power of two (so HCDs don't need to).
508	 *
509	 * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
510	 * supports different values... this uses EHCI/UHCI defaults (and
511	 * EHCI can use smaller non-default values).
512	 */
513	switch (xfertype) {
514	case USB_ENDPOINT_XFER_ISOC:
515	case USB_ENDPOINT_XFER_INT:
516		/* too small? */
517		switch (dev->speed) {
518		case USB_SPEED_WIRELESS:
519			if ((urb->interval < 6)
520				&& (xfertype == USB_ENDPOINT_XFER_INT))
521				return -EINVAL;
522			/* fall through */
523		default:
524			if (urb->interval <= 0)
525				return -EINVAL;
526			break;
527		}
528		/* too big? */
529		switch (dev->speed) {
530		case USB_SPEED_SUPER_PLUS:
531		case USB_SPEED_SUPER:	/* units are 125us */
532			/* Handle up to 2^(16-1) microframes */
533			if (urb->interval > (1 << 15))
534				return -EINVAL;
535			max = 1 << 15;
536			break;
537		case USB_SPEED_WIRELESS:
538			if (urb->interval > 16)
539				return -EINVAL;
540			break;
541		case USB_SPEED_HIGH:	/* units are microframes */
542			/* NOTE usb handles 2^15 */
543			if (urb->interval > (1024 * 8))
544				urb->interval = 1024 * 8;
545			max = 1024 * 8;
546			break;
547		case USB_SPEED_FULL:	/* units are frames/msec */
548		case USB_SPEED_LOW:
549			if (xfertype == USB_ENDPOINT_XFER_INT) {
550				if (urb->interval > 255)
551					return -EINVAL;
552				/* NOTE ohci only handles up to 32 */
553				max = 128;
554			} else {
555				if (urb->interval > 1024)
556					urb->interval = 1024;
557				/* NOTE usb and ohci handle up to 2^15 */
558				max = 1024;
559			}
560			break;
561		default:
562			return -EINVAL;
563		}
564		if (dev->speed != USB_SPEED_WIRELESS) {
565			/* Round down to a power of 2, no more than max */
566			urb->interval = min(max, 1 << ilog2(urb->interval));
567		}
568	}
569
570	return usb_hcd_submit_urb(urb, mem_flags);
571}
572EXPORT_SYMBOL_GPL(usb_submit_urb);
573
574/*-------------------------------------------------------------------*/
575
576/**
577 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
578 * @urb: pointer to urb describing a previously submitted request,
579 *	may be NULL
580 *
581 * This routine cancels an in-progress request.  URBs complete only once
582 * per submission, and may be canceled only once per submission.
583 * Successful cancellation means termination of @urb will be expedited
584 * and the completion handler will be called with a status code
585 * indicating that the request has been canceled (rather than any other
586 * code).
587 *
588 * Drivers should not call this routine or related routines, such as
589 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
590 * method has returned.  The disconnect function should synchronize with
591 * a driver's I/O routines to ensure that all URB-related activity has
592 * completed before it returns.
593 *
594 * This request is asynchronous; however, the HCD might call the ->complete()
595 * callback during unlink. Therefore, when drivers call usb_unlink_urb(), they
596 * must not hold any locks that may be taken by the completion function.
597 * Success is indicated by returning -EINPROGRESS, at which time the URB will
598 * probably not yet have been given back to the device driver. When it is
599 * eventually called, the completion function will see @urb->status ==
600 * -ECONNRESET.
601 * Failure is indicated by usb_unlink_urb() returning any other value.
602 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
603 * never submitted, or it was unlinked before, or the hardware is already
604 * finished with it), even if the completion handler has not yet run.
605 *
606 * The URB must not be deallocated while this routine is running.  In
607 * particular, when a driver calls this routine, it must ensure that the
608 * completion handler cannot deallocate the URB.
609 *
610 * Return: -EINPROGRESS on success. See description for other values on
611 * failure.
612 *
613 * Unlinking and Endpoint Queues:
614 *
615 * [The behaviors and guarantees described below do not apply to virtual
616 * root hubs but only to endpoint queues for physical USB devices.]
617 *
618 * Host Controller Drivers (HCDs) place all the URBs for a particular
619 * endpoint in a queue.  Normally the queue advances as the controller
620 * hardware processes each request.  But when an URB terminates with an
621 * error its queue generally stops (see below), at least until that URB's
622 * completion routine returns.  It is guaranteed that a stopped queue
623 * will not restart until all its unlinked URBs have been fully retired,
624 * with their completion routines run, even if that's not until some time
625 * after the original completion handler returns.  The same behavior and
626 * guarantee apply when an URB terminates because it was unlinked.
627 *
628 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
629 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
630 * and -EREMOTEIO.  Control endpoint queues behave the same way except
631 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
632 * for isochronous endpoints are treated differently, because they must
633 * advance at fixed rates.  Such queues do not stop when an URB
634 * encounters an error or is unlinked.  An unlinked isochronous URB may
635 * leave a gap in the stream of packets; it is undefined whether such
636 * gaps can be filled in.
637 *
638 * Note that early termination of an URB because a short packet was
639 * received will generate a -EREMOTEIO error if and only if the
640 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
641 * drivers can build deep queues for large or complex bulk transfers
642 * and clean them up reliably after any sort of aborted transfer by
643 * unlinking all pending URBs at the first fault.
644 *
645 * When a control URB terminates with an error other than -EREMOTEIO, it
646 * is quite likely that the status stage of the transfer will not take
647 * place.
648 */
649int usb_unlink_urb(struct urb *urb)
650{
651	if (!urb)
652		return -EINVAL;
653	if (!urb->dev)
654		return -ENODEV;
655	if (!urb->ep)
656		return -EIDRM;
657	return usb_hcd_unlink_urb(urb, -ECONNRESET);
658}
659EXPORT_SYMBOL_GPL(usb_unlink_urb);
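
/*
 * Example (a minimal sketch; "my_urb" is hypothetical): cancelling
 * asynchronously, e.g. from a watchdog timer.  A return of -EINPROGRESS
 * means the completion handler will eventually run with
 * urb->status == -ECONNRESET.
 *
 *	retval = usb_unlink_urb(my_urb);
 *	if (retval != -EINPROGRESS)
 *		pr_debug("urb was not linked: %d\n", retval);
 */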
660
661/**
662 * usb_kill_urb - cancel a transfer request and wait for it to finish
663 * @urb: pointer to URB describing a previously submitted request,
664 *	may be NULL
665 *
666 * This routine cancels an in-progress request.  It is guaranteed that
667 * upon return all completion handlers will have finished and the URB
668 * will be totally idle and available for reuse.  These features make
669 * this an ideal way to stop I/O in a disconnect() callback or close()
670 * function.  If the request has not already finished or been unlinked
671 * the completion handler will see urb->status == -ENOENT.
672 *
673 * While the routine is running, attempts to resubmit the URB will fail
674 * with error -EPERM.  Thus even if the URB's completion handler always
675 * tries to resubmit, it will not succeed and the URB will become idle.
676 *
677 * The URB must not be deallocated while this routine is running.  In
678 * particular, when a driver calls this routine, it must ensure that the
679 * completion handler cannot deallocate the URB.
680 *
681 * This routine may not be used in an interrupt context (such as a bottom
682 * half or a completion handler), or when holding a spinlock, or in other
683 * situations where the caller can't schedule().
684 *
685 * This routine should not be called by a driver after its disconnect
686 * method has returned.
687 */
688void usb_kill_urb(struct urb *urb)
689{
690	might_sleep();
691	if (!(urb && urb->dev && urb->ep))
692		return;
693	atomic_inc(&urb->reject);
694
695	usb_hcd_unlink_urb(urb, -ENOENT);
696	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
697
698	atomic_dec(&urb->reject);
699}
700EXPORT_SYMBOL_GPL(usb_kill_urb);
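
/*
 * Example (a minimal sketch with hypothetical names): stopping I/O from
 * a driver's disconnect() method; on return the urb is idle and safe to
 * free.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_dev *dev = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(dev->int_urb);
 *		usb_free_urb(dev->int_urb);
 *	}
 */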
701
702/**
703 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
704 * @urb: pointer to URB describing a previously submitted request,
705 *	may be NULL
706 *
707 * This routine cancels an in-progress request.  It is guaranteed that
708 * upon return all completion handlers will have finished and the URB
709 * will be totally idle and cannot be reused.  These features make
710 * this an ideal way to stop I/O in a disconnect() callback.
711 * If the request has not already finished or been unlinked
712 * the completion handler will see urb->status == -ENOENT.
713 *
714 * While the routine runs, and after it has returned, attempts to resubmit the
715 * URB will fail with error -EPERM.  Thus even if the URB's completion handler
716 * always tries to resubmit, it will not succeed and the URB will become idle.
717 *
718 * The URB must not be deallocated while this routine is running.  In
719 * particular, when a driver calls this routine, it must ensure that the
720 * completion handler cannot deallocate the URB.
721 *
722 * This routine may not be used in an interrupt context (such as a bottom
723 * half or a completion handler), or when holding a spinlock, or in other
724 * situations where the caller can't schedule().
725 *
726 * This routine should not be called by a driver after its disconnect
727 * method has returned.
728 */
729void usb_poison_urb(struct urb *urb)
730{
731	might_sleep();
732	if (!urb)
733		return;
734	atomic_inc(&urb->reject);
735
736	if (!urb->dev || !urb->ep)
737		return;
738
739	usb_hcd_unlink_urb(urb, -ENOENT);
740	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
741}
742EXPORT_SYMBOL_GPL(usb_poison_urb);
743
744void usb_unpoison_urb(struct urb *urb)
745{
746	if (!urb)
747		return;
748
749	atomic_dec(&urb->reject);
750}
751EXPORT_SYMBOL_GPL(usb_unpoison_urb);
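
/*
 * Example (a hedged sketch; "my_dev" and its urb are hypothetical):
 * usb_poison_urb() and usb_unpoison_urb() are typically paired, for
 * instance around a device reset, so the completion handler cannot
 * resubmit while the device is being reset.
 *
 *	static int my_pre_reset(struct usb_interface *intf)
 *	{
 *		struct my_dev *dev = usb_get_intfdata(intf);
 *
 *		usb_poison_urb(dev->urb);
 *		return 0;
 *	}
 *
 *	static int my_post_reset(struct usb_interface *intf)
 *	{
 *		struct my_dev *dev = usb_get_intfdata(intf);
 *
 *		usb_unpoison_urb(dev->urb);
 *		return usb_submit_urb(dev->urb, GFP_NOIO);
 *	}
 */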
752
753/**
754 * usb_block_urb - reliably prevent further use of an URB
755 * @urb: pointer to URB to be blocked, may be NULL
756 *
757 * After the routine has run, attempts to resubmit the URB will fail
758 * with error -EPERM.  Thus even if the URB's completion handler always
759 * tries to resubmit, it will not succeed and the URB will become idle.
760 *
761 * The URB must not be deallocated while this routine is running.  In
762 * particular, when a driver calls this routine, it must ensure that the
763 * completion handler cannot deallocate the URB.
764 */
765void usb_block_urb(struct urb *urb)
766{
767	if (!urb)
768		return;
769
770	atomic_inc(&urb->reject);
771}
772EXPORT_SYMBOL_GPL(usb_block_urb);
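
/*
 * Example (illustrative only; "dev" is hypothetical): unlike
 * usb_poison_urb(), this never sleeps, so it can be called under a
 * spinlock; the block is later undone with usb_unpoison_urb().
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	usb_block_urb(dev->urb);	(resubmission now fails with -EPERM)
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */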
773
774/**
775 * usb_kill_anchored_urbs - cancel transfer requests en masse
776 * @anchor: anchor the requests are bound to
777 *
778 * This allows all outstanding URBs to be killed starting
779 * from the back of the queue.
780 *
781 * This routine should not be called by a driver after its disconnect
782 * method has returned.
783 */
784void usb_kill_anchored_urbs(struct usb_anchor *anchor)
785{
786	struct urb *victim;
787
788	spin_lock_irq(&anchor->lock);
789	while (!list_empty(&anchor->urb_list)) {
790		victim = list_entry(anchor->urb_list.prev, struct urb,
791				    anchor_list);
792		/* we must make sure the URB isn't freed before we kill it */
793		usb_get_urb(victim);
794		spin_unlock_irq(&anchor->lock);
795		/* this will unanchor the URB */
796		usb_kill_urb(victim);
797		usb_put_urb(victim);
798		spin_lock_irq(&anchor->lock);
799	}
800	spin_unlock_irq(&anchor->lock);
801}
802EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
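
/*
 * Example (a minimal sketch; "dev->submitted" is a hypothetical anchor
 * the driver attached its urbs to with usb_anchor_urb() at submit
 * time): one call retires every in-flight urb on disconnect.
 *
 *	usb_kill_anchored_urbs(&dev->submitted);
 */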
803
804
805/**
806 * usb_poison_anchored_urbs - cease all traffic from an anchor
807 * @anchor: anchor the requests are bound to
808 *
809 * This allows all outstanding URBs to be poisoned starting
810 * from the back of the queue. Newly added URBs will also be
811 * poisoned.
812 *
813 * This routine should not be called by a driver after its disconnect
814 * method has returned.
815 */
816void usb_poison_anchored_urbs(struct usb_anchor *anchor)
817{
818	struct urb *victim;
819
820	spin_lock_irq(&anchor->lock);
821	anchor->poisoned = 1;
822	while (!list_empty(&anchor->urb_list)) {
823		victim = list_entry(anchor->urb_list.prev, struct urb,
824				    anchor_list);
825		/* we must make sure the URB isn't freed before we kill it */
826		usb_get_urb(victim);
827		spin_unlock_irq(&anchor->lock);
828		/* this will unanchor the URB */
829		usb_poison_urb(victim);
830		usb_put_urb(victim);
831		spin_lock_irq(&anchor->lock);
832	}
833	spin_unlock_irq(&anchor->lock);
834}
835EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);
836
837/**
838 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
839 * @anchor: anchor the requests are bound to
840 *
841 * Reverses the effect of usb_poison_anchored_urbs().  The anchor can be
842 * used normally after this routine returns.
843 */
844void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
845{
846	unsigned long flags;
847	struct urb *lazarus;
848
849	spin_lock_irqsave(&anchor->lock, flags);
850	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
851		usb_unpoison_urb(lazarus);
852	}
853	anchor->poisoned = 0;
854	spin_unlock_irqrestore(&anchor->lock, flags);
855}
856EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
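
/*
 * Example (a hedged sketch with hypothetical names): a driver might
 * poison an anchor in its suspend() method to stop completion handlers
 * from resubmitting, and unpoison it again in resume().
 *
 *	static int my_suspend(struct usb_interface *intf, pm_message_t msg)
 *	{
 *		struct my_dev *dev = usb_get_intfdata(intf);
 *
 *		usb_poison_anchored_urbs(&dev->submitted);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct usb_interface *intf)
 *	{
 *		struct my_dev *dev = usb_get_intfdata(intf);
 *
 *		usb_unpoison_anchored_urbs(&dev->submitted);
 *		return my_restart_io(dev);
 *	}
 */
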
857/**
858 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
859 * @anchor: anchor the requests are bound to
860 *
861 * This allows all outstanding URBs to be unlinked starting
862 * from the back of the queue.  This function is asynchronous:
863 * the unlinking is merely triggered, and may complete after
864 * this function has returned.
865 *
866 * This routine should not be called by a driver after its disconnect
867 * method has returned.
868 */
869void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
870{
871	struct urb *victim;
872
873	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
874		usb_unlink_urb(victim);
875		usb_put_urb(victim);
876	}
877}
878EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
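
/*
 * Example (illustrative; "dev->submitted" is hypothetical): because the
 * unlinking is merely triggered, urbs may still be in flight when this
 * returns, so callers often follow it with a wait (see
 * usb_wait_anchor_empty_timeout() below).
 *
 *	usb_unlink_anchored_urbs(&dev->submitted);
 */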
879
880/**
881 * usb_anchor_suspend_wakeups - suspend wakeups on an anchor
882 * @anchor: the anchor you want to suspend wakeups on
883 *
884 * Call this to stop the last urb being unanchored from waking up any
885 * usb_wait_anchor_empty_timeout() waiters.  This is used in the hcd urb
886 * giveback path to delay waking up until after the completion handler has run.
887 */
888void usb_anchor_suspend_wakeups(struct usb_anchor *anchor)
889{
890	if (anchor)
891		atomic_inc(&anchor->suspend_wakeups);
892}
893EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups);
894
895/**
896 * usb_anchor_resume_wakeups - resume wakeups on an anchor
897 * @anchor: the anchor you want to resume wakeups on
898 *
899 * Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and
900 * wake up any current waiters if the anchor is empty.
901 */
902void usb_anchor_resume_wakeups(struct usb_anchor *anchor)
903{
904	if (!anchor)
905		return;
906
907	atomic_dec(&anchor->suspend_wakeups);
908	if (usb_anchor_check_wakeup(anchor))
909		wake_up(&anchor->wait);
910}
911EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups);
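
/*
 * Example (a schematic sketch of the intended giveback-path use): an
 * HCD brackets the completion call so that a waiter in
 * usb_wait_anchor_empty_timeout() cannot be woken before the handler
 * has actually finished.
 *
 *	usb_anchor_suspend_wakeups(anchor);
 *	urb->complete(urb);
 *	usb_anchor_resume_wakeups(anchor);
 */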
912
913/**
914 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
915 * @anchor: the anchor you want to become unused
916 * @timeout: how long you are willing to wait in milliseconds
917 *
918 * Call this if you want to be sure all of an anchor's
919 * URBs have finished.
920 *
921 * Return: Non-zero if the anchor became unused. Zero on timeout.
922 */
923int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
924				  unsigned int timeout)
925{
926	return wait_event_timeout(anchor->wait,
927				  usb_anchor_check_wakeup(anchor),
928				  msecs_to_jiffies(timeout));
929}
930EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
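
/*
 * Example (a common pattern, hypothetical names): trigger asynchronous
 * unlinks, wait up to a second for the anchor to drain, and fall back
 * to a synchronous kill if the timeout expires.
 *
 *	usb_unlink_anchored_urbs(&dev->submitted);
 *	if (!usb_wait_anchor_empty_timeout(&dev->submitted, 1000))
 *		usb_kill_anchored_urbs(&dev->submitted);
 */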
931
932/**
933 * usb_get_from_anchor - get an anchor's oldest urb
934 * @anchor: the anchor whose urb you want
935 *
936 * This will take the oldest urb from an anchor,
937 * unanchor it, and return it.
938 *
939 * Return: The oldest urb from @anchor, or %NULL if @anchor has no
940 * urbs associated with it.
941 */
942struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
943{
944	struct urb *victim;
945	unsigned long flags;
946
947	spin_lock_irqsave(&anchor->lock, flags);
948	if (!list_empty(&anchor->urb_list)) {
949		victim = list_entry(anchor->urb_list.next, struct urb,
950				    anchor_list);
951		usb_get_urb(victim);
952		__usb_unanchor_urb(victim, anchor);
953	} else {
954		victim = NULL;
955	}
956	spin_unlock_irqrestore(&anchor->lock, flags);
957
958	return victim;
959}
961EXPORT_SYMBOL_GPL(usb_get_from_anchor);
962
963/**
964 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
965 * @anchor: the anchor whose urbs you want to unanchor
966 *
967 * Use this to get rid of all of an anchor's urbs.
968 */
969void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
970{
971	struct urb *victim;
972	unsigned long flags;
973
974	spin_lock_irqsave(&anchor->lock, flags);
975	while (!list_empty(&anchor->urb_list)) {
976		victim = list_entry(anchor->urb_list.prev, struct urb,
977				    anchor_list);
978		__usb_unanchor_urb(victim, anchor);
979	}
980	spin_unlock_irqrestore(&anchor->lock, flags);
981}
983EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
984
985/**
986 * usb_anchor_empty - is an anchor empty
987 * @anchor: the anchor you want to query
988 *
989 * Return: 1 if the anchor has no urbs associated with it.
990 */
991int usb_anchor_empty(struct usb_anchor *anchor)
992{
993	return list_empty(&anchor->urb_list);
994}
996EXPORT_SYMBOL_GPL(usb_anchor_empty);
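
/*
 * Example (an end-to-end sketch of the anchor API; all names are
 * hypothetical): anchor each urb before submission, drop the anchor on
 * submission failure, and tear everything down at once on disconnect.
 *
 *	init_usb_anchor(&dev->submitted);
 *
 *	usb_anchor_urb(urb, &dev->submitted);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	if (retval)
 *		usb_unanchor_urb(urb);
 *
 *	(later, from disconnect())
 *	usb_kill_anchored_urbs(&dev->submitted);
 *	WARN_ON(!usb_anchor_empty(&dev->submitted));
 */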
997