   1#include <linux/kernel.h>
   2#include <linux/errno.h>
   3#include <linux/init.h>
   4#include <linux/slab.h>
   5#include <linux/mm.h>
   6#include <linux/module.h>
   7#include <linux/moduleparam.h>
   8#include <linux/scatterlist.h>
   9#include <linux/mutex.h>
  10
  11#include <linux/usb.h>
  12
  13
  14/*-------------------------------------------------------------------------*/
  15
  16/* FIXME make these public somewhere; usbdevfs.h? */
  17struct usbtest_param {
  18	/* inputs */
  19	unsigned		test_num;	/* 0..(TEST_CASES-1) */
  20	unsigned		iterations;
  21	unsigned		length;
  22	unsigned		vary;
  23	unsigned		sglen;
  24
  25	/* outputs */
  26	struct timeval		duration;
  27};
  28#define USBTEST_REQUEST	_IOWR('U', 100, struct usbtest_param)
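/* Illustrative sketch (assumption, not part of this driver): user space
 * reaches this request through usbfs, wrapping it in USBDEVFS_IOCTL from
 * <linux/usbdevice_fs.h> so usbfs forwards it to the interface that
 * usbtest has claimed.  The device path and interface number below are
 * assumed for the example.
 *
 *	struct usbtest_param param = {
 *		.test_num   = 1,	// simple bulk OUT test
 *		.iterations = 1000,
 *		.length     = 512,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno       = 0,	// interface bound to usbtest (assumed)
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data       = &param,
 *	};
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);	// assumed path
 *
 *	if (fd < 0 || ioctl(fd, USBDEVFS_IOCTL, &wrapper) < 0)
 *		perror("usbtest");
 */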
  29
  30/*-------------------------------------------------------------------------*/
  31
  32#define	GENERIC		/* let probe() bind using module params */
  33
  34/* Some devices that can be used for testing will have "real" drivers.
  35 * Entries for those need to be enabled here by hand, after disabling
  36 * that "real" driver.
  37 */
  38//#define	IBOT2		/* grab iBOT2 webcams */
  39//#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */
  40
  41/*-------------------------------------------------------------------------*/
  42
  43struct usbtest_info {
  44	const char		*name;
  45	u8			ep_in;		/* bulk/intr source */
  46	u8			ep_out;		/* bulk/intr sink */
  47	unsigned		autoconf:1;
  48	unsigned		ctrl_out:1;
  49	unsigned		iso:1;		/* try iso in/out */
  50	int			alt;
  51};
  52
  53/* this is accessed only through usbfs ioctl calls.
  54 * one ioctl to issue a test ... one lock per device.
  55 * tests create other threads if they need them.
  56 * urbs and buffers are allocated dynamically,
  57 * and data generated deterministically.
  58 */
  59struct usbtest_dev {
  60	struct usb_interface	*intf;
  61	struct usbtest_info	*info;
  62	int			in_pipe;
  63	int			out_pipe;
  64	int			in_iso_pipe;
  65	int			out_iso_pipe;
  66	struct usb_endpoint_descriptor	*iso_in, *iso_out;
  67	struct mutex		lock;
  68
  69#define TBUF_SIZE	256
  70	u8			*buf;
  71};
  72
  73static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
  74{
  75	return interface_to_usbdev(test->intf);
  76}
  77
  78/* set up all urbs so they can be used with either bulk or interrupt */
  79#define	INTERRUPT_RATE		1	/* msec/transfer */
  80
  81#define ERROR(tdev, fmt, args...) \
  82	dev_err(&(tdev)->intf->dev , fmt , ## args)
  83#define WARNING(tdev, fmt, args...) \
  84	dev_warn(&(tdev)->intf->dev , fmt , ## args)
  85
  86#define GUARD_BYTE	0xA5
  87
  88/*-------------------------------------------------------------------------*/
  89
  90static int
  91get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
  92{
  93	int				tmp;
  94	struct usb_host_interface	*alt;
  95	struct usb_host_endpoint	*in, *out;
  96	struct usb_host_endpoint	*iso_in, *iso_out;
  97	struct usb_device		*udev;
  98
  99	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
 100		unsigned	ep;
 101
 102		in = out = NULL;
 103		iso_in = iso_out = NULL;
 104		alt = intf->altsetting + tmp;
 105
 106		/* take the first altsetting with in-bulk + out-bulk;
 107		 * ignore other endpoints and altsettings.
 108		 */
 109		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
 110			struct usb_host_endpoint	*e;
 111
 112			e = alt->endpoint + ep;
 113			switch (e->desc.bmAttributes) {
 114			case USB_ENDPOINT_XFER_BULK:
 115				break;
 116			case USB_ENDPOINT_XFER_ISOC:
 117				if (dev->info->iso)
 118					goto try_iso;
 119				/* FALLTHROUGH */
 120			default:
 121				continue;
 122			}
 123			if (usb_endpoint_dir_in(&e->desc)) {
 124				if (!in)
 125					in = e;
 126			} else {
 127				if (!out)
 128					out = e;
 129			}
 130			continue;
 131try_iso:
 132			if (usb_endpoint_dir_in(&e->desc)) {
 133				if (!iso_in)
 134					iso_in = e;
 135			} else {
 136				if (!iso_out)
 137					iso_out = e;
 138			}
 139		}
 140		if ((in && out)  ||  iso_in || iso_out)
 141			goto found;
 142	}
 143	return -EINVAL;
 144
 145found:
 146	udev = testdev_to_usbdev(dev);
 147	if (alt->desc.bAlternateSetting != 0) {
 148		tmp = usb_set_interface(udev,
 149				alt->desc.bInterfaceNumber,
 150				alt->desc.bAlternateSetting);
 151		if (tmp < 0)
 152			return tmp;
 153	}
 154
 155	if (in) {
 156		dev->in_pipe = usb_rcvbulkpipe(udev,
 157			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
 158		dev->out_pipe = usb_sndbulkpipe(udev,
 159			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
 160	}
 161	if (iso_in) {
 162		dev->iso_in = &iso_in->desc;
 163		dev->in_iso_pipe = usb_rcvisocpipe(udev,
 164				iso_in->desc.bEndpointAddress
 165					& USB_ENDPOINT_NUMBER_MASK);
 166	}
 167
 168	if (iso_out) {
 169		dev->iso_out = &iso_out->desc;
 170		dev->out_iso_pipe = usb_sndisocpipe(udev,
 171				iso_out->desc.bEndpointAddress
 172					& USB_ENDPOINT_NUMBER_MASK);
 173	}
 174	return 0;
 175}
 176
 177/*-------------------------------------------------------------------------*/
 178
 179/* Support for testing basic non-queued I/O streams.
 180 *
 181 * These just package urbs as requests that can be easily canceled.
 182 * Each urb's data buffer is dynamically allocated; callers can fill
 183 * them with non-zero test data (or test for it) when appropriate.
 184 */
 185
 186static void simple_callback(struct urb *urb)
 187{
 188	complete(urb->context);
 189}
 190
 191static struct urb *usbtest_alloc_urb(
 192	struct usb_device	*udev,
 193	int			pipe,
 194	unsigned long		bytes,
 195	unsigned		transfer_flags,
 196	unsigned		offset)
 197{
 198	struct urb		*urb;
 199
 200	urb = usb_alloc_urb(0, GFP_KERNEL);
 201	if (!urb)
 202		return urb;
 203	usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
 204	urb->interval = (udev->speed == USB_SPEED_HIGH)
 205			? (INTERRUPT_RATE << 3)
 206			: INTERRUPT_RATE;
 207	urb->transfer_flags = transfer_flags;
 208	if (usb_pipein(pipe))
 209		urb->transfer_flags |= URB_SHORT_NOT_OK;
 210
 211	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
 212		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
 213			GFP_KERNEL, &urb->transfer_dma);
 214	else
 215		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
 216
 217	if (!urb->transfer_buffer) {
 218		usb_free_urb(urb);
 219		return NULL;
 220	}
 221
 222	/* To test unaligned transfers add an offset and fill the
 223		unused memory with a guard value */
 224	if (offset) {
 225		memset(urb->transfer_buffer, GUARD_BYTE, offset);
 226		urb->transfer_buffer += offset;
 227		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
 228			urb->transfer_dma += offset;
 229	}
 230
 231	/* For inbound transfers use guard byte so that test fails if
 232		data not correctly copied */
 233	memset(urb->transfer_buffer,
 234			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
 235			bytes);
 236	return urb;
 237}
 238
 239static struct urb *simple_alloc_urb(
 240	struct usb_device	*udev,
 241	int			pipe,
 242	unsigned long		bytes)
 243{
 244	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
 245}
 246
 247static unsigned pattern;
 248static unsigned mod_pattern;
 249module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
 250MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
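/* Illustrative usage (assumes the module is named "usbtest"): the pattern
 * can be chosen at load time or, since the parameter is writable, changed
 * at run time before launching a test:
 *
 *	# modprobe usbtest pattern=1
 *	# echo 1 > /sys/module/usbtest/parameters/pattern
 */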
 251
 252static inline void simple_fill_buf(struct urb *urb)
 253{
 254	unsigned	i;
 255	u8		*buf = urb->transfer_buffer;
 256	unsigned	len = urb->transfer_buffer_length;
 257
 258	switch (pattern) {
 259	default:
 260		/* FALLTHROUGH */
 261	case 0:
 262		memset(buf, 0, len);
 263		break;
 264	case 1:			/* mod63 */
 265		for (i = 0; i < len; i++)
 266			*buf++ = (u8) (i % 63);
 267		break;
 268	}
 269}
 270
 271static inline unsigned long buffer_offset(void *buf)
 272{
 273	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
 274}
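/* Worked example (illustrative): with ARCH_KMALLOC_MINALIGN == 8 and an
 * unaligned-transfer offset of 1, usbtest_alloc_urb() hands back a buffer
 * that sits one byte past an 8-byte boundary; masking with (8 - 1)
 * recovers that offset, which check_guard_bytes() and simple_free_urb()
 * use to locate the guard area and the start of the real allocation.
 */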
 275
 276static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
 277{
 278	u8 *buf = urb->transfer_buffer;
 279	u8 *guard = buf - buffer_offset(buf);
 280	unsigned i;
 281
 282	for (i = 0; guard < buf; i++, guard++) {
 283		if (*guard != GUARD_BYTE) {
 284			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
 285				i, *guard, GUARD_BYTE);
 286			return -EINVAL;
 287		}
 288	}
 289	return 0;
 290}
 291
 292static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
 293{
 294	unsigned	i;
 295	u8		expected;
 296	u8		*buf = urb->transfer_buffer;
 297	unsigned	len = urb->actual_length;
 298
 299	int ret = check_guard_bytes(tdev, urb);
 300	if (ret)
 301		return ret;
 302
 303	for (i = 0; i < len; i++, buf++) {
 304		switch (pattern) {
 305		/* all-zeroes has no synchronization issues */
 306		case 0:
 307			expected = 0;
 308			break;
 309		/* mod63 stays in sync with short-terminated transfers,
 310		 * or otherwise when host and gadget agree on how large
 311		 * each usb transfer request should be.  resync is done
 312		 * with set_interface or set_config.
 313		 */
 314		case 1:			/* mod63 */
 315			expected = i % 63;
 316			break;
 317		/* always fail unsupported patterns */
 318		default:
 319			expected = !*buf;
 320			break;
 321		}
 322		if (*buf == expected)
 323			continue;
 324		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
 325		return -EINVAL;
 326	}
 327	return 0;
 328}
 329
 330static void simple_free_urb(struct urb *urb)
 331{
 332	unsigned long offset = buffer_offset(urb->transfer_buffer);
 333
 334	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
 335		usb_free_coherent(
 336			urb->dev,
 337			urb->transfer_buffer_length + offset,
 338			urb->transfer_buffer - offset,
 339			urb->transfer_dma - offset);
 340	else
 341		kfree(urb->transfer_buffer - offset);
 342	usb_free_urb(urb);
 343}
 344
 345static int simple_io(
 346	struct usbtest_dev	*tdev,
 347	struct urb		*urb,
 348	int			iterations,
 349	int			vary,
 350	int			expected,
 351	const char		*label
 352)
 353{
 354	struct usb_device	*udev = urb->dev;
 355	int			max = urb->transfer_buffer_length;
 356	struct completion	completion;
 357	int			retval = 0;
 358
 359	urb->context = &completion;
 360	while (retval == 0 && iterations-- > 0) {
 361		init_completion(&completion);
 362		if (usb_pipeout(urb->pipe))
 363			simple_fill_buf(urb);
 364		retval = usb_submit_urb(urb, GFP_KERNEL);
 365		if (retval != 0)
 366			break;
 367
 368		/* NOTE:  no timeouts; can't be broken out of by interrupt */
 369		wait_for_completion(&completion);
 370		retval = urb->status;
 371		urb->dev = udev;
 372		if (retval == 0 && usb_pipein(urb->pipe))
 373			retval = simple_check_buf(tdev, urb);
 374
 375		if (vary) {
 376			int	len = urb->transfer_buffer_length;
 377
 378			len += vary;
 379			len %= max;
 380			if (len == 0)
 381				len = (vary < max) ? vary : max;
 382			urb->transfer_buffer_length = len;
 383		}
 384
 385		/* FIXME if endpoint halted, clear halt (and log) */
 386	}
 387	urb->transfer_buffer_length = max;
 388
 389	if (expected != retval)
 390		dev_err(&udev->dev,
 391			"%s failed, iterations left %d, status %d (not %d)\n",
 392				label, iterations, retval, expected);
 393	return retval;
 394}
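/* Illustrative "vary" walk-through (assumed numbers): with max == 512 and
 * vary == 100, successive iterations use transfer lengths 512, 100, 200,
 * 300, 400, 500, 88, 188, ... since each new length is (old + vary) % max,
 * with a result of 0 mapped back to min(vary, max).
 */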
 395
 396
 397/*-------------------------------------------------------------------------*/
 398
 399/* We use scatterlist primitives to test queued I/O.
 400 * Yes, this also tests the scatterlist primitives.
 401 */
 402
 403static void free_sglist(struct scatterlist *sg, int nents)
 404{
 405	unsigned		i;
 406
 407	if (!sg)
 408		return;
 409	for (i = 0; i < nents; i++) {
 410		if (!sg_page(&sg[i]))
 411			continue;
 412		kfree(sg_virt(&sg[i]));
 413	}
 414	kfree(sg);
 415}
 416
 417static struct scatterlist *
 418alloc_sglist(int nents, int max, int vary)
 419{
 420	struct scatterlist	*sg;
 421	unsigned		i;
 422	unsigned		size = max;
 423
 424	sg = kmalloc(nents * sizeof *sg, GFP_KERNEL);
 425	if (!sg)
 426		return NULL;
 427	sg_init_table(sg, nents);
 428
 429	for (i = 0; i < nents; i++) {
 430		char		*buf;
 431		unsigned	j;
 432
 433		buf = kzalloc(size, GFP_KERNEL);
 434		if (!buf) {
 435			free_sglist(sg, i);
 436			return NULL;
 437		}
 438
 439		/* kmalloc pages are always physically contiguous! */
 440		sg_set_buf(&sg[i], buf, size);
 441
 442		switch (pattern) {
 443		case 0:
 444			/* already zeroed */
 445			break;
 446		case 1:
 447			for (j = 0; j < size; j++)
 448				*buf++ = (u8) (j % 63);
 449			break;
 450		}
 451
 452		if (vary) {
 453			size += vary;
 454			size %= max;
 455			if (size == 0)
 456				size = (vary < max) ? vary : max;
 457		}
 458	}
 459
 460	return sg;
 461}
 462
 463static int perform_sglist(
 464	struct usbtest_dev	*tdev,
 465	unsigned		iterations,
 466	int			pipe,
 467	struct usb_sg_request	*req,
 468	struct scatterlist	*sg,
 469	int			nents
 470)
 471{
 472	struct usb_device	*udev = testdev_to_usbdev(tdev);
 473	int			retval = 0;
 474
 475	while (retval == 0 && iterations-- > 0) {
 476		retval = usb_sg_init(req, udev, pipe,
 477				(udev->speed == USB_SPEED_HIGH)
 478					? (INTERRUPT_RATE << 3)
 479					: INTERRUPT_RATE,
 480				sg, nents, 0, GFP_KERNEL);
 481
 482		if (retval)
 483			break;
 484		usb_sg_wait(req);
 485		retval = req->status;
 486
 487		/* FIXME check resulting data pattern */
 488
 489		/* FIXME if endpoint halted, clear halt (and log) */
 490	}
 491
 492	/* FIXME for unlink or fault handling tests, don't report
 493	 * failure if retval is as we expected ...
 494	 */
 495	if (retval)
 496		ERROR(tdev, "perform_sglist failed, "
 497				"iterations left %d, status %d\n",
 498				iterations, retval);
 499	return retval;
 500}
 501
 502
 503/*-------------------------------------------------------------------------*/
 504
 505/* unqueued control message testing
 506 *
 507 * there's a nice set of device functional requirements in chapter 9 of the
 508 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
 509 * special test firmware.
 510 *
 511 * we know the device is configured (or suspended) by the time it's visible
 512 * through usbfs.  we can't change that, so we won't test enumeration (which
 513 * worked 'well enough' to get here, this time), power management (ditto),
 514 * or remote wakeup (which needs human interaction).
 515 */
 516
 517static unsigned realworld = 1;
 518module_param(realworld, uint, 0);
 519MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
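/* Illustrative usage (assumes the module is named "usbtest"): loading with
 *
 *	# modprobe usbtest realworld=0
 *
 * demands strict chapter 9 compliance instead of tolerating the common
 * real-world deviations that the checks below otherwise let pass.
 */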
 520
 521static int get_altsetting(struct usbtest_dev *dev)
 522{
 523	struct usb_interface	*iface = dev->intf;
 524	struct usb_device	*udev = interface_to_usbdev(iface);
 525	int			retval;
 526
 527	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 528			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
 529			0, iface->altsetting[0].desc.bInterfaceNumber,
 530			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
 531	switch (retval) {
 532	case 1:
 533		return dev->buf[0];
 534	case 0:
 535		retval = -ERANGE;
 536		/* FALLTHROUGH */
 537	default:
 538		return retval;
 539	}
 540}
 541
 542static int set_altsetting(struct usbtest_dev *dev, int alternate)
 543{
 544	struct usb_interface		*iface = dev->intf;
 545	struct usb_device		*udev;
 546
 547	if (alternate < 0 || alternate >= 256)
 548		return -EINVAL;
 549
 550	udev = interface_to_usbdev(iface);
 551	return usb_set_interface(udev,
 552			iface->altsetting[0].desc.bInterfaceNumber,
 553			alternate);
 554}
 555
 556static int is_good_config(struct usbtest_dev *tdev, int len)
 557{
 558	struct usb_config_descriptor	*config;
 559
 560	if (len < sizeof *config)
 561		return 0;
 562	config = (struct usb_config_descriptor *) tdev->buf;
 563
 564	switch (config->bDescriptorType) {
 565	case USB_DT_CONFIG:
 566	case USB_DT_OTHER_SPEED_CONFIG:
 567		if (config->bLength != 9) {
 568			ERROR(tdev, "bogus config descriptor length\n");
 569			return 0;
 570		}
 571		/* this bit 'must be 1' but often isn't */
 572		if (!realworld && !(config->bmAttributes & 0x80)) {
 573			ERROR(tdev, "high bit of config attributes not set\n");
 574			return 0;
 575		}
 576		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
 577			ERROR(tdev, "reserved config bits set\n");
 578			return 0;
 579		}
 580		break;
 581	default:
 582		return 0;
 583	}
 584
 585	if (le16_to_cpu(config->wTotalLength) == len)	/* read it all */
 586		return 1;
 587	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
 588		return 1;
 589	ERROR(tdev, "bogus config descriptor read size\n");
 590	return 0;
 591}
 592
 593/* sanity test for standard requests working with usb_control_msg() and some
 594 * of the utility functions which use it.
 595 *
 596 * this doesn't test how endpoint halts behave or data toggles get set, since
 597 * we won't do I/O to bulk/interrupt endpoints here (which is how to change
 598 * halt or toggle).  toggle testing is impractical without support from hcds.
 599 *
 600 * this avoids failing devices linux would normally work with, by not testing
 601 * config/altsetting operations for devices that only support their defaults.
 602 * such devices rarely support those needless operations.
 603 *
 604 * NOTE that since this is a sanity test, it's not examining boundary cases
 605 * to see if usbcore, hcd, and device all behave right.  such testing would
 606 * involve varied read sizes and other operation sequences.
 607 */
 608static int ch9_postconfig(struct usbtest_dev *dev)
 609{
 610	struct usb_interface	*iface = dev->intf;
 611	struct usb_device	*udev = interface_to_usbdev(iface);
 612	int			i, alt, retval;
 613
 614	/* [9.2.3] if there's more than one altsetting, we need to be able to
 615	 * set and get each one.  mostly trusts the descriptors from usbcore.
 616	 */
 617	for (i = 0; i < iface->num_altsetting; i++) {
 618
 619		/* 9.2.3 constrains the range here */
 620		alt = iface->altsetting[i].desc.bAlternateSetting;
 621		if (alt < 0 || alt >= iface->num_altsetting) {
 622			dev_err(&iface->dev,
 623					"invalid alt [%d].bAltSetting = %d\n",
 624					i, alt);
 625		}
 626
 627		/* [real world] get/set unimplemented if there's only one */
 628		if (realworld && iface->num_altsetting == 1)
 629			continue;
 630
 631		/* [9.4.10] set_interface */
 632		retval = set_altsetting(dev, alt);
 633		if (retval) {
 634			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
 635					alt, retval);
 636			return retval;
 637		}
 638
 639		/* [9.4.4] get_interface always works */
 640		retval = get_altsetting(dev);
 641		if (retval != alt) {
 642			dev_err(&iface->dev, "get alt should be %d, was %d\n",
 643					alt, retval);
 644			return (retval < 0) ? retval : -EDOM;
 645		}
 646
 647	}
 648
 649	/* [real world] get_config unimplemented if there's only one */
 650	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
 651		int	expected = udev->actconfig->desc.bConfigurationValue;
 652
 653		/* [9.4.2] get_configuration always works
 654		 * ... although some cheap devices (like one TI Hub I've got)
 655		 * won't return config descriptors except before set_config.
 656		 */
 657		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 658				USB_REQ_GET_CONFIGURATION,
 659				USB_DIR_IN | USB_RECIP_DEVICE,
 660				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
 661		if (retval != 1 || dev->buf[0] != expected) {
 662			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
 663				retval, dev->buf[0], expected);
 664			return (retval < 0) ? retval : -EDOM;
 665		}
 666	}
 667
 668	/* there's always [9.4.3] a device descriptor [9.6.1] */
 669	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
 670			dev->buf, sizeof udev->descriptor);
 671	if (retval != sizeof udev->descriptor) {
 672		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
 673		return (retval < 0) ? retval : -EDOM;
 674	}
 675
 676	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
 677	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
 678		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
 679				dev->buf, TBUF_SIZE);
 680		if (!is_good_config(dev, retval)) {
 681			dev_err(&iface->dev,
 682					"config [%d] descriptor --> %d\n",
 683					i, retval);
 684			return (retval < 0) ? retval : -EDOM;
 685		}
 686
 687		/* FIXME cross-checking udev->config[i] to make sure usbcore
 688		 * parsed it right (etc) would be good testing paranoia
 689		 */
 690	}
 691
 692	/* and sometimes [9.2.6.6] speed dependent descriptors */
 693	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
 694		struct usb_qualifier_descriptor *d = NULL;
 695
 696		/* device qualifier [9.6.2] */
 697		retval = usb_get_descriptor(udev,
 698				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
 699				sizeof(struct usb_qualifier_descriptor));
 700		if (retval == -EPIPE) {
 701			if (udev->speed == USB_SPEED_HIGH) {
 702				dev_err(&iface->dev,
 703						"hs dev qualifier --> %d\n",
 704						retval);
 705				return (retval < 0) ? retval : -EDOM;
 706			}
 707			/* usb2.0 but not high-speed capable; fine */
 708		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
 709			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
 710			return (retval < 0) ? retval : -EDOM;
 711		} else
 712			d = (struct usb_qualifier_descriptor *) dev->buf;
 713
 714		/* might not have [9.6.2] any other-speed configs [9.6.4] */
 715		if (d) {
 716			unsigned max = d->bNumConfigurations;
 717			for (i = 0; i < max; i++) {
 718				retval = usb_get_descriptor(udev,
 719					USB_DT_OTHER_SPEED_CONFIG, i,
 720					dev->buf, TBUF_SIZE);
 721				if (!is_good_config(dev, retval)) {
 722					dev_err(&iface->dev,
 723						"other speed config --> %d\n",
 724						retval);
 725					return (retval < 0) ? retval : -EDOM;
 726				}
 727			}
 728		}
 729	}
 730	/* FIXME fetch strings from at least the device descriptor */
 731
 732	/* [9.4.5] get_status always works */
 733	retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
 734	if (retval != 2) {
 735		dev_err(&iface->dev, "get dev status --> %d\n", retval);
 736		return (retval < 0) ? retval : -EDOM;
 737	}
 738
 739	/* FIXME configuration.bmAttributes says if we could try to set/clear
 740	 * the device's remote wakeup feature ... if we can, test that here
 741	 */
 742
 743	retval = usb_get_status(udev, USB_RECIP_INTERFACE,
 744			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
 745	if (retval != 2) {
 746		dev_err(&iface->dev, "get interface status --> %d\n", retval);
 747		return (retval < 0) ? retval : -EDOM;
 748	}
 749	/* FIXME get status for each endpoint in the interface */
 750
 751	return 0;
 752}
 753
 754/*-------------------------------------------------------------------------*/
 755
 756/* use ch9 requests to test whether:
 757 *   (a) queues work for control, keeping N subtests queued and
 758 *       active (auto-resubmit) for M loops through the queue.
 759 *   (b) protocol stalls (control-only) will autorecover.
 760 *       it's not like bulk/intr; no halt clearing.
 761 *   (c) short control reads are reported and handled.
 762 *   (d) queues are always processed in-order
 763 */
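/* Illustrative sizing (assumed numbers): sglen == 10, iterations == 100
 * keeps ten control URBs queued on ep0 and issues sglen * iterations ==
 * 1000 requests in total, cycling through the NUM_SUBCASES subtests as
 * each URB is resubmitted from its completion handler.
 */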
 764
 765struct ctrl_ctx {
 766	spinlock_t		lock;
 767	struct usbtest_dev	*dev;
 768	struct completion	complete;
 769	unsigned		count;
 770	unsigned		pending;
 771	int			status;
 772	struct urb		**urb;
 773	struct usbtest_param	*param;
 774	int			last;
 775};
 776
 777#define NUM_SUBCASES	15		/* how many test subcases here? */
 778
 779struct subcase {
 780	struct usb_ctrlrequest	setup;
 781	int			number;
 782	int			expected;
 783};
 784
 785static void ctrl_complete(struct urb *urb)
 786{
 787	struct ctrl_ctx		*ctx = urb->context;
 788	struct usb_ctrlrequest	*reqp;
 789	struct subcase		*subcase;
 790	int			status = urb->status;
 791
 792	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
 793	subcase = container_of(reqp, struct subcase, setup);
 794
 795	spin_lock(&ctx->lock);
 796	ctx->count--;
 797	ctx->pending--;
 798
 799	/* queue must transfer and complete in fifo order, unless
 800	 * usb_unlink_urb() is used to unlink something not at the
 801	 * physical queue head (not tested).
 802	 */
 803	if (subcase->number > 0) {
 804		if ((subcase->number - ctx->last) != 1) {
 805			ERROR(ctx->dev,
 806				"subcase %d completed out of order, last %d\n",
 807				subcase->number, ctx->last);
 808			status = -EDOM;
 809			ctx->last = subcase->number;
 810			goto error;
 811		}
 812	}
 813	ctx->last = subcase->number;
 814
 815	/* succeed or fault in only one way? */
 816	if (status == subcase->expected)
 817		status = 0;
 818
 819	/* async unlink for cleanup? */
 820	else if (status != -ECONNRESET) {
 821
 822		/* some faults are allowed, not required */
 823		if (subcase->expected > 0 && (
 824			  ((status == -subcase->expected	/* happened */
 825			   || status == 0))))			/* didn't */
 826			status = 0;
 827		/* sometimes more than one fault is allowed */
 828		else if (subcase->number == 12 && status == -EPIPE)
 829			status = 0;
 830		else
 831			ERROR(ctx->dev, "subtest %d error, status %d\n",
 832					subcase->number, status);
 833	}
 834
 835	/* unexpected status codes mean errors; ideally, in hardware */
 836	if (status) {
 837error:
 838		if (ctx->status == 0) {
 839			int		i;
 840
 841			ctx->status = status;
 842			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
 843					"%d left, subcase %d, len %d/%d\n",
 844					reqp->bRequestType, reqp->bRequest,
 845					status, ctx->count, subcase->number,
 846					urb->actual_length,
 847					urb->transfer_buffer_length);
 848
 849			/* FIXME this "unlink everything" exit route should
 850			 * be a separate test case.
 851			 */
 852
 853			/* unlink whatever's still pending */
 854			for (i = 1; i < ctx->param->sglen; i++) {
 855				struct urb *u = ctx->urb[
 856							(i + subcase->number)
 857							% ctx->param->sglen];
 858
 859				if (u == urb || !u->dev)
 860					continue;
 861				spin_unlock(&ctx->lock);
 862				status = usb_unlink_urb(u);
 863				spin_lock(&ctx->lock);
 864				switch (status) {
 865				case -EINPROGRESS:
 866				case -EBUSY:
 867				case -EIDRM:
 868					continue;
 869				default:
 870					ERROR(ctx->dev, "urb unlink --> %d\n",
 871							status);
 872				}
 873			}
 874			status = ctx->status;
 875		}
 876	}
 877
 878	/* resubmit if we need to, else mark this as done */
 879	if ((status == 0) && (ctx->pending < ctx->count)) {
 880		status = usb_submit_urb(urb, GFP_ATOMIC);
 881		if (status != 0) {
 882			ERROR(ctx->dev,
 883				"can't resubmit ctrl %02x.%02x, err %d\n",
 884				reqp->bRequestType, reqp->bRequest, status);
 885			urb->dev = NULL;
 886		} else
 887			ctx->pending++;
 888	} else
 889		urb->dev = NULL;
 890
 891	/* signal completion when nothing's queued */
 892	if (ctx->pending == 0)
 893		complete(&ctx->complete);
 894	spin_unlock(&ctx->lock);
 895}
 896
 897static int
 898test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
 899{
 900	struct usb_device	*udev = testdev_to_usbdev(dev);
 901	struct urb		**urb;
 902	struct ctrl_ctx		context;
 903	int			i;
 904
 905	spin_lock_init(&context.lock);
 906	context.dev = dev;
 907	init_completion(&context.complete);
 908	context.count = param->sglen * param->iterations;
 909	context.pending = 0;
 910	context.status = -ENOMEM;
 911	context.param = param;
 912	context.last = -1;
 913
 914	/* allocate and init the urbs we'll queue.
 915	 * as with bulk/intr sglists, sglen is the queue depth; it also
 916	 * controls which subtests run (more tests than sglen) or rerun.
 917	 */
 918	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
 919	if (!urb)
 920		return -ENOMEM;
 921	for (i = 0; i < param->sglen; i++) {
 922		int			pipe = usb_rcvctrlpipe(udev, 0);
 923		unsigned		len;
 924		struct urb		*u;
 925		struct usb_ctrlrequest	req;
 926		struct subcase		*reqp;
 927
 928		/* sign of this variable means:
 929		 *  -: tested code must return this (negative) error code
 930		 *  +: tested code may return this (negative too) error code
 931		 */
 932		int			expected = 0;
 933
 934		/* requests here are mostly expected to succeed on any
 935		 * device, but some are chosen to trigger protocol stalls
 936		 * or short reads.
 937		 */
 938		memset(&req, 0, sizeof req);
 939		req.bRequest = USB_REQ_GET_DESCRIPTOR;
 940		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
 941
 942		switch (i % NUM_SUBCASES) {
 943		case 0:		/* get device descriptor */
 944			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
 945			len = sizeof(struct usb_device_descriptor);
 946			break;
 947		case 1:		/* get first config descriptor (only) */
 948			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
 949			len = sizeof(struct usb_config_descriptor);
 950			break;
 951		case 2:		/* get altsetting (OFTEN STALLS) */
 952			req.bRequest = USB_REQ_GET_INTERFACE;
 953			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
 954			/* index = 0 means first interface */
 955			len = 1;
 956			expected = EPIPE;
 957			break;
 958		case 3:		/* get interface status */
 959			req.bRequest = USB_REQ_GET_STATUS;
 960			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
 961			/* interface 0 */
 962			len = 2;
 963			break;
 964		case 4:		/* get device status */
 965			req.bRequest = USB_REQ_GET_STATUS;
 966			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
 967			len = 2;
 968			break;
 969		case 5:		/* get device qualifier (MAY STALL) */
 970			req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
 971			len = sizeof(struct usb_qualifier_descriptor);
 972			if (udev->speed != USB_SPEED_HIGH)
 973				expected = EPIPE;
 974			break;
 975		case 6:		/* get first config descriptor, plus interface */
 976			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
 977			len = sizeof(struct usb_config_descriptor);
 978			len += sizeof(struct usb_interface_descriptor);
 979			break;
 980		case 7:		/* get interface descriptor (ALWAYS STALLS) */
 981			req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
 982			/* interface == 0 */
 983			len = sizeof(struct usb_interface_descriptor);
 984			expected = -EPIPE;
 985			break;
 986		/* NOTE: two consecutive stalls in the queue here.
 987		 *  that tests fault recovery a bit more aggressively. */
 988		case 8:		/* clear endpoint halt (MAY STALL) */
 989			req.bRequest = USB_REQ_CLEAR_FEATURE;
 990			req.bRequestType = USB_RECIP_ENDPOINT;
 991			/* wValue 0 == ep halt */
 992			/* wIndex 0 == ep0 (shouldn't halt!) */
 993			len = 0;
 994			pipe = usb_sndctrlpipe(udev, 0);
 995			expected = EPIPE;
 996			break;
 997		case 9:		/* get endpoint status */
 998			req.bRequest = USB_REQ_GET_STATUS;
 999			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
1000			/* endpoint 0 */
1001			len = 2;
1002			break;
1003		case 10:	/* trigger short read (EREMOTEIO) */
1004			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1005			len = 1024;
1006			expected = -EREMOTEIO;
1007			break;
1008		/* NOTE: two consecutive _different_ faults in the queue. */
1009		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
1010			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
1011			/* endpoint == 0 */
1012			len = sizeof(struct usb_interface_descriptor);
1013			expected = EPIPE;
1014			break;
1015		/* NOTE: sometimes even a third fault in the queue! */
1016		case 12:	/* get string 0 descriptor (MAY STALL) */
1017			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
1018			/* string == 0, for language IDs */
1019			len = sizeof(struct usb_interface_descriptor);
1020			/* may succeed when > 4 languages */
1021			expected = EREMOTEIO;	/* or EPIPE, if no strings */
1022			break;
1023		case 13:	/* short read, resembling case 10 */
1024			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1025			/* last data packet "should" be DATA1, not DATA0 */
1026			len = 1024 - udev->descriptor.bMaxPacketSize0;
1027			expected = -EREMOTEIO;
1028			break;
1029		case 14:	/* short read; try to fill the last packet */
1030			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
1031			/* device descriptor size == 18 bytes */
1032			len = udev->descriptor.bMaxPacketSize0;
1033			if (udev->speed == USB_SPEED_SUPER)
1034				len = 512;
1035			switch (len) {
1036			case 8:
1037				len = 24;
1038				break;
1039			case 16:
1040				len = 32;
1041				break;
1042			}
1043			expected = -EREMOTEIO;
1044			break;
1045		default:
1046			ERROR(dev, "bogus number of ctrl queue testcases!\n");
1047			context.status = -EINVAL;
1048			goto cleanup;
1049		}
1050		req.wLength = cpu_to_le16(len);
1051		urb[i] = u = simple_alloc_urb(udev, pipe, len);
1052		if (!u)
1053			goto cleanup;
1054
1055		reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
1056		if (!reqp)
1057			goto cleanup;
1058		reqp->setup = req;
1059		reqp->number = i % NUM_SUBCASES;
1060		reqp->expected = expected;
1061		u->setup_packet = (char *) &reqp->setup;
1062
1063		u->context = &context;
1064		u->complete = ctrl_complete;
1065	}
1066
1067	/* queue the urbs */
1068	context.urb = urb;
1069	spin_lock_irq(&context.lock);
1070	for (i = 0; i < param->sglen; i++) {
1071		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1072		if (context.status != 0) {
1073			ERROR(dev, "can't submit urb[%d], status %d\n",
1074					i, context.status);
1075			context.count = context.pending;
1076			break;
1077		}
1078		context.pending++;
1079	}
1080	spin_unlock_irq(&context.lock);
1081
1082	/* FIXME  set timer and time out; provide a disconnect hook */
1083
1084	/* wait for the last one to complete */
1085	if (context.pending > 0)
1086		wait_for_completion(&context.complete);
1087
1088cleanup:
1089	for (i = 0; i < param->sglen; i++) {
1090		if (!urb[i])
1091			continue;
1092		urb[i]->dev = udev;
1093		kfree(urb[i]->setup_packet);
1094		simple_free_urb(urb[i]);
1095	}
1096	kfree(urb);
1097	return context.status;
1098}
1099#undef NUM_SUBCASES
1100
1101
1102/*-------------------------------------------------------------------------*/
1103
1104static void unlink1_callback(struct urb *urb)
1105{
1106	int	status = urb->status;
1107
1108	/* we "know" -EPIPE (stall) never happens */
1109	if (!status)
1110		status = usb_submit_urb(urb, GFP_ATOMIC);
1111	if (status) {
1112		urb->status = status;
1113		complete(urb->context);
1114	}
1115}
1116
1117static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1118{
1119	struct urb		*urb;
1120	struct completion	completion;
1121	int			retval = 0;
1122
1123	init_completion(&completion);
1124	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
1125	if (!urb)
1126		return -ENOMEM;
1127	urb->context = &completion;
1128	urb->complete = unlink1_callback;
1129
1130	/* keep the endpoint busy.  there are lots of hc/hcd-internal
1131	 * states, and testing should get to all of them over time.
1132	 *
1133	 * FIXME want additional tests for when endpoint is STALLing
1134	 * due to errors, or is just NAKing requests.
1135	 */
1136	retval = usb_submit_urb(urb, GFP_KERNEL);
1137	if (retval != 0) {
1138		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1139		return retval;
1140	}
1141
1142	/* unlinking that should always work.  variable delay tests more
1143	 * hcd states and code paths, even with little other system load.
1144	 */
1145	msleep(jiffies % (2 * INTERRUPT_RATE));
1146	if (async) {
1147		while (!completion_done(&completion)) {
1148			retval = usb_unlink_urb(urb);
1149
1150			switch (retval) {
1151			case -EBUSY:
1152			case -EIDRM:
1153				/* we can't unlink urbs while they're completing
1154				 * or if they've completed, and we haven't
1155				 * resubmitted. "normal" drivers would prevent
1156				 * resubmission, but since we're testing unlink
1157				 * paths, we can't.
1158				 */
1159				ERROR(dev, "unlink retry\n");
1160				continue;
1161			case 0:
1162			case -EINPROGRESS:
1163				break;
1164
1165			default:
1166				dev_err(&dev->intf->dev,
1167					"unlink fail %d\n", retval);
1168				return retval;
1169			}
1170
1171			break;
1172		}
1173	} else
1174		usb_kill_urb(urb);
1175
1176	wait_for_completion(&completion);
1177	retval = urb->status;
1178	simple_free_urb(urb);
1179
1180	if (async)
1181		return (retval == -ECONNRESET) ? 0 : retval - 1000;
1182	else
1183		return (retval == -ENOENT || retval == -EPERM) ?
1184				0 : retval - 2000;
1185}
1186
1187static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1188{
1189	int			retval = 0;
1190
1191	/* test sync and async paths */
1192	retval = unlink1(dev, pipe, len, 1);
1193	if (!retval)
1194		retval = unlink1(dev, pipe, len, 0);
1195	return retval;
1196}
1197
1198/*-------------------------------------------------------------------------*/
1199
1200struct queued_ctx {
1201	struct completion	complete;
1202	atomic_t		pending;
1203	unsigned		num;
1204	int			status;
1205	struct urb		**urbs;
1206};
1207
1208static void unlink_queued_callback(struct urb *urb)
1209{
1210	int			status = urb->status;
1211	struct queued_ctx	*ctx = urb->context;
1212
1213	if (ctx->status)
1214		goto done;
1215	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
1216		if (status == -ECONNRESET)
1217			goto done;
1218		/* What error should we report if the URB completed normally? */
1219	}
1220	if (status != 0)
1221		ctx->status = status;
1222
1223 done:
1224	if (atomic_dec_and_test(&ctx->pending))
1225		complete(&ctx->complete);
1226}
1227
1228static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1229		unsigned size)
1230{
1231	struct queued_ctx	ctx;
1232	struct usb_device	*udev = testdev_to_usbdev(dev);
1233	void			*buf;
1234	dma_addr_t		buf_dma;
1235	int			i;
1236	int			retval = -ENOMEM;
1237
1238	init_completion(&ctx.complete);
1239	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
1240	ctx.num = num;
1241	ctx.status = 0;
1242
1243	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
1244	if (!buf)
1245		return retval;
1246	memset(buf, 0, size);
1247
1248	/* Allocate and init the urbs we'll queue */
1249	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
1250	if (!ctx.urbs)
1251		goto free_buf;
1252	for (i = 0; i < num; i++) {
1253		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
1254		if (!ctx.urbs[i])
1255			goto free_urbs;
1256		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
1257				unlink_queued_callback, &ctx);
1258		ctx.urbs[i]->transfer_dma = buf_dma;
1259		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1260	}
1261
1262	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1263	for (i = 0; i < num; i++) {
1264		atomic_inc(&ctx.pending);
1265		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
1266		if (retval != 0) {
1267			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
1268					i, retval);
1269			atomic_dec(&ctx.pending);
1270			ctx.status = retval;
1271			break;
1272		}
1273	}
1274	if (i == num) {
1275		usb_unlink_urb(ctx.urbs[num - 4]);
1276		usb_unlink_urb(ctx.urbs[num - 2]);
1277	} else {
1278		while (--i >= 0)
1279			usb_unlink_urb(ctx.urbs[i]);
1280	}
1281
1282	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
1283		complete(&ctx.complete);
1284	wait_for_completion(&ctx.complete);
1285	retval = ctx.status;
1286
1287 free_urbs:
1288	for (i = 0; i < num; i++)
1289		usb_free_urb(ctx.urbs[i]);
1290	kfree(ctx.urbs);
1291 free_buf:
1292	usb_free_coherent(udev, size, buf, buf_dma);
1293	return retval;
1294}
1295
1296/*-------------------------------------------------------------------------*/
1297
1298static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1299{
1300	int	retval;
1301	u16	status;
1302
1303	/* shouldn't look or act halted */
1304	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1305	if (retval < 0) {
1306		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1307				ep, retval);
1308		return retval;
1309	}
1310	if (status != 0) {
1311		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
1312		return -EINVAL;
1313	}
1314	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1315	if (retval != 0)
1316		return -EINVAL;
1317	return 0;
1318}
1319
1320static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1321{
1322	int	retval;
1323	u16	status;
1324
1325	/* should look and act halted */
1326	retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1327	if (retval < 0) {
1328		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1329				ep, retval);
1330		return retval;
1331	}
1332	le16_to_cpus(&status);
1333	if (status != 1) {
1334		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
1335		return -EINVAL;
1336	}
1337	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1338	if (retval != -EPIPE)
1339		return -EINVAL;
1340	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1341	if (retval != -EPIPE)
1342		return -EINVAL;
1343	return 0;
1344}
1345
1346static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1347{
1348	int	retval;
1349
1350	/* shouldn't look or act halted now */
1351	retval = verify_not_halted(tdev, ep, urb);
1352	if (retval < 0)
1353		return retval;
1354
1355	/* set halt (protocol test only), verify it worked */
1356	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1357			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1358			USB_ENDPOINT_HALT, ep,
1359			NULL, 0, USB_CTRL_SET_TIMEOUT);
1360	if (retval < 0) {
1361		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1362		return retval;
1363	}
1364	retval = verify_halted(tdev, ep, urb);
1365	if (retval < 0)
1366		return retval;
1367
1368	/* clear halt (tests API + protocol), verify it worked */
1369	retval = usb_clear_halt(urb->dev, urb->pipe);
1370	if (retval < 0) {
1371		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1372		return retval;
1373	}
1374	retval = verify_not_halted(tdev, ep, urb);
1375	if (retval < 0)
1376		return retval;
1377
1378	/* NOTE:  could also verify SET_INTERFACE clear halts ... */
1379
1380	return 0;
1381}
1382
1383static int halt_simple(struct usbtest_dev *dev)
1384{
1385	int		ep;
1386	int		retval = 0;
1387	struct urb	*urb;
1388
1389	urb = simple_alloc_urb(testdev_to_usbdev(dev), 0, 512);
1390	if (urb == NULL)
1391		return -ENOMEM;
1392
1393	if (dev->in_pipe) {
1394		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1395		urb->pipe = dev->in_pipe;
1396		retval = test_halt(dev, ep, urb);
1397		if (retval < 0)
1398			goto done;
1399	}
1400
1401	if (dev->out_pipe) {
1402		ep = usb_pipeendpoint(dev->out_pipe);
1403		urb->pipe = dev->out_pipe;
1404		retval = test_halt(dev, ep, urb);
1405	}
1406done:
1407	simple_free_urb(urb);
1408	return retval;
1409}
1410
1411/*-------------------------------------------------------------------------*/
1412
1413/* Control OUT tests use the vendor control requests from Intel's
1414 * USB 2.0 compliance test device:  write a buffer, read it back.
1415 *
1416 * Intel's spec only _requires_ that it work for one packet, which
1417 * is pretty weak.   Some HCDs place limits here; most devices will
1418 * need to be able to handle more than one OUT data packet.  We'll
1419 * try whatever we're told to try.
1420 */
1421static int ctrl_out(struct usbtest_dev *dev,
1422		unsigned count, unsigned length, unsigned vary, unsigned offset)
1423{
1424	unsigned		i, j, len;
1425	int			retval;
1426	u8			*buf;
1427	char			*what = "?";
1428	struct usb_device	*udev;
1429
1430	if (length < 1 || length > 0xffff || vary >= length)
1431		return -EINVAL;
1432
1433	buf = kmalloc(length + offset, GFP_KERNEL);
1434	if (!buf)
1435		return -ENOMEM;
1436
1437	buf += offset;
1438	udev = testdev_to_usbdev(dev);
1439	len = length;
1440	retval = 0;
1441
1442	/* NOTE:  hardware might well act differently if we pushed it
1443	 * with lots back-to-back queued requests.
1444	 */
1445	for (i = 0; i < count; i++) {
1446		/* write patterned data */
1447		for (j = 0; j < len; j++)
1448			buf[j] = i + j;
1449		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1450				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1451				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1452		if (retval != len) {
1453			what = "write";
1454			if (retval >= 0) {
1455				ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1456						retval, len);
1457				retval = -EBADMSG;
1458			}
1459			break;
1460		}
1461
1462		/* read it back -- assuming nothing intervened!!  */
1463		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1464				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1465				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1466		if (retval != len) {
1467			what = "read";
1468			if (retval >= 0) {
1469				ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1470						retval, len);
1471				retval = -EBADMSG;
1472			}
1473			break;
1474		}
1475
1476		/* fail if we can't verify */
1477		for (j = 0; j < len; j++) {
1478			if (buf[j] != (u8) (i + j)) {
1479				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1480					j, buf[j], (u8) i + j);
1481				retval = -EBADMSG;
1482				break;
1483			}
1484		}
1485		if (retval < 0) {
1486			what = "verify";
1487			break;
1488		}
1489
1490		len += vary;
1491
1492		/* [real world] the "zero bytes IN" case isn't really used.
1493		 * hardware can easily trip up in this weird case, since its
1494		 * status stage is IN, not OUT like other ep0in transfers.
1495		 */
1496		if (len > length)
1497			len = realworld ? 1 : 0;
1498	}
1499
1500	if (retval < 0)
1501		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
1502			what, retval, i);
1503
1504	kfree(buf - offset);
1505	return retval;
1506}
1507
1508/*-------------------------------------------------------------------------*/
1509
1510/* ISO tests ... mimics common usage
1511 *  - buffer length is split into N packets (mostly maxpacket sized)
1512 *  - multi-buffers according to sglen
1513 */
1514
1515struct iso_context {
1516	unsigned		count;
1517	unsigned		pending;
1518	spinlock_t		lock;
1519	struct completion	done;
1520	int			submit_error;
1521	unsigned long		errors;
1522	unsigned long		packet_count;
1523	struct usbtest_dev	*dev;
1524};
1525
1526static void iso_callback(struct urb *urb)
1527{
1528	struct iso_context	*ctx = urb->context;
1529
1530	spin_lock(&ctx->lock);
1531	ctx->count--;
1532
1533	ctx->packet_count += urb->number_of_packets;
1534	if (urb->error_count > 0)
1535		ctx->errors += urb->error_count;
1536	else if (urb->status != 0)
1537		ctx->errors += urb->number_of_packets;
1538	else if (urb->actual_length != urb->transfer_buffer_length)
1539		ctx->errors++;
1540	else if (check_guard_bytes(ctx->dev, urb) != 0)
1541		ctx->errors++;
1542
1543	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1544			&& !ctx->submit_error) {
1545		int status = usb_submit_urb(urb, GFP_ATOMIC);
1546		switch (status) {
1547		case 0:
1548			goto done;
1549		default:
1550			dev_err(&ctx->dev->intf->dev,
1551					"iso resubmit err %d\n",
1552					status);
1553			/* FALLTHROUGH */
1554		case -ENODEV:			/* disconnected */
1555		case -ESHUTDOWN:		/* endpoint disabled */
1556			ctx->submit_error = 1;
1557			break;
1558		}
1559	}
1560
1561	ctx->pending--;
1562	if (ctx->pending == 0) {
1563		if (ctx->errors)
1564			dev_err(&ctx->dev->intf->dev,
1565				"iso test, %lu errors out of %lu\n",
1566				ctx->errors, ctx->packet_count);
1567		complete(&ctx->done);
1568	}
1569done:
1570	spin_unlock(&ctx->lock);
1571}
1572
1573static struct urb *iso_alloc_urb(
1574	struct usb_device	*udev,
1575	int			pipe,
1576	struct usb_endpoint_descriptor	*desc,
1577	long			bytes,
1578	unsigned offset
1579)
1580{
1581	struct urb		*urb;
1582	unsigned		i, maxp, packets;
1583
1584	if (bytes < 0 || !desc)
1585		return NULL;
1586	maxp = 0x7ff & le16_to_cpu(desc->wMaxPacketSize);
1587	maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11));
1588	packets = DIV_ROUND_UP(bytes, maxp);
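	/* Worked example (illustrative): a high-speed, high-bandwidth
	 * endpoint reporting wMaxPacketSize 0x1400 gives maxp = 0x400 *
	 * (1 + 2) = 3072 bytes per microframe, so a 9000-byte request is
	 * split into DIV_ROUND_UP(9000, 3072) = 3 iso packets.
	 */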
1589
1590	urb = usb_alloc_urb(packets, GFP_KERNEL);
1591	if (!urb)
1592		return urb;
1593	urb->dev = udev;
1594	urb->pipe = pipe;
1595
1596	urb->number_of_packets = packets;
1597	urb->transfer_buffer_length = bytes;
1598	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1599							GFP_KERNEL,
1600							&urb->transfer_dma);
1601	if (!urb->transfer_buffer) {
1602		usb_free_urb(urb);
1603		return NULL;
1604	}
1605	if (offset) {
1606		memset(urb->transfer_buffer, GUARD_BYTE, offset);
1607		urb->transfer_buffer += offset;
1608		urb->transfer_dma += offset;
1609	}
1610	/* For inbound transfers use guard byte so that test fails if
1611		data not correctly copied */
1612	memset(urb->transfer_buffer,
1613			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
1614			bytes);
1615
1616	for (i = 0; i < packets; i++) {
1617		/* here, only the last packet will be short */
1618		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
1619		bytes -= urb->iso_frame_desc[i].length;
1620
1621		urb->iso_frame_desc[i].offset = maxp * i;
1622	}
1623
1624	urb->complete = iso_callback;
1625	/* urb->context = SET BY CALLER */
1626	urb->interval = 1 << (desc->bInterval - 1);
1627	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
1628	return urb;
1629}
1630
1631static int
1632test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
1633		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
1634{
1635	struct iso_context	context;
1636	struct usb_device	*udev;
1637	unsigned		i;
1638	unsigned long		packets = 0;
1639	int			status = 0;
1640	struct urb		*urbs[10];	/* FIXME no limit */
1641
1642	if (param->sglen > 10)
1643		return -EDOM;
1644
1645	memset(&context, 0, sizeof context);
1646	context.count = param->iterations * param->sglen;
1647	context.dev = dev;
1648	init_completion(&context.done);
1649	spin_lock_init(&context.lock);
1650
1651	memset(urbs, 0, sizeof urbs);
1652	udev = testdev_to_usbdev(dev);
1653	dev_info(&dev->intf->dev,
1654		"... iso period %d %sframes, wMaxPacket %04x\n",
1655		1 << (desc->bInterval - 1),
1656		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
1657		le16_to_cpu(desc->wMaxPacketSize));
1658
1659	for (i = 0; i < param->sglen; i++) {
1660		urbs[i] = iso_alloc_urb(udev, pipe, desc,
1661					param->length, offset);
1662		if (!urbs[i]) {
1663			status = -ENOMEM;
1664			goto fail;
1665		}
1666		packets += urbs[i]->number_of_packets;
1667		urbs[i]->context = &context;
1668	}
1669	packets *= param->iterations;
1670	dev_info(&dev->intf->dev,
1671		"... total %lu msec (%lu packets)\n",
1672		(packets * (1 << (desc->bInterval - 1)))
1673			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
1674		packets);
1675
1676	spin_lock_irq(&context.lock);
1677	for (i = 0; i < param->sglen; i++) {
1678		++context.pending;
1679		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
1680		if (status < 0) {
1681			ERROR(dev, "submit iso[%d], error %d\n", i, status);
1682			if (i == 0) {
1683				spin_unlock_irq(&context.lock);
1684				goto fail;
1685			}
1686
1687			simple_free_urb(urbs[i]);
1688			urbs[i] = NULL;
1689			context.pending--;
1690			context.submit_error = 1;
1691			break;
1692		}
1693	}
1694	spin_unlock_irq(&context.lock);
1695
1696	wait_for_completion(&context.done);
1697
1698	for (i = 0; i < param->sglen; i++) {
1699		if (urbs[i])
1700			simple_free_urb(urbs[i]);
1701	}
1702	/*
1703	 * Isochronous transfers are expected to fail sometimes.  As an
1704	 * arbitrary limit, we will report an error if any submissions
1705	 * fail or if the transfer failure rate is > 10%.
1706	 */
1707	if (status != 0)
1708		;
1709	else if (context.submit_error)
1710		status = -EACCES;
1711	else if (context.errors > context.packet_count / 10)
1712		status = -EIO;
1713	return status;
1714
1715fail:
1716	for (i = 0; i < param->sglen; i++) {
1717		if (urbs[i])
1718			simple_free_urb(urbs[i]);
1719	}
1720	return status;
1721}
1722
1723static int test_unaligned_bulk(
1724	struct usbtest_dev *tdev,
1725	int pipe,
1726	unsigned length,
1727	int iterations,
1728	unsigned transfer_flags,
1729	const char *label)
1730{
1731	int retval;
1732	struct urb *urb = usbtest_alloc_urb(
1733		testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);
1734
1735	if (!urb)
1736		return -ENOMEM;
1737
1738	retval = simple_io(tdev, urb, iterations, 0, 0, label);
1739	simple_free_urb(urb);
1740	return retval;
1741}
1742
1743/*-------------------------------------------------------------------------*/
1744
1745/* We only have this one interface to user space, through usbfs.
1746 * User mode code can scan usbfs to find N different devices (maybe on
1747 * different busses) to use when testing, and allocate one thread per
1748 * test.  So discovery is simplified, and we have no device naming issues.
1749 *
1750 * Don't use these only as stress/load tests.  Use them along with
1751 * other USB bus activity:  plugging, unplugging, mousing, mp3 playback,
1752 * video capture, and so on.  Run different tests at different times, in
1753 * different sequences.  Nothing here should interact with other devices,
1754 * except indirectly by consuming USB bandwidth and CPU resources for test
1755 * threads and request completion.  But the only way to know that for sure
1756 * is to test when HC queues are in use by many devices.
1757 *
1758 * WARNING:  Because usbfs grabs udev->dev.sem before calling this ioctl(),
1759 * it locks out usbcore in certain code paths.  Notably, if you disconnect
1760 * the device-under-test, khubd will block forever waiting for the
1761 * ioctl to complete ... so that usb_disconnect() can abort the pending
1762 * urbs and then call usbtest_disconnect().  To abort a test, you're best
1763 * off just killing the userspace task and waiting for it to exit.
1764 */
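/* A user-space driver for this interface is the testusb utility (found as
 * tools/usb/testusb.c in recent kernel trees; an assumption for this one).
 * An invocation such as
 *
 *	# testusb -D /dev/bus/usb/001/002 -t 1 -c 1000 -s 512
 *
 * (device path assumed) fills in struct usbtest_param and issues the
 * USBTEST_REQUEST ioctl handled below.
 */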
1765
1766/* No BKL needed */
1767static int
1768usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
1769{
1770	struct usbtest_dev	*dev = usb_get_intfdata(intf);
1771	struct usb_device	*udev = testdev_to_usbdev(dev);
1772	struct usbtest_param	*param = buf;
1773	int			retval = -EOPNOTSUPP;
1774	struct urb		*urb;
1775	struct scatterlist	*sg;
1776	struct usb_sg_request	req;
1777	struct timeval		start;
1778	unsigned		i;
1779
1780	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
1781
1782	pattern = mod_pattern;
1783
1784	if (code != USBTEST_REQUEST)
1785		return -EOPNOTSUPP;
1786
1787	if (param->iterations <= 0)
1788		return -EINVAL;
1789
1790	if (mutex_lock_interruptible(&dev->lock))
1791		return -ERESTARTSYS;
1792
1793	/* FIXME: What if a system sleep starts while a test is running? */
1794
1795	/* some devices, like ez-usb default devices, need a non-default
1796	 * altsetting to have any active endpoints.  some tests change
1797	 * altsettings; force a default so most tests don't need to check.
1798	 */
1799	if (dev->info->alt >= 0) {
1800		int	res;
1801
1802		if (intf->altsetting->desc.bInterfaceNumber) {
1803			mutex_unlock(&dev->lock);
1804			return -ENODEV;
1805		}
1806		res = set_altsetting(dev, dev->info->alt);
1807		if (res) {
1808			dev_err(&intf->dev,
1809					"set altsetting to %d failed, %d\n",
1810					dev->info->alt, res);
1811			mutex_unlock(&dev->lock);
1812			return res;
1813		}
1814	}
1815
1816	/*
1817	 * Just a bunch of test cases that every HCD is expected to handle.
1818	 *
1819	 * Some may need specific firmware, though it'd be good to have
1820	 * one firmware image to handle all the test cases.
1821	 *
1822	 * FIXME add more tests!  cancel requests, verify the data, control
1823	 * queueing, concurrent read+write threads, and so on.
1824	 */
1825	do_gettimeofday(&start);
1826	switch (param->test_num) {
1827
1828	case 0:
1829		dev_info(&intf->dev, "TEST 0:  NOP\n");
1830		retval = 0;
1831		break;
1832
1833	/* Simple non-queued bulk I/O tests */
1834	case 1:
1835		if (dev->out_pipe == 0)
1836			break;
1837		dev_info(&intf->dev,
1838				"TEST 1:  write %d bytes %u times\n",
1839				param->length, param->iterations);
1840		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1841		if (!urb) {
1842			retval = -ENOMEM;
1843			break;
1844		}
1845		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1846		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
1847		simple_free_urb(urb);
1848		break;
1849	case 2:
1850		if (dev->in_pipe == 0)
1851			break;
1852		dev_info(&intf->dev,
1853				"TEST 2:  read %d bytes %u times\n",
1854				param->length, param->iterations);
1855		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1856		if (!urb) {
1857			retval = -ENOMEM;
1858			break;
1859		}
1860		/* FIRMWARE:  bulk source (maybe generates short writes) */
1861		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
1862		simple_free_urb(urb);
1863		break;
1864	case 3:
1865		if (dev->out_pipe == 0 || param->vary == 0)
1866			break;
1867		dev_info(&intf->dev,
1868				"TEST 3:  write/%d 0..%d bytes %u times\n",
1869				param->vary, param->length, param->iterations);
1870		urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
1871		if (!urb) {
1872			retval = -ENOMEM;
1873			break;
1874		}
1875		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1876		retval = simple_io(dev, urb, param->iterations, param->vary,
1877					0, "test3");
1878		simple_free_urb(urb);
1879		break;
1880	case 4:
1881		if (dev->in_pipe == 0 || param->vary == 0)
1882			break;
1883		dev_info(&intf->dev,
1884				"TEST 4:  read/%d 0..%d bytes %u times\n",
1885				param->vary, param->length, param->iterations);
1886		urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
1887		if (!urb) {
1888			retval = -ENOMEM;
1889			break;
1890		}
1891		/* FIRMWARE:  bulk source (maybe generates short writes) */
1892		retval = simple_io(dev, urb, param->iterations, param->vary,
1893					0, "test4");
1894		simple_free_urb(urb);
1895		break;
1896
1897	/* Queued bulk I/O tests */
1898	case 5:
1899		if (dev->out_pipe == 0 || param->sglen == 0)
1900			break;
1901		dev_info(&intf->dev,
1902			"TEST 5:  write %d sglists %d entries of %d bytes\n",
1903				param->iterations,
1904				param->sglen, param->length);
1905		sg = alloc_sglist(param->sglen, param->length, 0);
1906		if (!sg) {
1907			retval = -ENOMEM;
1908			break;
1909		}
1910		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1911		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1912				&req, sg, param->sglen);
1913		free_sglist(sg, param->sglen);
1914		break;
1915
1916	case 6:
1917		if (dev->in_pipe == 0 || param->sglen == 0)
1918			break;
1919		dev_info(&intf->dev,
1920			"TEST 6:  read %d sglists %d entries of %d bytes\n",
1921				param->iterations,
1922				param->sglen, param->length);
1923		sg = alloc_sglist(param->sglen, param->length, 0);
1924		if (!sg) {
1925			retval = -ENOMEM;
1926			break;
1927		}
1928		/* FIRMWARE:  bulk source (maybe generates short writes) */
1929		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1930				&req, sg, param->sglen);
1931		free_sglist(sg, param->sglen);
1932		break;
1933	case 7:
1934		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
1935			break;
1936		dev_info(&intf->dev,
1937			"TEST 7:  write/%d %d sglists %d entries 0..%d bytes\n",
1938				param->vary, param->iterations,
1939				param->sglen, param->length);
1940		sg = alloc_sglist(param->sglen, param->length, param->vary);
1941		if (!sg) {
1942			retval = -ENOMEM;
1943			break;
1944		}
1945		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
1946		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
1947				&req, sg, param->sglen);
1948		free_sglist(sg, param->sglen);
1949		break;
1950	case 8:
1951		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
1952			break;
1953		dev_info(&intf->dev,
1954			"TEST 8:  read/%d %d sglists %d entries 0..%d bytes\n",
1955				param->vary, param->iterations,
1956				param->sglen, param->length);
1957		sg = alloc_sglist(param->sglen, param->length, param->vary);
1958		if (!sg) {
1959			retval = -ENOMEM;
1960			break;
1961		}
1962		/* FIRMWARE:  bulk source (maybe generates short writes) */
1963		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
1964				&req, sg, param->sglen);
1965		free_sglist(sg, param->sglen);
1966		break;
1967
1968	/* non-queued sanity tests for control (chapter 9 subset) */
1969	case 9:
1970		retval = 0;
1971		dev_info(&intf->dev,
1972			"TEST 9:  ch9 (subset) control tests, %d times\n",
1973				param->iterations);
1974		for (i = param->iterations; retval == 0 && i--; /* NOP */)
1975			retval = ch9_postconfig(dev);
1976		if (retval)
1977			dev_err(&intf->dev, "ch9 subset failed, "
1978					"iterations left %d\n", i);
1979		break;
1980
1981	/* queued control messaging */
1982	case 10:
1983		if (param->sglen == 0)
1984			break;
1985		retval = 0;
1986		dev_info(&intf->dev,
1987				"TEST 10:  queue %d control calls, %d times\n",
1988				param->sglen,
1989				param->iterations);
1990		retval = test_ctrl_queue(dev, param);
1991		break;
1992
1993	/* simple non-queued unlinks (ring with one urb) */
1994	case 11:
1995		if (dev->in_pipe == 0 || !param->length)
1996			break;
1997		retval = 0;
1998		dev_info(&intf->dev, "TEST 11:  unlink %d reads of %d\n",
1999				param->iterations, param->length);
2000		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2001			retval = unlink_simple(dev, dev->in_pipe,
2002						param->length);
2003		if (retval)
2004			dev_err(&intf->dev, "unlink reads failed %d, "
2005				"iterations left %d\n", retval, i);
2006		break;
2007	case 12:
2008		if (dev->out_pipe == 0 || !param->length)
2009			break;
2010		retval = 0;
2011		dev_info(&intf->dev, "TEST 12:  unlink %d writes of %d\n",
2012				param->iterations, param->length);
2013		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2014			retval = unlink_simple(dev, dev->out_pipe,
2015						param->length);
2016		if (retval)
2017			dev_err(&intf->dev, "unlink writes failed %d, "
2018				"iterations left %d\n", retval, i);
2019		break;
2020
2021	/* ep halt tests */
2022	case 13:
2023		if (dev->out_pipe == 0 && dev->in_pipe == 0)
2024			break;
2025		retval = 0;
2026		dev_info(&intf->dev, "TEST 13:  set/clear %d halts\n",
2027				param->iterations);
2028		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2029			retval = halt_simple(dev);
2030
2031		if (retval)
2032			ERROR(dev, "halts failed, iterations left %d\n", i);
2033		break;
2034
2035	/* control write tests */
2036	case 14:
2037		if (!dev->info->ctrl_out)
2038			break;
2039		dev_info(&intf->dev, "TEST 14:  %d ep0out, %d..%d vary %d\n",
2040				param->iterations,
2041				realworld ? 1 : 0, param->length,
2042				param->vary);
2043		retval = ctrl_out(dev, param->iterations,
2044				param->length, param->vary, 0);
2045		break;
2046
2047	/* iso write tests */
2048	case 15:
2049		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2050			break;
2051		dev_info(&intf->dev,
2052			"TEST 15:  write %d iso, %d entries of %d bytes\n",
2053				param->iterations,
2054				param->sglen, param->length);
2055		/* FIRMWARE:  iso sink */
2056		retval = test_iso_queue(dev, param,
2057				dev->out_iso_pipe, dev->iso_out, 0);
2058		break;
2059
2060	/* iso read tests */
2061	case 16:
2062		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2063			break;
2064		dev_info(&intf->dev,
2065			"TEST 16:  read %d iso, %d entries of %d bytes\n",
2066				param->iterations,
2067				param->sglen, param->length);
2068		/* FIRMWARE:  iso source */
2069		retval = test_iso_queue(dev, param,
2070				dev->in_iso_pipe, dev->iso_in, 0);
2071		break;
2072
2073	/* FIXME scatterlist cancel (needs helper thread) */
2074
2075	/* Tests for bulk I/O using DMA mapping by core and odd address */
2076	case 17:
2077		if (dev->out_pipe == 0)
2078			break;
2079		dev_info(&intf->dev,
2080			"TEST 17:  write odd addr %d bytes %u times core map\n",
2081			param->length, param->iterations);
2082
2083		retval = test_unaligned_bulk(
2084				dev, dev->out_pipe,
2085				param->length, param->iterations,
2086				0, "test17");
2087		break;
2088
2089	case 18:
2090		if (dev->in_pipe == 0)
2091			break;
2092		dev_info(&intf->dev,
2093			"TEST 18:  read odd addr %d bytes %u times core map\n",
2094			param->length, param->iterations);
2095
2096		retval = test_unaligned_bulk(
2097				dev, dev->in_pipe,
2098				param->length, param->iterations,
2099				0, "test18");
2100		break;
2101
2102	/* Tests for bulk I/O using premapped coherent buffer and odd address */
2103	case 19:
2104		if (dev->out_pipe == 0)
2105			break;
2106		dev_info(&intf->dev,
2107			"TEST 19:  write odd addr %d bytes %u times premapped\n",
2108			param->length, param->iterations);
2109
2110		retval = test_unaligned_bulk(
2111				dev, dev->out_pipe,
2112				param->length, param->iterations,
2113				URB_NO_TRANSFER_DMA_MAP, "test19");
2114		break;
2115
2116	case 20:
2117		if (dev->in_pipe == 0)
2118			break;
2119		dev_info(&intf->dev,
2120			"TEST 20:  read odd addr %d bytes %u times premapped\n",
2121			param->length, param->iterations);
2122
2123		retval = test_unaligned_bulk(
2124				dev, dev->in_pipe,
2125				param->length, param->iterations,
2126				URB_NO_TRANSFER_DMA_MAP, "test20");
2127		break;
2128
2129	/* control write tests with unaligned buffer */
2130	case 21:
2131		if (!dev->info->ctrl_out)
2132			break;
2133		dev_info(&intf->dev,
2134				"TEST 21:  %d ep0out odd addr, %d..%d vary %d\n",
2135				param->iterations,
2136				realworld ? 1 : 0, param->length,
2137				param->vary);
2138		retval = ctrl_out(dev, param->iterations,
2139				param->length, param->vary, 1);
2140		break;
2141
2142	/* unaligned iso tests */
2143	case 22:
2144		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2145			break;
2146		dev_info(&intf->dev,
2147			"TEST 22:  write %d iso odd, %d entries of %d bytes\n",
2148				param->iterations,
2149				param->sglen, param->length);
2150		retval = test_iso_queue(dev, param,
2151				dev->out_iso_pipe, dev->iso_out, 1);
2152		break;
2153
2154	case 23:
2155		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2156			break;
2157		dev_info(&intf->dev,
2158			"TEST 23:  read %d iso odd, %d entries of %d bytes\n",
2159				param->iterations,
2160				param->sglen, param->length);
2161		retval = test_iso_queue(dev, param,
2162				dev->in_iso_pipe, dev->iso_in, 1);
2163		break;
2164
2165	/* unlink URBs from a bulk-OUT queue */
2166	case 24:
2167		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
2168			break;
2169		retval = 0;
2170		dev_info(&intf->dev, "TEST 24:  unlink from %d queues of "
2171				"%d %d-byte writes\n",
2172				param->iterations, param->sglen, param->length);
2173		for (i = param->iterations; retval == 0 && i > 0; --i) {
2174			retval = unlink_queued(dev, dev->out_pipe,
2175						param->sglen, param->length);
2176			if (retval) {
2177				dev_err(&intf->dev,
2178					"unlink queued writes failed %d, "
2179					"iterations left %d\n", retval, i);
2180				break;
2181			}
2182		}
2183		break;
2184
2185	}
2186	do_gettimeofday(&param->duration);
2187	param->duration.tv_sec -= start.tv_sec;
2188	param->duration.tv_usec -= start.tv_usec;
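	/* normalize: borrow a second if the microsecond difference went negative */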
2189	if (param->duration.tv_usec < 0) {
2190		param->duration.tv_usec += 1000 * 1000;
2191		param->duration.tv_sec -= 1;
2192	}
2193	mutex_unlock(&dev->lock);
2194	return retval;
2195}
2196
2197/*-------------------------------------------------------------------------*/
2198
2199static unsigned force_interrupt;
2200module_param(force_interrupt, uint, 0);
2201MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2202
2203#ifdef	GENERIC
2204static unsigned short vendor;
2205module_param(vendor, ushort, 0);
2206MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2207
2208static unsigned short product;
2209module_param(product, ushort, 0);
2210MODULE_PARM_DESC(product, "product code (from vendor)");
2211#endif
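/* With the GENERIC entry in the id table below, a device that has no
 * dedicated entry can still be claimed for control-only tests by naming it
 * when the module loads; the IDs here are placeholders, not real product
 * codes:
 *
 *	modprobe usbtest vendor=0xabcd product=0x1234
 */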
2212
2213static int
2214usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2215{
2216	struct usb_device	*udev;
2217	struct usbtest_dev	*dev;
2218	struct usbtest_info	*info;
2219	char			*rtest, *wtest;
2220	char			*irtest, *iwtest;
2221
2222	udev = interface_to_usbdev(intf);
2223
2224#ifdef	GENERIC
2225	/* specify devices by module parameters? */
2226	if (id->match_flags == 0) {
2227		/* vendor match required, product match optional */
2228		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2229			return -ENODEV;
2230		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2231			return -ENODEV;
2232		dev_info(&intf->dev, "matched module params, "
2233					"vend=0x%04x prod=0x%04x\n",
2234				le16_to_cpu(udev->descriptor.idVendor),
2235				le16_to_cpu(udev->descriptor.idProduct));
2236	}
2237#endif
2238
2239	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2240	if (!dev)
2241		return -ENOMEM;
2242	info = (struct usbtest_info *) id->driver_info;
2243	dev->info = info;
2244	mutex_init(&dev->lock);
2245
2246	dev->intf = intf;
2247
2248	/* cacheline-aligned scratch for i/o */
2249	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2250	if (dev->buf == NULL) {
2251		kfree(dev);
2252		return -ENOMEM;
2253	}
2254
2255	/* NOTE this doesn't yet test the handful of differences that are
2256	 * visible with high speed interrupts:  bigger maxpacket (1K) and
2257	 * "high bandwidth" modes (up to 3 packets/uframe).
2258	 */
2259	rtest = wtest = "";
2260	irtest = iwtest = "";
2261	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2262		if (info->ep_in) {
2263			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2264			rtest = " intr-in";
2265		}
2266		if (info->ep_out) {
2267			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2268			wtest = " intr-out";
2269		}
2270	} else {
2271		if (info->autoconf) {
2272			int status;
2273
2274			status = get_endpoints(dev, intf);
2275			if (status < 0) {
2276				WARNING(dev, "couldn't get endpoints, %d\n",
2277						status);
2278				return status;
2279			}
2280			/* may find bulk or ISO pipes */
2281		} else {
2282			if (info->ep_in)
2283				dev->in_pipe = usb_rcvbulkpipe(udev,
2284							info->ep_in);
2285			if (info->ep_out)
2286				dev->out_pipe = usb_sndbulkpipe(udev,
2287							info->ep_out);
2288		}
2289		if (dev->in_pipe)
2290			rtest = " bulk-in";
2291		if (dev->out_pipe)
2292			wtest = " bulk-out";
2293		if (dev->in_iso_pipe)
2294			irtest = " iso-in";
2295		if (dev->out_iso_pipe)
2296			iwtest = " iso-out";
2297	}
2298
2299	usb_set_intfdata(intf, dev);
2300	dev_info(&intf->dev, "%s\n", info->name);
2301	dev_info(&intf->dev, "%s speed {control%s%s%s%s%s} tests%s\n",
2302			({ char *tmp;
2303			switch (udev->speed) {
2304			case USB_SPEED_LOW:
2305				tmp = "low";
2306				break;
2307			case USB_SPEED_FULL:
2308				tmp = "full";
2309				break;
2310			case USB_SPEED_HIGH:
2311				tmp = "high";
2312				break;
2313			case USB_SPEED_SUPER:
2314				tmp = "super";
2315				break;
2316			default:
2317				tmp = "unknown";
2318				break;
2319			}; tmp; }),
2320			info->ctrl_out ? " in/out" : "",
2321			rtest, wtest,
2322			irtest, iwtest,
2323			info->alt >= 0 ? " (+alt)" : "");
2324	return 0;
2325}
2326
2327static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2328{
2329	return 0;
2330}
2331
2332static int usbtest_resume(struct usb_interface *intf)
2333{
2334	return 0;
2335}
2336
2337
2338static void usbtest_disconnect(struct usb_interface *intf)
2339{
2340	struct usbtest_dev	*dev = usb_get_intfdata(intf);
2341
2342	usb_set_intfdata(intf, NULL);
2343	dev_dbg(&intf->dev, "disconnect\n");
2344	kfree(dev);
2345}
2346
2347/* Basic testing only needs a device that can source or sink bulk traffic.
2348 * Any device can test control transfers (default with GENERIC binding).
2349 *
2350 * Several entries work with the default EP0 implementation that's built
2351 * into EZ-USB chips.  There's a default vendor ID which can be overridden
2352 * by (very) small config EEPROMS, but otherwise all these devices act
2353 * identically until firmware is loaded:  only EP0 works.  It turns out
2354 * to be easy to make other endpoints work, without modifying that EP0
2355 * behavior.  For now, we expect that kind of firmware.
2356 */
2357
2358/* an21xx or fx versions of ez-usb */
2359static struct usbtest_info ez1_info = {
2360	.name		= "EZ-USB device",
2361	.ep_in		= 2,
2362	.ep_out		= 2,
2363	.alt		= 1,
2364};
2365
2366/* fx2 version of ez-usb */
2367static struct usbtest_info ez2_info = {
2368	.name		= "FX2 device",
2369	.ep_in		= 6,
2370	.ep_out		= 2,
2371	.alt		= 1,
2372};
2373
2374/* ezusb family device with dedicated usb test firmware
2375 */
2376static struct usbtest_info fw_info = {
2377	.name		= "usb test device",
2378	.ep_in		= 2,
2379	.ep_out		= 2,
2380	.alt		= 1,
2381	.autoconf	= 1,		/* iso and ctrl_out need autoconf */
2382	.ctrl_out	= 1,
2383	.iso		= 1,		/* iso_ep's are #8 in/out */
2384};
2385
2386/* peripheral running Linux and 'zero.c' test firmware, or
2387 * its user-mode cousin. different versions of this use
2388 * different hardware with the same vendor/product codes.
2389 * host side MUST rely on the endpoint descriptors.
2390 */
2391static struct usbtest_info gz_info = {
2392	.name		= "Linux gadget zero",
2393	.autoconf	= 1,
2394	.ctrl_out	= 1,
2395	.alt		= 0,
2396};
2397
2398static struct usbtest_info um_info = {
2399	.name		= "Linux user mode test driver",
2400	.autoconf	= 1,
2401	.alt		= -1,
2402};
2403
2404static struct usbtest_info um2_info = {
2405	.name		= "Linux user mode ISO test driver",
2406	.autoconf	= 1,
2407	.iso		= 1,
2408	.alt		= -1,
2409};
2410
2411#ifdef IBOT2
2412/* this is a nice source of high speed bulk data;
2413 * uses an FX2, with firmware provided in the device
2414 */
2415static struct usbtest_info ibot2_info = {
2416	.name		= "iBOT2 webcam",
2417	.ep_in		= 2,
2418	.alt		= -1,
2419};
2420#endif
2421
2422#ifdef GENERIC
2423/* we can use any device to test control traffic */
2424static struct usbtest_info generic_info = {
2425	.name		= "Generic USB device",
2426	.alt		= -1,
2427};
2428#endif
2429
2430
2431static const struct usb_device_id id_table[] = {
2432
2433	/*-------------------------------------------------------------*/
2434
2435	/* EZ-USB devices which download firmware to replace (or in our
2436	 * case augment) the default device implementation.
2437	 */
2438
2439	/* generic EZ-USB FX controller */
2440	{ USB_DEVICE(0x0547, 0x2235),
2441		.driver_info = (unsigned long) &ez1_info,
2442	},
2443
2444	/* CY3671 development board with EZ-USB FX */
2445	{ USB_DEVICE(0x0547, 0x0080),
2446		.driver_info = (unsigned long) &ez1_info,
2447	},
2448
2449	/* generic EZ-USB FX2 controller (or development board) */
2450	{ USB_DEVICE(0x04b4, 0x8613),
2451		.driver_info = (unsigned long) &ez2_info,
2452	},
2453
2454	/* re-enumerated usb test device firmware */
2455	{ USB_DEVICE(0xfff0, 0xfff0),
2456		.driver_info = (unsigned long) &fw_info,
2457	},
2458
2459	/* "Gadget Zero" firmware runs under Linux */
2460	{ USB_DEVICE(0x0525, 0xa4a0),
2461		.driver_info = (unsigned long) &gz_info,
2462	},
2463
2464	/* so does a user-mode variant */
2465	{ USB_DEVICE(0x0525, 0xa4a4),
2466		.driver_info = (unsigned long) &um_info,
2467	},
2468
2469	/* ... and a user-mode variant that talks iso */
2470	{ USB_DEVICE(0x0525, 0xa4a3),
2471		.driver_info = (unsigned long) &um2_info,
2472	},
2473
2474#ifdef KEYSPAN_19Qi
2475	/* Keyspan 19qi uses an21xx (original EZ-USB) */
2476	/* this does not coexist with the real Keyspan 19qi driver! */
2477	{ USB_DEVICE(0x06cd, 0x010b),
2478		.driver_info = (unsigned long) &ez1_info,
2479	},
2480#endif
2481
2482	/*-------------------------------------------------------------*/
2483
2484#ifdef IBOT2
2485	/* iBOT2 makes a nice source of high speed bulk-in data */
2486	/* this does not coexist with a real iBOT2 driver! */
2487	{ USB_DEVICE(0x0b62, 0x0059),
2488		.driver_info = (unsigned long) &ibot2_info,
2489	},
2490#endif
2491
2492	/*-------------------------------------------------------------*/
2493
2494#ifdef GENERIC
2495	/* module params can specify devices to use for control tests */
2496	{ .driver_info = (unsigned long) &generic_info, },
2497#endif
2498
2499	/*-------------------------------------------------------------*/
2500
2501	{ }
2502};
2503MODULE_DEVICE_TABLE(usb, id_table);
2504
2505static struct usb_driver usbtest_driver = {
2506	.name =		"usbtest",
2507	.id_table =	id_table,
2508	.probe =	usbtest_probe,
2509	.unlocked_ioctl = usbtest_ioctl,
2510	.disconnect =	usbtest_disconnect,
2511	.suspend =	usbtest_suspend,
2512	.resume =	usbtest_resume,
2513};
2514
2515/*-------------------------------------------------------------------------*/
2516
2517static int __init usbtest_init(void)
2518{
2519#ifdef GENERIC
2520	if (vendor)
2521		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2522#endif
2523	return usb_register(&usbtest_driver);
2524}
2525module_init(usbtest_init);
2526
2527static void __exit usbtest_exit(void)
2528{
2529	usb_deregister(&usbtest_driver);
2530}
2531module_exit(usbtest_exit);
2532
2533MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
2534MODULE_LICENSE("GPL");
2535
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/kernel.h>
   3#include <linux/errno.h>
   4#include <linux/init.h>
   5#include <linux/slab.h>
   6#include <linux/mm.h>
   7#include <linux/module.h>
   8#include <linux/moduleparam.h>
   9#include <linux/scatterlist.h>
  10#include <linux/mutex.h>
  11#include <linux/timer.h>
  12#include <linux/usb.h>
  13
  14#define SIMPLE_IO_TIMEOUT	10000	/* in milliseconds */
  15
  16/*-------------------------------------------------------------------------*/
  17
  18static int override_alt = -1;
  19module_param_named(alt, override_alt, int, 0644);
  20MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
  21static void complicated_callback(struct urb *urb);
  22
  23/*-------------------------------------------------------------------------*/
  24
  25/* FIXME make these public somewhere; usbdevfs.h? */
  26
  27/* Parameter for usbtest driver. */
  28struct usbtest_param_32 {
  29	/* inputs */
  30	__u32		test_num;	/* 0..(TEST_CASES-1) */
  31	__u32		iterations;
  32	__u32		length;
  33	__u32		vary;
  34	__u32		sglen;
  35
  36	/* outputs */
  37	__s32		duration_sec;
  38	__s32		duration_usec;
  39};
  40
  41/*
  42 * Compat parameter to the usbtest driver.
  43 * This supports older user-space binaries compiled with a 64-bit compiler.
  44 */
  45struct usbtest_param_64 {
  46	/* inputs */
  47	__u32		test_num;	/* 0..(TEST_CASES-1) */
  48	__u32		iterations;
  49	__u32		length;
  50	__u32		vary;
  51	__u32		sglen;
  52
  53	/* outputs */
  54	__s64		duration_sec;
  55	__s64		duration_usec;
  56};
  57
  58/* IOCTL interface to the driver. */
  59#define USBTEST_REQUEST_32    _IOWR('U', 100, struct usbtest_param_32)
  60/* COMPAT IOCTL interface to the driver. */
  61#define USBTEST_REQUEST_64    _IOWR('U', 100, struct usbtest_param_64)
  62
  63/*-------------------------------------------------------------------------*/
  64
  65#define	GENERIC		/* let probe() bind using module params */
  66
  67/* Some devices that can be used for testing will have "real" drivers.
  68 * Entries for those need to be enabled here by hand, after disabling
  69 * that "real" driver.
  70 */
  71//#define	IBOT2		/* grab iBOT2 webcams */
  72//#define	KEYSPAN_19Qi	/* grab un-renumerated serial adapter */
  73
  74/*-------------------------------------------------------------------------*/
  75
  76struct usbtest_info {
  77	const char		*name;
  78	u8			ep_in;		/* bulk/intr source */
  79	u8			ep_out;		/* bulk/intr sink */
  80	unsigned		autoconf:1;
  81	unsigned		ctrl_out:1;
  82	unsigned		iso:1;		/* try iso in/out */
  83	unsigned		intr:1;		/* try interrupt in/out */
  84	int			alt;
  85};
  86
  87/* this is accessed only through usbfs ioctl calls.
  88 * one ioctl to issue a test ... one lock per device.
  89 * tests create other threads if they need them.
  90 * urbs and buffers are allocated dynamically,
  91 * and data generated deterministically.
  92 */
  93struct usbtest_dev {
  94	struct usb_interface	*intf;
  95	struct usbtest_info	*info;
  96	int			in_pipe;
  97	int			out_pipe;
  98	int			in_iso_pipe;
  99	int			out_iso_pipe;
 100	int			in_int_pipe;
 101	int			out_int_pipe;
 102	struct usb_endpoint_descriptor	*iso_in, *iso_out;
 103	struct usb_endpoint_descriptor	*int_in, *int_out;
 104	struct mutex		lock;
 105
 106#define TBUF_SIZE	256
 107	u8			*buf;
 108};
 109
 110static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
 111{
 112	return interface_to_usbdev(test->intf);
 113}
 114
 115/* set up all urbs so they can be used with either bulk or interrupt */
 116#define	INTERRUPT_RATE		1	/* msec/transfer */
 117
 118#define ERROR(tdev, fmt, args...) \
 119	dev_err(&(tdev)->intf->dev , fmt , ## args)
 120#define WARNING(tdev, fmt, args...) \
 121	dev_warn(&(tdev)->intf->dev , fmt , ## args)
 122
 123#define GUARD_BYTE	0xA5
 124#define MAX_SGLEN	128
 125
 126/*-------------------------------------------------------------------------*/
 127
 128static inline void endpoint_update(int edi,
 129				   struct usb_host_endpoint **in,
 130				   struct usb_host_endpoint **out,
 131				   struct usb_host_endpoint *e)
 132{
 133	if (edi) {
 134		if (!*in)
 135			*in = e;
 136	} else {
 137		if (!*out)
 138			*out = e;
 139	}
 140}
 141
 142static int
 143get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
 144{
 145	int				tmp;
 146	struct usb_host_interface	*alt;
 147	struct usb_host_endpoint	*in, *out;
 148	struct usb_host_endpoint	*iso_in, *iso_out;
 149	struct usb_host_endpoint	*int_in, *int_out;
 150	struct usb_device		*udev;
 151
 152	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
 153		unsigned	ep;
 154
 155		in = out = NULL;
 156		iso_in = iso_out = NULL;
 157		int_in = int_out = NULL;
 158		alt = intf->altsetting + tmp;
 159
 160		if (override_alt >= 0 &&
 161				override_alt != alt->desc.bAlternateSetting)
 162			continue;
 163
 164		/* take the first altsetting with in-bulk + out-bulk;
 165		 * ignore other endpoints and altsettings.
 166		 */
 167		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
 168			struct usb_host_endpoint	*e;
 169			int edi;
 170
 171			e = alt->endpoint + ep;
 172			edi = usb_endpoint_dir_in(&e->desc);
 173
 174			switch (usb_endpoint_type(&e->desc)) {
 175			case USB_ENDPOINT_XFER_BULK:
 176				endpoint_update(edi, &in, &out, e);
 177				continue;
 178			case USB_ENDPOINT_XFER_INT:
 179				if (dev->info->intr)
 180					endpoint_update(edi, &int_in, &int_out, e);
 181				continue;
 182			case USB_ENDPOINT_XFER_ISOC:
 183				if (dev->info->iso)
 184					endpoint_update(edi, &iso_in, &iso_out, e);
 185				/* FALLTHROUGH */
 186			default:
 187				continue;
 188			}
 189		}
 190		if ((in && out)  ||  iso_in || iso_out || int_in || int_out)
 191			goto found;
 192	}
 193	return -EINVAL;
 194
 195found:
 196	udev = testdev_to_usbdev(dev);
 197	dev->info->alt = alt->desc.bAlternateSetting;
 198	if (alt->desc.bAlternateSetting != 0) {
 199		tmp = usb_set_interface(udev,
 200				alt->desc.bInterfaceNumber,
 201				alt->desc.bAlternateSetting);
 202		if (tmp < 0)
 203			return tmp;
 204	}
 205
 206	if (in)
 207		dev->in_pipe = usb_rcvbulkpipe(udev,
 208			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
 209	if (out)
 210		dev->out_pipe = usb_sndbulkpipe(udev,
 211			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
 212
 213	if (iso_in) {
 214		dev->iso_in = &iso_in->desc;
 215		dev->in_iso_pipe = usb_rcvisocpipe(udev,
 216				iso_in->desc.bEndpointAddress
 217					& USB_ENDPOINT_NUMBER_MASK);
 218	}
 219
 220	if (iso_out) {
 221		dev->iso_out = &iso_out->desc;
 222		dev->out_iso_pipe = usb_sndisocpipe(udev,
 223				iso_out->desc.bEndpointAddress
 224					& USB_ENDPOINT_NUMBER_MASK);
 225	}
 226
 227	if (int_in) {
 228		dev->int_in = &int_in->desc;
 229		dev->in_int_pipe = usb_rcvintpipe(udev,
 230				int_in->desc.bEndpointAddress
 231					& USB_ENDPOINT_NUMBER_MASK);
 232	}
 233
 234	if (int_out) {
 235		dev->int_out = &int_out->desc;
 236		dev->out_int_pipe = usb_sndintpipe(udev,
 237				int_out->desc.bEndpointAddress
 238					& USB_ENDPOINT_NUMBER_MASK);
 239	}
 240	return 0;
 241}
 242
 243/*-------------------------------------------------------------------------*/
 244
 245/* Support for testing basic non-queued I/O streams.
 246 *
 247 * These just package urbs as requests that can be easily canceled.
 248 * Each urb's data buffer is dynamically allocated; callers can fill
 249 * them with non-zero test data (or test for it) when appropriate.
 250 */
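/* A sketch of how callers pair these helpers (pipe, size, and interval
 * values are illustrative; see the test cases in usbtest_ioctl for the
 * real usage):
 *
 *	struct urb *urb = simple_alloc_urb(udev, dev->out_pipe, 512, 0);
 *
 *	if (!urb)
 *		return -ENOMEM;
 *	retval = simple_io(dev, urb, iterations, 0, 0, "demo");
 *	simple_free_urb(urb);
 */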
 251
 252static void simple_callback(struct urb *urb)
 253{
 254	complete(urb->context);
 255}
 256
 257static struct urb *usbtest_alloc_urb(
 258	struct usb_device	*udev,
 259	int			pipe,
 260	unsigned long		bytes,
 261	unsigned		transfer_flags,
 262	unsigned		offset,
 263	u8			bInterval,
 264	usb_complete_t		complete_fn)
 265{
 266	struct urb		*urb;
 267
 268	urb = usb_alloc_urb(0, GFP_KERNEL);
 269	if (!urb)
 270		return urb;
 271
 272	if (bInterval)
 273		usb_fill_int_urb(urb, udev, pipe, NULL, bytes, complete_fn,
 274				NULL, bInterval);
 275	else
 276		usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, complete_fn,
 277				NULL);
 278
 279	urb->interval = (udev->speed == USB_SPEED_HIGH)
 280			? (INTERRUPT_RATE << 3)
 281			: INTERRUPT_RATE;
 282	urb->transfer_flags = transfer_flags;
 283	if (usb_pipein(pipe))
 284		urb->transfer_flags |= URB_SHORT_NOT_OK;
 285
 286	if ((bytes + offset) == 0)
 287		return urb;
 288
 289	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
 290		urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
 291			GFP_KERNEL, &urb->transfer_dma);
 292	else
 293		urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
 294
 295	if (!urb->transfer_buffer) {
 296		usb_free_urb(urb);
 297		return NULL;
 298	}
 299
 300	/* To test unaligned transfers add an offset and fill the
 301		unused memory with a guard value */
 302	if (offset) {
 303		memset(urb->transfer_buffer, GUARD_BYTE, offset);
 304		urb->transfer_buffer += offset;
 305		if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
 306			urb->transfer_dma += offset;
 307	}
 308
  309	/* For inbound transfers use the guard byte so the test fails if
  310		data is not correctly copied */
 311	memset(urb->transfer_buffer,
 312			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
 313			bytes);
 314	return urb;
 315}
 316
 317static struct urb *simple_alloc_urb(
 318	struct usb_device	*udev,
 319	int			pipe,
 320	unsigned long		bytes,
 321	u8			bInterval)
 322{
 323	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0,
 324			bInterval, simple_callback);
 325}
 326
 327static struct urb *complicated_alloc_urb(
 328	struct usb_device	*udev,
 329	int			pipe,
 330	unsigned long		bytes,
 331	u8			bInterval)
 332{
 333	return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0,
 334			bInterval, complicated_callback);
 335}
 336
 337static unsigned pattern;
 338static unsigned mod_pattern;
 339module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
  340MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)");
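/* pattern 0 leaves buffers zeroed; pattern 1 writes the "mod63" sequence
 * that simple_check_buf() verifies on IN transfers.
 */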
 341
 342static unsigned get_maxpacket(struct usb_device *udev, int pipe)
 343{
 344	struct usb_host_endpoint	*ep;
 345
 346	ep = usb_pipe_endpoint(udev, pipe);
 347	return le16_to_cpup(&ep->desc.wMaxPacketSize);
 348}
 349
 350static void simple_fill_buf(struct urb *urb)
 351{
 352	unsigned	i;
 353	u8		*buf = urb->transfer_buffer;
 354	unsigned	len = urb->transfer_buffer_length;
 355	unsigned	maxpacket;
 356
 357	switch (pattern) {
 358	default:
 359		/* FALLTHROUGH */
 360	case 0:
 361		memset(buf, 0, len);
 362		break;
 363	case 1:			/* mod63 */
 364		maxpacket = get_maxpacket(urb->dev, urb->pipe);
 365		for (i = 0; i < len; i++)
 366			*buf++ = (u8) ((i % maxpacket) % 63);
 367		break;
 368	}
 369}
 370
 371static inline unsigned long buffer_offset(void *buf)
 372{
 373	return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
 374}
 375
 376static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
 377{
 378	u8 *buf = urb->transfer_buffer;
 379	u8 *guard = buf - buffer_offset(buf);
 380	unsigned i;
 381
 382	for (i = 0; guard < buf; i++, guard++) {
 383		if (*guard != GUARD_BYTE) {
 384			ERROR(tdev, "guard byte[%d] %d (not %d)\n",
 385				i, *guard, GUARD_BYTE);
 386			return -EINVAL;
 387		}
 388	}
 389	return 0;
 390}
 391
 392static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
 393{
 394	unsigned	i;
 395	u8		expected;
 396	u8		*buf = urb->transfer_buffer;
 397	unsigned	len = urb->actual_length;
 398	unsigned	maxpacket = get_maxpacket(urb->dev, urb->pipe);
 399
 400	int ret = check_guard_bytes(tdev, urb);
 401	if (ret)
 402		return ret;
 403
 404	for (i = 0; i < len; i++, buf++) {
 405		switch (pattern) {
 406		/* all-zeroes has no synchronization issues */
 407		case 0:
 408			expected = 0;
 409			break;
 410		/* mod63 stays in sync with short-terminated transfers,
 411		 * or otherwise when host and gadget agree on how large
 412		 * each usb transfer request should be.  resync is done
 413		 * with set_interface or set_config.
 414		 */
 415		case 1:			/* mod63 */
 416			expected = (i % maxpacket) % 63;
 417			break;
 418		/* always fail unsupported patterns */
 419		default:
 420			expected = !*buf;
 421			break;
 422		}
 423		if (*buf == expected)
 424			continue;
 425		ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
 426		return -EINVAL;
 427	}
 428	return 0;
 429}
 430
 431static void simple_free_urb(struct urb *urb)
 432{
 433	unsigned long offset = buffer_offset(urb->transfer_buffer);
 434
 435	if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
 436		usb_free_coherent(
 437			urb->dev,
 438			urb->transfer_buffer_length + offset,
 439			urb->transfer_buffer - offset,
 440			urb->transfer_dma - offset);
 441	else
 442		kfree(urb->transfer_buffer - offset);
 443	usb_free_urb(urb);
 444}
 445
 446static int simple_io(
 447	struct usbtest_dev	*tdev,
 448	struct urb		*urb,
 449	int			iterations,
 450	int			vary,
 451	int			expected,
 452	const char		*label
 453)
 454{
 455	struct usb_device	*udev = urb->dev;
 456	int			max = urb->transfer_buffer_length;
 457	struct completion	completion;
 458	int			retval = 0;
 459	unsigned long		expire;
 460
 461	urb->context = &completion;
 462	while (retval == 0 && iterations-- > 0) {
 463		init_completion(&completion);
 464		if (usb_pipeout(urb->pipe)) {
 465			simple_fill_buf(urb);
 466			urb->transfer_flags |= URB_ZERO_PACKET;
 467		}
 468		retval = usb_submit_urb(urb, GFP_KERNEL);
 469		if (retval != 0)
 470			break;
 471
 472		expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
 473		if (!wait_for_completion_timeout(&completion, expire)) {
 474			usb_kill_urb(urb);
 475			retval = (urb->status == -ENOENT ?
 476				  -ETIMEDOUT : urb->status);
 477		} else {
 478			retval = urb->status;
 479		}
 480
 481		urb->dev = udev;
 482		if (retval == 0 && usb_pipein(urb->pipe))
 483			retval = simple_check_buf(tdev, urb);
 484
 485		if (vary) {
 486			int	len = urb->transfer_buffer_length;
 487
 488			len += vary;
 489			len %= max;
 490			if (len == 0)
 491				len = (vary < max) ? vary : max;
 492			urb->transfer_buffer_length = len;
 493		}
 494
 495		/* FIXME if endpoint halted, clear halt (and log) */
 496	}
 497	urb->transfer_buffer_length = max;
 498
 499	if (expected != retval)
 500		dev_err(&udev->dev,
 501			"%s failed, iterations left %d, status %d (not %d)\n",
 502				label, iterations, retval, expected);
 503	return retval;
 504}
 505
 506
 507/*-------------------------------------------------------------------------*/
 508
 509/* We use scatterlist primitives to test queued I/O.
 510 * Yes, this also tests the scatterlist primitives.
 511 */
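/* The usual calling pattern, with queue depth and transfer size chosen only
 * for illustration:
 *
 *	struct usb_sg_request req;
 *	struct scatterlist *sg;
 *
 *	sg = alloc_sglist(8, 512, 0, dev, dev->out_pipe);
 *	if (!sg)
 *		return -ENOMEM;
 *	retval = perform_sglist(dev, iterations, dev->out_pipe, &req, sg, 8);
 *	free_sglist(sg, 8);
 */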
 512
 513static void free_sglist(struct scatterlist *sg, int nents)
 514{
 515	unsigned		i;
 516
 517	if (!sg)
 518		return;
 519	for (i = 0; i < nents; i++) {
 520		if (!sg_page(&sg[i]))
 521			continue;
 522		kfree(sg_virt(&sg[i]));
 523	}
 524	kfree(sg);
 525}
 526
 527static struct scatterlist *
 528alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
 529{
 530	struct scatterlist	*sg;
 531	unsigned int		n_size = 0;
 532	unsigned		i;
 533	unsigned		size = max;
 534	unsigned		maxpacket =
 535		get_maxpacket(interface_to_usbdev(dev->intf), pipe);
 536
 537	if (max == 0)
 538		return NULL;
 539
 540	sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
 541	if (!sg)
 542		return NULL;
 543	sg_init_table(sg, nents);
 544
 545	for (i = 0; i < nents; i++) {
 546		char		*buf;
 547		unsigned	j;
 548
 549		buf = kzalloc(size, GFP_KERNEL);
 550		if (!buf) {
 551			free_sglist(sg, i);
 552			return NULL;
 553		}
 554
 555		/* kmalloc pages are always physically contiguous! */
 556		sg_set_buf(&sg[i], buf, size);
 557
 558		switch (pattern) {
 559		case 0:
 560			/* already zeroed */
 561			break;
 562		case 1:
 563			for (j = 0; j < size; j++)
 564				*buf++ = (u8) (((j + n_size) % maxpacket) % 63);
 565			n_size += size;
 566			break;
 567		}
 568
 569		if (vary) {
 570			size += vary;
 571			size %= max;
 572			if (size == 0)
 573				size = (vary < max) ? vary : max;
 574		}
 575	}
 576
 577	return sg;
 578}
 579
 580struct sg_timeout {
 581	struct timer_list timer;
 582	struct usb_sg_request *req;
 583};
 584
 585static void sg_timeout(struct timer_list *t)
 586{
 587	struct sg_timeout *timeout = from_timer(timeout, t, timer);
 588
 589	usb_sg_cancel(timeout->req);
 590}
 591
 592static int perform_sglist(
 593	struct usbtest_dev	*tdev,
 594	unsigned		iterations,
 595	int			pipe,
 596	struct usb_sg_request	*req,
 597	struct scatterlist	*sg,
 598	int			nents
 599)
 600{
 601	struct usb_device	*udev = testdev_to_usbdev(tdev);
 602	int			retval = 0;
 603	struct sg_timeout	timeout = {
 604		.req = req,
 605	};
 606
 607	timer_setup_on_stack(&timeout.timer, sg_timeout, 0);
 608
 609	while (retval == 0 && iterations-- > 0) {
 610		retval = usb_sg_init(req, udev, pipe,
 611				(udev->speed == USB_SPEED_HIGH)
 612					? (INTERRUPT_RATE << 3)
 613					: INTERRUPT_RATE,
 614				sg, nents, 0, GFP_KERNEL);
 615
 616		if (retval)
 617			break;
 618		mod_timer(&timeout.timer, jiffies +
 619				msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
 620		usb_sg_wait(req);
 621		if (!del_timer_sync(&timeout.timer))
 622			retval = -ETIMEDOUT;
 623		else
 624			retval = req->status;
 625		destroy_timer_on_stack(&timeout.timer);
 626
 627		/* FIXME check resulting data pattern */
 628
 629		/* FIXME if endpoint halted, clear halt (and log) */
 630	}
 631
 632	/* FIXME for unlink or fault handling tests, don't report
 633	 * failure if retval is as we expected ...
 634	 */
 635	if (retval)
 636		ERROR(tdev, "perform_sglist failed, "
 637				"iterations left %d, status %d\n",
 638				iterations, retval);
 639	return retval;
 640}
 641
 642
 643/*-------------------------------------------------------------------------*/
 644
 645/* unqueued control message testing
 646 *
 647 * there's a nice set of device functional requirements in chapter 9 of the
 648 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
 649 * special test firmware.
 650 *
 651 * we know the device is configured (or suspended) by the time it's visible
 652 * through usbfs.  we can't change that, so we won't test enumeration (which
 653 * worked 'well enough' to get here, this time), power management (ditto),
 654 * or remote wakeup (which needs human interaction).
 655 */
 656
 657static unsigned realworld = 1;
 658module_param(realworld, uint, 0);
 659MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
 660
 661static int get_altsetting(struct usbtest_dev *dev)
 662{
 663	struct usb_interface	*iface = dev->intf;
 664	struct usb_device	*udev = interface_to_usbdev(iface);
 665	int			retval;
 666
 667	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 668			USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
 669			0, iface->altsetting[0].desc.bInterfaceNumber,
 670			dev->buf, 1, USB_CTRL_GET_TIMEOUT);
 671	switch (retval) {
 672	case 1:
 673		return dev->buf[0];
 674	case 0:
 675		retval = -ERANGE;
 676		/* FALLTHROUGH */
 677	default:
 678		return retval;
 679	}
 680}
 681
 682static int set_altsetting(struct usbtest_dev *dev, int alternate)
 683{
 684	struct usb_interface		*iface = dev->intf;
 685	struct usb_device		*udev;
 686
 687	if (alternate < 0 || alternate >= 256)
 688		return -EINVAL;
 689
 690	udev = interface_to_usbdev(iface);
 691	return usb_set_interface(udev,
 692			iface->altsetting[0].desc.bInterfaceNumber,
 693			alternate);
 694}
 695
 696static int is_good_config(struct usbtest_dev *tdev, int len)
 697{
 698	struct usb_config_descriptor	*config;
 699
 700	if (len < sizeof(*config))
 701		return 0;
 702	config = (struct usb_config_descriptor *) tdev->buf;
 703
 704	switch (config->bDescriptorType) {
 705	case USB_DT_CONFIG:
 706	case USB_DT_OTHER_SPEED_CONFIG:
 707		if (config->bLength != 9) {
 708			ERROR(tdev, "bogus config descriptor length\n");
 709			return 0;
 710		}
 711		/* this bit 'must be 1' but often isn't */
 712		if (!realworld && !(config->bmAttributes & 0x80)) {
 713			ERROR(tdev, "high bit of config attributes not set\n");
 714			return 0;
 715		}
 716		if (config->bmAttributes & 0x1f) {	/* reserved == 0 */
 717			ERROR(tdev, "reserved config bits set\n");
 718			return 0;
 719		}
 720		break;
 721	default:
 722		return 0;
 723	}
 724
 725	if (le16_to_cpu(config->wTotalLength) == len)	/* read it all */
 726		return 1;
 727	if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE)	/* max partial read */
 728		return 1;
 729	ERROR(tdev, "bogus config descriptor read size\n");
 730	return 0;
 731}
 732
 733static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
 734{
 735	struct usb_ext_cap_descriptor *ext;
 736	u32 attr;
 737
 738	ext = (struct usb_ext_cap_descriptor *) buf;
 739
 740	if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) {
 741		ERROR(tdev, "bogus usb 2.0 extension descriptor length\n");
 742		return 0;
 743	}
 744
 745	attr = le32_to_cpu(ext->bmAttributes);
  746	/* bits[1:15] are used and others are reserved */
 747	if (attr & ~0xfffe) {	/* reserved == 0 */
 748		ERROR(tdev, "reserved bits set\n");
 749		return 0;
 750	}
 751
 752	return 1;
 753}
 754
 755static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf)
 756{
 757	struct usb_ss_cap_descriptor *ss;
 758
 759	ss = (struct usb_ss_cap_descriptor *) buf;
 760
 761	if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) {
 762		ERROR(tdev, "bogus superspeed device capability descriptor length\n");
 763		return 0;
 764	}
 765
 766	/*
 767	 * only bit[1] of bmAttributes is used for LTM and others are
 768	 * reserved
 769	 */
 770	if (ss->bmAttributes & ~0x02) {	/* reserved == 0 */
 771		ERROR(tdev, "reserved bits set in bmAttributes\n");
 772		return 0;
 773	}
 774
  775	/* bits[0:3] of wSpeedSupported are used and others are reserved */
 776	if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) {	/* reserved == 0 */
 777		ERROR(tdev, "reserved bits set in wSpeedSupported\n");
 778		return 0;
 779	}
 780
 781	return 1;
 782}
 783
 784static int is_good_con_id(struct usbtest_dev *tdev, u8 *buf)
 785{
 786	struct usb_ss_container_id_descriptor *con_id;
 787
 788	con_id = (struct usb_ss_container_id_descriptor *) buf;
 789
 790	if (con_id->bLength != USB_DT_USB_SS_CONTN_ID_SIZE) {
 791		ERROR(tdev, "bogus container id descriptor length\n");
 792		return 0;
 793	}
 794
 795	if (con_id->bReserved) {	/* reserved == 0 */
 796		ERROR(tdev, "reserved bits set\n");
 797		return 0;
 798	}
 799
 800	return 1;
 801}
 802
  803/* sanity test for standard requests working with usb_control_msg() and some
 804 * of the utility functions which use it.
 805 *
 806 * this doesn't test how endpoint halts behave or data toggles get set, since
 807 * we won't do I/O to bulk/interrupt endpoints here (which is how to change
 808 * halt or toggle).  toggle testing is impractical without support from hcds.
 809 *
 810 * this avoids failing devices linux would normally work with, by not testing
 811 * config/altsetting operations for devices that only support their defaults.
 812 * such devices rarely support those needless operations.
 813 *
 814 * NOTE that since this is a sanity test, it's not examining boundary cases
 815 * to see if usbcore, hcd, and device all behave right.  such testing would
 816 * involve varied read sizes and other operation sequences.
 817 */
 818static int ch9_postconfig(struct usbtest_dev *dev)
 819{
 820	struct usb_interface	*iface = dev->intf;
 821	struct usb_device	*udev = interface_to_usbdev(iface);
 822	int			i, alt, retval;
 823
 824	/* [9.2.3] if there's more than one altsetting, we need to be able to
 825	 * set and get each one.  mostly trusts the descriptors from usbcore.
 826	 */
 827	for (i = 0; i < iface->num_altsetting; i++) {
 828
 829		/* 9.2.3 constrains the range here */
 830		alt = iface->altsetting[i].desc.bAlternateSetting;
 831		if (alt < 0 || alt >= iface->num_altsetting) {
 832			dev_err(&iface->dev,
 833					"invalid alt [%d].bAltSetting = %d\n",
 834					i, alt);
 835		}
 836
 837		/* [real world] get/set unimplemented if there's only one */
 838		if (realworld && iface->num_altsetting == 1)
 839			continue;
 840
 841		/* [9.4.10] set_interface */
 842		retval = set_altsetting(dev, alt);
 843		if (retval) {
 844			dev_err(&iface->dev, "can't set_interface = %d, %d\n",
 845					alt, retval);
 846			return retval;
 847		}
 848
 849		/* [9.4.4] get_interface always works */
 850		retval = get_altsetting(dev);
 851		if (retval != alt) {
 852			dev_err(&iface->dev, "get alt should be %d, was %d\n",
 853					alt, retval);
 854			return (retval < 0) ? retval : -EDOM;
 855		}
 856
 857	}
 858
 859	/* [real world] get_config unimplemented if there's only one */
 860	if (!realworld || udev->descriptor.bNumConfigurations != 1) {
 861		int	expected = udev->actconfig->desc.bConfigurationValue;
 862
 863		/* [9.4.2] get_configuration always works
 864		 * ... although some cheap devices (like one TI Hub I've got)
 865		 * won't return config descriptors except before set_config.
 866		 */
 867		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 868				USB_REQ_GET_CONFIGURATION,
 869				USB_DIR_IN | USB_RECIP_DEVICE,
 870				0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
 871		if (retval != 1 || dev->buf[0] != expected) {
 872			dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
 873				retval, dev->buf[0], expected);
 874			return (retval < 0) ? retval : -EDOM;
 875		}
 876	}
 877
 878	/* there's always [9.4.3] a device descriptor [9.6.1] */
 879	retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
 880			dev->buf, sizeof(udev->descriptor));
 881	if (retval != sizeof(udev->descriptor)) {
 882		dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
 883		return (retval < 0) ? retval : -EDOM;
 884	}
 885
 886	/*
 887	 * there's always [9.4.3] a bos device descriptor [9.6.2] in USB
 888	 * 3.0 spec
 889	 */
 890	if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0210) {
 891		struct usb_bos_descriptor *bos = NULL;
 892		struct usb_dev_cap_header *header = NULL;
 893		unsigned total, num, length;
 894		u8 *buf;
 895
 896		retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
 897				sizeof(*udev->bos->desc));
 898		if (retval != sizeof(*udev->bos->desc)) {
 899			dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
 900			return (retval < 0) ? retval : -EDOM;
 901		}
 902
 903		bos = (struct usb_bos_descriptor *)dev->buf;
 904		total = le16_to_cpu(bos->wTotalLength);
 905		num = bos->bNumDeviceCaps;
 906
 907		if (total > TBUF_SIZE)
 908			total = TBUF_SIZE;
 909
 910		/*
 911		 * get generic device-level capability descriptors [9.6.2]
 912		 * in USB 3.0 spec
 913		 */
 914		retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
 915				total);
 916		if (retval != total) {
 917			dev_err(&iface->dev, "bos descriptor set --> %d\n",
 918					retval);
 919			return (retval < 0) ? retval : -EDOM;
 920		}
 921
 922		length = sizeof(*udev->bos->desc);
 923		buf = dev->buf;
 924		for (i = 0; i < num; i++) {
 925			buf += length;
 926			if (buf + sizeof(struct usb_dev_cap_header) >
 927					dev->buf + total)
 928				break;
 929
 930			header = (struct usb_dev_cap_header *)buf;
 931			length = header->bLength;
 932
 933			if (header->bDescriptorType !=
 934					USB_DT_DEVICE_CAPABILITY) {
 935				dev_warn(&udev->dev, "not device capability descriptor, skip\n");
 936				continue;
 937			}
 938
 939			switch (header->bDevCapabilityType) {
 940			case USB_CAP_TYPE_EXT:
 941				if (buf + USB_DT_USB_EXT_CAP_SIZE >
 942						dev->buf + total ||
 943						!is_good_ext(dev, buf)) {
 944					dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n");
 945					return -EDOM;
 946				}
 947				break;
 948			case USB_SS_CAP_TYPE:
 949				if (buf + USB_DT_USB_SS_CAP_SIZE >
 950						dev->buf + total ||
 951						!is_good_ss_cap(dev, buf)) {
 952					dev_err(&iface->dev, "bogus superspeed device capability descriptor\n");
 953					return -EDOM;
 954				}
 955				break;
 956			case CONTAINER_ID_TYPE:
 957				if (buf + USB_DT_USB_SS_CONTN_ID_SIZE >
 958						dev->buf + total ||
 959						!is_good_con_id(dev, buf)) {
 960					dev_err(&iface->dev, "bogus container id descriptor\n");
 961					return -EDOM;
 962				}
 963				break;
 964			default:
 965				break;
 966			}
 967		}
 968	}
 969
 970	/* there's always [9.4.3] at least one config descriptor [9.6.3] */
 971	for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
 972		retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
 973				dev->buf, TBUF_SIZE);
 974		if (!is_good_config(dev, retval)) {
 975			dev_err(&iface->dev,
 976					"config [%d] descriptor --> %d\n",
 977					i, retval);
 978			return (retval < 0) ? retval : -EDOM;
 979		}
 980
 981		/* FIXME cross-checking udev->config[i] to make sure usbcore
 982		 * parsed it right (etc) would be good testing paranoia
 983		 */
 984	}
 985
 986	/* and sometimes [9.2.6.6] speed dependent descriptors */
 987	if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
 988		struct usb_qualifier_descriptor *d = NULL;
 989
 990		/* device qualifier [9.6.2] */
 991		retval = usb_get_descriptor(udev,
 992				USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
 993				sizeof(struct usb_qualifier_descriptor));
 994		if (retval == -EPIPE) {
 995			if (udev->speed == USB_SPEED_HIGH) {
 996				dev_err(&iface->dev,
 997						"hs dev qualifier --> %d\n",
 998						retval);
 999				return retval;
1000			}
1001			/* usb2.0 but not high-speed capable; fine */
1002		} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
1003			dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
1004			return (retval < 0) ? retval : -EDOM;
1005		} else
1006			d = (struct usb_qualifier_descriptor *) dev->buf;
1007
1008		/* might not have [9.6.2] any other-speed configs [9.6.4] */
1009		if (d) {
1010			unsigned max = d->bNumConfigurations;
1011			for (i = 0; i < max; i++) {
1012				retval = usb_get_descriptor(udev,
1013					USB_DT_OTHER_SPEED_CONFIG, i,
1014					dev->buf, TBUF_SIZE);
1015				if (!is_good_config(dev, retval)) {
1016					dev_err(&iface->dev,
1017						"other speed config --> %d\n",
1018						retval);
1019					return (retval < 0) ? retval : -EDOM;
1020				}
1021			}
1022		}
1023	}
1024	/* FIXME fetch strings from at least the device descriptor */
1025
1026	/* [9.4.5] get_status always works */
1027	retval = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
1028	if (retval) {
1029		dev_err(&iface->dev, "get dev status --> %d\n", retval);
1030		return retval;
1031	}
1032
1033	/* FIXME configuration.bmAttributes says if we could try to set/clear
1034	 * the device's remote wakeup feature ... if we can, test that here
1035	 */
1036
1037	retval = usb_get_std_status(udev, USB_RECIP_INTERFACE,
1038			iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
1039	if (retval) {
1040		dev_err(&iface->dev, "get interface status --> %d\n", retval);
1041		return retval;
1042	}
1043	/* FIXME get status for each endpoint in the interface */
1044
1045	return 0;
1046}
1047
1048/*-------------------------------------------------------------------------*/
1049
1050/* use ch9 requests to test whether:
1051 *   (a) queues work for control, keeping N subtests queued and
1052 *       active (auto-resubmit) for M loops through the queue.
1053 *   (b) protocol stalls (control-only) will autorecover.
1054 *       it's not like bulk/intr; no halt clearing.
1055 *   (c) short control reads are reported and handled.
1056 *   (d) queues are always processed in-order
1057 */
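/* sglen sets the queue depth (how many control urbs stay in flight); each
 * slot is assigned one of the NUM_SUBCASES requests round-robin, and the
 * total number of completions is sglen * iterations.
 */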
1058
1059struct ctrl_ctx {
1060	spinlock_t		lock;
1061	struct usbtest_dev	*dev;
1062	struct completion	complete;
1063	unsigned		count;
1064	unsigned		pending;
1065	int			status;
1066	struct urb		**urb;
1067	struct usbtest_param_32	*param;
1068	int			last;
1069};
1070
1071#define NUM_SUBCASES	16		/* how many test subcases here? */
1072
1073struct subcase {
1074	struct usb_ctrlrequest	setup;
1075	int			number;
1076	int			expected;
1077};
1078
1079static void ctrl_complete(struct urb *urb)
1080{
1081	struct ctrl_ctx		*ctx = urb->context;
1082	struct usb_ctrlrequest	*reqp;
1083	struct subcase		*subcase;
1084	int			status = urb->status;
1085
1086	reqp = (struct usb_ctrlrequest *)urb->setup_packet;
1087	subcase = container_of(reqp, struct subcase, setup);
1088
1089	spin_lock(&ctx->lock);
1090	ctx->count--;
1091	ctx->pending--;
1092
1093	/* queue must transfer and complete in fifo order, unless
1094	 * usb_unlink_urb() is used to unlink something not at the
1095	 * physical queue head (not tested).
1096	 */
1097	if (subcase->number > 0) {
1098		if ((subcase->number - ctx->last) != 1) {
1099			ERROR(ctx->dev,
1100				"subcase %d completed out of order, last %d\n",
1101				subcase->number, ctx->last);
1102			status = -EDOM;
1103			ctx->last = subcase->number;
1104			goto error;
1105		}
1106	}
1107	ctx->last = subcase->number;
1108
1109	/* succeed or fault in only one way? */
1110	if (status == subcase->expected)
1111		status = 0;
1112
1113	/* async unlink for cleanup? */
1114	else if (status != -ECONNRESET) {
1115
1116		/* some faults are allowed, not required */
1117		if (subcase->expected > 0 && (
1118			  ((status == -subcase->expected	/* happened */
1119			   || status == 0))))			/* didn't */
1120			status = 0;
1121		/* sometimes more than one fault is allowed */
1122		else if (subcase->number == 12 && status == -EPIPE)
1123			status = 0;
1124		else
1125			ERROR(ctx->dev, "subtest %d error, status %d\n",
1126					subcase->number, status);
1127	}
1128
1129	/* unexpected status codes mean errors; ideally, in hardware */
1130	if (status) {
1131error:
1132		if (ctx->status == 0) {
1133			int		i;
1134
1135			ctx->status = status;
1136			ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
1137					"%d left, subcase %d, len %d/%d\n",
1138					reqp->bRequestType, reqp->bRequest,
1139					status, ctx->count, subcase->number,
1140					urb->actual_length,
1141					urb->transfer_buffer_length);
1142
1143			/* FIXME this "unlink everything" exit route should
1144			 * be a separate test case.
1145			 */
1146
1147			/* unlink whatever's still pending */
1148			for (i = 1; i < ctx->param->sglen; i++) {
1149				struct urb *u = ctx->urb[
1150							(i + subcase->number)
1151							% ctx->param->sglen];
1152
1153				if (u == urb || !u->dev)
1154					continue;
1155				spin_unlock(&ctx->lock);
1156				status = usb_unlink_urb(u);
1157				spin_lock(&ctx->lock);
1158				switch (status) {
1159				case -EINPROGRESS:
1160				case -EBUSY:
1161				case -EIDRM:
1162					continue;
1163				default:
1164					ERROR(ctx->dev, "urb unlink --> %d\n",
1165							status);
1166				}
1167			}
1168			status = ctx->status;
1169		}
1170	}
1171
1172	/* resubmit if we need to, else mark this as done */
1173	if ((status == 0) && (ctx->pending < ctx->count)) {
1174		status = usb_submit_urb(urb, GFP_ATOMIC);
1175		if (status != 0) {
1176			ERROR(ctx->dev,
1177				"can't resubmit ctrl %02x.%02x, err %d\n",
1178				reqp->bRequestType, reqp->bRequest, status);
1179			urb->dev = NULL;
1180		} else
1181			ctx->pending++;
1182	} else
1183		urb->dev = NULL;
1184
1185	/* signal completion when nothing's queued */
1186	if (ctx->pending == 0)
1187		complete(&ctx->complete);
1188	spin_unlock(&ctx->lock);
1189}
1190
1191static int
1192test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param)
1193{
1194	struct usb_device	*udev = testdev_to_usbdev(dev);
1195	struct urb		**urb;
1196	struct ctrl_ctx		context;
1197	int			i;
1198
1199	if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
1200		return -EOPNOTSUPP;
1201
1202	spin_lock_init(&context.lock);
1203	context.dev = dev;
1204	init_completion(&context.complete);
1205	context.count = param->sglen * param->iterations;
1206	context.pending = 0;
1207	context.status = -ENOMEM;
1208	context.param = param;
1209	context.last = -1;
1210
1211	/* allocate and init the urbs we'll queue.
1212	 * as with bulk/intr sglists, sglen is the queue depth; it also
1213	 * controls which subtests run (more tests than sglen) or rerun.
1214	 */
1215	urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
1216	if (!urb)
1217		return -ENOMEM;
1218	for (i = 0; i < param->sglen; i++) {
1219		int			pipe = usb_rcvctrlpipe(udev, 0);
1220		unsigned		len;
1221		struct urb		*u;
1222		struct usb_ctrlrequest	req;
1223		struct subcase		*reqp;
1224
1225		/* sign of this variable means:
1226		 *  -: tested code must return this (negative) error code
1227		 *  +: tested code may return this (negative too) error code
1228		 */
1229		int			expected = 0;
1230
1231		/* requests here are mostly expected to succeed on any
1232		 * device, but some are chosen to trigger protocol stalls
1233		 * or short reads.
1234		 */
1235		memset(&req, 0, sizeof(req));
1236		req.bRequest = USB_REQ_GET_DESCRIPTOR;
1237		req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1238
1239		switch (i % NUM_SUBCASES) {
1240		case 0:		/* get device descriptor */
1241			req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
1242			len = sizeof(struct usb_device_descriptor);
1243			break;
1244		case 1:		/* get first config descriptor (only) */
1245			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1246			len = sizeof(struct usb_config_descriptor);
1247			break;
1248		case 2:		/* get altsetting (OFTEN STALLS) */
1249			req.bRequest = USB_REQ_GET_INTERFACE;
1250			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1251			/* index = 0 means first interface */
1252			len = 1;
1253			expected = EPIPE;
1254			break;
1255		case 3:		/* get interface status */
1256			req.bRequest = USB_REQ_GET_STATUS;
1257			req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1258			/* interface 0 */
1259			len = 2;
1260			break;
1261		case 4:		/* get device status */
1262			req.bRequest = USB_REQ_GET_STATUS;
1263			req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1264			len = 2;
1265			break;
1266		case 5:		/* get device qualifier (MAY STALL) */
1267			req.wValue = cpu_to_le16(USB_DT_DEVICE_QUALIFIER << 8);
1268			len = sizeof(struct usb_qualifier_descriptor);
1269			if (udev->speed != USB_SPEED_HIGH)
1270				expected = EPIPE;
1271			break;
1272		case 6:		/* get first config descriptor, plus interface */
1273			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1274			len = sizeof(struct usb_config_descriptor);
1275			len += sizeof(struct usb_interface_descriptor);
1276			break;
1277		case 7:		/* get interface descriptor (ALWAYS STALLS) */
1278			req.wValue = cpu_to_le16(USB_DT_INTERFACE << 8);
1279			/* interface == 0 */
1280			len = sizeof(struct usb_interface_descriptor);
1281			expected = -EPIPE;
1282			break;
1283		/* NOTE: two consecutive stalls in the queue here.
1284		 *  that tests fault recovery a bit more aggressively. */
1285		case 8:		/* clear endpoint halt (MAY STALL) */
1286			req.bRequest = USB_REQ_CLEAR_FEATURE;
1287			req.bRequestType = USB_RECIP_ENDPOINT;
1288			/* wValue 0 == ep halt */
1289			/* wIndex 0 == ep0 (shouldn't halt!) */
1290			len = 0;
1291			pipe = usb_sndctrlpipe(udev, 0);
1292			expected = EPIPE;
1293			break;
1294		case 9:		/* get endpoint status */
1295			req.bRequest = USB_REQ_GET_STATUS;
1296			req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
1297			/* endpoint 0 */
1298			len = 2;
1299			break;
1300		case 10:	/* trigger short read (EREMOTEIO) */
1301			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1302			len = 1024;
1303			expected = -EREMOTEIO;
1304			break;
1305		/* NOTE: two consecutive _different_ faults in the queue. */
1306		case 11:	/* get endpoint descriptor (ALWAYS STALLS) */
1307			req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
1308			/* endpoint == 0 */
1309			len = sizeof(struct usb_interface_descriptor);
1310			expected = EPIPE;
1311			break;
1312		/* NOTE: sometimes even a third fault in the queue! */
1313		case 12:	/* get string 0 descriptor (MAY STALL) */
1314			req.wValue = cpu_to_le16(USB_DT_STRING << 8);
1315			/* string == 0, for language IDs */
1316			len = sizeof(struct usb_interface_descriptor);
1317			/* may succeed when > 4 languages */
1318			expected = EREMOTEIO;	/* or EPIPE, if no strings */
1319			break;
1320		case 13:	/* short read, resembling case 10 */
1321			req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1322			/* last data packet "should" be DATA1, not DATA0 */
1323			if (udev->speed == USB_SPEED_SUPER)
1324				len = 1024 - 512;
1325			else
1326				len = 1024 - udev->descriptor.bMaxPacketSize0;
1327			expected = -EREMOTEIO;
1328			break;
1329		case 14:	/* short read; try to fill the last packet */
1330			req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
1331			/* device descriptor size == 18 bytes */
1332			len = udev->descriptor.bMaxPacketSize0;
1333			if (udev->speed == USB_SPEED_SUPER)
1334				len = 512;
1335			switch (len) {
1336			case 8:
1337				len = 24;
1338				break;
1339			case 16:
1340				len = 32;
1341				break;
1342			}
1343			expected = -EREMOTEIO;
1344			break;
1345		case 15:
1346			req.wValue = cpu_to_le16(USB_DT_BOS << 8);
1347			if (udev->bos)
1348				len = le16_to_cpu(udev->bos->desc->wTotalLength);
1349			else
1350				len = sizeof(struct usb_bos_descriptor);
1351			if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201)
1352				expected = -EPIPE;
1353			break;
1354		default:
1355			ERROR(dev, "bogus number of ctrl queue testcases!\n");
1356			context.status = -EINVAL;
1357			goto cleanup;
1358		}
1359		req.wLength = cpu_to_le16(len);
1360		urb[i] = u = simple_alloc_urb(udev, pipe, len, 0);
1361		if (!u)
1362			goto cleanup;
1363
1364		reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
1365		if (!reqp)
1366			goto cleanup;
1367		reqp->setup = req;
1368		reqp->number = i % NUM_SUBCASES;
1369		reqp->expected = expected;
1370		u->setup_packet = (char *) &reqp->setup;
1371
1372		u->context = &context;
1373		u->complete = ctrl_complete;
1374	}
1375
1376	/* queue the urbs */
1377	context.urb = urb;
1378	spin_lock_irq(&context.lock);
1379	for (i = 0; i < param->sglen; i++) {
1380		context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1381		if (context.status != 0) {
1382			ERROR(dev, "can't submit urb[%d], status %d\n",
1383					i, context.status);
1384			context.count = context.pending;
1385			break;
1386		}
1387		context.pending++;
1388	}
1389	spin_unlock_irq(&context.lock);
1390
1391	/* FIXME  set timer and time out; provide a disconnect hook */
1392
1393	/* wait for the last one to complete */
1394	if (context.pending > 0)
1395		wait_for_completion(&context.complete);
1396
1397cleanup:
1398	for (i = 0; i < param->sglen; i++) {
1399		if (!urb[i])
1400			continue;
1401		urb[i]->dev = udev;
1402		kfree(urb[i]->setup_packet);
1403		simple_free_urb(urb[i]);
1404	}
1405	kfree(urb);
1406	return context.status;
1407}
1408#undef NUM_SUBCASES
1409
1410
1411/*-------------------------------------------------------------------------*/
1412
1413static void unlink1_callback(struct urb *urb)
1414{
1415	int	status = urb->status;
1416
1417	/* we "know" -EPIPE (stall) never happens */
1418	if (!status)
1419		status = usb_submit_urb(urb, GFP_ATOMIC);
1420	if (status) {
1421		urb->status = status;
1422		complete(urb->context);
1423	}
1424}
1425
1426static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1427{
1428	struct urb		*urb;
1429	struct completion	completion;
1430	int			retval = 0;
1431
1432	init_completion(&completion);
1433	urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size, 0);
1434	if (!urb)
1435		return -ENOMEM;
1436	urb->context = &completion;
1437	urb->complete = unlink1_callback;
1438
1439	if (usb_pipeout(urb->pipe)) {
1440		simple_fill_buf(urb);
1441		urb->transfer_flags |= URB_ZERO_PACKET;
1442	}
1443
1444	/* keep the endpoint busy.  there are lots of hc/hcd-internal
1445	 * states, and testing should get to all of them over time.
1446	 *
1447	 * FIXME want additional tests for when endpoint is STALLing
1448	 * due to errors, or is just NAKing requests.
1449	 */
1450	retval = usb_submit_urb(urb, GFP_KERNEL);
1451	if (retval != 0) {
1452		dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1453		return retval;
1454	}
1455
1456	/* unlinking that should always work.  variable delay tests more
1457	 * hcd states and code paths, even with little other system load.
1458	 */
1459	msleep(jiffies % (2 * INTERRUPT_RATE));
1460	if (async) {
1461		while (!completion_done(&completion)) {
1462			retval = usb_unlink_urb(urb);
1463
1464			if (retval == 0 && usb_pipein(urb->pipe))
1465				retval = simple_check_buf(dev, urb);
1466
1467			switch (retval) {
1468			case -EBUSY:
1469			case -EIDRM:
1470				/* we can't unlink urbs while they're completing
1471				 * or if they've completed, and we haven't
1472				 * resubmitted. "normal" drivers would prevent
1473				 * resubmission, but since we're testing unlink
1474				 * paths, we can't.
1475				 */
1476				ERROR(dev, "unlink retry\n");
1477				continue;
1478			case 0:
1479			case -EINPROGRESS:
1480				break;
1481
1482			default:
1483				dev_err(&dev->intf->dev,
1484					"unlink fail %d\n", retval);
1485				return retval;
1486			}
1487
1488			break;
1489		}
1490	} else
1491		usb_kill_urb(urb);
1492
1493	wait_for_completion(&completion);
1494	retval = urb->status;
1495	simple_free_urb(urb);
1496
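	/* An unexpected final status is reported with a path-specific offset
	 * (-1000 after an async unlink, -2000 after a sync kill) so callers
	 * can tell the two unlink paths apart from the return value alone.
	 */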
1497	if (async)
1498		return (retval == -ECONNRESET) ? 0 : retval - 1000;
1499	else
1500		return (retval == -ENOENT || retval == -EPERM) ?
1501				0 : retval - 2000;
1502}
1503
1504static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1505{
1506	int			retval = 0;
1507
1508	/* test sync and async paths */
1509	retval = unlink1(dev, pipe, len, 1);
1510	if (!retval)
1511		retval = unlink1(dev, pipe, len, 0);
1512	return retval;
1513}
1514
1515/*-------------------------------------------------------------------------*/
1516
1517struct queued_ctx {
1518	struct completion	complete;
1519	atomic_t		pending;
1520	unsigned		num;
1521	int			status;
1522	struct urb		**urbs;
1523};
1524
1525static void unlink_queued_callback(struct urb *urb)
1526{
1527	int			status = urb->status;
1528	struct queued_ctx	*ctx = urb->context;
1529
1530	if (ctx->status)
1531		goto done;
1532	if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
1533		if (status == -ECONNRESET)
1534			goto done;
1535		/* What error should we report if the URB completed normally? */
1536	}
1537	if (status != 0)
1538		ctx->status = status;
1539
1540 done:
1541	if (atomic_dec_and_test(&ctx->pending))
1542		complete(&ctx->complete);
1543}
1544
1545static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1546		unsigned size)
1547{
1548	struct queued_ctx	ctx;
1549	struct usb_device	*udev = testdev_to_usbdev(dev);
1550	void			*buf;
1551	dma_addr_t		buf_dma;
1552	int			i;
1553	int			retval = -ENOMEM;
1554
1555	init_completion(&ctx.complete);
1556	atomic_set(&ctx.pending, 1);	/* One more than the actual value */
1557	ctx.num = num;
1558	ctx.status = 0;
1559
1560	buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
1561	if (!buf)
1562		return retval;
1563	memset(buf, 0, size);
1564
1565	/* Allocate and init the urbs we'll queue */
1566	ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
1567	if (!ctx.urbs)
1568		goto free_buf;
1569	for (i = 0; i < num; i++) {
1570		ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
1571		if (!ctx.urbs[i])
1572			goto free_urbs;
1573		usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
1574				unlink_queued_callback, &ctx);
1575		ctx.urbs[i]->transfer_dma = buf_dma;
1576		ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1577
1578		if (usb_pipeout(ctx.urbs[i]->pipe)) {
1579			simple_fill_buf(ctx.urbs[i]);
1580			ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
1581		}
1582	}
1583
1584	/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1585	for (i = 0; i < num; i++) {
1586		atomic_inc(&ctx.pending);
1587		retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
1588		if (retval != 0) {
1589			dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
1590					i, retval);
1591			atomic_dec(&ctx.pending);
1592			ctx.status = retval;
1593			break;
1594		}
1595	}
1596	if (i == num) {
1597		usb_unlink_urb(ctx.urbs[num - 4]);
1598		usb_unlink_urb(ctx.urbs[num - 2]);
1599	} else {
1600		while (--i >= 0)
1601			usb_unlink_urb(ctx.urbs[i]);
1602	}
1603
1604	if (atomic_dec_and_test(&ctx.pending))		/* The extra count */
1605		complete(&ctx.complete);
1606	wait_for_completion(&ctx.complete);
1607	retval = ctx.status;
1608
1609 free_urbs:
1610	for (i = 0; i < num; i++)
1611		usb_free_urb(ctx.urbs[i]);
1612	kfree(ctx.urbs);
1613 free_buf:
1614	usb_free_coherent(udev, size, buf, buf_dma);
1615	return retval;
1616}
1617
1618/*-------------------------------------------------------------------------*/
1619
1620static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1621{
1622	int	retval;
1623	u16	status;
1624
1625	/* shouldn't look or act halted */
1626	retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1627	if (retval < 0) {
1628		ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1629				ep, retval);
1630		return retval;
1631	}
1632	if (status != 0) {
1633		ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
1634		return -EINVAL;
1635	}
1636	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1637	if (retval != 0)
1638		return -EINVAL;
1639	return 0;
1640}
1641
1642static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1643{
1644	int	retval;
1645	u16	status;
1646
1647	/* should look and act halted */
1648	retval = usb_get_std_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1649	if (retval < 0) {
1650		ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1651				ep, retval);
1652		return retval;
1653	}
1654	if (status != 1) {
1655		ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
1656		return -EINVAL;
1657	}
1658	retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1659	if (retval != -EPIPE)
1660		return -EINVAL;
1661	retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1662	if (retval != -EPIPE)
1663		return -EINVAL;
1664	return 0;
1665}
1666
1667static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1668{
1669	int	retval;
1670
1671	/* shouldn't look or act halted now */
1672	retval = verify_not_halted(tdev, ep, urb);
1673	if (retval < 0)
1674		return retval;
1675
1676	/* set halt (protocol test only), verify it worked */
1677	retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1678			USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1679			USB_ENDPOINT_HALT, ep,
1680			NULL, 0, USB_CTRL_SET_TIMEOUT);
1681	if (retval < 0) {
1682		ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1683		return retval;
1684	}
1685	retval = verify_halted(tdev, ep, urb);
1686	if (retval < 0) {
1687		int ret;
1688
1689		/* clear halt anyway, else further tests will fail */
1690		ret = usb_clear_halt(urb->dev, urb->pipe);
1691		if (ret)
1692			ERROR(tdev, "ep %02x couldn't clear halt, %d\n",
1693			      ep, ret);
1694
1695		return retval;
1696	}
1697
1698	/* clear halt (tests API + protocol), verify it worked */
1699	retval = usb_clear_halt(urb->dev, urb->pipe);
1700	if (retval < 0) {
1701		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1702		return retval;
1703	}
1704	retval = verify_not_halted(tdev, ep, urb);
1705	if (retval < 0)
1706		return retval;
1707
1708	/* NOTE:  could also verify SET_INTERFACE clear halts ... */
1709
1710	return 0;
1711}
1712
1713static int test_toggle_sync(struct usbtest_dev *tdev, int ep, struct urb *urb)
1714{
1715	int	retval;
1716
1717	/* clear initial data toggle to DATA0 */
1718	retval = usb_clear_halt(urb->dev, urb->pipe);
1719	if (retval < 0) {
1720		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1721		return retval;
1722	}
1723
1724	/* transfer 3 data packets, should be DATA0, DATA1, DATA0 */
1725	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1726	if (retval != 0)
1727		return -EINVAL;
1728
1729	/* clear halt resets the device-side data toggle; the host should react to it */
1730	retval = usb_clear_halt(urb->dev, urb->pipe);
1731	if (retval < 0) {
1732		ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1733		return retval;
1734	}
1735
1736	/* host should use DATA0 again after clear halt */
1737	retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1738
1739	return retval;
1740}
1741
1742static int halt_simple(struct usbtest_dev *dev)
1743{
1744	int			ep;
1745	int			retval = 0;
1746	struct urb		*urb;
1747	struct usb_device	*udev = testdev_to_usbdev(dev);
1748
1749	if (udev->speed == USB_SPEED_SUPER)
1750		urb = simple_alloc_urb(udev, 0, 1024, 0);
1751	else
1752		urb = simple_alloc_urb(udev, 0, 512, 0);
1753	if (urb == NULL)
1754		return -ENOMEM;
1755
1756	if (dev->in_pipe) {
1757		ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1758		urb->pipe = dev->in_pipe;
1759		retval = test_halt(dev, ep, urb);
1760		if (retval < 0)
1761			goto done;
1762	}
1763
1764	if (dev->out_pipe) {
1765		ep = usb_pipeendpoint(dev->out_pipe);
1766		urb->pipe = dev->out_pipe;
1767		retval = test_halt(dev, ep, urb);
1768	}
1769done:
1770	simple_free_urb(urb);
1771	return retval;
1772}
1773
1774static int toggle_sync_simple(struct usbtest_dev *dev)
1775{
1776	int			ep;
1777	int			retval = 0;
1778	struct urb		*urb;
1779	struct usb_device	*udev = testdev_to_usbdev(dev);
1780	unsigned		maxp = get_maxpacket(udev, dev->out_pipe);
1781
1782	/*
1783	 * Create a URB that transfers an odd number of data packets, so that
1784	 * the toggle clear has an impact on the data toggle sequence.
1785	 * Use two maxpacket-length packets and one zero-length packet.
1786	 */
1787	urb = simple_alloc_urb(udev, 0,  2 * maxp, 0);
1788	if (urb == NULL)
1789		return -ENOMEM;
1790
1791	urb->transfer_flags |= URB_ZERO_PACKET;
1792
1793	ep = usb_pipeendpoint(dev->out_pipe);
1794	urb->pipe = dev->out_pipe;
1795	retval = test_toggle_sync(dev, ep, urb);
1796
1797	simple_free_urb(urb);
1798	return retval;
1799}
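
/*
 * A sketch of the toggle sequence exercised above (the 512-byte maxpacket
 * is an assumed example value, not something the driver requires).  With
 * the 2 * maxp + ZLP transfer built by toggle_sync_simple() above:
 *
 *	first transfer:   DATA0 (512), DATA1 (512), DATA0 (ZLP)
 *	clear halt:       device resets its toggle; host must do the same
 *	second transfer:  DATA0 (512), DATA1 (512), DATA0 (ZLP)
 *
 * If the host failed to resynchronize after the clear halt, it would start
 * the second transfer on DATA1, leaving the two sides' toggles out of
 * step; that is the failure test_toggle_sync() is meant to expose.
 */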
1800
1801/*-------------------------------------------------------------------------*/
1802
1803/* Control OUT tests use the vendor control requests from Intel's
1804 * USB 2.0 compliance test device:  write a buffer, read it back.
1805 *
1806 * Intel's spec only _requires_ that it work for one packet, which
1807 * is pretty weak.   Some HCDs place limits here; most devices will
1808 * need to be able to handle more than one OUT data packet.  We'll
1809 * try whatever we're told to try.
1810 */
1811static int ctrl_out(struct usbtest_dev *dev,
1812		unsigned count, unsigned length, unsigned vary, unsigned offset)
1813{
1814	unsigned		i, j, len;
1815	int			retval;
1816	u8			*buf;
1817	char			*what = "?";
1818	struct usb_device	*udev;
1819
1820	if (length < 1 || length > 0xffff || vary >= length)
1821		return -EINVAL;
1822
1823	buf = kmalloc(length + offset, GFP_KERNEL);
1824	if (!buf)
1825		return -ENOMEM;
1826
1827	buf += offset;
1828	udev = testdev_to_usbdev(dev);
1829	len = length;
1830	retval = 0;
1831
1832	/* NOTE:  hardware might well act differently if we pushed it
1833	 * with lots of back-to-back queued requests.
1834	 */
1835	for (i = 0; i < count; i++) {
1836		/* write patterned data */
1837		for (j = 0; j < len; j++)
1838			buf[j] = (u8)(i + j);
1839		retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1840				0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1841				0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1842		if (retval != len) {
1843			what = "write";
1844			if (retval >= 0) {
1845				ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1846						retval, len);
1847				retval = -EBADMSG;
1848			}
1849			break;
1850		}
1851
1852		/* read it back -- assuming nothing intervened!!  */
1853		retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1854				0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1855				0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1856		if (retval != len) {
1857			what = "read";
1858			if (retval >= 0) {
1859				ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1860						retval, len);
1861				retval = -EBADMSG;
1862			}
1863			break;
1864		}
1865
1866		/* fail if we can't verify */
1867		for (j = 0; j < len; j++) {
1868			if (buf[j] != (u8)(i + j)) {
1869				ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1870					j, buf[j], (u8)(i + j));
1871				retval = -EBADMSG;
1872				break;
1873			}
1874		}
1875		if (retval < 0) {
1876			what = "verify";
1877			break;
1878		}
1879
1880		len += vary;
1881
1882		/* [real world] the "zero bytes IN" case isn't really used.
1883		 * hardware can easily trip up in this weird case, since its
1884		 * status stage is IN, not OUT like other ep0in transfers.
1885		 */
1886		if (len > length)
1887			len = realworld ? 1 : 0;
1888	}
1889
1890	if (retval < 0)
1891		ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
1892			what, retval, i);
1893
1894	kfree(buf - offset);
1895	return retval;
1896}
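
/*
 * Worked example of the "vary" progression above (the numbers are assumed,
 * not required by the driver): with length = 256, vary = 64, and realworld
 * left at its usual nonzero setting, successive iterations use
 *
 *	len = 256, 1, 65, 129, 193, 1, 65, ...
 *
 * i.e. len grows by "vary" each pass and wraps back to a 1-byte (or, with
 * realworld = 0, zero-byte) transfer once it would exceed "length".
 */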
1897
1898/*-------------------------------------------------------------------------*/
1899
1900	/* ISO/BULK tests ... mimic common usage
1901 *  - buffer length is split into N packets (mostly maxpacket sized)
1902 *  - multi-buffers according to sglen
1903 */
1904
1905struct transfer_context {
1906	unsigned		count;
1907	unsigned		pending;
1908	spinlock_t		lock;
1909	struct completion	done;
1910	int			submit_error;
1911	unsigned long		errors;
1912	unsigned long		packet_count;
1913	struct usbtest_dev	*dev;
1914	bool			is_iso;
1915};
1916
1917static void complicated_callback(struct urb *urb)
1918{
1919	struct transfer_context	*ctx = urb->context;
1920
1921	spin_lock(&ctx->lock);
1922	ctx->count--;
1923
1924	ctx->packet_count += urb->number_of_packets;
1925	if (urb->error_count > 0)
1926		ctx->errors += urb->error_count;
1927	else if (urb->status != 0)
1928		ctx->errors += (ctx->is_iso ? urb->number_of_packets : 1);
1929	else if (urb->actual_length != urb->transfer_buffer_length)
1930		ctx->errors++;
1931	else if (check_guard_bytes(ctx->dev, urb) != 0)
1932		ctx->errors++;
1933
1934	if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1935			&& !ctx->submit_error) {
1936		int status = usb_submit_urb(urb, GFP_ATOMIC);
1937		switch (status) {
1938		case 0:
1939			goto done;
1940		default:
1941			dev_err(&ctx->dev->intf->dev,
1942					"resubmit err %d\n",
1943					status);
1944			/* FALLTHROUGH */
1945		case -ENODEV:			/* disconnected */
1946		case -ESHUTDOWN:		/* endpoint disabled */
1947			ctx->submit_error = 1;
1948			break;
1949		}
1950	}
1951
1952	ctx->pending--;
1953	if (ctx->pending == 0) {
1954		if (ctx->errors)
1955			dev_err(&ctx->dev->intf->dev,
1956				"during the test, %lu errors out of %lu\n",
1957				ctx->errors, ctx->packet_count);
1958		complete(&ctx->done);
1959	}
1960done:
1961	spin_unlock(&ctx->lock);
1962}
1963
1964static struct urb *iso_alloc_urb(
1965	struct usb_device	*udev,
1966	int			pipe,
1967	struct usb_endpoint_descriptor	*desc,
1968	long			bytes,
1969	unsigned offset
1970)
1971{
1972	struct urb		*urb;
1973	unsigned		i, maxp, packets;
1974
1975	if (bytes < 0 || !desc)
1976		return NULL;
1977	maxp = usb_endpoint_maxp(desc);
1978	maxp *= usb_endpoint_maxp_mult(desc);
1979	packets = DIV_ROUND_UP(bytes, maxp);
1980
1981	urb = usb_alloc_urb(packets, GFP_KERNEL);
1982	if (!urb)
1983		return urb;
1984	urb->dev = udev;
1985	urb->pipe = pipe;
1986
1987	urb->number_of_packets = packets;
1988	urb->transfer_buffer_length = bytes;
1989	urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1990							GFP_KERNEL,
1991							&urb->transfer_dma);
1992	if (!urb->transfer_buffer) {
1993		usb_free_urb(urb);
1994		return NULL;
1995	}
1996	if (offset) {
1997		memset(urb->transfer_buffer, GUARD_BYTE, offset);
1998		urb->transfer_buffer += offset;
1999		urb->transfer_dma += offset;
2000	}
2001	/* For inbound transfers use a guard byte so that the test fails
2002	 * if data is not correctly copied */
2003	memset(urb->transfer_buffer,
2004			usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
2005			bytes);
2006
2007	for (i = 0; i < packets; i++) {
2008		/* here, only the last packet will be short */
2009		urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
2010		bytes -= urb->iso_frame_desc[i].length;
2011
2012		urb->iso_frame_desc[i].offset = maxp * i;
2013	}
2014
2015	urb->complete = complicated_callback;
2016	/* urb->context = SET BY CALLER */
2017	urb->interval = 1 << (desc->bInterval - 1);
2018	urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
2019	return urb;
2020}
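
/*
 * Worked example of the frame layout built above (3000 bytes and a
 * 1024-byte maxpacket are assumed values, nothing the driver requires):
 * DIV_ROUND_UP(3000, 1024) gives three packets,
 *
 *	iso_frame_desc[0]: offset    0, length 1024
 *	iso_frame_desc[1]: offset 1024, length 1024
 *	iso_frame_desc[2]: offset 2048, length  952
 *
 * so only the last packet is short, as the loop's min() intends.
 */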
2021
2022static int
2023test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
2024		int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
2025{
2026	struct transfer_context	context;
2027	struct usb_device	*udev;
2028	unsigned		i;
2029	unsigned long		packets = 0;
2030	int			status = 0;
2031	struct urb		*urbs[MAX_SGLEN];
2032
2033	if (!param->sglen || param->iterations > UINT_MAX / param->sglen)
2034		return -EINVAL;
2035
2036	if (param->sglen > MAX_SGLEN)
2037		return -EINVAL;
2038
2039	memset(&context, 0, sizeof(context));
2040	context.count = param->iterations * param->sglen;
2041	context.dev = dev;
2042	context.is_iso = !!desc;
2043	init_completion(&context.done);
2044	spin_lock_init(&context.lock);
2045
2046	udev = testdev_to_usbdev(dev);
2047
2048	for (i = 0; i < param->sglen; i++) {
2049		if (context.is_iso)
2050			urbs[i] = iso_alloc_urb(udev, pipe, desc,
2051					param->length, offset);
2052		else
2053			urbs[i] = complicated_alloc_urb(udev, pipe,
2054					param->length, 0);
2055
2056		if (!urbs[i]) {
2057			status = -ENOMEM;
2058			goto fail;
2059		}
2060		packets += urbs[i]->number_of_packets;
2061		urbs[i]->context = &context;
2062	}
2063	packets *= param->iterations;
2064
2065	if (context.is_iso) {
2066		dev_info(&dev->intf->dev,
2067			"iso period %d %sframes, wMaxPacket %d, transactions: %d\n",
2068			1 << (desc->bInterval - 1),
2069			(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
2070			usb_endpoint_maxp(desc),
2071			usb_endpoint_maxp_mult(desc));
2072
2073		dev_info(&dev->intf->dev,
2074			"total %lu msec (%lu packets)\n",
2075			(packets * (1 << (desc->bInterval - 1)))
2076				/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
2077			packets);
2078	}
2079
2080	spin_lock_irq(&context.lock);
2081	for (i = 0; i < param->sglen; i++) {
2082		++context.pending;
2083		status = usb_submit_urb(urbs[i], GFP_ATOMIC);
2084		if (status < 0) {
2085			ERROR(dev, "submit iso[%d], error %d\n", i, status);
2086			if (i == 0) {
2087				spin_unlock_irq(&context.lock);
2088				goto fail;
2089			}
2090
2091			simple_free_urb(urbs[i]);
2092			urbs[i] = NULL;
2093			context.pending--;
2094			context.submit_error = 1;
2095			break;
2096		}
2097	}
2098	spin_unlock_irq(&context.lock);
2099
2100	wait_for_completion(&context.done);
2101
2102	for (i = 0; i < param->sglen; i++) {
2103		if (urbs[i])
2104			simple_free_urb(urbs[i]);
2105	}
2106	/*
2107	 * Isochronous transfers are expected to fail sometimes.  As an
2108	 * arbitrary limit, we will report an error if any submissions
2109	 * fail or if the transfer failure rate is > 10%.
2110	 */
2111	if (status != 0)
2112		;
2113	else if (context.submit_error)
2114		status = -EACCES;
2115	else if (context.errors >
2116			(context.is_iso ? context.packet_count / 10 : 0))
2117		status = -EIO;
2118	return status;
2119
2120fail:
2121	for (i = 0; i < param->sglen; i++) {
2122		if (urbs[i])
2123			simple_free_urb(urbs[i]);
2124	}
2125	return status;
2126}
2127
2128static int test_unaligned_bulk(
2129	struct usbtest_dev *tdev,
2130	int pipe,
2131	unsigned length,
2132	int iterations,
2133	unsigned transfer_flags,
2134	const char *label)
2135{
2136	int retval;
2137	struct urb *urb = usbtest_alloc_urb(testdev_to_usbdev(tdev),
2138			pipe, length, transfer_flags, 1, 0, simple_callback);
2139
2140	if (!urb)
2141		return -ENOMEM;
2142
2143	retval = simple_io(tdev, urb, iterations, 0, 0, label);
2144	simple_free_urb(urb);
2145	return retval;
2146}
2147
2148/* Run tests. */
2149static int
2150usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
2151{
2152	struct usbtest_dev	*dev = usb_get_intfdata(intf);
2153	struct usb_device	*udev = testdev_to_usbdev(dev);
2154	struct urb		*urb;
2155	struct scatterlist	*sg;
2156	struct usb_sg_request	req;
2157	unsigned		i;
2158	int	retval = -EOPNOTSUPP;
2159
2160	if (param->iterations <= 0)
2161		return -EINVAL;
2162	if (param->sglen > MAX_SGLEN)
2163		return -EINVAL;
2164	/*
2165	 * Just a bunch of test cases that every HCD is expected to handle.
2166	 *
2167	 * Some may need specific firmware, though it'd be good to have
2168	 * one firmware image to handle all the test cases.
2169	 *
2170	 * FIXME add more tests!  cancel requests, verify the data, control
2171	 * queueing, concurrent read+write threads, and so on.
2172	 */
2173	switch (param->test_num) {
2174
2175	case 0:
2176		dev_info(&intf->dev, "TEST 0:  NOP\n");
2177		retval = 0;
2178		break;
2179
2180	/* Simple non-queued bulk I/O tests */
2181	case 1:
2182		if (dev->out_pipe == 0)
2183			break;
2184		dev_info(&intf->dev,
2185				"TEST 1:  write %d bytes %u times\n",
2186				param->length, param->iterations);
2187		urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
2188		if (!urb) {
2189			retval = -ENOMEM;
2190			break;
2191		}
2192		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2193		retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
2194		simple_free_urb(urb);
2195		break;
2196	case 2:
2197		if (dev->in_pipe == 0)
2198			break;
2199		dev_info(&intf->dev,
2200				"TEST 2:  read %d bytes %u times\n",
2201				param->length, param->iterations);
2202		urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
2203		if (!urb) {
2204			retval = -ENOMEM;
2205			break;
2206		}
2207		/* FIRMWARE:  bulk source (maybe generates short writes) */
2208		retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
2209		simple_free_urb(urb);
2210		break;
2211	case 3:
2212		if (dev->out_pipe == 0 || param->vary == 0)
2213			break;
2214		dev_info(&intf->dev,
2215				"TEST 3:  write/%d 0..%d bytes %u times\n",
2216				param->vary, param->length, param->iterations);
2217		urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
2218		if (!urb) {
2219			retval = -ENOMEM;
2220			break;
2221		}
2222		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2223		retval = simple_io(dev, urb, param->iterations, param->vary,
2224					0, "test3");
2225		simple_free_urb(urb);
2226		break;
2227	case 4:
2228		if (dev->in_pipe == 0 || param->vary == 0)
2229			break;
2230		dev_info(&intf->dev,
2231				"TEST 4:  read/%d 0..%d bytes %u times\n",
2232				param->vary, param->length, param->iterations);
2233		urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
2234		if (!urb) {
2235			retval = -ENOMEM;
2236			break;
2237		}
2238		/* FIRMWARE:  bulk source (maybe generates short writes) */
2239		retval = simple_io(dev, urb, param->iterations, param->vary,
2240					0, "test4");
2241		simple_free_urb(urb);
2242		break;
2243
2244	/* Queued bulk I/O tests */
2245	case 5:
2246		if (dev->out_pipe == 0 || param->sglen == 0)
2247			break;
2248		dev_info(&intf->dev,
2249			"TEST 5:  write %d sglists %d entries of %d bytes\n",
2250				param->iterations,
2251				param->sglen, param->length);
2252		sg = alloc_sglist(param->sglen, param->length,
2253				0, dev, dev->out_pipe);
2254		if (!sg) {
2255			retval = -ENOMEM;
2256			break;
2257		}
2258		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2259		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2260				&req, sg, param->sglen);
2261		free_sglist(sg, param->sglen);
2262		break;
2263
2264	case 6:
2265		if (dev->in_pipe == 0 || param->sglen == 0)
2266			break;
2267		dev_info(&intf->dev,
2268			"TEST 6:  read %d sglists %d entries of %d bytes\n",
2269				param->iterations,
2270				param->sglen, param->length);
2271		sg = alloc_sglist(param->sglen, param->length,
2272				0, dev, dev->in_pipe);
2273		if (!sg) {
2274			retval = -ENOMEM;
2275			break;
2276		}
2277		/* FIRMWARE:  bulk source (maybe generates short writes) */
2278		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2279				&req, sg, param->sglen);
2280		free_sglist(sg, param->sglen);
2281		break;
2282	case 7:
2283		if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
2284			break;
2285		dev_info(&intf->dev,
2286			"TEST 7:  write/%d %d sglists %d entries 0..%d bytes\n",
2287				param->vary, param->iterations,
2288				param->sglen, param->length);
2289		sg = alloc_sglist(param->sglen, param->length,
2290				param->vary, dev, dev->out_pipe);
2291		if (!sg) {
2292			retval = -ENOMEM;
2293			break;
2294		}
2295		/* FIRMWARE:  bulk sink (maybe accepts short writes) */
2296		retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2297				&req, sg, param->sglen);
2298		free_sglist(sg, param->sglen);
2299		break;
2300	case 8:
2301		if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
2302			break;
2303		dev_info(&intf->dev,
2304			"TEST 8:  read/%d %d sglists %d entries 0..%d bytes\n",
2305				param->vary, param->iterations,
2306				param->sglen, param->length);
2307		sg = alloc_sglist(param->sglen, param->length,
2308				param->vary, dev, dev->in_pipe);
2309		if (!sg) {
2310			retval = -ENOMEM;
2311			break;
2312		}
2313		/* FIRMWARE:  bulk source (maybe generates short writes) */
2314		retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2315				&req, sg, param->sglen);
2316		free_sglist(sg, param->sglen);
2317		break;
2318
2319	/* non-queued sanity tests for control (chapter 9 subset) */
2320	case 9:
2321		retval = 0;
2322		dev_info(&intf->dev,
2323			"TEST 9:  ch9 (subset) control tests, %d times\n",
2324				param->iterations);
2325		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2326			retval = ch9_postconfig(dev);
2327		if (retval)
2328			dev_err(&intf->dev, "ch9 subset failed, "
2329					"iterations left %d\n", i);
2330		break;
2331
2332	/* queued control messaging */
2333	case 10:
2334		retval = 0;
2335		dev_info(&intf->dev,
2336				"TEST 10:  queue %d control calls, %d times\n",
2337				param->sglen,
2338				param->iterations);
2339		retval = test_ctrl_queue(dev, param);
2340		break;
2341
2342	/* simple non-queued unlinks (ring with one urb) */
2343	case 11:
2344		if (dev->in_pipe == 0 || !param->length)
2345			break;
2346		retval = 0;
2347		dev_info(&intf->dev, "TEST 11:  unlink %d reads of %d\n",
2348				param->iterations, param->length);
2349		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2350			retval = unlink_simple(dev, dev->in_pipe,
2351						param->length);
2352		if (retval)
2353			dev_err(&intf->dev, "unlink reads failed %d, "
2354				"iterations left %d\n", retval, i);
2355		break;
2356	case 12:
2357		if (dev->out_pipe == 0 || !param->length)
2358			break;
2359		retval = 0;
2360		dev_info(&intf->dev, "TEST 12:  unlink %d writes of %d\n",
2361				param->iterations, param->length);
2362		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2363			retval = unlink_simple(dev, dev->out_pipe,
2364						param->length);
2365		if (retval)
2366			dev_err(&intf->dev, "unlink writes failed %d, "
2367				"iterations left %d\n", retval, i);
2368		break;
2369
2370	/* ep halt tests */
2371	case 13:
2372		if (dev->out_pipe == 0 && dev->in_pipe == 0)
2373			break;
2374		retval = 0;
2375		dev_info(&intf->dev, "TEST 13:  set/clear %d halts\n",
2376				param->iterations);
2377		for (i = param->iterations; retval == 0 && i--; /* NOP */)
2378			retval = halt_simple(dev);
2379
2380		if (retval)
2381			ERROR(dev, "halts failed, iterations left %d\n", i);
2382		break;
2383
2384	/* control write tests */
2385	case 14:
2386		if (!dev->info->ctrl_out)
2387			break;
2388		dev_info(&intf->dev, "TEST 14:  %d ep0out, %d..%d vary %d\n",
2389				param->iterations,
2390				realworld ? 1 : 0, param->length,
2391				param->vary);
2392		retval = ctrl_out(dev, param->iterations,
2393				param->length, param->vary, 0);
2394		break;
2395
2396	/* iso write tests */
2397	case 15:
2398		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2399			break;
2400		dev_info(&intf->dev,
2401			"TEST 15:  write %d iso, %d entries of %d bytes\n",
2402				param->iterations,
2403				param->sglen, param->length);
2404		/* FIRMWARE:  iso sink */
2405		retval = test_queue(dev, param,
2406				dev->out_iso_pipe, dev->iso_out, 0);
2407		break;
2408
2409	/* iso read tests */
2410	case 16:
2411		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2412			break;
2413		dev_info(&intf->dev,
2414			"TEST 16:  read %d iso, %d entries of %d bytes\n",
2415				param->iterations,
2416				param->sglen, param->length);
2417		/* FIRMWARE:  iso source */
2418		retval = test_queue(dev, param,
2419				dev->in_iso_pipe, dev->iso_in, 0);
2420		break;
2421
2422	/* FIXME scatterlist cancel (needs helper thread) */
2423
2424	/* Tests for bulk I/O using DMA mapping by core and odd address */
2425	case 17:
2426		if (dev->out_pipe == 0)
2427			break;
2428		dev_info(&intf->dev,
2429			"TEST 17:  write odd addr %d bytes %u times core map\n",
2430			param->length, param->iterations);
2431
2432		retval = test_unaligned_bulk(
2433				dev, dev->out_pipe,
2434				param->length, param->iterations,
2435				0, "test17");
2436		break;
2437
2438	case 18:
2439		if (dev->in_pipe == 0)
2440			break;
2441		dev_info(&intf->dev,
2442			"TEST 18:  read odd addr %d bytes %u times core map\n",
2443			param->length, param->iterations);
2444
2445		retval = test_unaligned_bulk(
2446				dev, dev->in_pipe,
2447				param->length, param->iterations,
2448				0, "test18");
2449		break;
2450
2451	/* Tests for bulk I/O using premapped coherent buffer and odd address */
2452	case 19:
2453		if (dev->out_pipe == 0)
2454			break;
2455		dev_info(&intf->dev,
2456			"TEST 19:  write odd addr %d bytes %u times premapped\n",
2457			param->length, param->iterations);
2458
2459		retval = test_unaligned_bulk(
2460				dev, dev->out_pipe,
2461				param->length, param->iterations,
2462				URB_NO_TRANSFER_DMA_MAP, "test19");
2463		break;
2464
2465	case 20:
2466		if (dev->in_pipe == 0)
2467			break;
2468		dev_info(&intf->dev,
2469			"TEST 20:  read odd addr %d bytes %u times premapped\n",
2470			param->length, param->iterations);
2471
2472		retval = test_unaligned_bulk(
2473				dev, dev->in_pipe,
2474				param->length, param->iterations,
2475				URB_NO_TRANSFER_DMA_MAP, "test20");
2476		break;
2477
2478	/* control write tests with unaligned buffer */
2479	case 21:
2480		if (!dev->info->ctrl_out)
2481			break;
2482		dev_info(&intf->dev,
2483				"TEST 21:  %d ep0out odd addr, %d..%d vary %d\n",
2484				param->iterations,
2485				realworld ? 1 : 0, param->length,
2486				param->vary);
2487		retval = ctrl_out(dev, param->iterations,
2488				param->length, param->vary, 1);
2489		break;
2490
2491	/* unaligned iso tests */
2492	case 22:
2493		if (dev->out_iso_pipe == 0 || param->sglen == 0)
2494			break;
2495		dev_info(&intf->dev,
2496			"TEST 22:  write %d iso odd, %d entries of %d bytes\n",
2497				param->iterations,
2498				param->sglen, param->length);
2499		retval = test_queue(dev, param,
2500				dev->out_iso_pipe, dev->iso_out, 1);
2501		break;
2502
2503	case 23:
2504		if (dev->in_iso_pipe == 0 || param->sglen == 0)
2505			break;
2506		dev_info(&intf->dev,
2507			"TEST 23:  read %d iso odd, %d entries of %d bytes\n",
2508				param->iterations,
2509				param->sglen, param->length);
2510		retval = test_queue(dev, param,
2511				dev->in_iso_pipe, dev->iso_in, 1);
2512		break;
2513
2514	/* unlink URBs from a bulk-OUT queue */
2515	case 24:
2516		if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
2517			break;
2518		retval = 0;
2519		dev_info(&intf->dev, "TEST 24:  unlink from %d queues of "
2520				"%d %d-byte writes\n",
2521				param->iterations, param->sglen, param->length);
2522		for (i = param->iterations; retval == 0 && i > 0; --i) {
2523			retval = unlink_queued(dev, dev->out_pipe,
2524						param->sglen, param->length);
2525			if (retval) {
2526				dev_err(&intf->dev,
2527					"unlink queued writes failed %d, "
2528					"iterations left %d\n", retval, i);
2529				break;
2530			}
2531		}
2532		break;
2533
2534	/* Simple non-queued interrupt I/O tests */
2535	case 25:
2536		if (dev->out_int_pipe == 0)
2537			break;
2538		dev_info(&intf->dev,
2539				"TEST 25: write %d bytes %u times\n",
2540				param->length, param->iterations);
2541		urb = simple_alloc_urb(udev, dev->out_int_pipe, param->length,
2542				dev->int_out->bInterval);
2543		if (!urb) {
2544			retval = -ENOMEM;
2545			break;
2546		}
2547		/* FIRMWARE: interrupt sink (maybe accepts short writes) */
2548		retval = simple_io(dev, urb, param->iterations, 0, 0, "test25");
2549		simple_free_urb(urb);
2550		break;
2551	case 26:
2552		if (dev->in_int_pipe == 0)
2553			break;
2554		dev_info(&intf->dev,
2555				"TEST 26: read %d bytes %u times\n",
2556				param->length, param->iterations);
2557		urb = simple_alloc_urb(udev, dev->in_int_pipe, param->length,
2558				dev->int_in->bInterval);
2559		if (!urb) {
2560			retval = -ENOMEM;
2561			break;
2562		}
2563		/* FIRMWARE: interrupt source (maybe generates short writes) */
2564		retval = simple_io(dev, urb, param->iterations, 0, 0, "test26");
2565		simple_free_urb(urb);
2566		break;
2567	case 27:
2568		/* We do a performance test, so ignore the data comparison */
2569		if (dev->out_pipe == 0 || param->sglen == 0 || pattern != 0)
2570			break;
2571		dev_info(&intf->dev,
2572			"TEST 27: bulk write %dMbytes\n", (param->iterations *
2573			param->sglen * param->length) / (1024 * 1024));
2574		retval = test_queue(dev, param,
2575				dev->out_pipe, NULL, 0);
2576		break;
2577	case 28:
2578		if (dev->in_pipe == 0 || param->sglen == 0 || pattern != 0)
2579			break;
2580		dev_info(&intf->dev,
2581			"TEST 28: bulk read %dMbytes\n", (param->iterations *
2582			param->sglen * param->length) / (1024 * 1024));
2583		retval = test_queue(dev, param,
2584				dev->in_pipe, NULL, 0);
2585		break;
2586	/* Test data toggle/seq_nr clearing between bulk OUT transfers */
2587	case 29:
2588		if (dev->out_pipe == 0)
2589			break;
2590		retval = 0;
2591		dev_info(&intf->dev, "TEST 29: Clear toggle between bulk writes %d times\n",
2592				param->iterations);
2593		for (i = param->iterations; retval == 0 && i > 0; --i)
2594			retval = toggle_sync_simple(dev);
2595
2596		if (retval)
2597			ERROR(dev, "toggle sync failed, iterations left %d\n",
2598			      i);
2599		break;
2600	}
2601	return retval;
2602}
2603
2604/*-------------------------------------------------------------------------*/
2605
2606/* We only have this one interface to user space, through usbfs.
2607 * User mode code can scan usbfs to find N different devices (maybe on
2608 * different busses) to use when testing, and allocate one thread per
2609 * test.  So discovery is simplified, and we have no device naming issues.
2610 *
2611	 * Don't use these only as stress/load tests.  Use them along with
2612 * other USB bus activity:  plugging, unplugging, mousing, mp3 playback,
2613 * video capture, and so on.  Run different tests at different times, in
2614 * different sequences.  Nothing here should interact with other devices,
2615 * except indirectly by consuming USB bandwidth and CPU resources for test
2616 * threads and request completion.  But the only way to know that for sure
2617 * is to test when HC queues are in use by many devices.
2618 *
2619 * WARNING:  Because usbfs grabs udev->dev.sem before calling this ioctl(),
2620 * it locks out usbcore in certain code paths.  Notably, if you disconnect
2621	 * the device-under-test, hub_wq will block forever waiting for the
2622 * ioctl to complete ... so that usb_disconnect() can abort the pending
2623 * urbs and then call usbtest_disconnect().  To abort a test, you're best
2624 * off just killing the userspace task and waiting for it to exit.
2625 */
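
/*
 * A minimal user-space sketch of how one test run is kicked off through
 * usbfs (illustrative only, guarded out of this build; the struct layout
 * and the example parameter values are assumptions here -- the in-tree
 * tools/usb/testusb.c utility is the authoritative reference).
 */
#if 0
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/usbdevice_fs.h>

struct usbtest_param_32 {			/* must match the driver's layout */
	__u32	test_num, iterations, length, vary, sglen;
	__u32	duration_sec, duration_usec;	/* filled in by the driver */
};
#define USBTEST_REQUEST_32	_IOWR('U', 100, struct usbtest_param_32)

static int run_one_test(const char *usbfs_path, int ifnum, unsigned test_num)
{
	struct usbtest_param_32	param = {
		.test_num	= test_num,
		.iterations	= 1000,
		.length		= 1024,
		.vary		= 1024,
		.sglen		= 32,
	};
	struct usbdevfs_ioctl	wrapper = {
		.ifno		= ifnum,	/* interface bound to usbtest */
		.ioctl_code	= USBTEST_REQUEST_32,
		.data		= &param,
	};
	int fd = open(usbfs_path, O_RDWR);	/* e.g. /dev/bus/usb/BBB/DDD */

	if (fd < 0)
		return -1;
	/* usbfs forwards this request to usbtest_ioctl() below */
	if (ioctl(fd, USBDEVFS_IOCTL, &wrapper) < 0) {
		close(fd);
		return -1;
	}
	printf("test %u took %u.%06u s\n", test_num,
			param.duration_sec, param.duration_usec);
	close(fd);
	return 0;
}
#endif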
2626
2627static int
2628usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
2629{
2630
2631	struct usbtest_dev	*dev = usb_get_intfdata(intf);
2632	struct usbtest_param_64 *param_64 = buf;
2633	struct usbtest_param_32 temp;
2634	struct usbtest_param_32 *param_32 = buf;
2635	struct timespec64 start;
2636	struct timespec64 end;
2637	struct timespec64 duration;
2638	int retval = -EOPNOTSUPP;
2639
2640	/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
2641
2642	pattern = mod_pattern;
2643
2644	if (mutex_lock_interruptible(&dev->lock))
2645		return -ERESTARTSYS;
2646
2647	/* FIXME: What if a system sleep starts while a test is running? */
2648
2649	/* some devices, like ez-usb default devices, need a non-default
2650	 * altsetting to have any active endpoints.  some tests change
2651	 * altsettings; force a default so most tests don't need to check.
2652	 */
2653	if (dev->info->alt >= 0) {
2654		if (intf->altsetting->desc.bInterfaceNumber) {
2655			retval = -ENODEV;
2656			goto free_mutex;
2657		}
2658		retval = set_altsetting(dev, dev->info->alt);
2659		if (retval) {
2660			dev_err(&intf->dev,
2661					"set altsetting to %d failed, %d\n",
2662					dev->info->alt, retval);
2663			goto free_mutex;
2664		}
2665	}
2666
2667	switch (code) {
2668	case USBTEST_REQUEST_64:
2669		temp.test_num = param_64->test_num;
2670		temp.iterations = param_64->iterations;
2671		temp.length = param_64->length;
2672		temp.sglen = param_64->sglen;
2673		temp.vary = param_64->vary;
2674		param_32 = &temp;
2675		break;
2676
2677	case USBTEST_REQUEST_32:
2678		break;
2679
2680	default:
2681		retval = -EOPNOTSUPP;
2682		goto free_mutex;
2683	}
2684
2685	ktime_get_ts64(&start);
2686
2687	retval = usbtest_do_ioctl(intf, param_32);
2688	if (retval < 0)
2689		goto free_mutex;
2690
2691	ktime_get_ts64(&end);
2692
2693	duration = timespec64_sub(end, start);
2694
2695	temp.duration_sec = duration.tv_sec;
2696	temp.duration_usec = duration.tv_nsec/NSEC_PER_USEC;
2697
2698	switch (code) {
2699	case USBTEST_REQUEST_32:
2700		param_32->duration_sec = temp.duration_sec;
2701		param_32->duration_usec = temp.duration_usec;
2702		break;
2703
2704	case USBTEST_REQUEST_64:
2705		param_64->duration_sec = temp.duration_sec;
2706		param_64->duration_usec = temp.duration_usec;
2707		break;
2708	}
2709
2710free_mutex:
2711	mutex_unlock(&dev->lock);
2712	return retval;
2713}
2714
2715/*-------------------------------------------------------------------------*/
2716
2717static unsigned force_interrupt;
2718module_param(force_interrupt, uint, 0);
2719MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2720
2721#ifdef	GENERIC
2722static unsigned short vendor;
2723module_param(vendor, ushort, 0);
2724MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2725
2726static unsigned short product;
2727module_param(product, ushort, 0);
2728MODULE_PARM_DESC(product, "product code (from vendor)");
2729#endif
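
/* With the GENERIC table entry below, a device that has no better match can
 * be picked for control-only testing via these parameters, e.g.
 * (hypothetical IDs):  modprobe usbtest vendor=0x1234 product=0x5678
 */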
2730
2731static int
2732usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2733{
2734	struct usb_device	*udev;
2735	struct usbtest_dev	*dev;
2736	struct usbtest_info	*info;
2737	char			*rtest, *wtest;
2738	char			*irtest, *iwtest;
2739	char			*intrtest, *intwtest;
2740
2741	udev = interface_to_usbdev(intf);
2742
2743#ifdef	GENERIC
2744	/* specify devices by module parameters? */
2745	if (id->match_flags == 0) {
2746		/* vendor match required, product match optional */
2747		if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2748			return -ENODEV;
2749		if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2750			return -ENODEV;
2751		dev_info(&intf->dev, "matched module params, "
2752					"vend=0x%04x prod=0x%04x\n",
2753				le16_to_cpu(udev->descriptor.idVendor),
2754				le16_to_cpu(udev->descriptor.idProduct));
2755	}
2756#endif
2757
2758	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2759	if (!dev)
2760		return -ENOMEM;
2761	info = (struct usbtest_info *) id->driver_info;
2762	dev->info = info;
2763	mutex_init(&dev->lock);
2764
2765	dev->intf = intf;
2766
2767	/* cacheline-aligned scratch for i/o */
2768	dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2769	if (dev->buf == NULL) {
2770		kfree(dev);
2771		return -ENOMEM;
2772	}
2773
2774	/* NOTE this doesn't yet test the handful of differences that are
2775	 * visible with high speed interrupts:  bigger maxpacket (1K) and
2776	 * "high bandwidth" modes (up to 3 packets/uframe).
2777	 */
2778	rtest = wtest = "";
2779	irtest = iwtest = "";
2780	intrtest = intwtest = "";
2781	if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2782		if (info->ep_in) {
2783			dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2784			rtest = " intr-in";
2785		}
2786		if (info->ep_out) {
2787			dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2788			wtest = " intr-out";
2789		}
2790	} else {
2791		if (override_alt >= 0 || info->autoconf) {
2792			int status;
2793
2794			status = get_endpoints(dev, intf);
2795			if (status < 0) {
2796				WARNING(dev, "couldn't get endpoints, %d\n",
2797						status);
2798				kfree(dev->buf);
2799				kfree(dev);
2800				return status;
2801			}
2802			/* may find bulk or ISO pipes */
2803		} else {
2804			if (info->ep_in)
2805				dev->in_pipe = usb_rcvbulkpipe(udev,
2806							info->ep_in);
2807			if (info->ep_out)
2808				dev->out_pipe = usb_sndbulkpipe(udev,
2809							info->ep_out);
2810		}
2811		if (dev->in_pipe)
2812			rtest = " bulk-in";
2813		if (dev->out_pipe)
2814			wtest = " bulk-out";
2815		if (dev->in_iso_pipe)
2816			irtest = " iso-in";
2817		if (dev->out_iso_pipe)
2818			iwtest = " iso-out";
2819		if (dev->in_int_pipe)
2820			intrtest = " int-in";
2821		if (dev->out_int_pipe)
2822			intwtest = " int-out";
2823	}
2824
2825	usb_set_intfdata(intf, dev);
2826	dev_info(&intf->dev, "%s\n", info->name);
2827	dev_info(&intf->dev, "%s {control%s%s%s%s%s%s%s} tests%s\n",
2828			usb_speed_string(udev->speed),
2829			info->ctrl_out ? " in/out" : "",
2830			rtest, wtest,
2831			irtest, iwtest,
2832			intrtest, intwtest,
2833			info->alt >= 0 ? " (+alt)" : "");
2834	return 0;
2835}
2836
2837static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2838{
2839	return 0;
2840}
2841
2842static int usbtest_resume(struct usb_interface *intf)
2843{
2844	return 0;
2845}
2846
2847
2848static void usbtest_disconnect(struct usb_interface *intf)
2849{
2850	struct usbtest_dev	*dev = usb_get_intfdata(intf);
2851
2852	usb_set_intfdata(intf, NULL);
2853	dev_dbg(&intf->dev, "disconnect\n");
2854	kfree(dev);
2855}
2856
2857/* Basic testing only needs a device that can source or sink bulk traffic.
2858 * Any device can test control transfers (default with GENERIC binding).
2859 *
2860 * Several entries work with the default EP0 implementation that's built
2861 * into EZ-USB chips.  There's a default vendor ID which can be overridden
2862 * by (very) small config EEPROMS, but otherwise all these devices act
2863 * identically until firmware is loaded:  only EP0 works.  It turns out
2864 * to be easy to make other endpoints work, without modifying that EP0
2865 * behavior.  For now, we expect that kind of firmware.
2866 */
2867
2868/* an21xx or fx versions of ez-usb */
2869static struct usbtest_info ez1_info = {
2870	.name		= "EZ-USB device",
2871	.ep_in		= 2,
2872	.ep_out		= 2,
2873	.alt		= 1,
2874};
2875
2876/* fx2 version of ez-usb */
2877static struct usbtest_info ez2_info = {
2878	.name		= "FX2 device",
2879	.ep_in		= 6,
2880	.ep_out		= 2,
2881	.alt		= 1,
2882};
2883
2884/* EZ-USB family device with dedicated USB test firmware
2885 */
2886static struct usbtest_info fw_info = {
2887	.name		= "usb test device",
2888	.ep_in		= 2,
2889	.ep_out		= 2,
2890	.alt		= 1,
2891	.autoconf	= 1,		/* iso and ctrl_out need autoconf */
2892	.ctrl_out	= 1,
2893	.iso		= 1,		/* iso_ep's are #8 in/out */
2894};
2895
2896/* peripheral running Linux and 'zero.c' test firmware, or
2897 * its user-mode cousin.  Different versions of this use
2898 * different hardware with the same vendor/product codes.
2899 * The host side MUST rely on the endpoint descriptors.
2900 */
2901static struct usbtest_info gz_info = {
2902	.name		= "Linux gadget zero",
2903	.autoconf	= 1,
2904	.ctrl_out	= 1,
2905	.iso		= 1,
2906	.intr		= 1,
2907	.alt		= 0,
2908};
2909
2910static struct usbtest_info um_info = {
2911	.name		= "Linux user mode test driver",
2912	.autoconf	= 1,
2913	.alt		= -1,
2914};
2915
2916static struct usbtest_info um2_info = {
2917	.name		= "Linux user mode ISO test driver",
2918	.autoconf	= 1,
2919	.iso		= 1,
2920	.alt		= -1,
2921};
2922
2923#ifdef IBOT2
2924/* this is a nice source of high speed bulk data;
2925 * uses an FX2, with firmware provided in the device
2926 */
2927static struct usbtest_info ibot2_info = {
2928	.name		= "iBOT2 webcam",
2929	.ep_in		= 2,
2930	.alt		= -1,
2931};
2932#endif
2933
2934#ifdef GENERIC
2935/* we can use any device to test control traffic */
2936static struct usbtest_info generic_info = {
2937	.name		= "Generic USB device",
2938	.alt		= -1,
2939};
2940#endif
2941
2942
2943static const struct usb_device_id id_table[] = {
2944
2945	/*-------------------------------------------------------------*/
2946
2947	/* EZ-USB devices which download firmware to replace (or in our
2948	 * case augment) the default device implementation.
2949	 */
2950
2951	/* generic EZ-USB FX controller */
2952	{ USB_DEVICE(0x0547, 0x2235),
2953		.driver_info = (unsigned long) &ez1_info,
2954	},
2955
2956	/* CY3671 development board with EZ-USB FX */
2957	{ USB_DEVICE(0x0547, 0x0080),
2958		.driver_info = (unsigned long) &ez1_info,
2959	},
2960
2961	/* generic EZ-USB FX2 controller (or development board) */
2962	{ USB_DEVICE(0x04b4, 0x8613),
2963		.driver_info = (unsigned long) &ez2_info,
2964	},
2965
2966	/* re-enumerated usb test device firmware */
2967	{ USB_DEVICE(0xfff0, 0xfff0),
2968		.driver_info = (unsigned long) &fw_info,
2969	},
2970
2971	/* "Gadget Zero" firmware runs under Linux */
2972	{ USB_DEVICE(0x0525, 0xa4a0),
2973		.driver_info = (unsigned long) &gz_info,
2974	},
2975
2976	/* so does a user-mode variant */
2977	{ USB_DEVICE(0x0525, 0xa4a4),
2978		.driver_info = (unsigned long) &um_info,
2979	},
2980
2981	/* ... and a user-mode variant that talks iso */
2982	{ USB_DEVICE(0x0525, 0xa4a3),
2983		.driver_info = (unsigned long) &um2_info,
2984	},
2985
2986#ifdef KEYSPAN_19Qi
2987	/* Keyspan 19qi uses an21xx (original EZ-USB) */
2988	/* this does not coexist with the real Keyspan 19qi driver! */
2989	{ USB_DEVICE(0x06cd, 0x010b),
2990		.driver_info = (unsigned long) &ez1_info,
2991	},
2992#endif
2993
2994	/*-------------------------------------------------------------*/
2995
2996#ifdef IBOT2
2997	/* iBOT2 makes a nice source of high speed bulk-in data */
2998	/* this does not coexist with a real iBOT2 driver! */
2999	{ USB_DEVICE(0x0b62, 0x0059),
3000		.driver_info = (unsigned long) &ibot2_info,
3001	},
3002#endif
3003
3004	/*-------------------------------------------------------------*/
3005
3006#ifdef GENERIC
3007	/* module params can specify devices to use for control tests */
3008	{ .driver_info = (unsigned long) &generic_info, },
3009#endif
3010
3011	/*-------------------------------------------------------------*/
3012
3013	{ }
3014};
3015MODULE_DEVICE_TABLE(usb, id_table);
3016
3017static struct usb_driver usbtest_driver = {
3018	.name =		"usbtest",
3019	.id_table =	id_table,
3020	.probe =	usbtest_probe,
3021	.unlocked_ioctl = usbtest_ioctl,
3022	.disconnect =	usbtest_disconnect,
3023	.suspend =	usbtest_suspend,
3024	.resume =	usbtest_resume,
3025};
3026
3027/*-------------------------------------------------------------------------*/
3028
3029static int __init usbtest_init(void)
3030{
3031#ifdef GENERIC
3032	if (vendor)
3033		pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
3034#endif
3035	return usb_register(&usbtest_driver);
3036}
3037module_init(usbtest_init);
3038
3039static void __exit usbtest_exit(void)
3040{
3041	usb_deregister(&usbtest_driver);
3042}
3043module_exit(usbtest_exit);
3044
3045MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
3046MODULE_LICENSE("GPL");
3047