   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * inode.c -- user mode filesystem api for usb gadget controllers
   4 *
   5 * Copyright (C) 2003-2004 David Brownell
   6 * Copyright (C) 2003 Agilent Technologies
   7 */
   8
   9
  10/* #define VERBOSE_DEBUG */
  11
  12#include <linux/init.h>
  13#include <linux/module.h>
  14#include <linux/fs.h>
  15#include <linux/fs_context.h>
  16#include <linux/pagemap.h>
  17#include <linux/uts.h>
  18#include <linux/wait.h>
  19#include <linux/compiler.h>
  20#include <linux/uaccess.h>
  21#include <linux/sched.h>
  22#include <linux/slab.h>
  23#include <linux/poll.h>
  24#include <linux/kthread.h>
  25#include <linux/aio.h>
  26#include <linux/uio.h>
  27#include <linux/refcount.h>
  28#include <linux/delay.h>
  29#include <linux/device.h>
  30#include <linux/moduleparam.h>
  31
  32#include <linux/usb/gadgetfs.h>
  33#include <linux/usb/gadget.h>
  34#include <linux/usb/composite.h> /* for USB_GADGET_DELAYED_STATUS */
  35
  36/* Undef helpers from linux/usb/composite.h as gadgetfs redefines them */
  37#undef DBG
  38#undef ERROR
  39#undef INFO
  40
  41
  42/*
  43 * The gadgetfs API maps each endpoint to a file descriptor so that you
  44 * can use standard synchronous read/write calls for I/O.  There's some
  45 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
  46 * drivers show how this works in practice.  You can also use AIO to
  47 * eliminate I/O gaps between requests, to help when streaming data.
  48 *
  49 * Key parts that must be USB-specific are protocols defining how the
  50 * read/write operations relate to the hardware state machines.  There
  51 * are two types of files.  One type is for the device, implementing ep0.
  52 * The other type is for each IN or OUT endpoint.  In both cases, the
  53 * user mode driver must configure the hardware before using it.
  54 *
  55 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
  56 *   (by writing configuration and device descriptors).  Afterwards it
  57 *   may serve as a source of device events, used to handle all control
  58 *   requests other than basic enumeration.
  59 *
  60 * - Then, after a SET_CONFIGURATION control request, ep_config() is
  61 *   called when each /dev/gadget/ep* file is configured (by writing
  62 *   endpoint descriptors).  Afterwards these files are used to write()
  63 *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
  64 *   direction" request is issued (like reading an IN endpoint).
  65 *
   66 * Unlike "usbfs", the only ioctl()s are for things that are rare, and maybe
  67 * not possible on all hardware.  For example, precise fault handling with
  68 * respect to data left in endpoint fifos after aborted operations; or
  69 * selective clearing of endpoint halts, to implement SET_INTERFACE.
  70 */
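/* A minimal userspace sketch of that flow (the controller and endpoint
 * names below are examples only; they depend on the UDC driver in use):
 *
 *	int ep0 = open("/dev/gadget/dummy_udc", O_RDWR);
 *	write(ep0, descriptors, descriptors_len);	// handled by dev_config()
 *
 *	struct usb_gadgetfs_event event;
 *	read(ep0, &event, sizeof event);		// e.g. GADGETFS_CONNECT
 *
 *	int ep_in = open("/dev/gadget/ep1in", O_RDWR);
 *	write(ep_in, ep_descriptors, ep_desc_len);	// handled by ep_config()
 *	write(ep_in, data, data_len);			// IN transfer to the host
 */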
  71
  72#define	DRIVER_DESC	"USB Gadget filesystem"
  73#define	DRIVER_VERSION	"24 Aug 2004"
  74
  75static const char driver_desc [] = DRIVER_DESC;
  76static const char shortname [] = "gadgetfs";
  77
  78MODULE_DESCRIPTION (DRIVER_DESC);
  79MODULE_AUTHOR ("David Brownell");
  80MODULE_LICENSE ("GPL");
  81
  82static int ep_open(struct inode *, struct file *);
  83
  84
  85/*----------------------------------------------------------------------*/
  86
  87#define GADGETFS_MAGIC		0xaee71ee7
  88
  89/* /dev/gadget/$CHIP represents ep0 and the whole device */
  90enum ep0_state {
  91	/* DISABLED is the initial state. */
  92	STATE_DEV_DISABLED = 0,
  93
  94	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
  95	 * ep0/device i/o modes and binding to the controller.  Driver
  96	 * must always write descriptors to initialize the device, then
  97	 * the device becomes UNCONNECTED until enumeration.
  98	 */
  99	STATE_DEV_OPENED,
 100
 101	/* From then on, ep0 fd is in either of two basic modes:
 102	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
 103	 * - SETUP: read/write will transfer control data and succeed;
 104	 *   or if "wrong direction", performs protocol stall
 105	 */
 106	STATE_DEV_UNCONNECTED,
 107	STATE_DEV_CONNECTED,
 108	STATE_DEV_SETUP,
 109
 110	/* UNBOUND means the driver closed ep0, so the device won't be
 111	 * accessible again (DEV_DISABLED) until all fds are closed.
 112	 */
 113	STATE_DEV_UNBOUND,
 114};
 115
 116/* enough for the whole queue: most events invalidate others */
 117#define	N_EVENT			5
 118
 119#define RBUF_SIZE		256
 120
 121struct dev_data {
 122	spinlock_t			lock;
 123	refcount_t			count;
 124	int				udc_usage;
 125	enum ep0_state			state;		/* P: lock */
 126	struct usb_gadgetfs_event	event [N_EVENT];
 127	unsigned			ev_next;
 128	struct fasync_struct		*fasync;
 129	u8				current_config;
 130
 131	/* drivers reading ep0 MUST handle control requests (SETUP)
 132	 * reported that way; else the host will time out.
 133	 */
 134	unsigned			usermode_setup : 1,
 135					setup_in : 1,
 136					setup_can_stall : 1,
 137					setup_out_ready : 1,
 138					setup_out_error : 1,
 139					setup_abort : 1,
 140					gadget_registered : 1;
 141	unsigned			setup_wLength;
 142
 143	/* the rest is basically write-once */
 144	struct usb_config_descriptor	*config, *hs_config;
 145	struct usb_device_descriptor	*dev;
 146	struct usb_request		*req;
 147	struct usb_gadget		*gadget;
 148	struct list_head		epfiles;
 149	void				*buf;
 150	wait_queue_head_t		wait;
 151	struct super_block		*sb;
 152	struct dentry			*dentry;
 153
 154	/* except this scratch i/o buffer for ep0 */
 155	u8				rbuf[RBUF_SIZE];
 156};
 157
 158static inline void get_dev (struct dev_data *data)
 159{
 160	refcount_inc (&data->count);
 161}
 162
 163static void put_dev (struct dev_data *data)
 164{
 165	if (likely (!refcount_dec_and_test (&data->count)))
 166		return;
 167	/* needs no more cleanup */
 168	BUG_ON (waitqueue_active (&data->wait));
 169	kfree (data);
 170}
 171
 172static struct dev_data *dev_new (void)
 173{
 174	struct dev_data		*dev;
 175
 176	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 177	if (!dev)
 178		return NULL;
 179	dev->state = STATE_DEV_DISABLED;
 180	refcount_set (&dev->count, 1);
 181	spin_lock_init (&dev->lock);
 182	INIT_LIST_HEAD (&dev->epfiles);
 183	init_waitqueue_head (&dev->wait);
 184	return dev;
 185}
 186
 187/*----------------------------------------------------------------------*/
 188
 189/* other /dev/gadget/$ENDPOINT files represent endpoints */
 190enum ep_state {
 191	STATE_EP_DISABLED = 0,
 192	STATE_EP_READY,
 193	STATE_EP_ENABLED,
 194	STATE_EP_UNBOUND,
 195};
 196
 197struct ep_data {
 198	struct mutex			lock;
 199	enum ep_state			state;
 200	refcount_t			count;
 201	struct dev_data			*dev;
 202	/* must hold dev->lock before accessing ep or req */
 203	struct usb_ep			*ep;
 204	struct usb_request		*req;
 205	ssize_t				status;
 206	char				name [16];
 207	struct usb_endpoint_descriptor	desc, hs_desc;
 208	struct list_head		epfiles;
 209	wait_queue_head_t		wait;
 210	struct dentry			*dentry;
 211};
 212
 213static inline void get_ep (struct ep_data *data)
 214{
 215	refcount_inc (&data->count);
 216}
 217
 218static void put_ep (struct ep_data *data)
 219{
 220	if (likely (!refcount_dec_and_test (&data->count)))
 221		return;
 222	put_dev (data->dev);
 223	/* needs no more cleanup */
 224	BUG_ON (!list_empty (&data->epfiles));
 225	BUG_ON (waitqueue_active (&data->wait));
 226	kfree (data);
 227}
 228
 229/*----------------------------------------------------------------------*/
 230
 231/* most "how to use the hardware" policy choices are in userspace:
 232 * mapping endpoint roles (which the driver needs) to the capabilities
 233 * which the usb controller has.  most of those capabilities are exposed
 234 * implicitly, starting with the driver name and then endpoint names.
 235 */
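/* For example, after mounting gadgetfs a controller might expose files
 * roughly like these (names vary by UDC driver; purely illustrative):
 *
 *	$ ls /dev/gadget
 *	dummy_udc  ep1in-bulk  ep2out-bulk  ep3in-int
 */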
 236
 237static const char *CHIP;
 238static DEFINE_MUTEX(sb_mutex);		/* Serialize superblock operations */
 239
 240/*----------------------------------------------------------------------*/
 241
 242/* NOTE:  don't use dev_printk calls before binding to the gadget
 243 * at the end of ep0 configuration, or after unbind.
 244 */
 245
 246/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
 247#define xprintk(d,level,fmt,args...) \
 248	printk(level "%s: " fmt , shortname , ## args)
 249
 250#ifdef DEBUG
 251#define DBG(dev,fmt,args...) \
 252	xprintk(dev , KERN_DEBUG , fmt , ## args)
 253#else
 254#define DBG(dev,fmt,args...) \
 255	do { } while (0)
 256#endif /* DEBUG */
 257
 258#ifdef VERBOSE_DEBUG
 259#define VDEBUG	DBG
 260#else
 261#define VDEBUG(dev,fmt,args...) \
 262	do { } while (0)
  263#endif /* VERBOSE_DEBUG */
 264
 265#define ERROR(dev,fmt,args...) \
 266	xprintk(dev , KERN_ERR , fmt , ## args)
 267#define INFO(dev,fmt,args...) \
 268	xprintk(dev , KERN_INFO , fmt , ## args)
 269
 270
 271/*----------------------------------------------------------------------*/
 272
 273/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
 274 *
 275 * After opening, configure non-control endpoints.  Then use normal
 276 * stream read() and write() requests; and maybe ioctl() to get more
 277 * precise FIFO status when recovering from cancellation.
 278 */
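/* A sketch of such recovery from userspace (the fd and buffer names are
 * illustrative; the GADGETFS_* ioctls are those handled by ep_ioctl() below):
 *
 *	char buf[512];
 *	int n = read(ep_out_fd, buf, sizeof buf);	// one OUT transfer
 *
 *	if (n < 0 && errno == EINTR) {
 *		// bytes still sitting in the hardware FIFO, if any
 *		int pending = ioctl(ep_out_fd, GADGETFS_FIFO_STATUS);
 *		ioctl(ep_out_fd, GADGETFS_FIFO_FLUSH);
 *	}
 */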
 279
 280static void epio_complete (struct usb_ep *ep, struct usb_request *req)
 281{
 282	struct ep_data	*epdata = ep->driver_data;
 283
 284	if (!req->context)
 285		return;
 286	if (req->status)
 287		epdata->status = req->status;
 288	else
 289		epdata->status = req->actual;
 290	complete ((struct completion *)req->context);
 291}
 292
  293/* lock the endpoint mutex, returning when it's connected.
 294 * still need dev->lock to use epdata->ep.
 295 */
 296static int
 297get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
 298{
 299	int	val;
 300
 301	if (f_flags & O_NONBLOCK) {
 302		if (!mutex_trylock(&epdata->lock))
 303			goto nonblock;
 304		if (epdata->state != STATE_EP_ENABLED &&
 305		    (!is_write || epdata->state != STATE_EP_READY)) {
 306			mutex_unlock(&epdata->lock);
 307nonblock:
 308			val = -EAGAIN;
 309		} else
 310			val = 0;
 311		return val;
 312	}
 313
 314	val = mutex_lock_interruptible(&epdata->lock);
 315	if (val < 0)
 316		return val;
 317
 318	switch (epdata->state) {
 319	case STATE_EP_ENABLED:
 320		return 0;
 321	case STATE_EP_READY:			/* not configured yet */
 322		if (is_write)
 323			return 0;
 324		fallthrough;
 325	case STATE_EP_UNBOUND:			/* clean disconnect */
 326		break;
 327	// case STATE_EP_DISABLED:		/* "can't happen" */
 328	default:				/* error! */
 329		pr_debug ("%s: ep %p not available, state %d\n",
 330				shortname, epdata, epdata->state);
 331	}
 332	mutex_unlock(&epdata->lock);
 333	return -ENODEV;
 334}
 335
 336static ssize_t
 337ep_io (struct ep_data *epdata, void *buf, unsigned len)
 338{
 339	DECLARE_COMPLETION_ONSTACK (done);
 340	int value;
 341
 342	spin_lock_irq (&epdata->dev->lock);
 343	if (likely (epdata->ep != NULL)) {
 344		struct usb_request	*req = epdata->req;
 345
 346		req->context = &done;
 347		req->complete = epio_complete;
 348		req->buf = buf;
 349		req->length = len;
 350		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
 351	} else
 352		value = -ENODEV;
 353	spin_unlock_irq (&epdata->dev->lock);
 354
 355	if (likely (value == 0)) {
 356		value = wait_for_completion_interruptible(&done);
 357		if (value != 0) {
 358			spin_lock_irq (&epdata->dev->lock);
 359			if (likely (epdata->ep != NULL)) {
 360				DBG (epdata->dev, "%s i/o interrupted\n",
 361						epdata->name);
 362				usb_ep_dequeue (epdata->ep, epdata->req);
 363				spin_unlock_irq (&epdata->dev->lock);
 364
 365				wait_for_completion(&done);
 366				if (epdata->status == -ECONNRESET)
 367					epdata->status = -EINTR;
 368			} else {
 369				spin_unlock_irq (&epdata->dev->lock);
 370
 371				DBG (epdata->dev, "endpoint gone\n");
 372				wait_for_completion(&done);
 373				epdata->status = -ENODEV;
 374			}
 375		}
 376		return epdata->status;
 377	}
 378	return value;
 379}
 380
 381static int
 382ep_release (struct inode *inode, struct file *fd)
 383{
 384	struct ep_data		*data = fd->private_data;
 385	int value;
 386
 387	value = mutex_lock_interruptible(&data->lock);
 388	if (value < 0)
 389		return value;
 390
 391	/* clean up if this can be reopened */
 392	if (data->state != STATE_EP_UNBOUND) {
 393		data->state = STATE_EP_DISABLED;
 394		data->desc.bDescriptorType = 0;
 395		data->hs_desc.bDescriptorType = 0;
 396		usb_ep_disable(data->ep);
 397	}
 398	mutex_unlock(&data->lock);
 399	put_ep (data);
 400	return 0;
 401}
 402
 403static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
 404{
 405	struct ep_data		*data = fd->private_data;
 406	int			status;
 407
 408	if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
 409		return status;
 410
 411	spin_lock_irq (&data->dev->lock);
 412	if (likely (data->ep != NULL)) {
 413		switch (code) {
 414		case GADGETFS_FIFO_STATUS:
 415			status = usb_ep_fifo_status (data->ep);
 416			break;
 417		case GADGETFS_FIFO_FLUSH:
 418			usb_ep_fifo_flush (data->ep);
 419			break;
 420		case GADGETFS_CLEAR_HALT:
 421			status = usb_ep_clear_halt (data->ep);
 422			break;
 423		default:
 424			status = -ENOTTY;
 425		}
 426	} else
 427		status = -ENODEV;
 428	spin_unlock_irq (&data->dev->lock);
 429	mutex_unlock(&data->lock);
 430	return status;
 431}
 432
 433/*----------------------------------------------------------------------*/
 434
 435/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
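/* Userspace typically reaches this path through AIO, for example via
 * libaio (a sketch; the queue depth and buffer handling are arbitrary):
 *
 *	#include <libaio.h>
 *
 *	io_context_t ctx = 0;
 *	struct iocb iocb, *iocbs[] = { &iocb };
 *	struct io_event ev;
 *
 *	io_setup(4, &ctx);
 *	io_prep_pread(&iocb, ep_out_fd, buf, sizeof buf, 0);
 *	io_submit(ctx, 1, iocbs);		// queues one usb_request
 *	io_getevents(ctx, 1, 1, &ev, NULL);	// wait for its completion
 */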
 436
 437struct kiocb_priv {
 438	struct usb_request	*req;
 439	struct ep_data		*epdata;
 440	struct kiocb		*iocb;
 441	struct mm_struct	*mm;
 442	struct work_struct	work;
 443	void			*buf;
 444	struct iov_iter		to;
 445	const void		*to_free;
 446	unsigned		actual;
 447};
 448
 449static int ep_aio_cancel(struct kiocb *iocb)
 450{
 451	struct kiocb_priv	*priv = iocb->private;
 452	struct ep_data		*epdata;
 453	int			value;
 454
 455	local_irq_disable();
 456	epdata = priv->epdata;
 457	// spin_lock(&epdata->dev->lock);
 458	if (likely(epdata && epdata->ep && priv->req))
 459		value = usb_ep_dequeue (epdata->ep, priv->req);
 460	else
 461		value = -EINVAL;
 462	// spin_unlock(&epdata->dev->lock);
 463	local_irq_enable();
 464
 465	return value;
 466}
 467
 468static void ep_user_copy_worker(struct work_struct *work)
 469{
 470	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
 471	struct mm_struct *mm = priv->mm;
 472	struct kiocb *iocb = priv->iocb;
 473	size_t ret;
 474
 475	kthread_use_mm(mm);
 476	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
 477	kthread_unuse_mm(mm);
 478	if (!ret)
 479		ret = -EFAULT;
 480
 481	/* completing the iocb can drop the ctx and mm, don't touch mm after */
 482	iocb->ki_complete(iocb, ret);
 483
 484	kfree(priv->buf);
 485	kfree(priv->to_free);
 486	kfree(priv);
 487}
 488
 489static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 490{
 491	struct kiocb		*iocb = req->context;
 492	struct kiocb_priv	*priv = iocb->private;
 493	struct ep_data		*epdata = priv->epdata;
 494
 495	/* lock against disconnect (and ideally, cancel) */
 496	spin_lock(&epdata->dev->lock);
 497	priv->req = NULL;
 498	priv->epdata = NULL;
 499
 500	/* if this was a write or a read returning no data then we
 501	 * don't need to copy anything to userspace, so we can
 502	 * complete the aio request immediately.
 503	 */
 504	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
 505		kfree(req->buf);
 506		kfree(priv->to_free);
 507		kfree(priv);
 508		iocb->private = NULL;
 509		iocb->ki_complete(iocb,
 510				req->actual ? req->actual : (long)req->status);
 511	} else {
  512		/* ep_user_copy_worker() won't report both; we hide some faults */
 513		if (unlikely(0 != req->status))
 514			DBG(epdata->dev, "%s fault %d len %d\n",
 515				ep->name, req->status, req->actual);
 516
 517		priv->buf = req->buf;
 518		priv->actual = req->actual;
 519		INIT_WORK(&priv->work, ep_user_copy_worker);
 520		schedule_work(&priv->work);
 521	}
 522
 523	usb_ep_free_request(ep, req);
 524	spin_unlock(&epdata->dev->lock);
 525	put_ep(epdata);
 526}
 527
 528static ssize_t ep_aio(struct kiocb *iocb,
 529		      struct kiocb_priv *priv,
 530		      struct ep_data *epdata,
 531		      char *buf,
 532		      size_t len)
 533{
 534	struct usb_request *req;
 535	ssize_t value;
 536
 537	iocb->private = priv;
 538	priv->iocb = iocb;
 539
 540	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
 541	get_ep(epdata);
 542	priv->epdata = epdata;
 543	priv->actual = 0;
 544	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
 545
 546	/* each kiocb is coupled to one usb_request, but we can't
 547	 * allocate or submit those if the host disconnected.
 548	 */
 549	spin_lock_irq(&epdata->dev->lock);
 550	value = -ENODEV;
 551	if (unlikely(epdata->ep == NULL))
 552		goto fail;
 553
 554	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
 555	value = -ENOMEM;
 556	if (unlikely(!req))
 557		goto fail;
 558
 559	priv->req = req;
 560	req->buf = buf;
 561	req->length = len;
 562	req->complete = ep_aio_complete;
 563	req->context = iocb;
 564	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
 565	if (unlikely(0 != value)) {
 566		usb_ep_free_request(epdata->ep, req);
 567		goto fail;
 568	}
 569	spin_unlock_irq(&epdata->dev->lock);
 570	return -EIOCBQUEUED;
 571
 572fail:
 573	spin_unlock_irq(&epdata->dev->lock);
 574	kfree(priv->to_free);
 575	kfree(priv);
 576	put_ep(epdata);
 577	return value;
 578}
 579
 580static ssize_t
 581ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
 582{
 583	struct file *file = iocb->ki_filp;
 584	struct ep_data *epdata = file->private_data;
 585	size_t len = iov_iter_count(to);
 586	ssize_t value;
 587	char *buf;
 588
 589	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
 590		return value;
 591
 592	/* halt any endpoint by doing a "wrong direction" i/o call */
 593	if (usb_endpoint_dir_in(&epdata->desc)) {
 594		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
 595		    !is_sync_kiocb(iocb)) {
 596			mutex_unlock(&epdata->lock);
 597			return -EINVAL;
 598		}
 599		DBG (epdata->dev, "%s halt\n", epdata->name);
 600		spin_lock_irq(&epdata->dev->lock);
 601		if (likely(epdata->ep != NULL))
 602			usb_ep_set_halt(epdata->ep);
 603		spin_unlock_irq(&epdata->dev->lock);
 604		mutex_unlock(&epdata->lock);
 605		return -EBADMSG;
 606	}
 607
 608	buf = kmalloc(len, GFP_KERNEL);
 609	if (unlikely(!buf)) {
 610		mutex_unlock(&epdata->lock);
 611		return -ENOMEM;
 612	}
 613	if (is_sync_kiocb(iocb)) {
 614		value = ep_io(epdata, buf, len);
 615		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
 616			value = -EFAULT;
 617	} else {
 618		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
 619		value = -ENOMEM;
 620		if (!priv)
 621			goto fail;
 622		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
 623		if (!iter_is_ubuf(&priv->to) && !priv->to_free) {
 624			kfree(priv);
 625			goto fail;
 626		}
 627		value = ep_aio(iocb, priv, epdata, buf, len);
 628		if (value == -EIOCBQUEUED)
 629			buf = NULL;
 630	}
 631fail:
 632	kfree(buf);
 633	mutex_unlock(&epdata->lock);
 634	return value;
 635}
 636
 637static ssize_t ep_config(struct ep_data *, const char *, size_t);
 638
 639static ssize_t
 640ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
 641{
 642	struct file *file = iocb->ki_filp;
 643	struct ep_data *epdata = file->private_data;
 644	size_t len = iov_iter_count(from);
 645	bool configured;
 646	ssize_t value;
 647	char *buf;
 648
 649	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
 650		return value;
 651
 652	configured = epdata->state == STATE_EP_ENABLED;
 653
 654	/* halt any endpoint by doing a "wrong direction" i/o call */
 655	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
 656		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
 657		    !is_sync_kiocb(iocb)) {
 658			mutex_unlock(&epdata->lock);
 659			return -EINVAL;
 660		}
 661		DBG (epdata->dev, "%s halt\n", epdata->name);
 662		spin_lock_irq(&epdata->dev->lock);
 663		if (likely(epdata->ep != NULL))
 664			usb_ep_set_halt(epdata->ep);
 665		spin_unlock_irq(&epdata->dev->lock);
 666		mutex_unlock(&epdata->lock);
 667		return -EBADMSG;
 668	}
 669
 670	buf = kmalloc(len, GFP_KERNEL);
 671	if (unlikely(!buf)) {
 672		mutex_unlock(&epdata->lock);
 673		return -ENOMEM;
 674	}
 675
 676	if (unlikely(!copy_from_iter_full(buf, len, from))) {
 677		value = -EFAULT;
 678		goto out;
 679	}
 680
 681	if (unlikely(!configured)) {
 682		value = ep_config(epdata, buf, len);
 683	} else if (is_sync_kiocb(iocb)) {
 684		value = ep_io(epdata, buf, len);
 685	} else {
 686		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
 687		value = -ENOMEM;
 688		if (priv) {
 689			value = ep_aio(iocb, priv, epdata, buf, len);
 690			if (value == -EIOCBQUEUED)
 691				buf = NULL;
 692		}
 693	}
 694out:
 695	kfree(buf);
 696	mutex_unlock(&epdata->lock);
 697	return value;
 698}
 699
 700/*----------------------------------------------------------------------*/
 701
 702/* used after endpoint configuration */
 703static const struct file_operations ep_io_operations = {
 704	.owner =	THIS_MODULE,
 705
 706	.open =		ep_open,
 707	.release =	ep_release,
 708	.llseek =	no_llseek,
 709	.unlocked_ioctl = ep_ioctl,
 710	.read_iter =	ep_read_iter,
 711	.write_iter =	ep_write_iter,
 712};
 713
 714/* ENDPOINT INITIALIZATION
 715 *
 716 *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
 717 *     status = write (fd, descriptors, sizeof descriptors)
 718 *
 719 * That write establishes the endpoint configuration, configuring
 720 * the controller to process bulk, interrupt, or isochronous transfers
 721 * at the right maxpacket size, and so on.
 722 *
 723 * The descriptors are message type 1, identified by a host order u32
 724 * at the beginning of what's written.  Descriptor order is: full/low
 725 * speed descriptor, then optional high speed descriptor.
 726 */
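/* A sketch of that write (the descriptor contents are up to the user
 * mode driver; only the framing shown here is fixed):
 *
 *	struct usb_endpoint_descriptor fs, hs;	// filled in beforehand
 *	__u32 tag = 1;
 *	__u8 buf[4 + 2 * USB_DT_ENDPOINT_SIZE], *cp = buf;
 *
 *	memcpy(cp, &tag, 4); cp += 4;
 *	memcpy(cp, &fs, USB_DT_ENDPOINT_SIZE); cp += USB_DT_ENDPOINT_SIZE;
 *	memcpy(cp, &hs, USB_DT_ENDPOINT_SIZE); cp += USB_DT_ENDPOINT_SIZE;
 *	write(ep_fd, buf, cp - buf);		// handled by ep_config()
 */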
 727static ssize_t
 728ep_config (struct ep_data *data, const char *buf, size_t len)
 729{
 730	struct usb_ep		*ep;
 731	u32			tag;
 732	int			value, length = len;
 733
 734	if (data->state != STATE_EP_READY) {
 735		value = -EL2HLT;
 736		goto fail;
 737	}
 738
 739	value = len;
 740	if (len < USB_DT_ENDPOINT_SIZE + 4)
 741		goto fail0;
 742
 743	/* we might need to change message format someday */
 744	memcpy(&tag, buf, 4);
 745	if (tag != 1) {
 746		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
 747		goto fail0;
 748	}
 749	buf += 4;
 750	len -= 4;
 751
 752	/* NOTE:  audio endpoint extensions not accepted here;
 753	 * just don't include the extra bytes.
 754	 */
 755
 756	/* full/low speed descriptor, then high speed */
 757	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
 758	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
 759			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
 760		goto fail0;
 761	if (len != USB_DT_ENDPOINT_SIZE) {
 762		if (len != 2 * USB_DT_ENDPOINT_SIZE)
 763			goto fail0;
 764		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
 765			USB_DT_ENDPOINT_SIZE);
 766		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
 767				|| data->hs_desc.bDescriptorType
 768					!= USB_DT_ENDPOINT) {
 769			DBG(data->dev, "config %s, bad hs length or type\n",
 770					data->name);
 771			goto fail0;
 772		}
 773	}
 774
 775	spin_lock_irq (&data->dev->lock);
 776	if (data->dev->state == STATE_DEV_UNBOUND) {
 777		value = -ENOENT;
 778		goto gone;
 779	} else {
 780		ep = data->ep;
 781		if (ep == NULL) {
 782			value = -ENODEV;
 783			goto gone;
 784		}
 785	}
 786	switch (data->dev->gadget->speed) {
 787	case USB_SPEED_LOW:
 788	case USB_SPEED_FULL:
 789		ep->desc = &data->desc;
 790		break;
 791	case USB_SPEED_HIGH:
 792		/* fails if caller didn't provide that descriptor... */
 793		ep->desc = &data->hs_desc;
 794		break;
 795	default:
 796		DBG(data->dev, "unconnected, %s init abandoned\n",
 797				data->name);
 798		value = -EINVAL;
 799		goto gone;
 800	}
 801	value = usb_ep_enable(ep);
 802	if (value == 0) {
 803		data->state = STATE_EP_ENABLED;
 804		value = length;
 805	}
 806gone:
 807	spin_unlock_irq (&data->dev->lock);
 808	if (value < 0) {
 809fail:
 810		data->desc.bDescriptorType = 0;
 811		data->hs_desc.bDescriptorType = 0;
 812	}
 813	return value;
 814fail0:
 815	value = -EINVAL;
 816	goto fail;
 817}
 818
 819static int
 820ep_open (struct inode *inode, struct file *fd)
 821{
 822	struct ep_data		*data = inode->i_private;
 823	int			value = -EBUSY;
 824
 825	if (mutex_lock_interruptible(&data->lock) != 0)
 826		return -EINTR;
 827	spin_lock_irq (&data->dev->lock);
 828	if (data->dev->state == STATE_DEV_UNBOUND)
 829		value = -ENOENT;
 830	else if (data->state == STATE_EP_DISABLED) {
 831		value = 0;
 832		data->state = STATE_EP_READY;
 833		get_ep (data);
 834		fd->private_data = data;
 835		VDEBUG (data->dev, "%s ready\n", data->name);
 836	} else
 837		DBG (data->dev, "%s state %d\n",
 838			data->name, data->state);
 839	spin_unlock_irq (&data->dev->lock);
 840	mutex_unlock(&data->lock);
 841	return value;
 842}
 843
 844/*----------------------------------------------------------------------*/
 845
 846/* EP0 IMPLEMENTATION can be partly in userspace.
 847 *
 848 * Drivers that use this facility receive various events, including
 849 * control requests the kernel doesn't handle.  Drivers that don't
 850 * use this facility may be too simple-minded for real applications.
 851 */
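/* The usual userspace pattern is to loop reading events from the ep0
 * file descriptor (a sketch; handle_setup() is a hypothetical helper
 * that answers the control request, or stalls via "wrong direction" i/o):
 *
 *	struct usb_gadgetfs_event events[5];
 *	int i, n = read(ep0, events, sizeof events);
 *
 *	for (i = 0; i < n / (int) sizeof events[0]; i++) {
 *		switch (events[i].type) {
 *		case GADGETFS_SETUP:
 *			handle_setup(ep0, &events[i].u.setup);
 *			break;
 *		case GADGETFS_CONNECT:
 *		case GADGETFS_DISCONNECT:
 *		case GADGETFS_SUSPEND:
 *			break;
 *		}
 *	}
 */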
 852
 853static inline void ep0_readable (struct dev_data *dev)
 854{
 855	wake_up (&dev->wait);
 856	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
 857}
 858
 859static void clean_req (struct usb_ep *ep, struct usb_request *req)
 860{
 861	struct dev_data		*dev = ep->driver_data;
 862
 863	if (req->buf != dev->rbuf) {
 864		kfree(req->buf);
 865		req->buf = dev->rbuf;
 866	}
 867	req->complete = epio_complete;
 868	dev->setup_out_ready = 0;
 869}
 870
 871static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
 872{
 873	struct dev_data		*dev = ep->driver_data;
 874	unsigned long		flags;
 875	int			free = 1;
 876
 877	/* for control OUT, data must still get to userspace */
 878	spin_lock_irqsave(&dev->lock, flags);
 879	if (!dev->setup_in) {
 880		dev->setup_out_error = (req->status != 0);
 881		if (!dev->setup_out_error)
 882			free = 0;
 883		dev->setup_out_ready = 1;
 884		ep0_readable (dev);
 885	}
 886
 887	/* clean up as appropriate */
 888	if (free && req->buf != &dev->rbuf)
 889		clean_req (ep, req);
 890	req->complete = epio_complete;
 891	spin_unlock_irqrestore(&dev->lock, flags);
 892}
 893
 894static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
 895{
 896	struct dev_data	*dev = ep->driver_data;
 897
 898	if (dev->setup_out_ready) {
 899		DBG (dev, "ep0 request busy!\n");
 900		return -EBUSY;
 901	}
 902	if (len > sizeof (dev->rbuf))
 903		req->buf = kmalloc(len, GFP_ATOMIC);
 904	if (req->buf == NULL) {
 905		req->buf = dev->rbuf;
 906		return -ENOMEM;
 907	}
 908	req->complete = ep0_complete;
 909	req->length = len;
 910	req->zero = 0;
 911	return 0;
 912}
 913
 914static ssize_t
 915ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
 916{
 917	struct dev_data			*dev = fd->private_data;
 918	ssize_t				retval;
 919	enum ep0_state			state;
 920
 921	spin_lock_irq (&dev->lock);
 922	if (dev->state <= STATE_DEV_OPENED) {
 923		retval = -EINVAL;
 924		goto done;
 925	}
 926
 927	/* report fd mode change before acting on it */
 928	if (dev->setup_abort) {
 929		dev->setup_abort = 0;
 930		retval = -EIDRM;
 931		goto done;
 932	}
 933
 934	/* control DATA stage */
 935	if ((state = dev->state) == STATE_DEV_SETUP) {
 936
 937		if (dev->setup_in) {		/* stall IN */
 938			VDEBUG(dev, "ep0in stall\n");
 939			(void) usb_ep_set_halt (dev->gadget->ep0);
 940			retval = -EL2HLT;
 941			dev->state = STATE_DEV_CONNECTED;
 942
 943		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
 944			struct usb_ep		*ep = dev->gadget->ep0;
 945			struct usb_request	*req = dev->req;
 946
 947			if ((retval = setup_req (ep, req, 0)) == 0) {
 948				++dev->udc_usage;
 949				spin_unlock_irq (&dev->lock);
 950				retval = usb_ep_queue (ep, req, GFP_KERNEL);
 951				spin_lock_irq (&dev->lock);
 952				--dev->udc_usage;
 953			}
 954			dev->state = STATE_DEV_CONNECTED;
 955
 956			/* assume that was SET_CONFIGURATION */
 957			if (dev->current_config) {
 958				unsigned power;
 959
 960				if (gadget_is_dualspeed(dev->gadget)
 961						&& (dev->gadget->speed
 962							== USB_SPEED_HIGH))
 963					power = dev->hs_config->bMaxPower;
 964				else
 965					power = dev->config->bMaxPower;
 966				usb_gadget_vbus_draw(dev->gadget, 2 * power);
 967			}
 968
 969		} else {			/* collect OUT data */
 970			if ((fd->f_flags & O_NONBLOCK) != 0
 971					&& !dev->setup_out_ready) {
 972				retval = -EAGAIN;
 973				goto done;
 974			}
 975			spin_unlock_irq (&dev->lock);
 976			retval = wait_event_interruptible (dev->wait,
 977					dev->setup_out_ready != 0);
 978
 979			/* FIXME state could change from under us */
 980			spin_lock_irq (&dev->lock);
 981			if (retval)
 982				goto done;
 983
 984			if (dev->state != STATE_DEV_SETUP) {
 985				retval = -ECANCELED;
 986				goto done;
 987			}
 988			dev->state = STATE_DEV_CONNECTED;
 989
 990			if (dev->setup_out_error)
 991				retval = -EIO;
 992			else {
 993				len = min (len, (size_t)dev->req->actual);
 994				++dev->udc_usage;
 995				spin_unlock_irq(&dev->lock);
 996				if (copy_to_user (buf, dev->req->buf, len))
 997					retval = -EFAULT;
 998				else
 999					retval = len;
1000				spin_lock_irq(&dev->lock);
1001				--dev->udc_usage;
1002				clean_req (dev->gadget->ep0, dev->req);
1003				/* NOTE userspace can't yet choose to stall */
1004			}
1005		}
1006		goto done;
1007	}
1008
1009	/* else normal: return event data */
1010	if (len < sizeof dev->event [0]) {
1011		retval = -EINVAL;
1012		goto done;
1013	}
1014	len -= len % sizeof (struct usb_gadgetfs_event);
1015	dev->usermode_setup = 1;
1016
1017scan:
1018	/* return queued events right away */
1019	if (dev->ev_next != 0) {
1020		unsigned		i, n;
1021
1022		n = len / sizeof (struct usb_gadgetfs_event);
1023		if (dev->ev_next < n)
1024			n = dev->ev_next;
1025
1026		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
1027		for (i = 0; i < n; i++) {
1028			if (dev->event [i].type == GADGETFS_SETUP) {
1029				dev->state = STATE_DEV_SETUP;
1030				n = i + 1;
1031				break;
1032			}
1033		}
1034		spin_unlock_irq (&dev->lock);
1035		len = n * sizeof (struct usb_gadgetfs_event);
1036		if (copy_to_user (buf, &dev->event, len))
1037			retval = -EFAULT;
1038		else
1039			retval = len;
1040		if (len > 0) {
1041			/* NOTE this doesn't guard against broken drivers;
1042			 * concurrent ep0 readers may lose events.
1043			 */
1044			spin_lock_irq (&dev->lock);
1045			if (dev->ev_next > n) {
1046				memmove(&dev->event[0], &dev->event[n],
1047					sizeof (struct usb_gadgetfs_event)
1048						* (dev->ev_next - n));
1049			}
1050			dev->ev_next -= n;
1051			spin_unlock_irq (&dev->lock);
1052		}
1053		return retval;
1054	}
1055	if (fd->f_flags & O_NONBLOCK) {
1056		retval = -EAGAIN;
1057		goto done;
1058	}
1059
1060	switch (state) {
1061	default:
1062		DBG (dev, "fail %s, state %d\n", __func__, state);
1063		retval = -ESRCH;
1064		break;
1065	case STATE_DEV_UNCONNECTED:
1066	case STATE_DEV_CONNECTED:
1067		spin_unlock_irq (&dev->lock);
1068		DBG (dev, "%s wait\n", __func__);
1069
1070		/* wait for events */
1071		retval = wait_event_interruptible (dev->wait,
1072				dev->ev_next != 0);
1073		if (retval < 0)
1074			return retval;
1075		spin_lock_irq (&dev->lock);
1076		goto scan;
1077	}
1078
1079done:
1080	spin_unlock_irq (&dev->lock);
1081	return retval;
1082}
1083
1084static struct usb_gadgetfs_event *
1085next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1086{
1087	struct usb_gadgetfs_event	*event;
1088	unsigned			i;
1089
1090	switch (type) {
1091	/* these events purge the queue */
1092	case GADGETFS_DISCONNECT:
1093		if (dev->state == STATE_DEV_SETUP)
1094			dev->setup_abort = 1;
1095		fallthrough;
1096	case GADGETFS_CONNECT:
1097		dev->ev_next = 0;
1098		break;
1099	case GADGETFS_SETUP:		/* previous request timed out */
1100	case GADGETFS_SUSPEND:		/* same effect */
1101		/* these events can't be repeated */
1102		for (i = 0; i != dev->ev_next; i++) {
1103			if (dev->event [i].type != type)
1104				continue;
1105			DBG(dev, "discard old event[%d] %d\n", i, type);
1106			dev->ev_next--;
1107			if (i == dev->ev_next)
1108				break;
1109			/* indices start at zero, for simplicity */
1110			memmove (&dev->event [i], &dev->event [i + 1],
1111				sizeof (struct usb_gadgetfs_event)
1112					* (dev->ev_next - i));
1113		}
1114		break;
1115	default:
1116		BUG ();
1117	}
1118	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1119	event = &dev->event [dev->ev_next++];
1120	BUG_ON (dev->ev_next > N_EVENT);
1121	memset (event, 0, sizeof *event);
1122	event->type = type;
1123	return event;
1124}
1125
1126static ssize_t
1127ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1128{
1129	struct dev_data		*dev = fd->private_data;
1130	ssize_t			retval = -ESRCH;
1131
1132	/* report fd mode change before acting on it */
1133	if (dev->setup_abort) {
1134		dev->setup_abort = 0;
1135		retval = -EIDRM;
1136
1137	/* data and/or status stage for control request */
1138	} else if (dev->state == STATE_DEV_SETUP) {
1139
1140		len = min_t(size_t, len, dev->setup_wLength);
1141		if (dev->setup_in) {
1142			retval = setup_req (dev->gadget->ep0, dev->req, len);
1143			if (retval == 0) {
1144				dev->state = STATE_DEV_CONNECTED;
1145				++dev->udc_usage;
1146				spin_unlock_irq (&dev->lock);
1147				if (copy_from_user (dev->req->buf, buf, len))
1148					retval = -EFAULT;
1149				else {
1150					if (len < dev->setup_wLength)
1151						dev->req->zero = 1;
1152					retval = usb_ep_queue (
1153						dev->gadget->ep0, dev->req,
1154						GFP_KERNEL);
1155				}
1156				spin_lock_irq(&dev->lock);
1157				--dev->udc_usage;
1158				if (retval < 0) {
1159					clean_req (dev->gadget->ep0, dev->req);
1160				} else
1161					retval = len;
1162
1163				return retval;
1164			}
1165
1166		/* can stall some OUT transfers */
1167		} else if (dev->setup_can_stall) {
1168			VDEBUG(dev, "ep0out stall\n");
1169			(void) usb_ep_set_halt (dev->gadget->ep0);
1170			retval = -EL2HLT;
1171			dev->state = STATE_DEV_CONNECTED;
1172		} else {
1173			DBG(dev, "bogus ep0out stall!\n");
1174		}
1175	} else
1176		DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1177
1178	return retval;
1179}
1180
1181static int
1182ep0_fasync (int f, struct file *fd, int on)
1183{
1184	struct dev_data		*dev = fd->private_data;
1185	// caller must F_SETOWN before signal delivery happens
1186	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1187	return fasync_helper (f, fd, on, &dev->fasync);
1188}
1189
1190static struct usb_gadget_driver gadgetfs_driver;
1191
1192static int
1193dev_release (struct inode *inode, struct file *fd)
1194{
1195	struct dev_data		*dev = fd->private_data;
1196
1197	/* closing ep0 === shutdown all */
1198
1199	if (dev->gadget_registered) {
1200		usb_gadget_unregister_driver (&gadgetfs_driver);
1201		dev->gadget_registered = false;
1202	}
1203
1204	/* at this point "good" hardware has disconnected the
1205	 * device from USB; the host won't see it any more.
1206	 * alternatively, all host requests will time out.
1207	 */
1208
1209	kfree (dev->buf);
1210	dev->buf = NULL;
1211
1212	/* other endpoints were all decoupled from this device */
1213	spin_lock_irq(&dev->lock);
1214	dev->state = STATE_DEV_DISABLED;
1215	spin_unlock_irq(&dev->lock);
1216
1217	put_dev (dev);
1218	return 0;
1219}
1220
1221static __poll_t
1222ep0_poll (struct file *fd, poll_table *wait)
1223{
1224	struct dev_data         *dev = fd->private_data;
1225	__poll_t                mask = 0;
1226
1227	if (dev->state <= STATE_DEV_OPENED)
1228		return DEFAULT_POLLMASK;
1229
1230	poll_wait(fd, &dev->wait, wait);
1231
1232	spin_lock_irq(&dev->lock);
1233
1234	/* report fd mode change before acting on it */
1235	if (dev->setup_abort) {
1236		dev->setup_abort = 0;
1237		mask = EPOLLHUP;
1238		goto out;
1239	}
1240
1241	if (dev->state == STATE_DEV_SETUP) {
1242		if (dev->setup_in || dev->setup_can_stall)
1243			mask = EPOLLOUT;
1244	} else {
1245		if (dev->ev_next != 0)
1246			mask = EPOLLIN;
1247	}
1248out:
1249	spin_unlock_irq(&dev->lock);
1250	return mask;
1251}
1252
1253static long gadget_dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1254{
1255	struct dev_data		*dev = fd->private_data;
1256	struct usb_gadget	*gadget = dev->gadget;
1257	long ret = -ENOTTY;
1258
1259	spin_lock_irq(&dev->lock);
1260	if (dev->state == STATE_DEV_OPENED ||
1261			dev->state == STATE_DEV_UNBOUND) {
1262		/* Not bound to a UDC */
1263	} else if (gadget->ops->ioctl) {
1264		++dev->udc_usage;
1265		spin_unlock_irq(&dev->lock);
1266
1267		ret = gadget->ops->ioctl (gadget, code, value);
1268
1269		spin_lock_irq(&dev->lock);
1270		--dev->udc_usage;
1271	}
1272	spin_unlock_irq(&dev->lock);
1273
1274	return ret;
1275}
1276
1277/*----------------------------------------------------------------------*/
1278
1279/* The in-kernel gadget driver handles most ep0 issues, in particular
1280 * enumerating the single configuration (as provided from user space).
1281 *
1282 * Unrecognized ep0 requests may be handled in user space.
1283 */
1284
1285static void make_qualifier (struct dev_data *dev)
1286{
1287	struct usb_qualifier_descriptor		qual;
1288	struct usb_device_descriptor		*desc;
1289
1290	qual.bLength = sizeof qual;
1291	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1292	qual.bcdUSB = cpu_to_le16 (0x0200);
1293
1294	desc = dev->dev;
1295	qual.bDeviceClass = desc->bDeviceClass;
1296	qual.bDeviceSubClass = desc->bDeviceSubClass;
1297	qual.bDeviceProtocol = desc->bDeviceProtocol;
1298
1299	/* assumes ep0 uses the same value for both speeds ... */
1300	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1301
1302	qual.bNumConfigurations = 1;
1303	qual.bRESERVED = 0;
1304
1305	memcpy (dev->rbuf, &qual, sizeof qual);
1306}
1307
1308static int
1309config_buf (struct dev_data *dev, u8 type, unsigned index)
1310{
1311	int		len;
1312	int		hs = 0;
1313
1314	/* only one configuration */
1315	if (index > 0)
1316		return -EINVAL;
1317
1318	if (gadget_is_dualspeed(dev->gadget)) {
1319		hs = (dev->gadget->speed == USB_SPEED_HIGH);
1320		if (type == USB_DT_OTHER_SPEED_CONFIG)
1321			hs = !hs;
1322	}
1323	if (hs) {
1324		dev->req->buf = dev->hs_config;
1325		len = le16_to_cpu(dev->hs_config->wTotalLength);
1326	} else {
1327		dev->req->buf = dev->config;
1328		len = le16_to_cpu(dev->config->wTotalLength);
1329	}
1330	((u8 *)dev->req->buf) [1] = type;
1331	return len;
1332}
1333
1334static int
1335gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1336{
1337	struct dev_data			*dev = get_gadget_data (gadget);
1338	struct usb_request		*req = dev->req;
1339	int				value = -EOPNOTSUPP;
1340	struct usb_gadgetfs_event	*event;
1341	u16				w_value = le16_to_cpu(ctrl->wValue);
1342	u16				w_length = le16_to_cpu(ctrl->wLength);
1343
1344	if (w_length > RBUF_SIZE) {
1345		if (ctrl->bRequestType & USB_DIR_IN) {
1346			/* Cast away the const, we are going to overwrite on purpose. */
1347			__le16 *temp = (__le16 *)&ctrl->wLength;
1348
1349			*temp = cpu_to_le16(RBUF_SIZE);
1350			w_length = RBUF_SIZE;
1351		} else {
1352			return value;
1353		}
1354	}
1355
1356	spin_lock (&dev->lock);
1357	dev->setup_abort = 0;
1358	if (dev->state == STATE_DEV_UNCONNECTED) {
1359		if (gadget_is_dualspeed(gadget)
1360				&& gadget->speed == USB_SPEED_HIGH
1361				&& dev->hs_config == NULL) {
1362			spin_unlock(&dev->lock);
1363			ERROR (dev, "no high speed config??\n");
1364			return -EINVAL;
1365		}
1366
1367		dev->state = STATE_DEV_CONNECTED;
1368
1369		INFO (dev, "connected\n");
1370		event = next_event (dev, GADGETFS_CONNECT);
1371		event->u.speed = gadget->speed;
1372		ep0_readable (dev);
1373
1374	/* host may have given up waiting for response.  we can miss control
1375	 * requests handled lower down (device/endpoint status and features);
1376	 * then ep0_{read,write} will report the wrong status. controller
1377	 * driver will have aborted pending i/o.
1378	 */
1379	} else if (dev->state == STATE_DEV_SETUP)
1380		dev->setup_abort = 1;
1381
1382	req->buf = dev->rbuf;
1383	req->context = NULL;
1384	switch (ctrl->bRequest) {
1385
1386	case USB_REQ_GET_DESCRIPTOR:
1387		if (ctrl->bRequestType != USB_DIR_IN)
1388			goto unrecognized;
1389		switch (w_value >> 8) {
1390
1391		case USB_DT_DEVICE:
1392			value = min (w_length, (u16) sizeof *dev->dev);
1393			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1394			req->buf = dev->dev;
1395			break;
1396		case USB_DT_DEVICE_QUALIFIER:
1397			if (!dev->hs_config)
1398				break;
1399			value = min (w_length, (u16)
1400				sizeof (struct usb_qualifier_descriptor));
1401			make_qualifier (dev);
1402			break;
1403		case USB_DT_OTHER_SPEED_CONFIG:
1404		case USB_DT_CONFIG:
1405			value = config_buf (dev,
1406					w_value >> 8,
1407					w_value & 0xff);
1408			if (value >= 0)
1409				value = min (w_length, (u16) value);
1410			break;
1411		case USB_DT_STRING:
1412			goto unrecognized;
1413
1414		default:		// all others are errors
1415			break;
1416		}
1417		break;
1418
1419	/* currently one config, two speeds */
1420	case USB_REQ_SET_CONFIGURATION:
1421		if (ctrl->bRequestType != 0)
1422			goto unrecognized;
1423		if (0 == (u8) w_value) {
1424			value = 0;
1425			dev->current_config = 0;
1426			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1427			// user mode expected to disable endpoints
1428		} else {
1429			u8	config, power;
1430
1431			if (gadget_is_dualspeed(gadget)
1432					&& gadget->speed == USB_SPEED_HIGH) {
1433				config = dev->hs_config->bConfigurationValue;
1434				power = dev->hs_config->bMaxPower;
1435			} else {
1436				config = dev->config->bConfigurationValue;
1437				power = dev->config->bMaxPower;
1438			}
1439
1440			if (config == (u8) w_value) {
1441				value = 0;
1442				dev->current_config = config;
1443				usb_gadget_vbus_draw(gadget, 2 * power);
1444			}
1445		}
1446
1447		/* report SET_CONFIGURATION like any other control request,
1448		 * except that usermode may not stall this.  the next
 1449		 * request mustn't be allowed to start until this finishes:
1450		 * endpoints and threads set up, etc.
1451		 *
1452		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
 1453		 * has bad/racy automagic that prevents synchronizing here.
1454		 * even kernel mode drivers often miss them.
1455		 */
1456		if (value == 0) {
1457			INFO (dev, "configuration #%d\n", dev->current_config);
1458			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1459			if (dev->usermode_setup) {
1460				dev->setup_can_stall = 0;
1461				goto delegate;
1462			}
1463		}
1464		break;
1465
1466#ifndef	CONFIG_USB_PXA25X
1467	/* PXA automagically handles this request too */
1468	case USB_REQ_GET_CONFIGURATION:
1469		if (ctrl->bRequestType != 0x80)
1470			goto unrecognized;
1471		*(u8 *)req->buf = dev->current_config;
1472		value = min (w_length, (u16) 1);
1473		break;
1474#endif
1475
1476	default:
1477unrecognized:
1478		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1479			dev->usermode_setup ? "delegate" : "fail",
1480			ctrl->bRequestType, ctrl->bRequest,
1481			w_value, le16_to_cpu(ctrl->wIndex), w_length);
1482
1483		/* if there's an ep0 reader, don't stall */
1484		if (dev->usermode_setup) {
1485			dev->setup_can_stall = 1;
1486delegate:
1487			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1488						? 1 : 0;
1489			dev->setup_wLength = w_length;
1490			dev->setup_out_ready = 0;
1491			dev->setup_out_error = 0;
1492
1493			/* read DATA stage for OUT right away */
1494			if (unlikely (!dev->setup_in && w_length)) {
1495				value = setup_req (gadget->ep0, dev->req,
1496							w_length);
1497				if (value < 0)
1498					break;
1499
1500				++dev->udc_usage;
1501				spin_unlock (&dev->lock);
1502				value = usb_ep_queue (gadget->ep0, dev->req,
1503							GFP_KERNEL);
1504				spin_lock (&dev->lock);
1505				--dev->udc_usage;
1506				if (value < 0) {
1507					clean_req (gadget->ep0, dev->req);
1508					break;
1509				}
1510
1511				/* we can't currently stall these */
1512				dev->setup_can_stall = 0;
1513			}
1514
1515			/* state changes when reader collects event */
1516			event = next_event (dev, GADGETFS_SETUP);
1517			event->u.setup = *ctrl;
1518			ep0_readable (dev);
1519			spin_unlock (&dev->lock);
1520			/*
1521			 * Return USB_GADGET_DELAYED_STATUS as a workaround to
1522			 * stop some UDC drivers (e.g. dwc3) from automatically
1523			 * proceeding with the status stage for 0-length
1524			 * transfers.
1525			 * Should be removed once all UDC drivers are fixed to
1526			 * always delay the status stage until a response is
1527			 * queued to EP0.
1528			 */
1529			return w_length == 0 ? USB_GADGET_DELAYED_STATUS : 0;
1530		}
1531	}
1532
1533	/* proceed with data transfer and status phases? */
1534	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1535		req->length = value;
1536		req->zero = value < w_length;
1537
1538		++dev->udc_usage;
1539		spin_unlock (&dev->lock);
1540		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1541		spin_lock(&dev->lock);
1542		--dev->udc_usage;
1543		spin_unlock(&dev->lock);
1544		if (value < 0) {
1545			DBG (dev, "ep_queue --> %d\n", value);
1546			req->status = 0;
1547		}
1548		return value;
1549	}
1550
1551	/* device stalls when value < 0 */
1552	spin_unlock (&dev->lock);
1553	return value;
1554}
1555
1556static void destroy_ep_files (struct dev_data *dev)
1557{
1558	DBG (dev, "%s %d\n", __func__, dev->state);
1559
1560	/* dev->state must prevent interference */
1561	spin_lock_irq (&dev->lock);
1562	while (!list_empty(&dev->epfiles)) {
1563		struct ep_data	*ep;
1564		struct inode	*parent;
1565		struct dentry	*dentry;
1566
1567		/* break link to FS */
1568		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1569		list_del_init (&ep->epfiles);
1570		spin_unlock_irq (&dev->lock);
1571
1572		dentry = ep->dentry;
1573		ep->dentry = NULL;
1574		parent = d_inode(dentry->d_parent);
1575
1576		/* break link to controller */
1577		mutex_lock(&ep->lock);
1578		if (ep->state == STATE_EP_ENABLED)
1579			(void) usb_ep_disable (ep->ep);
1580		ep->state = STATE_EP_UNBOUND;
1581		usb_ep_free_request (ep->ep, ep->req);
1582		ep->ep = NULL;
1583		mutex_unlock(&ep->lock);
1584
1585		wake_up (&ep->wait);
1586		put_ep (ep);
1587
1588		/* break link to dcache */
1589		inode_lock(parent);
1590		d_delete (dentry);
1591		dput (dentry);
1592		inode_unlock(parent);
1593
1594		spin_lock_irq (&dev->lock);
1595	}
1596	spin_unlock_irq (&dev->lock);
1597}
1598
1599
1600static struct dentry *
1601gadgetfs_create_file (struct super_block *sb, char const *name,
1602		void *data, const struct file_operations *fops);
1603
1604static int activate_ep_files (struct dev_data *dev)
1605{
1606	struct usb_ep	*ep;
1607	struct ep_data	*data;
1608
1609	gadget_for_each_ep (ep, dev->gadget) {
1610
1611		data = kzalloc(sizeof(*data), GFP_KERNEL);
1612		if (!data)
1613			goto enomem0;
1614		data->state = STATE_EP_DISABLED;
1615		mutex_init(&data->lock);
1616		init_waitqueue_head (&data->wait);
1617
1618		strncpy (data->name, ep->name, sizeof (data->name) - 1);
1619		refcount_set (&data->count, 1);
1620		data->dev = dev;
1621		get_dev (dev);
1622
1623		data->ep = ep;
1624		ep->driver_data = data;
1625
1626		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1627		if (!data->req)
1628			goto enomem1;
1629
1630		data->dentry = gadgetfs_create_file (dev->sb, data->name,
1631				data, &ep_io_operations);
1632		if (!data->dentry)
1633			goto enomem2;
1634		list_add_tail (&data->epfiles, &dev->epfiles);
1635	}
1636	return 0;
1637
1638enomem2:
1639	usb_ep_free_request (ep, data->req);
1640enomem1:
1641	put_dev (dev);
1642	kfree (data);
1643enomem0:
1644	DBG (dev, "%s enomem\n", __func__);
1645	destroy_ep_files (dev);
1646	return -ENOMEM;
1647}
1648
1649static void
1650gadgetfs_unbind (struct usb_gadget *gadget)
1651{
1652	struct dev_data		*dev = get_gadget_data (gadget);
1653
1654	DBG (dev, "%s\n", __func__);
1655
1656	spin_lock_irq (&dev->lock);
1657	dev->state = STATE_DEV_UNBOUND;
1658	while (dev->udc_usage > 0) {
1659		spin_unlock_irq(&dev->lock);
1660		usleep_range(1000, 2000);
1661		spin_lock_irq(&dev->lock);
1662	}
1663	spin_unlock_irq (&dev->lock);
1664
1665	destroy_ep_files (dev);
1666	gadget->ep0->driver_data = NULL;
1667	set_gadget_data (gadget, NULL);
1668
1669	/* we've already been disconnected ... no i/o is active */
1670	if (dev->req)
1671		usb_ep_free_request (gadget->ep0, dev->req);
1672	DBG (dev, "%s done\n", __func__);
1673	put_dev (dev);
1674}
1675
1676static struct dev_data		*the_device;
1677
1678static int gadgetfs_bind(struct usb_gadget *gadget,
1679		struct usb_gadget_driver *driver)
1680{
1681	struct dev_data		*dev = the_device;
1682
1683	if (!dev)
1684		return -ESRCH;
1685	if (0 != strcmp (CHIP, gadget->name)) {
1686		pr_err("%s expected %s controller not %s\n",
1687			shortname, CHIP, gadget->name);
1688		return -ENODEV;
1689	}
1690
1691	set_gadget_data (gadget, dev);
1692	dev->gadget = gadget;
1693	gadget->ep0->driver_data = dev;
1694
1695	/* preallocate control response and buffer */
1696	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1697	if (!dev->req)
1698		goto enomem;
1699	dev->req->context = NULL;
1700	dev->req->complete = epio_complete;
1701
1702	if (activate_ep_files (dev) < 0)
1703		goto enomem;
1704
1705	INFO (dev, "bound to %s driver\n", gadget->name);
1706	spin_lock_irq(&dev->lock);
1707	dev->state = STATE_DEV_UNCONNECTED;
1708	spin_unlock_irq(&dev->lock);
1709	get_dev (dev);
1710	return 0;
1711
1712enomem:
1713	gadgetfs_unbind (gadget);
1714	return -ENOMEM;
1715}
1716
1717static void
1718gadgetfs_disconnect (struct usb_gadget *gadget)
1719{
1720	struct dev_data		*dev = get_gadget_data (gadget);
1721	unsigned long		flags;
1722
1723	spin_lock_irqsave (&dev->lock, flags);
1724	if (dev->state == STATE_DEV_UNCONNECTED)
1725		goto exit;
1726	dev->state = STATE_DEV_UNCONNECTED;
1727
1728	INFO (dev, "disconnected\n");
1729	next_event (dev, GADGETFS_DISCONNECT);
1730	ep0_readable (dev);
1731exit:
1732	spin_unlock_irqrestore (&dev->lock, flags);
1733}
1734
1735static void
1736gadgetfs_suspend (struct usb_gadget *gadget)
1737{
1738	struct dev_data		*dev = get_gadget_data (gadget);
1739	unsigned long		flags;
1740
1741	INFO (dev, "suspended from state %d\n", dev->state);
1742	spin_lock_irqsave(&dev->lock, flags);
1743	switch (dev->state) {
1744	case STATE_DEV_SETUP:		// VERY odd... host died??
1745	case STATE_DEV_CONNECTED:
1746	case STATE_DEV_UNCONNECTED:
1747		next_event (dev, GADGETFS_SUSPEND);
1748		ep0_readable (dev);
1749		fallthrough;
1750	default:
1751		break;
1752	}
1753	spin_unlock_irqrestore(&dev->lock, flags);
1754}
1755
1756static struct usb_gadget_driver gadgetfs_driver = {
1757	.function	= (char *) driver_desc,
1758	.bind		= gadgetfs_bind,
1759	.unbind		= gadgetfs_unbind,
1760	.setup		= gadgetfs_setup,
1761	.reset		= gadgetfs_disconnect,
1762	.disconnect	= gadgetfs_disconnect,
1763	.suspend	= gadgetfs_suspend,
1764
1765	.driver	= {
1766		.name		= shortname,
1767	},
1768};
1769
1770/*----------------------------------------------------------------------*/
1771/* DEVICE INITIALIZATION
1772 *
1773 *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
1774 *     status = write (fd, descriptors, sizeof descriptors)
1775 *
1776 * That write establishes the device configuration, so the kernel can
1777 * bind to the controller ... guaranteeing it can handle enumeration
1778 * at all necessary speeds.  Descriptor order is:
1779 *
1780 * . message tag (u32, host order) ... for now, must be zero; it
1781 *	would change to support features like multi-config devices
1782 * . full/low speed config ... all wTotalLength bytes (with interface,
1783 *	class, altsetting, endpoint, and other descriptors)
1784 * . high speed config ... all descriptors, for high speed operation;
1785 *	this one's optional except for high-speed hardware
1786 * . device descriptor
1787 *
1788 * Endpoints are not yet enabled. Drivers must wait until device
1789 * configuration and interface altsetting changes create
1790 * the need to configure (or unconfigure) them.
1791 *
1792 * After initialization, the device stays active for as long as that
1793 * $CHIP file is open.  Events must then be read from that descriptor,
1794 * such as configuration notifications.
1795 */
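/* A sketch of assembling that write (the buffer size and descriptor
 * sources are illustrative; the layout mirrors the list above):
 *
 *	__u32 tag = 0;
 *	__u8 buf[4096], *cp = buf;
 *
 *	memcpy(cp, &tag, 4); cp += 4;
 *	memcpy(cp, fs_config, fs_total); cp += fs_total;	// wTotalLength bytes
 *	memcpy(cp, hs_config, hs_total); cp += hs_total;	// optional
 *	memcpy(cp, &device_desc, USB_DT_DEVICE_SIZE);
 *	cp += USB_DT_DEVICE_SIZE;
 *	write(ep0, buf, cp - buf);		// handled by dev_config()
 */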
1796
1797static int is_valid_config(struct usb_config_descriptor *config,
1798		unsigned int total)
1799{
1800	return config->bDescriptorType == USB_DT_CONFIG
1801		&& config->bLength == USB_DT_CONFIG_SIZE
1802		&& total >= USB_DT_CONFIG_SIZE
1803		&& config->bConfigurationValue != 0
1804		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1805		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1806	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1807	/* FIXME check lengths: walk to end */
1808}
1809
1810static ssize_t
1811dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1812{
1813	struct dev_data		*dev = fd->private_data;
1814	ssize_t			value, length = len;
1815	unsigned		total;
1816	u32			tag;
1817	char			*kbuf;
1818
1819	spin_lock_irq(&dev->lock);
1820	if (dev->state > STATE_DEV_OPENED) {
1821		value = ep0_write(fd, buf, len, ptr);
1822		spin_unlock_irq(&dev->lock);
1823		return value;
1824	}
1825	spin_unlock_irq(&dev->lock);
1826
1827	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1828	    (len > PAGE_SIZE * 4))
1829		return -EINVAL;
1830
1831	/* we might need to change message format someday */
1832	if (copy_from_user (&tag, buf, 4))
1833		return -EFAULT;
1834	if (tag != 0)
1835		return -EINVAL;
1836	buf += 4;
1837	length -= 4;
1838
1839	kbuf = memdup_user(buf, length);
1840	if (IS_ERR(kbuf))
1841		return PTR_ERR(kbuf);
1842
1843	spin_lock_irq (&dev->lock);
1844	value = -EINVAL;
1845	if (dev->buf) {
1846		spin_unlock_irq(&dev->lock);
1847		kfree(kbuf);
1848		return value;
1849	}
1850	dev->buf = kbuf;
1851
1852	/* full or low speed config */
1853	dev->config = (void *) kbuf;
1854	total = le16_to_cpu(dev->config->wTotalLength);
1855	if (!is_valid_config(dev->config, total) ||
1856			total > length - USB_DT_DEVICE_SIZE)
1857		goto fail;
1858	kbuf += total;
1859	length -= total;
1860
1861	/* optional high speed config */
1862	if (kbuf [1] == USB_DT_CONFIG) {
1863		dev->hs_config = (void *) kbuf;
1864		total = le16_to_cpu(dev->hs_config->wTotalLength);
1865		if (!is_valid_config(dev->hs_config, total) ||
1866				total > length - USB_DT_DEVICE_SIZE)
1867			goto fail;
1868		kbuf += total;
1869		length -= total;
1870	} else {
1871		dev->hs_config = NULL;
1872	}
1873
1874	/* could support multiple configs, using another encoding! */
1875
1876	/* device descriptor (tweaked for paranoia) */
1877	if (length != USB_DT_DEVICE_SIZE)
1878		goto fail;
1879	dev->dev = (void *)kbuf;
1880	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1881			|| dev->dev->bDescriptorType != USB_DT_DEVICE
1882			|| dev->dev->bNumConfigurations != 1)
1883		goto fail;
1884	dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1885
1886	/* triggers gadgetfs_bind(); then we can enumerate. */
1887	spin_unlock_irq (&dev->lock);
1888	if (dev->hs_config)
1889		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1890	else
1891		gadgetfs_driver.max_speed = USB_SPEED_FULL;
1892
1893	value = usb_gadget_register_driver(&gadgetfs_driver);
1894	if (value != 0) {
1895		spin_lock_irq(&dev->lock);
1896		goto fail;
1897	} else {
1898		/* at this point "good" hardware has for the first time
 1899		 * let the host see us.  alternatively, if users
1900		 * unplug/replug that will clear all the error state.
1901		 *
1902		 * note:  everything running before here was guaranteed
1903		 * to choke driver model style diagnostics.  from here
1904		 * on, they can work ... except in cleanup paths that
1905		 * kick in after the ep0 descriptor is closed.
1906		 */
1907		value = len;
1908		dev->gadget_registered = true;
1909	}
1910	return value;
1911
1912fail:
1913	dev->config = NULL;
1914	dev->hs_config = NULL;
1915	dev->dev = NULL;
1916	spin_unlock_irq (&dev->lock);
1917	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1918	kfree (dev->buf);
1919	dev->buf = NULL;
1920	return value;
1921}
1922
1923static int
1924gadget_dev_open (struct inode *inode, struct file *fd)
1925{
1926	struct dev_data		*dev = inode->i_private;
1927	int			value = -EBUSY;
1928
1929	spin_lock_irq(&dev->lock);
1930	if (dev->state == STATE_DEV_DISABLED) {
1931		dev->ev_next = 0;
1932		dev->state = STATE_DEV_OPENED;
1933		fd->private_data = dev;
1934		get_dev (dev);
1935		value = 0;
1936	}
1937	spin_unlock_irq(&dev->lock);
1938	return value;
1939}
1940
1941static const struct file_operations ep0_operations = {
1942	.llseek =	no_llseek,
1943
1944	.open =		gadget_dev_open,
1945	.read =		ep0_read,
1946	.write =	dev_config,
1947	.fasync =	ep0_fasync,
1948	.poll =		ep0_poll,
1949	.unlocked_ioctl = gadget_dev_ioctl,
1950	.release =	dev_release,
1951};
1952
1953/*----------------------------------------------------------------------*/
1954
1955/* FILESYSTEM AND SUPERBLOCK OPERATIONS
1956 *
1957 * Mounting the filesystem creates a controller file, used first for
1958 * device configuration then later for event monitoring.
1959 */
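
/* For illustration only: one way to reach that controller file from
 * userspace (the mount point and UDC name are assumptions; use whatever
 * controller your system actually reports):
 *
 *     #include <sys/mount.h>
 *     #include <fcntl.h>
 *
 *     mount("gadgetfs", "/dev/gadget", "gadgetfs", 0, NULL);
 *     int ep0_fd = open("/dev/gadget/dummy_udc", O_RDWR);
 *
 * i.e. the C equivalent of "mount -t gadgetfs gadgetfs /dev/gadget".
 */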
1960
1961
1962/* FIXME PAM etc could set this security policy without mount options
1963 * if epfiles inherited ownership and permissions from ep0 ...
1964 */
1965
1966static unsigned default_uid;
1967static unsigned default_gid;
1968static unsigned default_perm = S_IRUSR | S_IWUSR;
1969
1970module_param (default_uid, uint, 0644);
1971module_param (default_gid, uint, 0644);
1972module_param (default_perm, uint, 0644);
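
/* For example, "modprobe gadgetfs default_gid=100 default_perm=0660"
 * (illustrative values only) is one way to let a non-root group open
 * the ep0 and endpoint files.
 */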
1973
1974
1975static struct inode *
1976gadgetfs_make_inode (struct super_block *sb,
1977		void *data, const struct file_operations *fops,
1978		int mode)
1979{
1980	struct inode *inode = new_inode (sb);
1981
1982	if (inode) {
1983		inode->i_ino = get_next_ino();
1984		inode->i_mode = mode;
1985		inode->i_uid = make_kuid(&init_user_ns, default_uid);
1986		inode->i_gid = make_kgid(&init_user_ns, default_gid);
1987		simple_inode_init_ts(inode);
1988		inode->i_private = data;
1989		inode->i_fop = fops;
1990	}
1991	return inode;
1992}
1993
1994/* creates files in the fs root directory, so they're non-renamable and
1995 * non-linkable; inode and dentry stay paired until device reconfig.
1996 */
1997static struct dentry *
1998gadgetfs_create_file (struct super_block *sb, char const *name,
1999		void *data, const struct file_operations *fops)
2000{
2001	struct dentry	*dentry;
2002	struct inode	*inode;
2003
2004	dentry = d_alloc_name(sb->s_root, name);
2005	if (!dentry)
2006		return NULL;
2007
2008	inode = gadgetfs_make_inode (sb, data, fops,
2009			S_IFREG | (default_perm & S_IRWXUGO));
2010	if (!inode) {
2011		dput(dentry);
2012		return NULL;
2013	}
2014	d_add (dentry, inode);
2015	return dentry;
2016}
2017
2018static const struct super_operations gadget_fs_operations = {
2019	.statfs =	simple_statfs,
2020	.drop_inode =	generic_delete_inode,
2021};
2022
2023static int
2024gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
2025{
2026	struct inode	*inode;
2027	struct dev_data	*dev;
2028	int		rc;
2029
2030	mutex_lock(&sb_mutex);
2031
2032	if (the_device) {
2033		rc = -ESRCH;
2034		goto Done;
2035	}
2036
2037	CHIP = usb_get_gadget_udc_name();
2038	if (!CHIP) {
2039		rc = -ENODEV;
2040		goto Done;
2041	}
2042
2043	/* superblock */
2044	sb->s_blocksize = PAGE_SIZE;
2045	sb->s_blocksize_bits = PAGE_SHIFT;
2046	sb->s_magic = GADGETFS_MAGIC;
2047	sb->s_op = &gadget_fs_operations;
2048	sb->s_time_gran = 1;
2049
2050	/* root inode */
2051	inode = gadgetfs_make_inode (sb,
2052			NULL, &simple_dir_operations,
2053			S_IFDIR | S_IRUGO | S_IXUGO);
2054	if (!inode)
2055		goto Enomem;
2056	inode->i_op = &simple_dir_inode_operations;
2057	if (!(sb->s_root = d_make_root (inode)))
2058		goto Enomem;
2059
2060	/* the ep0 file is named after the controller we expect;
2061	 * user mode code can use it for sanity checks, like we do.
2062	 */
2063	dev = dev_new ();
2064	if (!dev)
2065		goto Enomem;
2066
2067	dev->sb = sb;
2068	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2069	if (!dev->dentry) {
2070		put_dev(dev);
2071		goto Enomem;
2072	}
2073
2074	/* other endpoint files are available after hardware setup,
2075	 * from binding to a controller.
2076	 */
2077	the_device = dev;
2078	rc = 0;
2079	goto Done;
2080
2081 Enomem:
2082	kfree(CHIP);
2083	CHIP = NULL;
2084	rc = -ENOMEM;
2085
2086 Done:
2087	mutex_unlock(&sb_mutex);
2088	return rc;
2089}
2090
2091/* "mount -t gadgetfs path /dev/gadget" ends up here */
2092static int gadgetfs_get_tree(struct fs_context *fc)
2093{
2094	return get_tree_single(fc, gadgetfs_fill_super);
2095}
2096
2097static const struct fs_context_operations gadgetfs_context_ops = {
2098	.get_tree	= gadgetfs_get_tree,
2099};
2100
2101static int gadgetfs_init_fs_context(struct fs_context *fc)
2102{
2103	fc->ops = &gadgetfs_context_ops;
2104	return 0;
2105}
2106
2107static void
2108gadgetfs_kill_sb (struct super_block *sb)
2109{
2110	mutex_lock(&sb_mutex);
2111	kill_litter_super (sb);
2112	if (the_device) {
2113		put_dev (the_device);
2114		the_device = NULL;
2115	}
2116	kfree(CHIP);
2117	CHIP = NULL;
2118	mutex_unlock(&sb_mutex);
2119}
2120
2121/*----------------------------------------------------------------------*/
2122
2123static struct file_system_type gadgetfs_type = {
2124	.owner		= THIS_MODULE,
2125	.name		= shortname,
2126	.init_fs_context = gadgetfs_init_fs_context,
2127	.kill_sb	= gadgetfs_kill_sb,
2128};
2129MODULE_ALIAS_FS("gadgetfs");
2130
2131/*----------------------------------------------------------------------*/
2132
2133static int __init gadgetfs_init (void)
2134{
2135	int status;
2136
2137	status = register_filesystem (&gadgetfs_type);
2138	if (status == 0)
2139		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2140			shortname, driver_desc);
2141	return status;
2142}
2143module_init (gadgetfs_init);
2144
2145static void __exit gadgetfs_cleanup (void)
2146{
2147	pr_debug ("unregister %s\n", shortname);
2148	unregister_filesystem (&gadgetfs_type);
2149}
2150module_exit (gadgetfs_cleanup);
2151
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * inode.c -- user mode filesystem api for usb gadget controllers
   4 *
   5 * Copyright (C) 2003-2004 David Brownell
   6 * Copyright (C) 2003 Agilent Technologies
   7 */
   8
   9
  10/* #define VERBOSE_DEBUG */
  11
  12#include <linux/init.h>
  13#include <linux/module.h>
  14#include <linux/fs.h>
  15#include <linux/fs_context.h>
  16#include <linux/pagemap.h>
  17#include <linux/uts.h>
  18#include <linux/wait.h>
  19#include <linux/compiler.h>
  20#include <linux/uaccess.h>
  21#include <linux/sched.h>
  22#include <linux/slab.h>
  23#include <linux/poll.h>
  24#include <linux/kthread.h>
  25#include <linux/aio.h>
  26#include <linux/uio.h>
  27#include <linux/refcount.h>
  28#include <linux/delay.h>
  29#include <linux/device.h>
  30#include <linux/moduleparam.h>
  31
  32#include <linux/usb/gadgetfs.h>
  33#include <linux/usb/gadget.h>
  34#include <linux/usb/composite.h> /* for USB_GADGET_DELAYED_STATUS */
  35
  36/* Undef helpers from linux/usb/composite.h as gadgetfs redefines them */
  37#undef DBG
  38#undef ERROR
  39#undef INFO
  40
  41
  42/*
  43 * The gadgetfs API maps each endpoint to a file descriptor so that you
  44 * can use standard synchronous read/write calls for I/O.  There's some
  45 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
  46 * drivers show how this works in practice.  You can also use AIO to
  47 * eliminate I/O gaps between requests, to help when streaming data.
  48 *
  49 * Key parts that must be USB-specific are protocols defining how the
  50 * read/write operations relate to the hardware state machines.  There
  51 * are two types of files.  One type is for the device, implementing ep0.
  52 * The other type is for each IN or OUT endpoint.  In both cases, the
  53 * user mode driver must configure the hardware before using it.
  54 *
  55 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
  56 *   (by writing configuration and device descriptors).  Afterwards it
  57 *   may serve as a source of device events, used to handle all control
  58 *   requests other than basic enumeration.
  59 *
  60 * - Then, after a SET_CONFIGURATION control request, ep_config() is
  61 *   called when each /dev/gadget/ep* file is configured (by writing
  62 *   endpoint descriptors).  Afterwards these files are used to write()
  63 *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
  64 *   direction" request is issued (like reading an IN endpoint).
  65 *
  66 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
  67 * not possible on all hardware.  For example, precise fault handling with
  68 * respect to data left in endpoint fifos after aborted operations; or
  69 * selective clearing of endpoint halts, to implement SET_INTERFACE.
  70 */
  71
  72#define	DRIVER_DESC	"USB Gadget filesystem"
  73#define	DRIVER_VERSION	"24 Aug 2004"
  74
  75static const char driver_desc [] = DRIVER_DESC;
  76static const char shortname [] = "gadgetfs";
  77
  78MODULE_DESCRIPTION (DRIVER_DESC);
  79MODULE_AUTHOR ("David Brownell");
  80MODULE_LICENSE ("GPL");
  81
  82static int ep_open(struct inode *, struct file *);
  83
  84
  85/*----------------------------------------------------------------------*/
  86
  87#define GADGETFS_MAGIC		0xaee71ee7
  88
  89/* /dev/gadget/$CHIP represents ep0 and the whole device */
  90enum ep0_state {
  91	/* DISABLED is the initial state. */
  92	STATE_DEV_DISABLED = 0,
  93
  94	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
  95	 * ep0/device i/o modes and binding to the controller.  Driver
  96	 * must always write descriptors to initialize the device, then
  97	 * the device becomes UNCONNECTED until enumeration.
  98	 */
  99	STATE_DEV_OPENED,
 100
 101	/* From then on, ep0 fd is in either of two basic modes:
 102	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
 103	 * - SETUP: read/write will transfer control data and succeed;
 104	 *   or if "wrong direction", performs protocol stall
 105	 */
 106	STATE_DEV_UNCONNECTED,
 107	STATE_DEV_CONNECTED,
 108	STATE_DEV_SETUP,
 109
 110	/* UNBOUND means the driver closed ep0, so the device won't be
 111	 * accessible again (DEV_DISABLED) until all fds are closed.
 112	 */
 113	STATE_DEV_UNBOUND,
 114};
 115
 116/* enough for the whole queue: most events invalidate others */
 117#define	N_EVENT			5
 118
 119#define RBUF_SIZE		256
 120
 121struct dev_data {
 122	spinlock_t			lock;
 123	refcount_t			count;
 124	int				udc_usage;
 125	enum ep0_state			state;		/* P: lock */
 126	struct usb_gadgetfs_event	event [N_EVENT];
 127	unsigned			ev_next;
 128	struct fasync_struct		*fasync;
 129	u8				current_config;
 130
 131	/* drivers reading ep0 MUST handle control requests (SETUP)
 132	 * reported that way; else the host will time out.
 133	 */
 134	unsigned			usermode_setup : 1,
 135					setup_in : 1,
 136					setup_can_stall : 1,
 137					setup_out_ready : 1,
 138					setup_out_error : 1,
 139					setup_abort : 1,
 140					gadget_registered : 1;
 141	unsigned			setup_wLength;
 142
 143	/* the rest is basically write-once */
 144	struct usb_config_descriptor	*config, *hs_config;
 145	struct usb_device_descriptor	*dev;
 146	struct usb_request		*req;
 147	struct usb_gadget		*gadget;
 148	struct list_head		epfiles;
 149	void				*buf;
 150	wait_queue_head_t		wait;
 151	struct super_block		*sb;
 152	struct dentry			*dentry;
 153
 154	/* except this scratch i/o buffer for ep0 */
 155	u8				rbuf[RBUF_SIZE];
 156};
 157
 158static inline void get_dev (struct dev_data *data)
 159{
 160	refcount_inc (&data->count);
 161}
 162
 163static void put_dev (struct dev_data *data)
 164{
 165	if (likely (!refcount_dec_and_test (&data->count)))
 166		return;
 167	/* needs no more cleanup */
 168	BUG_ON (waitqueue_active (&data->wait));
 169	kfree (data);
 170}
 171
 172static struct dev_data *dev_new (void)
 173{
 174	struct dev_data		*dev;
 175
 176	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 177	if (!dev)
 178		return NULL;
 179	dev->state = STATE_DEV_DISABLED;
 180	refcount_set (&dev->count, 1);
 181	spin_lock_init (&dev->lock);
 182	INIT_LIST_HEAD (&dev->epfiles);
 183	init_waitqueue_head (&dev->wait);
 184	return dev;
 185}
 186
 187/*----------------------------------------------------------------------*/
 188
 189/* other /dev/gadget/$ENDPOINT files represent endpoints */
 190enum ep_state {
 191	STATE_EP_DISABLED = 0,
 192	STATE_EP_READY,
 193	STATE_EP_ENABLED,
 194	STATE_EP_UNBOUND,
 195};
 196
 197struct ep_data {
 198	struct mutex			lock;
 199	enum ep_state			state;
 200	refcount_t			count;
 201	struct dev_data			*dev;
 202	/* must hold dev->lock before accessing ep or req */
 203	struct usb_ep			*ep;
 204	struct usb_request		*req;
 205	ssize_t				status;
 206	char				name [16];
 207	struct usb_endpoint_descriptor	desc, hs_desc;
 208	struct list_head		epfiles;
 209	wait_queue_head_t		wait;
 210	struct dentry			*dentry;
 211};
 212
 213static inline void get_ep (struct ep_data *data)
 214{
 215	refcount_inc (&data->count);
 216}
 217
 218static void put_ep (struct ep_data *data)
 219{
 220	if (likely (!refcount_dec_and_test (&data->count)))
 221		return;
 222	put_dev (data->dev);
 223	/* needs no more cleanup */
 224	BUG_ON (!list_empty (&data->epfiles));
 225	BUG_ON (waitqueue_active (&data->wait));
 226	kfree (data);
 227}
 228
 229/*----------------------------------------------------------------------*/
 230
 231/* most "how to use the hardware" policy choices are in userspace:
 232 * mapping endpoint roles (which the driver needs) to the capabilities
 233 * which the usb controller has.  most of those capabilities are exposed
 234 * implicitly, starting with the driver name and then endpoint names.
 235 */
 236
 237static const char *CHIP;
 238static DEFINE_MUTEX(sb_mutex);		/* Serialize superblock operations */
 239
 240/*----------------------------------------------------------------------*/
 241
 242/* NOTE:  don't use dev_printk calls before binding to the gadget
 243 * at the end of ep0 configuration, or after unbind.
 244 */
 245
 246/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
 247#define xprintk(d,level,fmt,args...) \
 248	printk(level "%s: " fmt , shortname , ## args)
 249
 250#ifdef DEBUG
 251#define DBG(dev,fmt,args...) \
 252	xprintk(dev , KERN_DEBUG , fmt , ## args)
 253#else
 254#define DBG(dev,fmt,args...) \
 255	do { } while (0)
 256#endif /* DEBUG */
 257
 258#ifdef VERBOSE_DEBUG
 259#define VDEBUG	DBG
 260#else
 261#define VDEBUG(dev,fmt,args...) \
 262	do { } while (0)
 263#endif /* DEBUG */
 264
 265#define ERROR(dev,fmt,args...) \
 266	xprintk(dev , KERN_ERR , fmt , ## args)
 267#define INFO(dev,fmt,args...) \
 268	xprintk(dev , KERN_INFO , fmt , ## args)
 269
 270
 271/*----------------------------------------------------------------------*/
 272
 273/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
 274 *
 275 * After opening, configure non-control endpoints.  Then use normal
 276 * stream read() and write() requests; and maybe ioctl() to get more
 277 * precise FIFO status when recovering from cancellation.
 278 */
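
/* A rough userspace sketch of that synchronous path, assuming the
 * endpoint file was already configured by a tag-1 descriptor write
 * (the name "ep1in" and the buffers are illustrative):
 *
 *     int fd = open("/dev/gadget/ep1in", O_RDWR);
 *     write(fd, ep_descriptors, ep_desc_len);   // configure first
 *     ssize_t n = write(fd, buf, sizeof buf);   // queues one IN transfer
 *     if (n < 0 && errno == EINTR)
 *             ;                                 // request was dequeued
 *     read(fd, buf, 1);   // "wrong direction" i/o halts the endpoint
 */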
 279
 280static void epio_complete (struct usb_ep *ep, struct usb_request *req)
 281{
 282	struct ep_data	*epdata = ep->driver_data;
 283
 284	if (!req->context)
 285		return;
 286	if (req->status)
 287		epdata->status = req->status;
 288	else
 289		epdata->status = req->actual;
 290	complete ((struct completion *)req->context);
 291}
 292
 293/* lock the endpoint (its mutex), returning once it's usable.
 294 * still need dev->lock to use epdata->ep.
 295 */
 296static int
 297get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
 298{
 299	int	val;
 300
 301	if (f_flags & O_NONBLOCK) {
 302		if (!mutex_trylock(&epdata->lock))
 303			goto nonblock;
 304		if (epdata->state != STATE_EP_ENABLED &&
 305		    (!is_write || epdata->state != STATE_EP_READY)) {
 306			mutex_unlock(&epdata->lock);
 307nonblock:
 308			val = -EAGAIN;
 309		} else
 310			val = 0;
 311		return val;
 312	}
 313
 314	val = mutex_lock_interruptible(&epdata->lock);
 315	if (val < 0)
 316		return val;
 317
 318	switch (epdata->state) {
 319	case STATE_EP_ENABLED:
 320		return 0;
 321	case STATE_EP_READY:			/* not configured yet */
 322		if (is_write)
 323			return 0;
 324		fallthrough;
 325	case STATE_EP_UNBOUND:			/* clean disconnect */
 326		break;
 327	// case STATE_EP_DISABLED:		/* "can't happen" */
 328	default:				/* error! */
 329		pr_debug ("%s: ep %p not available, state %d\n",
 330				shortname, epdata, epdata->state);
 331	}
 332	mutex_unlock(&epdata->lock);
 333	return -ENODEV;
 334}
 335
 336static ssize_t
 337ep_io (struct ep_data *epdata, void *buf, unsigned len)
 338{
 339	DECLARE_COMPLETION_ONSTACK (done);
 340	int value;
 341
 342	spin_lock_irq (&epdata->dev->lock);
 343	if (likely (epdata->ep != NULL)) {
 344		struct usb_request	*req = epdata->req;
 345
 346		req->context = &done;
 347		req->complete = epio_complete;
 348		req->buf = buf;
 349		req->length = len;
 350		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
 351	} else
 352		value = -ENODEV;
 353	spin_unlock_irq (&epdata->dev->lock);
 354
 355	if (likely (value == 0)) {
 356		value = wait_for_completion_interruptible(&done);
 357		if (value != 0) {
 358			spin_lock_irq (&epdata->dev->lock);
 359			if (likely (epdata->ep != NULL)) {
 360				DBG (epdata->dev, "%s i/o interrupted\n",
 361						epdata->name);
 362				usb_ep_dequeue (epdata->ep, epdata->req);
 363				spin_unlock_irq (&epdata->dev->lock);
 364
 365				wait_for_completion(&done);
 366				if (epdata->status == -ECONNRESET)
 367					epdata->status = -EINTR;
 368			} else {
 369				spin_unlock_irq (&epdata->dev->lock);
 370
 371				DBG (epdata->dev, "endpoint gone\n");
 372				wait_for_completion(&done);
 373				epdata->status = -ENODEV;
 374			}
 375		}
 376		return epdata->status;
 377	}
 378	return value;
 379}
 380
 381static int
 382ep_release (struct inode *inode, struct file *fd)
 383{
 384	struct ep_data		*data = fd->private_data;
 385	int value;
 386
 387	value = mutex_lock_interruptible(&data->lock);
 388	if (value < 0)
 389		return value;
 390
 391	/* clean up if this can be reopened */
 392	if (data->state != STATE_EP_UNBOUND) {
 393		data->state = STATE_EP_DISABLED;
 394		data->desc.bDescriptorType = 0;
 395		data->hs_desc.bDescriptorType = 0;
 396		usb_ep_disable(data->ep);
 397	}
 398	mutex_unlock(&data->lock);
 399	put_ep (data);
 400	return 0;
 401}
 402
 403static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
 404{
 405	struct ep_data		*data = fd->private_data;
 406	int			status;
 407
 408	if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
 409		return status;
 410
 411	spin_lock_irq (&data->dev->lock);
 412	if (likely (data->ep != NULL)) {
 413		switch (code) {
 414		case GADGETFS_FIFO_STATUS:
 415			status = usb_ep_fifo_status (data->ep);
 416			break;
 417		case GADGETFS_FIFO_FLUSH:
 418			usb_ep_fifo_flush (data->ep);
 419			break;
 420		case GADGETFS_CLEAR_HALT:
 421			status = usb_ep_clear_halt (data->ep);
 422			break;
 423		default:
 424			status = -ENOTTY;
 425		}
 426	} else
 427		status = -ENODEV;
 428	spin_unlock_irq (&data->dev->lock);
 429	mutex_unlock(&data->lock);
 430	return status;
 431}
 432
 433/*----------------------------------------------------------------------*/
 434
 435/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
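
/* One way userspace can keep transfers queued back to back is Linux AIO;
 * this sketch uses libaio and made-up names (ep_out_fd, buf), and is not
 * the only possible submission interface:
 *
 *     #include <libaio.h>
 *
 *     io_context_t ctx = 0;
 *     struct iocb cb, *cbs[1] = { &cb };
 *     struct io_event done;
 *
 *     io_setup(8, &ctx);
 *     io_prep_pread(&cb, ep_out_fd, buf, sizeof buf, 0);
 *     io_submit(ctx, 1, cbs);                 // queue an OUT transfer
 *     io_getevents(ctx, 1, 1, &done, NULL);   // wait for its completion
 *     io_destroy(ctx);
 */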
 436
 437struct kiocb_priv {
 438	struct usb_request	*req;
 439	struct ep_data		*epdata;
 440	struct kiocb		*iocb;
 441	struct mm_struct	*mm;
 442	struct work_struct	work;
 443	void			*buf;
 444	struct iov_iter		to;
 445	const void		*to_free;
 446	unsigned		actual;
 447};
 448
 449static int ep_aio_cancel(struct kiocb *iocb)
 450{
 451	struct kiocb_priv	*priv = iocb->private;
 452	struct ep_data		*epdata;
 453	int			value;
 454
 455	local_irq_disable();
 456	epdata = priv->epdata;
 457	// spin_lock(&epdata->dev->lock);
 458	if (likely(epdata && epdata->ep && priv->req))
 459		value = usb_ep_dequeue (epdata->ep, priv->req);
 460	else
 461		value = -EINVAL;
 462	// spin_unlock(&epdata->dev->lock);
 463	local_irq_enable();
 464
 465	return value;
 466}
 467
 468static void ep_user_copy_worker(struct work_struct *work)
 469{
 470	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
 471	struct mm_struct *mm = priv->mm;
 472	struct kiocb *iocb = priv->iocb;
 473	size_t ret;
 474
 475	kthread_use_mm(mm);
 476	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
 477	kthread_unuse_mm(mm);
 478	if (!ret)
 479		ret = -EFAULT;
 480
 481	/* completing the iocb can drop the ctx and mm, don't touch mm after */
 482	iocb->ki_complete(iocb, ret);
 483
 484	kfree(priv->buf);
 485	kfree(priv->to_free);
 486	kfree(priv);
 487}
 488
 489static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 490{
 491	struct kiocb		*iocb = req->context;
 492	struct kiocb_priv	*priv = iocb->private;
 493	struct ep_data		*epdata = priv->epdata;
 494
 495	/* lock against disconnect (and ideally, cancel) */
 496	spin_lock(&epdata->dev->lock);
 497	priv->req = NULL;
 498	priv->epdata = NULL;
 499
 500	/* if this was a write or a read returning no data then we
 501	 * don't need to copy anything to userspace, so we can
 502	 * complete the aio request immediately.
 503	 */
 504	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
 505		kfree(req->buf);
 506		kfree(priv->to_free);
 507		kfree(priv);
 508		iocb->private = NULL;
 509		iocb->ki_complete(iocb,
 510				req->actual ? req->actual : (long)req->status);
 511	} else {
 512		/* ep_copy_to_user() won't report both; we hide some faults */
 513		if (unlikely(0 != req->status))
 514			DBG(epdata->dev, "%s fault %d len %d\n",
 515				ep->name, req->status, req->actual);
 516
 517		priv->buf = req->buf;
 518		priv->actual = req->actual;
 519		INIT_WORK(&priv->work, ep_user_copy_worker);
 520		schedule_work(&priv->work);
 521	}
 522
 523	usb_ep_free_request(ep, req);
 524	spin_unlock(&epdata->dev->lock);
 525	put_ep(epdata);
 526}
 527
 528static ssize_t ep_aio(struct kiocb *iocb,
 529		      struct kiocb_priv *priv,
 530		      struct ep_data *epdata,
 531		      char *buf,
 532		      size_t len)
 533{
 534	struct usb_request *req;
 535	ssize_t value;
 536
 537	iocb->private = priv;
 538	priv->iocb = iocb;
 539
 540	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
 541	get_ep(epdata);
 542	priv->epdata = epdata;
 543	priv->actual = 0;
 544	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
 545
 546	/* each kiocb is coupled to one usb_request, but we can't
 547	 * allocate or submit those if the host disconnected.
 548	 */
 549	spin_lock_irq(&epdata->dev->lock);
 550	value = -ENODEV;
 551	if (unlikely(epdata->ep == NULL))
 552		goto fail;
 553
 554	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
 555	value = -ENOMEM;
 556	if (unlikely(!req))
 557		goto fail;
 558
 559	priv->req = req;
 560	req->buf = buf;
 561	req->length = len;
 562	req->complete = ep_aio_complete;
 563	req->context = iocb;
 564	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
 565	if (unlikely(0 != value)) {
 566		usb_ep_free_request(epdata->ep, req);
 567		goto fail;
 568	}
 569	spin_unlock_irq(&epdata->dev->lock);
 570	return -EIOCBQUEUED;
 571
 572fail:
 573	spin_unlock_irq(&epdata->dev->lock);
 574	kfree(priv->to_free);
 575	kfree(priv);
 576	put_ep(epdata);
 577	return value;
 578}
 579
 580static ssize_t
 581ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
 582{
 583	struct file *file = iocb->ki_filp;
 584	struct ep_data *epdata = file->private_data;
 585	size_t len = iov_iter_count(to);
 586	ssize_t value;
 587	char *buf;
 588
 589	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
 590		return value;
 591
 592	/* halt any endpoint by doing a "wrong direction" i/o call */
 593	if (usb_endpoint_dir_in(&epdata->desc)) {
 594		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
 595		    !is_sync_kiocb(iocb)) {
 596			mutex_unlock(&epdata->lock);
 597			return -EINVAL;
 598		}
 599		DBG (epdata->dev, "%s halt\n", epdata->name);
 600		spin_lock_irq(&epdata->dev->lock);
 601		if (likely(epdata->ep != NULL))
 602			usb_ep_set_halt(epdata->ep);
 603		spin_unlock_irq(&epdata->dev->lock);
 604		mutex_unlock(&epdata->lock);
 605		return -EBADMSG;
 606	}
 607
 608	buf = kmalloc(len, GFP_KERNEL);
 609	if (unlikely(!buf)) {
 610		mutex_unlock(&epdata->lock);
 611		return -ENOMEM;
 612	}
 613	if (is_sync_kiocb(iocb)) {
 614		value = ep_io(epdata, buf, len);
 615		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
 616			value = -EFAULT;
 617	} else {
 618		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
 619		value = -ENOMEM;
 620		if (!priv)
 621			goto fail;
 622		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
 623		if (!iter_is_ubuf(&priv->to) && !priv->to_free) {
 624			kfree(priv);
 625			goto fail;
 626		}
 627		value = ep_aio(iocb, priv, epdata, buf, len);
 628		if (value == -EIOCBQUEUED)
 629			buf = NULL;
 630	}
 631fail:
 632	kfree(buf);
 633	mutex_unlock(&epdata->lock);
 634	return value;
 635}
 636
 637static ssize_t ep_config(struct ep_data *, const char *, size_t);
 638
 639static ssize_t
 640ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
 641{
 642	struct file *file = iocb->ki_filp;
 643	struct ep_data *epdata = file->private_data;
 644	size_t len = iov_iter_count(from);
 645	bool configured;
 646	ssize_t value;
 647	char *buf;
 648
 649	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
 650		return value;
 651
 652	configured = epdata->state == STATE_EP_ENABLED;
 653
 654	/* halt any endpoint by doing a "wrong direction" i/o call */
 655	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
 656		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
 657		    !is_sync_kiocb(iocb)) {
 658			mutex_unlock(&epdata->lock);
 659			return -EINVAL;
 660		}
 661		DBG (epdata->dev, "%s halt\n", epdata->name);
 662		spin_lock_irq(&epdata->dev->lock);
 663		if (likely(epdata->ep != NULL))
 664			usb_ep_set_halt(epdata->ep);
 665		spin_unlock_irq(&epdata->dev->lock);
 666		mutex_unlock(&epdata->lock);
 667		return -EBADMSG;
 668	}
 669
 670	buf = kmalloc(len, GFP_KERNEL);
 671	if (unlikely(!buf)) {
 672		mutex_unlock(&epdata->lock);
 673		return -ENOMEM;
 674	}
 675
 676	if (unlikely(!copy_from_iter_full(buf, len, from))) {
 677		value = -EFAULT;
 678		goto out;
 679	}
 680
 681	if (unlikely(!configured)) {
 682		value = ep_config(epdata, buf, len);
 683	} else if (is_sync_kiocb(iocb)) {
 684		value = ep_io(epdata, buf, len);
 685	} else {
 686		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
 687		value = -ENOMEM;
 688		if (priv) {
 689			value = ep_aio(iocb, priv, epdata, buf, len);
 690			if (value == -EIOCBQUEUED)
 691				buf = NULL;
 692		}
 693	}
 694out:
 695	kfree(buf);
 696	mutex_unlock(&epdata->lock);
 697	return value;
 698}
 699
 700/*----------------------------------------------------------------------*/
 701
 702/* used after endpoint configuration */
 703static const struct file_operations ep_io_operations = {
 704	.owner =	THIS_MODULE,
 705
 706	.open =		ep_open,
 707	.release =	ep_release,
 708	.unlocked_ioctl = ep_ioctl,
 709	.read_iter =	ep_read_iter,
 710	.write_iter =	ep_write_iter,
 711};
 712
 713/* ENDPOINT INITIALIZATION
 714 *
 715 *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
 716 *     status = write (fd, descriptors, sizeof descriptors)
 717 *
 718 * That write establishes the endpoint configuration, configuring
 719 * the controller to process bulk, interrupt, or isochronous transfers
 720 * at the right maxpacket size, and so on.
 721 *
 722 * The descriptors are message type 1, identified by a host order u32
 723 * at the beginning of what's written.  Descriptor order is: full/low
 724 * speed descriptor, then optional high speed descriptor.
 725 */
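
/* A hedged sketch of such a write for a bulk IN endpoint (the address
 * and packet size are illustrative; the high speed descriptor is omitted):
 *
 *     struct usb_endpoint_descriptor fs = {
 *             .bLength          = USB_DT_ENDPOINT_SIZE,
 *             .bDescriptorType  = USB_DT_ENDPOINT,
 *             .bEndpointAddress = USB_DIR_IN | 1,
 *             .bmAttributes     = USB_ENDPOINT_XFER_BULK,
 *             .wMaxPacketSize   = __cpu_to_le16(64),
 *     };
 *     __u32 tag = 1;
 *     __u8 cfg[4 + USB_DT_ENDPOINT_SIZE];
 *
 *     memcpy(cfg, &tag, 4);
 *     memcpy(cfg + 4, &fs, USB_DT_ENDPOINT_SIZE);
 *     write(ep_fd, cfg, sizeof cfg);
 */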
 726static ssize_t
 727ep_config (struct ep_data *data, const char *buf, size_t len)
 728{
 729	struct usb_ep		*ep;
 730	u32			tag;
 731	int			value, length = len;
 732
 733	if (data->state != STATE_EP_READY) {
 734		value = -EL2HLT;
 735		goto fail;
 736	}
 737
 738	value = len;
 739	if (len < USB_DT_ENDPOINT_SIZE + 4)
 740		goto fail0;
 741
 742	/* we might need to change message format someday */
 743	memcpy(&tag, buf, 4);
 744	if (tag != 1) {
 745		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
 746		goto fail0;
 747	}
 748	buf += 4;
 749	len -= 4;
 750
 751	/* NOTE:  audio endpoint extensions not accepted here;
 752	 * just don't include the extra bytes.
 753	 */
 754
 755	/* full/low speed descriptor, then high speed */
 756	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
 757	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
 758			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
 759		goto fail0;
 760	if (len != USB_DT_ENDPOINT_SIZE) {
 761		if (len != 2 * USB_DT_ENDPOINT_SIZE)
 762			goto fail0;
 763		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
 764			USB_DT_ENDPOINT_SIZE);
 765		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
 766				|| data->hs_desc.bDescriptorType
 767					!= USB_DT_ENDPOINT) {
 768			DBG(data->dev, "config %s, bad hs length or type\n",
 769					data->name);
 770			goto fail0;
 771		}
 772	}
 773
 774	spin_lock_irq (&data->dev->lock);
 775	if (data->dev->state == STATE_DEV_UNBOUND) {
 776		value = -ENOENT;
 777		goto gone;
 778	} else {
 779		ep = data->ep;
 780		if (ep == NULL) {
 781			value = -ENODEV;
 782			goto gone;
 783		}
 784	}
 785	switch (data->dev->gadget->speed) {
 786	case USB_SPEED_LOW:
 787	case USB_SPEED_FULL:
 788		ep->desc = &data->desc;
 789		break;
 790	case USB_SPEED_HIGH:
 791		/* fails if caller didn't provide that descriptor... */
 792		ep->desc = &data->hs_desc;
 793		break;
 794	default:
 795		DBG(data->dev, "unconnected, %s init abandoned\n",
 796				data->name);
 797		value = -EINVAL;
 798		goto gone;
 799	}
 800	value = usb_ep_enable(ep);
 801	if (value == 0) {
 802		data->state = STATE_EP_ENABLED;
 803		value = length;
 804	}
 805gone:
 806	spin_unlock_irq (&data->dev->lock);
 807	if (value < 0) {
 808fail:
 809		data->desc.bDescriptorType = 0;
 810		data->hs_desc.bDescriptorType = 0;
 811	}
 812	return value;
 813fail0:
 814	value = -EINVAL;
 815	goto fail;
 816}
 817
 818static int
 819ep_open (struct inode *inode, struct file *fd)
 820{
 821	struct ep_data		*data = inode->i_private;
 822	int			value = -EBUSY;
 823
 824	if (mutex_lock_interruptible(&data->lock) != 0)
 825		return -EINTR;
 826	spin_lock_irq (&data->dev->lock);
 827	if (data->dev->state == STATE_DEV_UNBOUND)
 828		value = -ENOENT;
 829	else if (data->state == STATE_EP_DISABLED) {
 830		value = 0;
 831		data->state = STATE_EP_READY;
 832		get_ep (data);
 833		fd->private_data = data;
 834		VDEBUG (data->dev, "%s ready\n", data->name);
 835	} else
 836		DBG (data->dev, "%s state %d\n",
 837			data->name, data->state);
 838	spin_unlock_irq (&data->dev->lock);
 839	mutex_unlock(&data->lock);
 840	return value;
 841}
 842
 843/*----------------------------------------------------------------------*/
 844
 845/* EP0 IMPLEMENTATION can be partly in userspace.
 846 *
 847 * Drivers that use this facility receive various events, including
 848 * control requests the kernel doesn't handle.  Drivers that don't
 849 * use this facility may be too simple-minded for real applications.
 850 */
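
/* The resulting userspace event loop can be as small as this sketch
 * (handle_setup() is a hypothetical helper, not part of this API):
 *
 *     struct usb_gadgetfs_event ev;
 *
 *     while (read(ep0_fd, &ev, sizeof ev) == sizeof ev) {
 *             switch (ev.type) {
 *             case GADGETFS_SETUP:
 *                     handle_setup(ep0_fd, &ev.u.setup);
 *                     break;
 *             case GADGETFS_CONNECT:
 *             case GADGETFS_DISCONNECT:
 *             case GADGETFS_SUSPEND:
 *             default:
 *                     break;
 *             }
 *     }
 */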
 851
 852static inline void ep0_readable (struct dev_data *dev)
 853{
 854	wake_up (&dev->wait);
 855	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
 856}
 857
 858static void clean_req (struct usb_ep *ep, struct usb_request *req)
 859{
 860	struct dev_data		*dev = ep->driver_data;
 861
 862	if (req->buf != dev->rbuf) {
 863		kfree(req->buf);
 864		req->buf = dev->rbuf;
 865	}
 866	req->complete = epio_complete;
 867	dev->setup_out_ready = 0;
 868}
 869
 870static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
 871{
 872	struct dev_data		*dev = ep->driver_data;
 873	unsigned long		flags;
 874	int			free = 1;
 875
 876	/* for control OUT, data must still get to userspace */
 877	spin_lock_irqsave(&dev->lock, flags);
 878	if (!dev->setup_in) {
 879		dev->setup_out_error = (req->status != 0);
 880		if (!dev->setup_out_error)
 881			free = 0;
 882		dev->setup_out_ready = 1;
 883		ep0_readable (dev);
 884	}
 885
 886	/* clean up as appropriate */
 887	if (free && req->buf != &dev->rbuf)
 888		clean_req (ep, req);
 889	req->complete = epio_complete;
 890	spin_unlock_irqrestore(&dev->lock, flags);
 891}
 892
 893static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
 894{
 895	struct dev_data	*dev = ep->driver_data;
 896
 897	if (dev->setup_out_ready) {
 898		DBG (dev, "ep0 request busy!\n");
 899		return -EBUSY;
 900	}
 901	if (len > sizeof (dev->rbuf))
 902		req->buf = kmalloc(len, GFP_ATOMIC);
 903	if (req->buf == NULL) {
 904		req->buf = dev->rbuf;
 905		return -ENOMEM;
 906	}
 907	req->complete = ep0_complete;
 908	req->length = len;
 909	req->zero = 0;
 910	return 0;
 911}
 912
 913static ssize_t
 914ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
 915{
 916	struct dev_data			*dev = fd->private_data;
 917	ssize_t				retval;
 918	enum ep0_state			state;
 919
 920	spin_lock_irq (&dev->lock);
 921	if (dev->state <= STATE_DEV_OPENED) {
 922		retval = -EINVAL;
 923		goto done;
 924	}
 925
 926	/* report fd mode change before acting on it */
 927	if (dev->setup_abort) {
 928		dev->setup_abort = 0;
 929		retval = -EIDRM;
 930		goto done;
 931	}
 932
 933	/* control DATA stage */
 934	if ((state = dev->state) == STATE_DEV_SETUP) {
 935
 936		if (dev->setup_in) {		/* stall IN */
 937			VDEBUG(dev, "ep0in stall\n");
 938			(void) usb_ep_set_halt (dev->gadget->ep0);
 939			retval = -EL2HLT;
 940			dev->state = STATE_DEV_CONNECTED;
 941
 942		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
 943			struct usb_ep		*ep = dev->gadget->ep0;
 944			struct usb_request	*req = dev->req;
 945
 946			if ((retval = setup_req (ep, req, 0)) == 0) {
 947				++dev->udc_usage;
 948				spin_unlock_irq (&dev->lock);
 949				retval = usb_ep_queue (ep, req, GFP_KERNEL);
 950				spin_lock_irq (&dev->lock);
 951				--dev->udc_usage;
 952			}
 953			dev->state = STATE_DEV_CONNECTED;
 954
 955			/* assume that was SET_CONFIGURATION */
 956			if (dev->current_config) {
 957				unsigned power;
 958
 959				if (gadget_is_dualspeed(dev->gadget)
 960						&& (dev->gadget->speed
 961							== USB_SPEED_HIGH))
 962					power = dev->hs_config->bMaxPower;
 963				else
 964					power = dev->config->bMaxPower;
 965				usb_gadget_vbus_draw(dev->gadget, 2 * power);
 966			}
 967
 968		} else {			/* collect OUT data */
 969			if ((fd->f_flags & O_NONBLOCK) != 0
 970					&& !dev->setup_out_ready) {
 971				retval = -EAGAIN;
 972				goto done;
 973			}
 974			spin_unlock_irq (&dev->lock);
 975			retval = wait_event_interruptible (dev->wait,
 976					dev->setup_out_ready != 0);
 977
 978			/* FIXME state could change from under us */
 979			spin_lock_irq (&dev->lock);
 980			if (retval)
 981				goto done;
 982
 983			if (dev->state != STATE_DEV_SETUP) {
 984				retval = -ECANCELED;
 985				goto done;
 986			}
 987			dev->state = STATE_DEV_CONNECTED;
 988
 989			if (dev->setup_out_error)
 990				retval = -EIO;
 991			else {
 992				len = min (len, (size_t)dev->req->actual);
 993				++dev->udc_usage;
 994				spin_unlock_irq(&dev->lock);
 995				if (copy_to_user (buf, dev->req->buf, len))
 996					retval = -EFAULT;
 997				else
 998					retval = len;
 999				spin_lock_irq(&dev->lock);
1000				--dev->udc_usage;
1001				clean_req (dev->gadget->ep0, dev->req);
1002				/* NOTE userspace can't yet choose to stall */
1003			}
1004		}
1005		goto done;
1006	}
1007
1008	/* else normal: return event data */
1009	if (len < sizeof dev->event [0]) {
1010		retval = -EINVAL;
1011		goto done;
1012	}
1013	len -= len % sizeof (struct usb_gadgetfs_event);
1014	dev->usermode_setup = 1;
1015
1016scan:
1017	/* return queued events right away */
1018	if (dev->ev_next != 0) {
1019		unsigned		i, n;
1020
1021		n = len / sizeof (struct usb_gadgetfs_event);
1022		if (dev->ev_next < n)
1023			n = dev->ev_next;
1024
1025		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
1026		for (i = 0; i < n; i++) {
1027			if (dev->event [i].type == GADGETFS_SETUP) {
1028				dev->state = STATE_DEV_SETUP;
1029				n = i + 1;
1030				break;
1031			}
1032		}
1033		spin_unlock_irq (&dev->lock);
1034		len = n * sizeof (struct usb_gadgetfs_event);
1035		if (copy_to_user (buf, &dev->event, len))
1036			retval = -EFAULT;
1037		else
1038			retval = len;
1039		if (len > 0) {
1040			/* NOTE this doesn't guard against broken drivers;
1041			 * concurrent ep0 readers may lose events.
1042			 */
1043			spin_lock_irq (&dev->lock);
1044			if (dev->ev_next > n) {
1045				memmove(&dev->event[0], &dev->event[n],
1046					sizeof (struct usb_gadgetfs_event)
1047						* (dev->ev_next - n));
1048			}
1049			dev->ev_next -= n;
1050			spin_unlock_irq (&dev->lock);
1051		}
1052		return retval;
1053	}
1054	if (fd->f_flags & O_NONBLOCK) {
1055		retval = -EAGAIN;
1056		goto done;
1057	}
1058
1059	switch (state) {
1060	default:
1061		DBG (dev, "fail %s, state %d\n", __func__, state);
1062		retval = -ESRCH;
1063		break;
1064	case STATE_DEV_UNCONNECTED:
1065	case STATE_DEV_CONNECTED:
1066		spin_unlock_irq (&dev->lock);
1067		DBG (dev, "%s wait\n", __func__);
1068
1069		/* wait for events */
1070		retval = wait_event_interruptible (dev->wait,
1071				dev->ev_next != 0);
1072		if (retval < 0)
1073			return retval;
1074		spin_lock_irq (&dev->lock);
1075		goto scan;
1076	}
1077
1078done:
1079	spin_unlock_irq (&dev->lock);
1080	return retval;
1081}
1082
1083static struct usb_gadgetfs_event *
1084next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1085{
1086	struct usb_gadgetfs_event	*event;
1087	unsigned			i;
1088
1089	switch (type) {
1090	/* these events purge the queue */
1091	case GADGETFS_DISCONNECT:
1092		if (dev->state == STATE_DEV_SETUP)
1093			dev->setup_abort = 1;
1094		fallthrough;
1095	case GADGETFS_CONNECT:
1096		dev->ev_next = 0;
1097		break;
1098	case GADGETFS_SETUP:		/* previous request timed out */
1099	case GADGETFS_SUSPEND:		/* same effect */
1100		/* these events can't be repeated */
1101		for (i = 0; i != dev->ev_next; i++) {
1102			if (dev->event [i].type != type)
1103				continue;
1104			DBG(dev, "discard old event[%d] %d\n", i, type);
1105			dev->ev_next--;
1106			if (i == dev->ev_next)
1107				break;
1108			/* indices start at zero, for simplicity */
1109			memmove (&dev->event [i], &dev->event [i + 1],
1110				sizeof (struct usb_gadgetfs_event)
1111					* (dev->ev_next - i));
1112		}
1113		break;
1114	default:
1115		BUG ();
1116	}
1117	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1118	event = &dev->event [dev->ev_next++];
1119	BUG_ON (dev->ev_next > N_EVENT);
1120	memset (event, 0, sizeof *event);
1121	event->type = type;
1122	return event;
1123}
1124
1125static ssize_t
1126ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1127{
1128	struct dev_data		*dev = fd->private_data;
1129	ssize_t			retval = -ESRCH;
1130
1131	/* report fd mode change before acting on it */
1132	if (dev->setup_abort) {
1133		dev->setup_abort = 0;
1134		retval = -EIDRM;
1135
1136	/* data and/or status stage for control request */
1137	} else if (dev->state == STATE_DEV_SETUP) {
1138
1139		len = min_t(size_t, len, dev->setup_wLength);
1140		if (dev->setup_in) {
1141			retval = setup_req (dev->gadget->ep0, dev->req, len);
1142			if (retval == 0) {
1143				dev->state = STATE_DEV_CONNECTED;
1144				++dev->udc_usage;
1145				spin_unlock_irq (&dev->lock);
1146				if (copy_from_user (dev->req->buf, buf, len))
1147					retval = -EFAULT;
1148				else {
1149					if (len < dev->setup_wLength)
1150						dev->req->zero = 1;
1151					retval = usb_ep_queue (
1152						dev->gadget->ep0, dev->req,
1153						GFP_KERNEL);
1154				}
1155				spin_lock_irq(&dev->lock);
1156				--dev->udc_usage;
1157				if (retval < 0) {
1158					clean_req (dev->gadget->ep0, dev->req);
1159				} else
1160					retval = len;
1161
1162				return retval;
1163			}
1164
1165		/* can stall some OUT transfers */
1166		} else if (dev->setup_can_stall) {
1167			VDEBUG(dev, "ep0out stall\n");
1168			(void) usb_ep_set_halt (dev->gadget->ep0);
1169			retval = -EL2HLT;
1170			dev->state = STATE_DEV_CONNECTED;
1171		} else {
1172			DBG(dev, "bogus ep0out stall!\n");
1173		}
1174	} else
1175		DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1176
1177	return retval;
1178}
1179
1180static int
1181ep0_fasync (int f, struct file *fd, int on)
1182{
1183	struct dev_data		*dev = fd->private_data;
1184	// caller must F_SETOWN before signal delivery happens
1185	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1186	return fasync_helper (f, fd, on, &dev->fasync);
1187}
1188
1189static struct usb_gadget_driver gadgetfs_driver;
1190
1191static int
1192dev_release (struct inode *inode, struct file *fd)
1193{
1194	struct dev_data		*dev = fd->private_data;
1195
1196	/* closing ep0 === shutdown all */
1197
1198	if (dev->gadget_registered) {
1199		usb_gadget_unregister_driver (&gadgetfs_driver);
1200		dev->gadget_registered = false;
1201	}
1202
1203	/* at this point "good" hardware has disconnected the
1204	 * device from USB; the host won't see it any more.
1205	 * alternatively, all host requests will time out.
1206	 */
1207
1208	kfree (dev->buf);
1209	dev->buf = NULL;
1210
1211	/* other endpoints were all decoupled from this device */
1212	spin_lock_irq(&dev->lock);
1213	dev->state = STATE_DEV_DISABLED;
1214	spin_unlock_irq(&dev->lock);
1215
1216	put_dev (dev);
1217	return 0;
1218}
1219
1220static __poll_t
1221ep0_poll (struct file *fd, poll_table *wait)
1222{
1223	struct dev_data         *dev = fd->private_data;
1224	__poll_t                mask = 0;
1225
1226	if (dev->state <= STATE_DEV_OPENED)
1227		return DEFAULT_POLLMASK;
1228
1229	poll_wait(fd, &dev->wait, wait);
1230
1231	spin_lock_irq(&dev->lock);
1232
1233	/* report fd mode change before acting on it */
1234	if (dev->setup_abort) {
1235		dev->setup_abort = 0;
1236		mask = EPOLLHUP;
1237		goto out;
1238	}
1239
1240	if (dev->state == STATE_DEV_SETUP) {
1241		if (dev->setup_in || dev->setup_can_stall)
1242			mask = EPOLLOUT;
1243	} else {
1244		if (dev->ev_next != 0)
1245			mask = EPOLLIN;
1246	}
1247out:
1248	spin_unlock_irq(&dev->lock);
1249	return mask;
1250}
1251
1252static long gadget_dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1253{
1254	struct dev_data		*dev = fd->private_data;
1255	struct usb_gadget	*gadget = dev->gadget;
1256	long ret = -ENOTTY;
1257
1258	spin_lock_irq(&dev->lock);
1259	if (dev->state == STATE_DEV_OPENED ||
1260			dev->state == STATE_DEV_UNBOUND) {
1261		/* Not bound to a UDC */
1262	} else if (gadget->ops->ioctl) {
1263		++dev->udc_usage;
1264		spin_unlock_irq(&dev->lock);
1265
1266		ret = gadget->ops->ioctl (gadget, code, value);
1267
1268		spin_lock_irq(&dev->lock);
1269		--dev->udc_usage;
1270	}
1271	spin_unlock_irq(&dev->lock);
1272
1273	return ret;
1274}
1275
1276/*----------------------------------------------------------------------*/
1277
1278/* The in-kernel gadget driver handles most ep0 issues, in particular
1279 * enumerating the single configuration (as provided from user space).
1280 *
1281 * Unrecognized ep0 requests may be handled in user space.
1282 */
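
/* What "handled in user space" can look like, as a non-authoritative
 * sketch of answering one delegated SETUP (reply, reply_len and out_buf
 * are made up; wLength is little-endian on the wire, shown unconverted
 * for brevity):
 *
 *     struct usb_ctrlrequest *setup = &ev.u.setup;
 *
 *     if (setup->bRequestType & USB_DIR_IN)
 *             write(ep0_fd, reply, reply_len);       // IN data stage
 *     else if (setup->wLength)
 *             read(ep0_fd, out_buf, setup->wLength); // OUT data stage
 *     else
 *             read(ep0_fd, NULL, 0);                 // ack status stage
 */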
1283
1284static void make_qualifier (struct dev_data *dev)
1285{
1286	struct usb_qualifier_descriptor		qual;
1287	struct usb_device_descriptor		*desc;
1288
1289	qual.bLength = sizeof qual;
1290	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1291	qual.bcdUSB = cpu_to_le16 (0x0200);
1292
1293	desc = dev->dev;
1294	qual.bDeviceClass = desc->bDeviceClass;
1295	qual.bDeviceSubClass = desc->bDeviceSubClass;
1296	qual.bDeviceProtocol = desc->bDeviceProtocol;
1297
1298	/* assumes ep0 uses the same value for both speeds ... */
1299	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1300
1301	qual.bNumConfigurations = 1;
1302	qual.bRESERVED = 0;
1303
1304	memcpy (dev->rbuf, &qual, sizeof qual);
1305}
1306
1307static int
1308config_buf (struct dev_data *dev, u8 type, unsigned index)
1309{
1310	int		len;
1311	int		hs = 0;
1312
1313	/* only one configuration */
1314	if (index > 0)
1315		return -EINVAL;
1316
1317	if (gadget_is_dualspeed(dev->gadget)) {
1318		hs = (dev->gadget->speed == USB_SPEED_HIGH);
1319		if (type == USB_DT_OTHER_SPEED_CONFIG)
1320			hs = !hs;
1321	}
1322	if (hs) {
1323		dev->req->buf = dev->hs_config;
1324		len = le16_to_cpu(dev->hs_config->wTotalLength);
1325	} else {
1326		dev->req->buf = dev->config;
1327		len = le16_to_cpu(dev->config->wTotalLength);
1328	}
1329	((u8 *)dev->req->buf) [1] = type;
1330	return len;
1331}
1332
1333static int
1334gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1335{
1336	struct dev_data			*dev = get_gadget_data (gadget);
1337	struct usb_request		*req = dev->req;
1338	int				value = -EOPNOTSUPP;
1339	struct usb_gadgetfs_event	*event;
1340	u16				w_value = le16_to_cpu(ctrl->wValue);
1341	u16				w_length = le16_to_cpu(ctrl->wLength);
1342
1343	if (w_length > RBUF_SIZE) {
1344		if (ctrl->bRequestType & USB_DIR_IN) {
1345			/* Cast away the const, we are going to overwrite on purpose. */
1346			__le16 *temp = (__le16 *)&ctrl->wLength;
1347
1348			*temp = cpu_to_le16(RBUF_SIZE);
1349			w_length = RBUF_SIZE;
1350		} else {
1351			return value;
1352		}
1353	}
1354
1355	spin_lock (&dev->lock);
1356	dev->setup_abort = 0;
1357	if (dev->state == STATE_DEV_UNCONNECTED) {
1358		if (gadget_is_dualspeed(gadget)
1359				&& gadget->speed == USB_SPEED_HIGH
1360				&& dev->hs_config == NULL) {
1361			spin_unlock(&dev->lock);
1362			ERROR (dev, "no high speed config??\n");
1363			return -EINVAL;
1364		}
1365
1366		dev->state = STATE_DEV_CONNECTED;
1367
1368		INFO (dev, "connected\n");
1369		event = next_event (dev, GADGETFS_CONNECT);
1370		event->u.speed = gadget->speed;
1371		ep0_readable (dev);
1372
1373	/* host may have given up waiting for response.  we can miss control
1374	 * requests handled lower down (device/endpoint status and features);
1375	 * then ep0_{read,write} will report the wrong status. controller
1376	 * driver will have aborted pending i/o.
1377	 */
1378	} else if (dev->state == STATE_DEV_SETUP)
1379		dev->setup_abort = 1;
1380
1381	req->buf = dev->rbuf;
1382	req->context = NULL;
1383	switch (ctrl->bRequest) {
1384
1385	case USB_REQ_GET_DESCRIPTOR:
1386		if (ctrl->bRequestType != USB_DIR_IN)
1387			goto unrecognized;
1388		switch (w_value >> 8) {
1389
1390		case USB_DT_DEVICE:
1391			value = min (w_length, (u16) sizeof *dev->dev);
1392			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1393			req->buf = dev->dev;
1394			break;
1395		case USB_DT_DEVICE_QUALIFIER:
1396			if (!dev->hs_config)
1397				break;
1398			value = min (w_length, (u16)
1399				sizeof (struct usb_qualifier_descriptor));
1400			make_qualifier (dev);
1401			break;
1402		case USB_DT_OTHER_SPEED_CONFIG:
1403		case USB_DT_CONFIG:
1404			value = config_buf (dev,
1405					w_value >> 8,
1406					w_value & 0xff);
1407			if (value >= 0)
1408				value = min (w_length, (u16) value);
1409			break;
1410		case USB_DT_STRING:
1411			goto unrecognized;
1412
1413		default:		// all others are errors
1414			break;
1415		}
1416		break;
1417
1418	/* currently one config, two speeds */
1419	case USB_REQ_SET_CONFIGURATION:
1420		if (ctrl->bRequestType != 0)
1421			goto unrecognized;
1422		if (0 == (u8) w_value) {
1423			value = 0;
1424			dev->current_config = 0;
1425			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1426			// user mode expected to disable endpoints
1427		} else {
1428			u8	config, power;
1429
1430			if (gadget_is_dualspeed(gadget)
1431					&& gadget->speed == USB_SPEED_HIGH) {
1432				config = dev->hs_config->bConfigurationValue;
1433				power = dev->hs_config->bMaxPower;
1434			} else {
1435				config = dev->config->bConfigurationValue;
1436				power = dev->config->bMaxPower;
1437			}
1438
1439			if (config == (u8) w_value) {
1440				value = 0;
1441				dev->current_config = config;
1442				usb_gadget_vbus_draw(gadget, 2 * power);
1443			}
1444		}
1445
1446		/* report SET_CONFIGURATION like any other control request,
1447		 * except that usermode may not stall this.  the next
1448		 * request mustn't be allowed to start until this finishes:
1449		 * endpoints and threads set up, etc.
1450		 *
1451		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
1452		 * has bad/racy automagic that prevents synchronizing here.
1453		 * even kernel mode drivers often miss them.
1454		 */
1455		if (value == 0) {
1456			INFO (dev, "configuration #%d\n", dev->current_config);
1457			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1458			if (dev->usermode_setup) {
1459				dev->setup_can_stall = 0;
1460				goto delegate;
1461			}
1462		}
1463		break;
1464
1465#ifndef	CONFIG_USB_PXA25X
1466	/* PXA automagically handles this request too */
1467	case USB_REQ_GET_CONFIGURATION:
1468		if (ctrl->bRequestType != 0x80)
1469			goto unrecognized;
1470		*(u8 *)req->buf = dev->current_config;
1471		value = min (w_length, (u16) 1);
1472		break;
1473#endif
1474
1475	default:
1476unrecognized:
1477		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1478			dev->usermode_setup ? "delegate" : "fail",
1479			ctrl->bRequestType, ctrl->bRequest,
1480			w_value, le16_to_cpu(ctrl->wIndex), w_length);
1481
1482		/* if there's an ep0 reader, don't stall */
1483		if (dev->usermode_setup) {
1484			dev->setup_can_stall = 1;
1485delegate:
1486			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1487						? 1 : 0;
1488			dev->setup_wLength = w_length;
1489			dev->setup_out_ready = 0;
1490			dev->setup_out_error = 0;
1491
1492			/* read DATA stage for OUT right away */
1493			if (unlikely (!dev->setup_in && w_length)) {
1494				value = setup_req (gadget->ep0, dev->req,
1495							w_length);
1496				if (value < 0)
1497					break;
1498
1499				++dev->udc_usage;
1500				spin_unlock (&dev->lock);
1501				value = usb_ep_queue (gadget->ep0, dev->req,
1502							GFP_KERNEL);
1503				spin_lock (&dev->lock);
1504				--dev->udc_usage;
1505				if (value < 0) {
1506					clean_req (gadget->ep0, dev->req);
1507					break;
1508				}
1509
1510				/* we can't currently stall these */
1511				dev->setup_can_stall = 0;
1512			}
1513
1514			/* state changes when reader collects event */
1515			event = next_event (dev, GADGETFS_SETUP);
1516			event->u.setup = *ctrl;
1517			ep0_readable (dev);
1518			spin_unlock (&dev->lock);
1519			/*
1520			 * Return USB_GADGET_DELAYED_STATUS as a workaround to
1521			 * stop some UDC drivers (e.g. dwc3) from automatically
1522			 * proceeding with the status stage for 0-length
1523			 * transfers.
1524			 * Should be removed once all UDC drivers are fixed to
1525			 * always delay the status stage until a response is
1526			 * queued to EP0.
1527			 */
1528			return w_length == 0 ? USB_GADGET_DELAYED_STATUS : 0;
1529		}
1530	}
1531
1532	/* proceed with data transfer and status phases? */
1533	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1534		req->length = value;
1535		req->zero = value < w_length;
1536
1537		++dev->udc_usage;
1538		spin_unlock (&dev->lock);
1539		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1540		spin_lock(&dev->lock);
1541		--dev->udc_usage;
1542		spin_unlock(&dev->lock);
1543		if (value < 0) {
1544			DBG (dev, "ep_queue --> %d\n", value);
1545			req->status = 0;
1546		}
1547		return value;
1548	}
1549
1550	/* device stalls when value < 0 */
1551	spin_unlock (&dev->lock);
1552	return value;
1553}
1554
1555static void destroy_ep_files (struct dev_data *dev)
1556{
1557	DBG (dev, "%s %d\n", __func__, dev->state);
1558
1559	/* dev->state must prevent interference */
1560	spin_lock_irq (&dev->lock);
1561	while (!list_empty(&dev->epfiles)) {
1562		struct ep_data	*ep;
1563		struct inode	*parent;
1564		struct dentry	*dentry;
1565
1566		/* break link to FS */
1567		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1568		list_del_init (&ep->epfiles);
1569		spin_unlock_irq (&dev->lock);
1570
1571		dentry = ep->dentry;
1572		ep->dentry = NULL;
1573		parent = d_inode(dentry->d_parent);
1574
1575		/* break link to controller */
1576		mutex_lock(&ep->lock);
1577		if (ep->state == STATE_EP_ENABLED)
1578			(void) usb_ep_disable (ep->ep);
1579		ep->state = STATE_EP_UNBOUND;
1580		usb_ep_free_request (ep->ep, ep->req);
1581		ep->ep = NULL;
1582		mutex_unlock(&ep->lock);
1583
1584		wake_up (&ep->wait);
1585		put_ep (ep);
1586
1587		/* break link to dcache */
1588		inode_lock(parent);
1589		d_delete (dentry);
1590		dput (dentry);
1591		inode_unlock(parent);
1592
1593		spin_lock_irq (&dev->lock);
1594	}
1595	spin_unlock_irq (&dev->lock);
1596}
1597
1598
1599static struct dentry *
1600gadgetfs_create_file (struct super_block *sb, char const *name,
1601		void *data, const struct file_operations *fops);
1602
1603static int activate_ep_files (struct dev_data *dev)
1604{
1605	struct usb_ep	*ep;
1606	struct ep_data	*data;
1607
1608	gadget_for_each_ep (ep, dev->gadget) {
1609
1610		data = kzalloc(sizeof(*data), GFP_KERNEL);
1611		if (!data)
1612			goto enomem0;
1613		data->state = STATE_EP_DISABLED;
1614		mutex_init(&data->lock);
1615		init_waitqueue_head (&data->wait);
1616
1617		strncpy (data->name, ep->name, sizeof (data->name) - 1);
1618		refcount_set (&data->count, 1);
1619		data->dev = dev;
1620		get_dev (dev);
1621
1622		data->ep = ep;
1623		ep->driver_data = data;
1624
1625		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1626		if (!data->req)
1627			goto enomem1;
1628
1629		data->dentry = gadgetfs_create_file (dev->sb, data->name,
1630				data, &ep_io_operations);
1631		if (!data->dentry)
1632			goto enomem2;
1633		list_add_tail (&data->epfiles, &dev->epfiles);
1634	}
1635	return 0;
1636
1637enomem2:
1638	usb_ep_free_request (ep, data->req);
1639enomem1:
1640	put_dev (dev);
1641	kfree (data);
1642enomem0:
1643	DBG (dev, "%s enomem\n", __func__);
1644	destroy_ep_files (dev);
1645	return -ENOMEM;
1646}
1647
1648static void
1649gadgetfs_unbind (struct usb_gadget *gadget)
1650{
1651	struct dev_data		*dev = get_gadget_data (gadget);
1652
1653	DBG (dev, "%s\n", __func__);
1654
1655	spin_lock_irq (&dev->lock);
1656	dev->state = STATE_DEV_UNBOUND;
1657	while (dev->udc_usage > 0) {
1658		spin_unlock_irq(&dev->lock);
1659		usleep_range(1000, 2000);
1660		spin_lock_irq(&dev->lock);
1661	}
1662	spin_unlock_irq (&dev->lock);
1663
1664	destroy_ep_files (dev);
1665	gadget->ep0->driver_data = NULL;
1666	set_gadget_data (gadget, NULL);
1667
1668	/* we've already been disconnected ... no i/o is active */
1669	if (dev->req)
1670		usb_ep_free_request (gadget->ep0, dev->req);
1671	DBG (dev, "%s done\n", __func__);
1672	put_dev (dev);
1673}
1674
1675static struct dev_data		*the_device;
1676
1677static int gadgetfs_bind(struct usb_gadget *gadget,
1678		struct usb_gadget_driver *driver)
1679{
1680	struct dev_data		*dev = the_device;
1681
1682	if (!dev)
1683		return -ESRCH;
1684	if (0 != strcmp (CHIP, gadget->name)) {
1685		pr_err("%s expected %s controller not %s\n",
1686			shortname, CHIP, gadget->name);
1687		return -ENODEV;
1688	}
1689
1690	set_gadget_data (gadget, dev);
1691	dev->gadget = gadget;
1692	gadget->ep0->driver_data = dev;
1693
1694	/* preallocate control response and buffer */
1695	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1696	if (!dev->req)
1697		goto enomem;
1698	dev->req->context = NULL;
1699	dev->req->complete = epio_complete;
1700
1701	if (activate_ep_files (dev) < 0)
1702		goto enomem;
1703
1704	INFO (dev, "bound to %s driver\n", gadget->name);
1705	spin_lock_irq(&dev->lock);
1706	dev->state = STATE_DEV_UNCONNECTED;
1707	spin_unlock_irq(&dev->lock);
1708	get_dev (dev);
1709	return 0;
1710
1711enomem:
1712	gadgetfs_unbind (gadget);
1713	return -ENOMEM;
1714}
1715
1716static void
1717gadgetfs_disconnect (struct usb_gadget *gadget)
1718{
1719	struct dev_data		*dev = get_gadget_data (gadget);
1720	unsigned long		flags;
1721
1722	spin_lock_irqsave (&dev->lock, flags);
1723	if (dev->state == STATE_DEV_UNCONNECTED)
1724		goto exit;
1725	dev->state = STATE_DEV_UNCONNECTED;
1726
1727	INFO (dev, "disconnected\n");
1728	next_event (dev, GADGETFS_DISCONNECT);
1729	ep0_readable (dev);
1730exit:
1731	spin_unlock_irqrestore (&dev->lock, flags);
1732}
1733
1734static void
1735gadgetfs_suspend (struct usb_gadget *gadget)
1736{
1737	struct dev_data		*dev = get_gadget_data (gadget);
1738	unsigned long		flags;
1739
1740	INFO (dev, "suspended from state %d\n", dev->state);
1741	spin_lock_irqsave(&dev->lock, flags);
1742	switch (dev->state) {
1743	case STATE_DEV_SETUP:		// VERY odd... host died??
1744	case STATE_DEV_CONNECTED:
1745	case STATE_DEV_UNCONNECTED:
1746		next_event (dev, GADGETFS_SUSPEND);
1747		ep0_readable (dev);
1748		fallthrough;
1749	default:
1750		break;
1751	}
1752	spin_unlock_irqrestore(&dev->lock, flags);
1753}
1754
1755static struct usb_gadget_driver gadgetfs_driver = {
1756	.function	= (char *) driver_desc,
1757	.bind		= gadgetfs_bind,
1758	.unbind		= gadgetfs_unbind,
1759	.setup		= gadgetfs_setup,
1760	.reset		= gadgetfs_disconnect,
1761	.disconnect	= gadgetfs_disconnect,
1762	.suspend	= gadgetfs_suspend,
1763
1764	.driver	= {
1765		.name		= shortname,
1766	},
1767};
1768
1769/*----------------------------------------------------------------------*/
1770/* DEVICE INITIALIZATION
1771 *
1772 *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
1773 *     status = write (fd, descriptors, sizeof descriptors)
1774 *
1775 * That write establishes the device configuration, so the kernel can
1776 * bind to the controller ... guaranteeing it can handle enumeration
1777 * at all necessary speeds.  Descriptor order is:
1778 *
1779 * . message tag (u32, host order) ... for now, must be zero; it
1780 *	would change to support features like multi-config devices
1781 * . full/low speed config ... all wTotalLength bytes (with interface,
1782 *	class, altsetting, endpoint, and other descriptors)
1783 * . high speed config ... all descriptors, for high speed operation;
1784 *	this one's optional except for high-speed hardware
1785 * . device descriptor
1786 *
1787 * Endpoints are not yet enabled. Drivers must wait until device
1788 * configuration and interface altsetting changes create
1789 * the need to configure (or unconfigure) them.
1790 *
1791 * After initialization, the device stays active for as long as that
1792 * $CHIP file is open.  Events must then be read from that descriptor,
1793 * such as configuration notifications.
1794 */
1795
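/* A minimal user-space sketch of this initialization step, for reference
 * only.  The buffer size, the "net2280" controller name, the omission of
 * a high speed config, and the handle_setup() helper are assumptions for
 * the example, not something this driver requires:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	int configure_device(const struct usb_config_descriptor *config,
 *			size_t config_len,
 *			const struct usb_device_descriptor *device)
 *	{
 *		uint8_t buf[4096];
 *		uint32_t tag = 0;		// must be zero for now
 *		size_t len = 0;
 *		int fd = open("/dev/gadget/net2280", O_RDWR);	// named after $CHIP
 *
 *		if (fd < 0)
 *			return -1;
 *		if (4 + config_len + USB_DT_DEVICE_SIZE > sizeof buf)
 *			return -1;
 *		memcpy(buf + len, &tag, 4);	// message tag
 *		len += 4;
 *		memcpy(buf + len, config, config_len);	// full/low speed config
 *		len += config_len;
 *		// an optional high speed config would go here
 *		memcpy(buf + len, device, USB_DT_DEVICE_SIZE);
 *		len += USB_DT_DEVICE_SIZE;	// device descriptor, last
 *		if (write(fd, buf, len) != (ssize_t) len) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;	// keep it open; events arrive on this fd
 *	}
 *
 * and, once configured, the same file descriptor reports events:
 *
 *	struct usb_gadgetfs_event event;
 *
 *	while (read(fd, &event, sizeof event) == sizeof event) {
 *		if (event.type == GADGETFS_SETUP)
 *			handle_setup(fd, &event.u.setup);	// user-supplied, hypothetical
 *	}
 */
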
1796static int is_valid_config(struct usb_config_descriptor *config,
1797		unsigned int total)
1798{
1799	return config->bDescriptorType == USB_DT_CONFIG
1800		&& config->bLength == USB_DT_CONFIG_SIZE
1801		&& total >= USB_DT_CONFIG_SIZE
1802		&& config->bConfigurationValue != 0
1803		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1804		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1805	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1806	/* FIXME check lengths: walk to end */
1807}
1808
1809static ssize_t
1810dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1811{
1812	struct dev_data		*dev = fd->private_data;
1813	ssize_t			value, length = len;
1814	unsigned		total;
1815	u32			tag;
1816	char			*kbuf;
1817
1818	spin_lock_irq(&dev->lock);
1819	if (dev->state > STATE_DEV_OPENED) {
1820		value = ep0_write(fd, buf, len, ptr);
1821		spin_unlock_irq(&dev->lock);
1822		return value;
1823	}
1824	spin_unlock_irq(&dev->lock);
1825
1826	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1827	    (len > PAGE_SIZE * 4))
1828		return -EINVAL;
1829
1830	/* we might need to change message format someday */
1831	if (copy_from_user (&tag, buf, 4))
1832		return -EFAULT;
1833	if (tag != 0)
1834		return -EINVAL;
1835	buf += 4;
1836	length -= 4;
1837
1838	kbuf = memdup_user(buf, length);
1839	if (IS_ERR(kbuf))
1840		return PTR_ERR(kbuf);
1841
1842	spin_lock_irq (&dev->lock);
1843	value = -EINVAL;
1844	if (dev->buf) {
1845		spin_unlock_irq(&dev->lock);
1846		kfree(kbuf);
1847		return value;
1848	}
1849	dev->buf = kbuf;
1850
1851	/* full or low speed config */
1852	dev->config = (void *) kbuf;
1853	total = le16_to_cpu(dev->config->wTotalLength);
1854	if (!is_valid_config(dev->config, total) ||
1855			total > length - USB_DT_DEVICE_SIZE)
1856		goto fail;
1857	kbuf += total;
1858	length -= total;
1859
1860	/* optional high speed config */
1861	if (kbuf [1] == USB_DT_CONFIG) {
1862		dev->hs_config = (void *) kbuf;
1863		total = le16_to_cpu(dev->hs_config->wTotalLength);
1864		if (!is_valid_config(dev->hs_config, total) ||
1865				total > length - USB_DT_DEVICE_SIZE)
1866			goto fail;
1867		kbuf += total;
1868		length -= total;
1869	} else {
1870		dev->hs_config = NULL;
1871	}
1872
1873	/* could support multiple configs, using another encoding! */
1874
1875	/* device descriptor (tweaked for paranoia) */
1876	if (length != USB_DT_DEVICE_SIZE)
1877		goto fail;
1878	dev->dev = (void *)kbuf;
1879	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1880			|| dev->dev->bDescriptorType != USB_DT_DEVICE
1881			|| dev->dev->bNumConfigurations != 1)
1882		goto fail;
1883	dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1884
1885	/* triggers gadgetfs_bind(); then we can enumerate. */
1886	spin_unlock_irq (&dev->lock);
1887	if (dev->hs_config)
1888		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1889	else
1890		gadgetfs_driver.max_speed = USB_SPEED_FULL;
1891
1892	value = usb_gadget_register_driver(&gadgetfs_driver);
1893	if (value != 0) {
1894		spin_lock_irq(&dev->lock);
1895		goto fail;
1896	} else {
1897		/* at this point "good" hardware has for the first time
1898		 * let the USB host see us.  alternatively, if users
1899		 * unplug/replug that will clear all the error state.
1900		 *
1901		 * note:  everything running before here was guaranteed
1902		 * to choke driver model style diagnostics.  from here
1903		 * on, they can work ... except in cleanup paths that
1904		 * kick in after the ep0 descriptor is closed.
1905		 */
1906		value = len;
1907		dev->gadget_registered = true;
1908	}
1909	return value;
1910
1911fail:
1912	dev->config = NULL;
1913	dev->hs_config = NULL;
1914	dev->dev = NULL;
1915	spin_unlock_irq (&dev->lock);
1916	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1917	kfree (dev->buf);
1918	dev->buf = NULL;
1919	return value;
1920}
1921
1922static int
1923gadget_dev_open (struct inode *inode, struct file *fd)
1924{
1925	struct dev_data		*dev = inode->i_private;
1926	int			value = -EBUSY;
1927
1928	spin_lock_irq(&dev->lock);
1929	if (dev->state == STATE_DEV_DISABLED) {
1930		dev->ev_next = 0;
1931		dev->state = STATE_DEV_OPENED;
1932		fd->private_data = dev;
1933		get_dev (dev);
1934		value = 0;
1935	}
1936	spin_unlock_irq(&dev->lock);
1937	return value;
1938}
1939
1940static const struct file_operations ep0_operations = {
1941
1942	.open =		gadget_dev_open,
1943	.read =		ep0_read,
1944	.write =	dev_config,
1945	.fasync =	ep0_fasync,
1946	.poll =		ep0_poll,
1947	.unlocked_ioctl = gadget_dev_ioctl,
1948	.release =	dev_release,
1949};
1950
1951/*----------------------------------------------------------------------*/
1952
1953/* FILESYSTEM AND SUPERBLOCK OPERATIONS
1954 *
1955 * Mounting the filesystem creates a controller file, used first for
1956 * device configuration then later for event monitoring.
1957 */
1958
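/* In user space that typically looks like the sketch below (the mount
 * point and the "net2280" controller name are assumptions, not something
 * fixed by this driver):
 *
 *	#include <sys/mount.h>
 *	#include <fcntl.h>
 *
 *	int open_controller(void)
 *	{
 *		// same effect as "mount -t gadgetfs gadgetfs /dev/gadget"
 *		if (mount("gadgetfs", "/dev/gadget", "gadgetfs", 0, NULL) < 0)
 *			return -1;
 *		// the one file created is named after the UDC controller
 *		return open("/dev/gadget/net2280", O_RDWR);
 *	}
 */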
1959
1960/* FIXME PAM etc could set this security policy without mount options
1961 * if epfiles inherited ownership and permissions from ep0 ...
1962 */
1963
1964static unsigned default_uid;
1965static unsigned default_gid;
1966static unsigned default_perm = S_IRUSR | S_IWUSR;
1967
1968module_param (default_uid, uint, 0644);
1969module_param (default_gid, uint, 0644);
1970module_param (default_perm, uint, 0644);
1971
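/* Being module parameters, these can be set at load time, for example
 * "modprobe gadgetfs default_uid=1000 default_gid=1000 default_perm=0660"
 * (values shown are only illustrative), or changed later through
 * /sys/module/gadgetfs/parameters/; changes only affect files created
 * afterwards.
 */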
1972
1973static struct inode *
1974gadgetfs_make_inode (struct super_block *sb,
1975		void *data, const struct file_operations *fops,
1976		int mode)
1977{
1978	struct inode *inode = new_inode (sb);
1979
1980	if (inode) {
1981		inode->i_ino = get_next_ino();
1982		inode->i_mode = mode;
1983		inode->i_uid = make_kuid(&init_user_ns, default_uid);
1984		inode->i_gid = make_kgid(&init_user_ns, default_gid);
1985		simple_inode_init_ts(inode);
1986		inode->i_private = data;
1987		inode->i_fop = fops;
1988	}
1989	return inode;
1990}
1991
1992/* creates in the fs root directory, so files are non-renamable and
1993 * non-linkable; inode and dentry stay paired until device reconfig.
1994 */
1995static struct dentry *
1996gadgetfs_create_file (struct super_block *sb, char const *name,
1997		void *data, const struct file_operations *fops)
1998{
1999	struct dentry	*dentry;
2000	struct inode	*inode;
2001
2002	dentry = d_alloc_name(sb->s_root, name);
2003	if (!dentry)
2004		return NULL;
2005
2006	inode = gadgetfs_make_inode (sb, data, fops,
2007			S_IFREG | (default_perm & S_IRWXUGO));
2008	if (!inode) {
2009		dput(dentry);
2010		return NULL;
2011	}
2012	d_add (dentry, inode);
2013	return dentry;
2014}
2015
2016static const struct super_operations gadget_fs_operations = {
2017	.statfs =	simple_statfs,
2018	.drop_inode =	generic_delete_inode,
2019};
2020
2021static int
2022gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
2023{
2024	struct inode	*inode;
2025	struct dev_data	*dev;
2026	int		rc;
2027
2028	mutex_lock(&sb_mutex);
2029
2030	if (the_device) {
2031		rc = -ESRCH;
2032		goto Done;
2033	}
2034
2035	CHIP = usb_get_gadget_udc_name();
2036	if (!CHIP) {
2037		rc = -ENODEV;
2038		goto Done;
2039	}
2040
2041	/* superblock */
2042	sb->s_blocksize = PAGE_SIZE;
2043	sb->s_blocksize_bits = PAGE_SHIFT;
2044	sb->s_magic = GADGETFS_MAGIC;
2045	sb->s_op = &gadget_fs_operations;
2046	sb->s_time_gran = 1;
2047
2048	/* root inode */
2049	inode = gadgetfs_make_inode (sb,
2050			NULL, &simple_dir_operations,
2051			S_IFDIR | S_IRUGO | S_IXUGO);
2052	if (!inode)
2053		goto Enomem;
2054	inode->i_op = &simple_dir_inode_operations;
2055	if (!(sb->s_root = d_make_root (inode)))
2056		goto Enomem;
2057
2058	/* the ep0 file is named after the controller we expect;
2059	 * user mode code can use it for sanity checks, like we do.
2060	 */
2061	dev = dev_new ();
2062	if (!dev)
2063		goto Enomem;
2064
2065	dev->sb = sb;
2066	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2067	if (!dev->dentry) {
2068		put_dev(dev);
2069		goto Enomem;
2070	}
2071
2072	/* other endpoint files are available after hardware setup,
2073	 * from binding to a controller.
2074	 */
2075	the_device = dev;
2076	rc = 0;
2077	goto Done;
2078
2079 Enomem:
2080	kfree(CHIP);
2081	CHIP = NULL;
2082	rc = -ENOMEM;
2083
2084 Done:
2085	mutex_unlock(&sb_mutex);
2086	return rc;
2087}
2088
2089/* "mount -t gadgetfs path /dev/gadget" ends up here */
2090static int gadgetfs_get_tree(struct fs_context *fc)
2091{
2092	return get_tree_single(fc, gadgetfs_fill_super);
2093}
2094
2095static const struct fs_context_operations gadgetfs_context_ops = {
2096	.get_tree	= gadgetfs_get_tree,
2097};
2098
2099static int gadgetfs_init_fs_context(struct fs_context *fc)
2100{
2101	fc->ops = &gadgetfs_context_ops;
2102	return 0;
2103}
2104
2105static void
2106gadgetfs_kill_sb (struct super_block *sb)
2107{
2108	mutex_lock(&sb_mutex);
2109	kill_litter_super (sb);
2110	if (the_device) {
2111		put_dev (the_device);
2112		the_device = NULL;
2113	}
2114	kfree(CHIP);
2115	CHIP = NULL;
2116	mutex_unlock(&sb_mutex);
2117}
2118
2119/*----------------------------------------------------------------------*/
2120
2121static struct file_system_type gadgetfs_type = {
2122	.owner		= THIS_MODULE,
2123	.name		= shortname,
2124	.init_fs_context = gadgetfs_init_fs_context,
2125	.kill_sb	= gadgetfs_kill_sb,
2126};
2127MODULE_ALIAS_FS("gadgetfs");
2128
2129/*----------------------------------------------------------------------*/
2130
2131static int __init gadgetfs_init (void)
2132{
2133	int status;
2134
2135	status = register_filesystem (&gadgetfs_type);
2136	if (status == 0)
2137		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2138			shortname, driver_desc);
2139	return status;
2140}
2141module_init (gadgetfs_init);
2142
2143static void __exit gadgetfs_cleanup (void)
2144{
2145	pr_debug ("unregister %s\n", shortname);
2146	unregister_filesystem (&gadgetfs_type);
2147}
2148module_exit (gadgetfs_cleanup);
2149