   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * inode.c -- user mode filesystem api for usb gadget controllers
   4 *
   5 * Copyright (C) 2003-2004 David Brownell
   6 * Copyright (C) 2003 Agilent Technologies
   7 */
   8
   9
  10/* #define VERBOSE_DEBUG */
  11
  12#include <linux/init.h>
  13#include <linux/module.h>
  14#include <linux/fs.h>
  15#include <linux/fs_context.h>
  16#include <linux/pagemap.h>
  17#include <linux/uts.h>
  18#include <linux/wait.h>
  19#include <linux/compiler.h>
  20#include <linux/uaccess.h>
  21#include <linux/sched.h>
  22#include <linux/slab.h>
  23#include <linux/poll.h>
  24#include <linux/kthread.h>
  25#include <linux/aio.h>
  26#include <linux/uio.h>
  27#include <linux/refcount.h>
  28#include <linux/delay.h>
  29#include <linux/device.h>
  30#include <linux/moduleparam.h>
  31
  32#include <linux/usb/gadgetfs.h>
  33#include <linux/usb/gadget.h>
  34
  35
  36/*
  37 * The gadgetfs API maps each endpoint to a file descriptor so that you
  38 * can use standard synchronous read/write calls for I/O.  There's some
  39 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
  40 * drivers show how this works in practice.  You can also use AIO to
  41 * eliminate I/O gaps between requests, to help when streaming data.
  42 *
  43 * Key parts that must be USB-specific are protocols defining how the
  44 * read/write operations relate to the hardware state machines.  There
  45 * are two types of files.  One type is for the device, implementing ep0.
  46 * The other type is for each IN or OUT endpoint.  In both cases, the
  47 * user mode driver must configure the hardware before using it.
  48 *
  49 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
  50 *   (by writing configuration and device descriptors).  Afterwards it
  51 *   may serve as a source of device events, used to handle all control
  52 *   requests other than basic enumeration.
  53 *
  54 * - Then, after a SET_CONFIGURATION control request, ep_config() is
  55 *   called when each /dev/gadget/ep* file is configured (by writing
  56 *   endpoint descriptors).  Afterwards these files are used to write()
  57 *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
  58 *   direction" request is issued (like reading an IN endpoint).
  59 *
  60 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
  61 * not possible on all hardware.  For example, precise fault handling with
  62 * respect to data left in endpoint fifos after aborted operations; or
  63 * selective clearing of endpoint halts, to implement SET_INTERFACE.
  64 */
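/*
 * For illustration only, a hedged sketch of that lifecycle from user
 * space.  The $CHIP and endpoint file names depend on the controller,
 * and the descriptor buffers are placeholders; error handling omitted:
 *
 *	int ep0 = open ("/dev/gadget/$CHIP", O_RDWR);
 *	write (ep0, dev_desc_buf, dev_desc_len);   // tag 0 + config(s) + device
 *
 *	struct usb_gadgetfs_event ev;
 *	read (ep0, &ev, sizeof ev);                // wait for CONNECT/SETUP/...
 *
 *	int in_fd = open ("/dev/gadget/ep1in", O_RDWR);
 *	write (in_fd, ep_desc_buf, ep_desc_len);   // tag 1 + endpoint descriptor(s)
 *	write (in_fd, data, data_len);             // queue IN data for the host
 */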
  65
  66#define	DRIVER_DESC	"USB Gadget filesystem"
  67#define	DRIVER_VERSION	"24 Aug 2004"
  68
  69static const char driver_desc [] = DRIVER_DESC;
  70static const char shortname [] = "gadgetfs";
  71
  72MODULE_DESCRIPTION (DRIVER_DESC);
  73MODULE_AUTHOR ("David Brownell");
  74MODULE_LICENSE ("GPL");
  75
  76static int ep_open(struct inode *, struct file *);
  77
  78
  79/*----------------------------------------------------------------------*/
  80
  81#define GADGETFS_MAGIC		0xaee71ee7
  82
  83/* /dev/gadget/$CHIP represents ep0 and the whole device */
  84enum ep0_state {
  85	/* DISABLED is the initial state. */
  86	STATE_DEV_DISABLED = 0,
  87
  88	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
  89	 * ep0/device i/o modes and binding to the controller.  Driver
  90	 * must always write descriptors to initialize the device, then
  91	 * the device becomes UNCONNECTED until enumeration.
  92	 */
  93	STATE_DEV_OPENED,
  94
  95	/* From then on, ep0 fd is in either of two basic modes:
  96	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
  97	 * - SETUP: read/write will transfer control data and succeed;
  98	 *   or if "wrong direction", performs protocol stall
  99	 */
 100	STATE_DEV_UNCONNECTED,
 101	STATE_DEV_CONNECTED,
 102	STATE_DEV_SETUP,
 103
 104	/* UNBOUND means the driver closed ep0, so the device won't be
 105	 * accessible again (DEV_DISABLED) until all fds are closed.
 106	 */
 107	STATE_DEV_UNBOUND,
 108};
 109
 110/* enough for the whole queue: most events invalidate others */
 111#define	N_EVENT			5
 112
 113#define RBUF_SIZE		256
 114
 115struct dev_data {
 116	spinlock_t			lock;
 117	refcount_t			count;
 118	int				udc_usage;
 119	enum ep0_state			state;		/* P: lock */
 120	struct usb_gadgetfs_event	event [N_EVENT];
 121	unsigned			ev_next;
 122	struct fasync_struct		*fasync;
 123	u8				current_config;
 124
 125	/* drivers reading ep0 MUST handle control requests (SETUP)
 126	 * reported that way; else the host will time out.
 127	 */
 128	unsigned			usermode_setup : 1,
 129					setup_in : 1,
 130					setup_can_stall : 1,
 131					setup_out_ready : 1,
 132					setup_out_error : 1,
 133					setup_abort : 1,
 134					gadget_registered : 1;
 135	unsigned			setup_wLength;
 136
 137	/* the rest is basically write-once */
 138	struct usb_config_descriptor	*config, *hs_config;
 139	struct usb_device_descriptor	*dev;
 140	struct usb_request		*req;
 141	struct usb_gadget		*gadget;
 142	struct list_head		epfiles;
 143	void				*buf;
 144	wait_queue_head_t		wait;
 145	struct super_block		*sb;
 146	struct dentry			*dentry;
 147
 148	/* except this scratch i/o buffer for ep0 */
 149	u8				rbuf[RBUF_SIZE];
 150};
 151
 152static inline void get_dev (struct dev_data *data)
 153{
 154	refcount_inc (&data->count);
 155}
 156
 157static void put_dev (struct dev_data *data)
 158{
 159	if (likely (!refcount_dec_and_test (&data->count)))
 160		return;
 161	/* needs no more cleanup */
 162	BUG_ON (waitqueue_active (&data->wait));
 163	kfree (data);
 164}
 165
 166static struct dev_data *dev_new (void)
 167{
 168	struct dev_data		*dev;
 169
 170	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 171	if (!dev)
 172		return NULL;
 173	dev->state = STATE_DEV_DISABLED;
 174	refcount_set (&dev->count, 1);
 175	spin_lock_init (&dev->lock);
 176	INIT_LIST_HEAD (&dev->epfiles);
 177	init_waitqueue_head (&dev->wait);
 178	return dev;
 179}
 180
 181/*----------------------------------------------------------------------*/
 182
 183/* other /dev/gadget/$ENDPOINT files represent endpoints */
 184enum ep_state {
 185	STATE_EP_DISABLED = 0,
 186	STATE_EP_READY,
 187	STATE_EP_ENABLED,
 188	STATE_EP_UNBOUND,
 189};
 190
 191struct ep_data {
 192	struct mutex			lock;
 193	enum ep_state			state;
 194	refcount_t			count;
 195	struct dev_data			*dev;
 196	/* must hold dev->lock before accessing ep or req */
 197	struct usb_ep			*ep;
 198	struct usb_request		*req;
 199	ssize_t				status;
 200	char				name [16];
 201	struct usb_endpoint_descriptor	desc, hs_desc;
 202	struct list_head		epfiles;
 203	wait_queue_head_t		wait;
 204	struct dentry			*dentry;
 205};
 206
 207static inline void get_ep (struct ep_data *data)
 208{
 209	refcount_inc (&data->count);
 210}
 211
 212static void put_ep (struct ep_data *data)
 213{
 214	if (likely (!refcount_dec_and_test (&data->count)))
 215		return;
 216	put_dev (data->dev);
 217	/* needs no more cleanup */
 218	BUG_ON (!list_empty (&data->epfiles));
 219	BUG_ON (waitqueue_active (&data->wait));
 220	kfree (data);
 221}
 222
 223/*----------------------------------------------------------------------*/
 224
 225/* most "how to use the hardware" policy choices are in userspace:
 226 * mapping endpoint roles (which the driver needs) to the capabilities
 227 * which the usb controller has.  most of those capabilities are exposed
 228 * implicitly, starting with the driver name and then endpoint names.
 229 */
 230
 231static const char *CHIP;
 232static DEFINE_MUTEX(sb_mutex);		/* Serialize superblock operations */
 233
 234/*----------------------------------------------------------------------*/
 235
 236/* NOTE:  don't use dev_printk calls before binding to the gadget
 237 * at the end of ep0 configuration, or after unbind.
 238 */
 239
 240/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
 241#define xprintk(d,level,fmt,args...) \
 242	printk(level "%s: " fmt , shortname , ## args)
 243
 244#ifdef DEBUG
 245#define DBG(dev,fmt,args...) \
 246	xprintk(dev , KERN_DEBUG , fmt , ## args)
 247#else
 248#define DBG(dev,fmt,args...) \
 249	do { } while (0)
 250#endif /* DEBUG */
 251
 252#ifdef VERBOSE_DEBUG
 253#define VDEBUG	DBG
 254#else
 255#define VDEBUG(dev,fmt,args...) \
 256	do { } while (0)
 257#endif /* DEBUG */
 258
 259#define ERROR(dev,fmt,args...) \
 260	xprintk(dev , KERN_ERR , fmt , ## args)
 261#define INFO(dev,fmt,args...) \
 262	xprintk(dev , KERN_INFO , fmt , ## args)
 263
 264
 265/*----------------------------------------------------------------------*/
 266
 267/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
 268 *
 269 * After opening, configure non-control endpoints.  Then use normal
 270 * stream read() and write() requests; and maybe ioctl() to get more
 271 * precise FIFO status when recovering from cancellation.
 272 */
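/*
 * For illustration, a minimal userspace sketch of that model, using the
 * GADGETFS_* ioctls from <linux/usb/gadgetfs.h> ("ep_fd" is an already
 * configured endpoint file; error handling omitted):
 *
 *	n = read (ep_fd, buf, sizeof buf);         // OUT endpoint
 *	n = write (ep_fd, buf, len);               // IN endpoint
 *
 *	// after cancelling i/o, check and drain whatever the fifo still holds
 *	pending = ioctl (ep_fd, GADGETFS_FIFO_STATUS);
 *	if (pending > 0)
 *		ioctl (ep_fd, GADGETFS_FIFO_FLUSH);
 */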
 273
 274static void epio_complete (struct usb_ep *ep, struct usb_request *req)
 275{
 276	struct ep_data	*epdata = ep->driver_data;
 277
 278	if (!req->context)
 279		return;
 280	if (req->status)
 281		epdata->status = req->status;
 282	else
 283		epdata->status = req->actual;
 284	complete ((struct completion *)req->context);
 285}
 286
 287/* tasklock endpoint, returning when it's connected.
 288 * still need dev->lock to use epdata->ep.
 289 */
 290static int
 291get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
 292{
 293	int	val;
 294
 295	if (f_flags & O_NONBLOCK) {
 296		if (!mutex_trylock(&epdata->lock))
 297			goto nonblock;
 298		if (epdata->state != STATE_EP_ENABLED &&
 299		    (!is_write || epdata->state != STATE_EP_READY)) {
 300			mutex_unlock(&epdata->lock);
 301nonblock:
 302			val = -EAGAIN;
 303		} else
 304			val = 0;
 305		return val;
 306	}
 307
 308	val = mutex_lock_interruptible(&epdata->lock);
 309	if (val < 0)
 310		return val;
 311
 312	switch (epdata->state) {
 313	case STATE_EP_ENABLED:
 314		return 0;
 315	case STATE_EP_READY:			/* not configured yet */
 316		if (is_write)
 317			return 0;
 318		fallthrough;
 319	case STATE_EP_UNBOUND:			/* clean disconnect */
 320		break;
 321	// case STATE_EP_DISABLED:		/* "can't happen" */
 322	default:				/* error! */
 323		pr_debug ("%s: ep %p not available, state %d\n",
 324				shortname, epdata, epdata->state);
 325	}
 326	mutex_unlock(&epdata->lock);
 327	return -ENODEV;
 328}
 329
 330static ssize_t
 331ep_io (struct ep_data *epdata, void *buf, unsigned len)
 332{
 333	DECLARE_COMPLETION_ONSTACK (done);
 334	int value;
 335
 336	spin_lock_irq (&epdata->dev->lock);
 337	if (likely (epdata->ep != NULL)) {
 338		struct usb_request	*req = epdata->req;
 339
 340		req->context = &done;
 341		req->complete = epio_complete;
 342		req->buf = buf;
 343		req->length = len;
 344		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
 345	} else
 346		value = -ENODEV;
 347	spin_unlock_irq (&epdata->dev->lock);
 348
 349	if (likely (value == 0)) {
 350		value = wait_for_completion_interruptible(&done);
 351		if (value != 0) {
 352			spin_lock_irq (&epdata->dev->lock);
 353			if (likely (epdata->ep != NULL)) {
 354				DBG (epdata->dev, "%s i/o interrupted\n",
 355						epdata->name);
 356				usb_ep_dequeue (epdata->ep, epdata->req);
 357				spin_unlock_irq (&epdata->dev->lock);
 358
 359				wait_for_completion(&done);
 360				if (epdata->status == -ECONNRESET)
 361					epdata->status = -EINTR;
 362			} else {
 363				spin_unlock_irq (&epdata->dev->lock);
 364
 365				DBG (epdata->dev, "endpoint gone\n");
 366				wait_for_completion(&done);
 367				epdata->status = -ENODEV;
 368			}
 369		}
 370		return epdata->status;
 371	}
 372	return value;
 373}
 374
 375static int
 376ep_release (struct inode *inode, struct file *fd)
 377{
 378	struct ep_data		*data = fd->private_data;
 379	int value;
 380
 381	value = mutex_lock_interruptible(&data->lock);
 382	if (value < 0)
 383		return value;
 384
 385	/* clean up if this can be reopened */
 386	if (data->state != STATE_EP_UNBOUND) {
 387		data->state = STATE_EP_DISABLED;
 388		data->desc.bDescriptorType = 0;
 389		data->hs_desc.bDescriptorType = 0;
 390		usb_ep_disable(data->ep);
 391	}
 392	mutex_unlock(&data->lock);
 393	put_ep (data);
 394	return 0;
 395}
 396
 397static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
 398{
 399	struct ep_data		*data = fd->private_data;
 400	int			status;
 401
 402	if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
 403		return status;
 404
 405	spin_lock_irq (&data->dev->lock);
 406	if (likely (data->ep != NULL)) {
 407		switch (code) {
 408		case GADGETFS_FIFO_STATUS:
 409			status = usb_ep_fifo_status (data->ep);
 410			break;
 411		case GADGETFS_FIFO_FLUSH:
 412			usb_ep_fifo_flush (data->ep);
 413			break;
 414		case GADGETFS_CLEAR_HALT:
 415			status = usb_ep_clear_halt (data->ep);
 416			break;
 417		default:
 418			status = -ENOTTY;
 419		}
 420	} else
 421		status = -ENODEV;
 422	spin_unlock_irq (&data->dev->lock);
 423	mutex_unlock(&data->lock);
 424	return status;
 425}
 426
 427/*----------------------------------------------------------------------*/
 428
 429/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
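/*
 * For illustration, a hedged sketch of driving these paths from user
 * space with libaio (the fd is a configured OUT endpoint file; the
 * offset argument is ignored, since endpoint streams have no position):
 *
 *	#include <libaio.h>
 *
 *	io_context_t ctx = 0;
 *	struct iocb cb, *cbs [1] = { &cb };
 *	struct io_event ev;
 *
 *	io_setup (4, &ctx);
 *	io_prep_pread (&cb, ep_fd, buf, sizeof buf, 0);
 *	io_submit (ctx, 1, cbs);
 *	io_getevents (ctx, 1, 1, &ev, NULL);	// ev.res: bytes or -errno
 */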
 430
 431struct kiocb_priv {
 432	struct usb_request	*req;
 433	struct ep_data		*epdata;
 434	struct kiocb		*iocb;
 435	struct mm_struct	*mm;
 436	struct work_struct	work;
 437	void			*buf;
 438	struct iov_iter		to;
 439	const void		*to_free;
 440	unsigned		actual;
 441};
 442
 443static int ep_aio_cancel(struct kiocb *iocb)
 444{
 445	struct kiocb_priv	*priv = iocb->private;
 446	struct ep_data		*epdata;
 447	int			value;
 448
 449	local_irq_disable();
 450	epdata = priv->epdata;
 451	// spin_lock(&epdata->dev->lock);
 452	if (likely(epdata && epdata->ep && priv->req))
 453		value = usb_ep_dequeue (epdata->ep, priv->req);
 454	else
 455		value = -EINVAL;
 456	// spin_unlock(&epdata->dev->lock);
 457	local_irq_enable();
 458
 459	return value;
 460}
 461
 462static void ep_user_copy_worker(struct work_struct *work)
 463{
 464	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
 465	struct mm_struct *mm = priv->mm;
 466	struct kiocb *iocb = priv->iocb;
 467	size_t ret;
 468
 469	kthread_use_mm(mm);
 470	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
 471	kthread_unuse_mm(mm);
 472	if (!ret)
 473		ret = -EFAULT;
 474
 475	/* completing the iocb can drop the ctx and mm, don't touch mm after */
 476	iocb->ki_complete(iocb, ret);
 477
 478	kfree(priv->buf);
 479	kfree(priv->to_free);
 480	kfree(priv);
 481}
 482
 483static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 484{
 485	struct kiocb		*iocb = req->context;
 486	struct kiocb_priv	*priv = iocb->private;
 487	struct ep_data		*epdata = priv->epdata;
 488
 489	/* lock against disconnect (and ideally, cancel) */
 490	spin_lock(&epdata->dev->lock);
 491	priv->req = NULL;
 492	priv->epdata = NULL;
 493
 494	/* if this was a write or a read returning no data then we
 495	 * don't need to copy anything to userspace, so we can
 496	 * complete the aio request immediately.
 497	 */
 498	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
 499		kfree(req->buf);
 500		kfree(priv->to_free);
 501		kfree(priv);
 502		iocb->private = NULL;
 503		iocb->ki_complete(iocb,
 504				req->actual ? req->actual : (long)req->status);
 505	} else {
 506		/* ep_copy_to_user() won't report both; we hide some faults */
 507		if (unlikely(0 != req->status))
 508			DBG(epdata->dev, "%s fault %d len %d\n",
 509				ep->name, req->status, req->actual);
 510
 511		priv->buf = req->buf;
 512		priv->actual = req->actual;
 513		INIT_WORK(&priv->work, ep_user_copy_worker);
 514		schedule_work(&priv->work);
 515	}
 516
 517	usb_ep_free_request(ep, req);
 518	spin_unlock(&epdata->dev->lock);
 519	put_ep(epdata);
 520}
 521
 522static ssize_t ep_aio(struct kiocb *iocb,
 523		      struct kiocb_priv *priv,
 524		      struct ep_data *epdata,
 525		      char *buf,
 526		      size_t len)
 527{
 528	struct usb_request *req;
 529	ssize_t value;
 530
 531	iocb->private = priv;
 532	priv->iocb = iocb;
 533
 534	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
 535	get_ep(epdata);
 536	priv->epdata = epdata;
 537	priv->actual = 0;
 538	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
 539
 540	/* each kiocb is coupled to one usb_request, but we can't
 541	 * allocate or submit those if the host disconnected.
 542	 */
 543	spin_lock_irq(&epdata->dev->lock);
 544	value = -ENODEV;
 545	if (unlikely(epdata->ep == NULL))
 546		goto fail;
 547
 548	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
 549	value = -ENOMEM;
 550	if (unlikely(!req))
 551		goto fail;
 552
 553	priv->req = req;
 554	req->buf = buf;
 555	req->length = len;
 556	req->complete = ep_aio_complete;
 557	req->context = iocb;
 558	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
 559	if (unlikely(0 != value)) {
 560		usb_ep_free_request(epdata->ep, req);
 561		goto fail;
 562	}
 563	spin_unlock_irq(&epdata->dev->lock);
 564	return -EIOCBQUEUED;
 565
 566fail:
 567	spin_unlock_irq(&epdata->dev->lock);
 568	kfree(priv->to_free);
 569	kfree(priv);
 570	put_ep(epdata);
 571	return value;
 572}
 573
 574static ssize_t
 575ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
 576{
 577	struct file *file = iocb->ki_filp;
 578	struct ep_data *epdata = file->private_data;
 579	size_t len = iov_iter_count(to);
 580	ssize_t value;
 581	char *buf;
 582
 583	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
 584		return value;
 585
 586	/* halt any endpoint by doing a "wrong direction" i/o call */
 587	if (usb_endpoint_dir_in(&epdata->desc)) {
 588		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
 589		    !is_sync_kiocb(iocb)) {
 590			mutex_unlock(&epdata->lock);
 591			return -EINVAL;
 592		}
 593		DBG (epdata->dev, "%s halt\n", epdata->name);
 594		spin_lock_irq(&epdata->dev->lock);
 595		if (likely(epdata->ep != NULL))
 596			usb_ep_set_halt(epdata->ep);
 597		spin_unlock_irq(&epdata->dev->lock);
 598		mutex_unlock(&epdata->lock);
 599		return -EBADMSG;
 600	}
 601
 602	buf = kmalloc(len, GFP_KERNEL);
 603	if (unlikely(!buf)) {
 604		mutex_unlock(&epdata->lock);
 605		return -ENOMEM;
 606	}
 607	if (is_sync_kiocb(iocb)) {
 608		value = ep_io(epdata, buf, len);
 609		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
 610			value = -EFAULT;
 611	} else {
 612		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
 613		value = -ENOMEM;
 614		if (!priv)
 615			goto fail;
 616		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
 617		if (!priv->to_free) {
 618			kfree(priv);
 619			goto fail;
 620		}
 621		value = ep_aio(iocb, priv, epdata, buf, len);
 622		if (value == -EIOCBQUEUED)
 623			buf = NULL;
 624	}
 625fail:
 626	kfree(buf);
 627	mutex_unlock(&epdata->lock);
 628	return value;
 629}
 630
 631static ssize_t ep_config(struct ep_data *, const char *, size_t);
 632
 633static ssize_t
 634ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
 635{
 636	struct file *file = iocb->ki_filp;
 637	struct ep_data *epdata = file->private_data;
 638	size_t len = iov_iter_count(from);
 639	bool configured;
 640	ssize_t value;
 641	char *buf;
 642
 643	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
 644		return value;
 645
 646	configured = epdata->state == STATE_EP_ENABLED;
 647
 648	/* halt any endpoint by doing a "wrong direction" i/o call */
 649	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
 650		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
 651		    !is_sync_kiocb(iocb)) {
 652			mutex_unlock(&epdata->lock);
 653			return -EINVAL;
 654		}
 655		DBG (epdata->dev, "%s halt\n", epdata->name);
 656		spin_lock_irq(&epdata->dev->lock);
 657		if (likely(epdata->ep != NULL))
 658			usb_ep_set_halt(epdata->ep);
 659		spin_unlock_irq(&epdata->dev->lock);
 660		mutex_unlock(&epdata->lock);
 661		return -EBADMSG;
 662	}
 663
 664	buf = kmalloc(len, GFP_KERNEL);
 665	if (unlikely(!buf)) {
 666		mutex_unlock(&epdata->lock);
 667		return -ENOMEM;
 668	}
 669
 670	if (unlikely(!copy_from_iter_full(buf, len, from))) {
 671		value = -EFAULT;
 672		goto out;
 673	}
 674
 675	if (unlikely(!configured)) {
 676		value = ep_config(epdata, buf, len);
 677	} else if (is_sync_kiocb(iocb)) {
 678		value = ep_io(epdata, buf, len);
 679	} else {
 680		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
 681		value = -ENOMEM;
 682		if (priv) {
 683			value = ep_aio(iocb, priv, epdata, buf, len);
 684			if (value == -EIOCBQUEUED)
 685				buf = NULL;
 686		}
 687	}
 688out:
 689	kfree(buf);
 690	mutex_unlock(&epdata->lock);
 691	return value;
 692}
 693
 694/*----------------------------------------------------------------------*/
 695
 696/* used after endpoint configuration */
 697static const struct file_operations ep_io_operations = {
 698	.owner =	THIS_MODULE,
 699
 700	.open =		ep_open,
 701	.release =	ep_release,
 702	.llseek =	no_llseek,
 703	.unlocked_ioctl = ep_ioctl,
 704	.read_iter =	ep_read_iter,
 705	.write_iter =	ep_write_iter,
 706};
 707
 708/* ENDPOINT INITIALIZATION
 709 *
 710 *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
 711 *     status = write (fd, descriptors, sizeof descriptors)
 712 *
 713 * That write establishes the endpoint configuration, configuring
 714 * the controller to process bulk, interrupt, or isochronous transfers
 715 * at the right maxpacket size, and so on.
 716 *
 717 * The descriptors are message type 1, identified by a host order u32
 718 * at the beginning of what's written.  Descriptor order is: full/low
 719 * speed descriptor, then optional high speed descriptor.
 720 */
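/*
 * For illustration, a hedged sketch of building that write() buffer in
 * user space; the endpoint address and packet sizes below are only an
 * example bulk-IN endpoint, not anything this file requires:
 *
 *	__u32 tag = 1;
 *	struct usb_endpoint_descriptor fs = {
 *		.bLength          = USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType  = USB_DT_ENDPOINT,
 *		.bEndpointAddress = USB_DIR_IN | 1,
 *		.bmAttributes     = USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize   = __cpu_to_le16 (64),
 *	};
 *	struct usb_endpoint_descriptor hs = fs;
 *	hs.wMaxPacketSize = __cpu_to_le16 (512);
 *
 *	char buf [4 + 2 * USB_DT_ENDPOINT_SIZE];
 *	memcpy (buf, &tag, 4);
 *	memcpy (buf + 4, &fs, USB_DT_ENDPOINT_SIZE);
 *	memcpy (buf + 4 + USB_DT_ENDPOINT_SIZE, &hs, USB_DT_ENDPOINT_SIZE);
 *	write (ep_fd, buf, sizeof buf);
 */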
 721static ssize_t
 722ep_config (struct ep_data *data, const char *buf, size_t len)
 723{
 724	struct usb_ep		*ep;
 725	u32			tag;
 726	int			value, length = len;
 727
 728	if (data->state != STATE_EP_READY) {
 729		value = -EL2HLT;
 730		goto fail;
 731	}
 732
 733	value = len;
 734	if (len < USB_DT_ENDPOINT_SIZE + 4)
 735		goto fail0;
 736
 737	/* we might need to change message format someday */
 738	memcpy(&tag, buf, 4);
 739	if (tag != 1) {
 740		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
 741		goto fail0;
 742	}
 743	buf += 4;
 744	len -= 4;
 745
 746	/* NOTE:  audio endpoint extensions not accepted here;
 747	 * just don't include the extra bytes.
 748	 */
 749
 750	/* full/low speed descriptor, then high speed */
 751	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
 752	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
 753			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
 754		goto fail0;
 755	if (len != USB_DT_ENDPOINT_SIZE) {
 756		if (len != 2 * USB_DT_ENDPOINT_SIZE)
 757			goto fail0;
 758		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
 759			USB_DT_ENDPOINT_SIZE);
 760		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
 761				|| data->hs_desc.bDescriptorType
 762					!= USB_DT_ENDPOINT) {
 763			DBG(data->dev, "config %s, bad hs length or type\n",
 764					data->name);
 765			goto fail0;
 766		}
 767	}
 768
 769	spin_lock_irq (&data->dev->lock);
 770	if (data->dev->state == STATE_DEV_UNBOUND) {
 771		value = -ENOENT;
 772		goto gone;
 773	} else {
 774		ep = data->ep;
 775		if (ep == NULL) {
 776			value = -ENODEV;
 777			goto gone;
 778		}
 779	}
 780	switch (data->dev->gadget->speed) {
 781	case USB_SPEED_LOW:
 782	case USB_SPEED_FULL:
 783		ep->desc = &data->desc;
 784		break;
 785	case USB_SPEED_HIGH:
 786		/* fails if caller didn't provide that descriptor... */
 787		ep->desc = &data->hs_desc;
 788		break;
 789	default:
 790		DBG(data->dev, "unconnected, %s init abandoned\n",
 791				data->name);
 792		value = -EINVAL;
 793		goto gone;
 794	}
 795	value = usb_ep_enable(ep);
 796	if (value == 0) {
 797		data->state = STATE_EP_ENABLED;
 798		value = length;
 799	}
 800gone:
 801	spin_unlock_irq (&data->dev->lock);
 802	if (value < 0) {
 803fail:
 804		data->desc.bDescriptorType = 0;
 805		data->hs_desc.bDescriptorType = 0;
 806	}
 807	return value;
 808fail0:
 809	value = -EINVAL;
 810	goto fail;
 811}
 812
 813static int
 814ep_open (struct inode *inode, struct file *fd)
 815{
 816	struct ep_data		*data = inode->i_private;
 817	int			value = -EBUSY;
 818
 819	if (mutex_lock_interruptible(&data->lock) != 0)
 820		return -EINTR;
 821	spin_lock_irq (&data->dev->lock);
 822	if (data->dev->state == STATE_DEV_UNBOUND)
 823		value = -ENOENT;
 824	else if (data->state == STATE_EP_DISABLED) {
 825		value = 0;
 826		data->state = STATE_EP_READY;
 827		get_ep (data);
 828		fd->private_data = data;
 829		VDEBUG (data->dev, "%s ready\n", data->name);
 830	} else
 831		DBG (data->dev, "%s state %d\n",
 832			data->name, data->state);
 833	spin_unlock_irq (&data->dev->lock);
 834	mutex_unlock(&data->lock);
 835	return value;
 836}
 837
 838/*----------------------------------------------------------------------*/
 839
 840/* EP0 IMPLEMENTATION can be partly in userspace.
 841 *
 842 * Drivers that use this facility receive various events, including
 843 * control requests the kernel doesn't handle.  Drivers that don't
 844 * use this facility may be too simple-minded for real applications.
 845 */
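/*
 * For illustration, the usual userspace loop over the ep0 fd looks
 * roughly like this sketch (handle_setup() is a hypothetical helper
 * that answers the control request by reading/writing ep0):
 *
 *	struct usb_gadgetfs_event events [5];
 *	int i, n;
 *
 *	n = read (ep0, events, sizeof events) / sizeof events [0];
 *	for (i = 0; i < n; i++) {
 *		switch (events [i].type) {
 *		case GADGETFS_CONNECT:
 *		case GADGETFS_DISCONNECT:
 *		case GADGETFS_SUSPEND:
 *			break;			// track link state
 *		case GADGETFS_SETUP:
 *			handle_setup (ep0, &events [i].u.setup);
 *			break;
 *		}
 *	}
 */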
 846
 847static inline void ep0_readable (struct dev_data *dev)
 848{
 849	wake_up (&dev->wait);
 850	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
 851}
 852
 853static void clean_req (struct usb_ep *ep, struct usb_request *req)
 854{
 855	struct dev_data		*dev = ep->driver_data;
 856
 857	if (req->buf != dev->rbuf) {
 858		kfree(req->buf);
 859		req->buf = dev->rbuf;
 860	}
 861	req->complete = epio_complete;
 862	dev->setup_out_ready = 0;
 863}
 864
 865static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
 866{
 867	struct dev_data		*dev = ep->driver_data;
 868	unsigned long		flags;
 869	int			free = 1;
 870
 871	/* for control OUT, data must still get to userspace */
 872	spin_lock_irqsave(&dev->lock, flags);
 873	if (!dev->setup_in) {
 874		dev->setup_out_error = (req->status != 0);
 875		if (!dev->setup_out_error)
 876			free = 0;
 877		dev->setup_out_ready = 1;
 878		ep0_readable (dev);
 879	}
 880
 881	/* clean up as appropriate */
 882	if (free && req->buf != &dev->rbuf)
 883		clean_req (ep, req);
 884	req->complete = epio_complete;
 885	spin_unlock_irqrestore(&dev->lock, flags);
 886}
 887
 888static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
 889{
 890	struct dev_data	*dev = ep->driver_data;
 891
 892	if (dev->setup_out_ready) {
 893		DBG (dev, "ep0 request busy!\n");
 894		return -EBUSY;
 895	}
 896	if (len > sizeof (dev->rbuf))
 897		req->buf = kmalloc(len, GFP_ATOMIC);
 898	if (req->buf == NULL) {
 899		req->buf = dev->rbuf;
 900		return -ENOMEM;
 901	}
 902	req->complete = ep0_complete;
 903	req->length = len;
 904	req->zero = 0;
 905	return 0;
 906}
 907
 908static ssize_t
 909ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
 910{
 911	struct dev_data			*dev = fd->private_data;
 912	ssize_t				retval;
 913	enum ep0_state			state;
 914
 915	spin_lock_irq (&dev->lock);
 916	if (dev->state <= STATE_DEV_OPENED) {
 917		retval = -EINVAL;
 918		goto done;
 919	}
 920
 921	/* report fd mode change before acting on it */
 922	if (dev->setup_abort) {
 923		dev->setup_abort = 0;
 924		retval = -EIDRM;
 925		goto done;
 926	}
 927
 928	/* control DATA stage */
 929	if ((state = dev->state) == STATE_DEV_SETUP) {
 930
 931		if (dev->setup_in) {		/* stall IN */
 932			VDEBUG(dev, "ep0in stall\n");
 933			(void) usb_ep_set_halt (dev->gadget->ep0);
 934			retval = -EL2HLT;
 935			dev->state = STATE_DEV_CONNECTED;
 936
 937		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
 938			struct usb_ep		*ep = dev->gadget->ep0;
 939			struct usb_request	*req = dev->req;
 940
 941			if ((retval = setup_req (ep, req, 0)) == 0) {
 942				++dev->udc_usage;
 943				spin_unlock_irq (&dev->lock);
 944				retval = usb_ep_queue (ep, req, GFP_KERNEL);
 945				spin_lock_irq (&dev->lock);
 946				--dev->udc_usage;
 947			}
 948			dev->state = STATE_DEV_CONNECTED;
 949
 950			/* assume that was SET_CONFIGURATION */
 951			if (dev->current_config) {
 952				unsigned power;
 953
 954				if (gadget_is_dualspeed(dev->gadget)
 955						&& (dev->gadget->speed
 956							== USB_SPEED_HIGH))
 957					power = dev->hs_config->bMaxPower;
 958				else
 959					power = dev->config->bMaxPower;
 960				usb_gadget_vbus_draw(dev->gadget, 2 * power);
 961			}
 962
 963		} else {			/* collect OUT data */
 964			if ((fd->f_flags & O_NONBLOCK) != 0
 965					&& !dev->setup_out_ready) {
 966				retval = -EAGAIN;
 967				goto done;
 968			}
 969			spin_unlock_irq (&dev->lock);
 970			retval = wait_event_interruptible (dev->wait,
 971					dev->setup_out_ready != 0);
 972
 973			/* FIXME state could change from under us */
 974			spin_lock_irq (&dev->lock);
 975			if (retval)
 976				goto done;
 977
 978			if (dev->state != STATE_DEV_SETUP) {
 979				retval = -ECANCELED;
 980				goto done;
 981			}
 982			dev->state = STATE_DEV_CONNECTED;
 983
 984			if (dev->setup_out_error)
 985				retval = -EIO;
 986			else {
 987				len = min (len, (size_t)dev->req->actual);
 988				++dev->udc_usage;
 989				spin_unlock_irq(&dev->lock);
 990				if (copy_to_user (buf, dev->req->buf, len))
 991					retval = -EFAULT;
 992				else
 993					retval = len;
 994				spin_lock_irq(&dev->lock);
 995				--dev->udc_usage;
 996				clean_req (dev->gadget->ep0, dev->req);
 997				/* NOTE userspace can't yet choose to stall */
 998			}
 999		}
1000		goto done;
1001	}
1002
1003	/* else normal: return event data */
1004	if (len < sizeof dev->event [0]) {
1005		retval = -EINVAL;
1006		goto done;
1007	}
1008	len -= len % sizeof (struct usb_gadgetfs_event);
1009	dev->usermode_setup = 1;
1010
1011scan:
1012	/* return queued events right away */
1013	if (dev->ev_next != 0) {
1014		unsigned		i, n;
1015
1016		n = len / sizeof (struct usb_gadgetfs_event);
1017		if (dev->ev_next < n)
1018			n = dev->ev_next;
1019
1020		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
1021		for (i = 0; i < n; i++) {
1022			if (dev->event [i].type == GADGETFS_SETUP) {
1023				dev->state = STATE_DEV_SETUP;
1024				n = i + 1;
1025				break;
1026			}
1027		}
1028		spin_unlock_irq (&dev->lock);
1029		len = n * sizeof (struct usb_gadgetfs_event);
1030		if (copy_to_user (buf, &dev->event, len))
1031			retval = -EFAULT;
1032		else
1033			retval = len;
1034		if (len > 0) {
1035			/* NOTE this doesn't guard against broken drivers;
1036			 * concurrent ep0 readers may lose events.
1037			 */
1038			spin_lock_irq (&dev->lock);
1039			if (dev->ev_next > n) {
1040				memmove(&dev->event[0], &dev->event[n],
1041					sizeof (struct usb_gadgetfs_event)
1042						* (dev->ev_next - n));
1043			}
1044			dev->ev_next -= n;
1045			spin_unlock_irq (&dev->lock);
1046		}
1047		return retval;
1048	}
1049	if (fd->f_flags & O_NONBLOCK) {
1050		retval = -EAGAIN;
1051		goto done;
1052	}
1053
1054	switch (state) {
1055	default:
1056		DBG (dev, "fail %s, state %d\n", __func__, state);
1057		retval = -ESRCH;
1058		break;
1059	case STATE_DEV_UNCONNECTED:
1060	case STATE_DEV_CONNECTED:
1061		spin_unlock_irq (&dev->lock);
1062		DBG (dev, "%s wait\n", __func__);
1063
1064		/* wait for events */
1065		retval = wait_event_interruptible (dev->wait,
1066				dev->ev_next != 0);
1067		if (retval < 0)
1068			return retval;
1069		spin_lock_irq (&dev->lock);
1070		goto scan;
1071	}
1072
1073done:
1074	spin_unlock_irq (&dev->lock);
1075	return retval;
1076}
1077
1078static struct usb_gadgetfs_event *
1079next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1080{
1081	struct usb_gadgetfs_event	*event;
1082	unsigned			i;
1083
1084	switch (type) {
1085	/* these events purge the queue */
1086	case GADGETFS_DISCONNECT:
1087		if (dev->state == STATE_DEV_SETUP)
1088			dev->setup_abort = 1;
1089		fallthrough;
1090	case GADGETFS_CONNECT:
1091		dev->ev_next = 0;
1092		break;
1093	case GADGETFS_SETUP:		/* previous request timed out */
1094	case GADGETFS_SUSPEND:		/* same effect */
1095		/* these events can't be repeated */
1096		for (i = 0; i != dev->ev_next; i++) {
1097			if (dev->event [i].type != type)
1098				continue;
1099			DBG(dev, "discard old event[%d] %d\n", i, type);
1100			dev->ev_next--;
1101			if (i == dev->ev_next)
1102				break;
1103			/* indices start at zero, for simplicity */
1104			memmove (&dev->event [i], &dev->event [i + 1],
1105				sizeof (struct usb_gadgetfs_event)
1106					* (dev->ev_next - i));
1107		}
1108		break;
1109	default:
1110		BUG ();
1111	}
1112	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1113	event = &dev->event [dev->ev_next++];
1114	BUG_ON (dev->ev_next > N_EVENT);
1115	memset (event, 0, sizeof *event);
1116	event->type = type;
1117	return event;
1118}
1119
1120static ssize_t
1121ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1122{
1123	struct dev_data		*dev = fd->private_data;
1124	ssize_t			retval = -ESRCH;
1125
1126	/* report fd mode change before acting on it */
1127	if (dev->setup_abort) {
1128		dev->setup_abort = 0;
1129		retval = -EIDRM;
1130
1131	/* data and/or status stage for control request */
1132	} else if (dev->state == STATE_DEV_SETUP) {
1133
1134		len = min_t(size_t, len, dev->setup_wLength);
1135		if (dev->setup_in) {
1136			retval = setup_req (dev->gadget->ep0, dev->req, len);
1137			if (retval == 0) {
1138				dev->state = STATE_DEV_CONNECTED;
1139				++dev->udc_usage;
1140				spin_unlock_irq (&dev->lock);
1141				if (copy_from_user (dev->req->buf, buf, len))
1142					retval = -EFAULT;
1143				else {
1144					if (len < dev->setup_wLength)
1145						dev->req->zero = 1;
1146					retval = usb_ep_queue (
1147						dev->gadget->ep0, dev->req,
1148						GFP_KERNEL);
1149				}
1150				spin_lock_irq(&dev->lock);
1151				--dev->udc_usage;
1152				if (retval < 0) {
1153					clean_req (dev->gadget->ep0, dev->req);
1154				} else
1155					retval = len;
1156
1157				return retval;
1158			}
1159
1160		/* can stall some OUT transfers */
1161		} else if (dev->setup_can_stall) {
1162			VDEBUG(dev, "ep0out stall\n");
1163			(void) usb_ep_set_halt (dev->gadget->ep0);
1164			retval = -EL2HLT;
1165			dev->state = STATE_DEV_CONNECTED;
1166		} else {
1167			DBG(dev, "bogus ep0out stall!\n");
1168		}
1169	} else
1170		DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1171
1172	return retval;
1173}
1174
1175static int
1176ep0_fasync (int f, struct file *fd, int on)
1177{
1178	struct dev_data		*dev = fd->private_data;
1179	// caller must F_SETOWN before signal delivery happens
1180	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1181	return fasync_helper (f, fd, on, &dev->fasync);
1182}
1183
1184static struct usb_gadget_driver gadgetfs_driver;
1185
1186static int
1187dev_release (struct inode *inode, struct file *fd)
1188{
1189	struct dev_data		*dev = fd->private_data;
1190
1191	/* closing ep0 === shutdown all */
1192
1193	if (dev->gadget_registered) {
1194		usb_gadget_unregister_driver (&gadgetfs_driver);
1195		dev->gadget_registered = false;
1196	}
1197
1198	/* at this point "good" hardware has disconnected the
1199	 * device from USB; the host won't see it any more.
1200	 * alternatively, all host requests will time out.
1201	 */
1202
1203	kfree (dev->buf);
1204	dev->buf = NULL;
1205
1206	/* other endpoints were all decoupled from this device */
1207	spin_lock_irq(&dev->lock);
1208	dev->state = STATE_DEV_DISABLED;
1209	spin_unlock_irq(&dev->lock);
1210
1211	put_dev (dev);
1212	return 0;
1213}
1214
1215static __poll_t
1216ep0_poll (struct file *fd, poll_table *wait)
1217{
1218	struct dev_data         *dev = fd->private_data;
1219	__poll_t                mask = 0;
1220
1221	if (dev->state <= STATE_DEV_OPENED)
1222		return DEFAULT_POLLMASK;
1223
1224	poll_wait(fd, &dev->wait, wait);
1225
1226	spin_lock_irq(&dev->lock);
1227
1228	/* report fd mode change before acting on it */
1229	if (dev->setup_abort) {
1230		dev->setup_abort = 0;
1231		mask = EPOLLHUP;
1232		goto out;
1233	}
1234
1235	if (dev->state == STATE_DEV_SETUP) {
1236		if (dev->setup_in || dev->setup_can_stall)
1237			mask = EPOLLOUT;
1238	} else {
1239		if (dev->ev_next != 0)
1240			mask = EPOLLIN;
1241	}
1242out:
1243	spin_unlock_irq(&dev->lock);
1244	return mask;
1245}
1246
1247static long gadget_dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1248{
1249	struct dev_data		*dev = fd->private_data;
1250	struct usb_gadget	*gadget = dev->gadget;
1251	long ret = -ENOTTY;
1252
1253	spin_lock_irq(&dev->lock);
1254	if (dev->state == STATE_DEV_OPENED ||
1255			dev->state == STATE_DEV_UNBOUND) {
1256		/* Not bound to a UDC */
1257	} else if (gadget->ops->ioctl) {
1258		++dev->udc_usage;
1259		spin_unlock_irq(&dev->lock);
1260
1261		ret = gadget->ops->ioctl (gadget, code, value);
1262
1263		spin_lock_irq(&dev->lock);
1264		--dev->udc_usage;
1265	}
1266	spin_unlock_irq(&dev->lock);
1267
1268	return ret;
1269}
1270
1271/*----------------------------------------------------------------------*/
1272
1273/* The in-kernel gadget driver handles most ep0 issues, in particular
1274 * enumerating the single configuration (as provided from user space).
1275 *
1276 * Unrecognized ep0 requests may be handled in user space.
1277 */
1278
1279static void make_qualifier (struct dev_data *dev)
1280{
1281	struct usb_qualifier_descriptor		qual;
1282	struct usb_device_descriptor		*desc;
1283
1284	qual.bLength = sizeof qual;
1285	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1286	qual.bcdUSB = cpu_to_le16 (0x0200);
1287
1288	desc = dev->dev;
1289	qual.bDeviceClass = desc->bDeviceClass;
1290	qual.bDeviceSubClass = desc->bDeviceSubClass;
1291	qual.bDeviceProtocol = desc->bDeviceProtocol;
1292
1293	/* assumes ep0 uses the same value for both speeds ... */
1294	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1295
1296	qual.bNumConfigurations = 1;
1297	qual.bRESERVED = 0;
1298
1299	memcpy (dev->rbuf, &qual, sizeof qual);
1300}
1301
1302static int
1303config_buf (struct dev_data *dev, u8 type, unsigned index)
1304{
1305	int		len;
1306	int		hs = 0;
1307
1308	/* only one configuration */
1309	if (index > 0)
1310		return -EINVAL;
1311
1312	if (gadget_is_dualspeed(dev->gadget)) {
1313		hs = (dev->gadget->speed == USB_SPEED_HIGH);
1314		if (type == USB_DT_OTHER_SPEED_CONFIG)
1315			hs = !hs;
1316	}
1317	if (hs) {
1318		dev->req->buf = dev->hs_config;
1319		len = le16_to_cpu(dev->hs_config->wTotalLength);
1320	} else {
1321		dev->req->buf = dev->config;
1322		len = le16_to_cpu(dev->config->wTotalLength);
1323	}
1324	((u8 *)dev->req->buf) [1] = type;
1325	return len;
1326}
1327
1328static int
1329gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1330{
1331	struct dev_data			*dev = get_gadget_data (gadget);
1332	struct usb_request		*req = dev->req;
1333	int				value = -EOPNOTSUPP;
1334	struct usb_gadgetfs_event	*event;
1335	u16				w_value = le16_to_cpu(ctrl->wValue);
1336	u16				w_length = le16_to_cpu(ctrl->wLength);
1337
1338	if (w_length > RBUF_SIZE) {
1339		if (ctrl->bRequestType & USB_DIR_IN) {
1340			/* Cast away the const, we are going to overwrite on purpose. */
1341			__le16 *temp = (__le16 *)&ctrl->wLength;
1342
1343			*temp = cpu_to_le16(RBUF_SIZE);
1344			w_length = RBUF_SIZE;
1345		} else {
1346			return value;
1347		}
1348	}
1349
1350	spin_lock (&dev->lock);
1351	dev->setup_abort = 0;
1352	if (dev->state == STATE_DEV_UNCONNECTED) {
1353		if (gadget_is_dualspeed(gadget)
1354				&& gadget->speed == USB_SPEED_HIGH
1355				&& dev->hs_config == NULL) {
1356			spin_unlock(&dev->lock);
1357			ERROR (dev, "no high speed config??\n");
1358			return -EINVAL;
1359		}
1360
1361		dev->state = STATE_DEV_CONNECTED;
1362
1363		INFO (dev, "connected\n");
1364		event = next_event (dev, GADGETFS_CONNECT);
1365		event->u.speed = gadget->speed;
1366		ep0_readable (dev);
1367
1368	/* host may have given up waiting for response.  we can miss control
1369	 * requests handled lower down (device/endpoint status and features);
1370	 * then ep0_{read,write} will report the wrong status. controller
1371	 * driver will have aborted pending i/o.
1372	 */
1373	} else if (dev->state == STATE_DEV_SETUP)
1374		dev->setup_abort = 1;
1375
1376	req->buf = dev->rbuf;
1377	req->context = NULL;
1378	switch (ctrl->bRequest) {
1379
1380	case USB_REQ_GET_DESCRIPTOR:
1381		if (ctrl->bRequestType != USB_DIR_IN)
1382			goto unrecognized;
1383		switch (w_value >> 8) {
1384
1385		case USB_DT_DEVICE:
1386			value = min (w_length, (u16) sizeof *dev->dev);
1387			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1388			req->buf = dev->dev;
1389			break;
1390		case USB_DT_DEVICE_QUALIFIER:
1391			if (!dev->hs_config)
1392				break;
1393			value = min (w_length, (u16)
1394				sizeof (struct usb_qualifier_descriptor));
1395			make_qualifier (dev);
1396			break;
1397		case USB_DT_OTHER_SPEED_CONFIG:
1398		case USB_DT_CONFIG:
1399			value = config_buf (dev,
1400					w_value >> 8,
1401					w_value & 0xff);
1402			if (value >= 0)
1403				value = min (w_length, (u16) value);
1404			break;
1405		case USB_DT_STRING:
1406			goto unrecognized;
1407
1408		default:		// all others are errors
1409			break;
1410		}
1411		break;
1412
1413	/* currently one config, two speeds */
1414	case USB_REQ_SET_CONFIGURATION:
1415		if (ctrl->bRequestType != 0)
1416			goto unrecognized;
1417		if (0 == (u8) w_value) {
1418			value = 0;
1419			dev->current_config = 0;
1420			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1421			// user mode expected to disable endpoints
1422		} else {
1423			u8	config, power;
1424
1425			if (gadget_is_dualspeed(gadget)
1426					&& gadget->speed == USB_SPEED_HIGH) {
1427				config = dev->hs_config->bConfigurationValue;
1428				power = dev->hs_config->bMaxPower;
1429			} else {
1430				config = dev->config->bConfigurationValue;
1431				power = dev->config->bMaxPower;
1432			}
1433
1434			if (config == (u8) w_value) {
1435				value = 0;
1436				dev->current_config = config;
1437				usb_gadget_vbus_draw(gadget, 2 * power);
1438			}
1439		}
1440
1441		/* report SET_CONFIGURATION like any other control request,
1442		 * except that usermode may not stall this.  the next
 1443		 * request mustn't be allowed to start until this finishes:
1444		 * endpoints and threads set up, etc.
1445		 *
1446		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
1447		 * has bad/racey automagic that prevents synchronizing here.
1448		 * even kernel mode drivers often miss them.
1449		 */
1450		if (value == 0) {
1451			INFO (dev, "configuration #%d\n", dev->current_config);
1452			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1453			if (dev->usermode_setup) {
1454				dev->setup_can_stall = 0;
1455				goto delegate;
1456			}
1457		}
1458		break;
1459
1460#ifndef	CONFIG_USB_PXA25X
1461	/* PXA automagically handles this request too */
1462	case USB_REQ_GET_CONFIGURATION:
1463		if (ctrl->bRequestType != 0x80)
1464			goto unrecognized;
1465		*(u8 *)req->buf = dev->current_config;
1466		value = min (w_length, (u16) 1);
1467		break;
1468#endif
1469
1470	default:
1471unrecognized:
1472		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1473			dev->usermode_setup ? "delegate" : "fail",
1474			ctrl->bRequestType, ctrl->bRequest,
1475			w_value, le16_to_cpu(ctrl->wIndex), w_length);
1476
1477		/* if there's an ep0 reader, don't stall */
1478		if (dev->usermode_setup) {
1479			dev->setup_can_stall = 1;
1480delegate:
1481			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1482						? 1 : 0;
1483			dev->setup_wLength = w_length;
1484			dev->setup_out_ready = 0;
1485			dev->setup_out_error = 0;
1486
1487			/* read DATA stage for OUT right away */
1488			if (unlikely (!dev->setup_in && w_length)) {
1489				value = setup_req (gadget->ep0, dev->req,
1490							w_length);
1491				if (value < 0)
1492					break;
1493
1494				++dev->udc_usage;
1495				spin_unlock (&dev->lock);
1496				value = usb_ep_queue (gadget->ep0, dev->req,
1497							GFP_KERNEL);
1498				spin_lock (&dev->lock);
1499				--dev->udc_usage;
1500				if (value < 0) {
1501					clean_req (gadget->ep0, dev->req);
1502					break;
1503				}
1504
1505				/* we can't currently stall these */
1506				dev->setup_can_stall = 0;
1507			}
1508
1509			/* state changes when reader collects event */
1510			event = next_event (dev, GADGETFS_SETUP);
1511			event->u.setup = *ctrl;
1512			ep0_readable (dev);
1513			spin_unlock (&dev->lock);
1514			return 0;
1515		}
1516	}
1517
1518	/* proceed with data transfer and status phases? */
1519	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1520		req->length = value;
1521		req->zero = value < w_length;
1522
1523		++dev->udc_usage;
1524		spin_unlock (&dev->lock);
1525		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1526		spin_lock(&dev->lock);
1527		--dev->udc_usage;
1528		spin_unlock(&dev->lock);
1529		if (value < 0) {
1530			DBG (dev, "ep_queue --> %d\n", value);
1531			req->status = 0;
1532		}
1533		return value;
1534	}
1535
1536	/* device stalls when value < 0 */
1537	spin_unlock (&dev->lock);
1538	return value;
1539}
1540
1541static void destroy_ep_files (struct dev_data *dev)
1542{
1543	DBG (dev, "%s %d\n", __func__, dev->state);
1544
1545	/* dev->state must prevent interference */
1546	spin_lock_irq (&dev->lock);
1547	while (!list_empty(&dev->epfiles)) {
1548		struct ep_data	*ep;
1549		struct inode	*parent;
1550		struct dentry	*dentry;
1551
1552		/* break link to FS */
1553		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1554		list_del_init (&ep->epfiles);
1555		spin_unlock_irq (&dev->lock);
1556
1557		dentry = ep->dentry;
1558		ep->dentry = NULL;
1559		parent = d_inode(dentry->d_parent);
1560
1561		/* break link to controller */
1562		mutex_lock(&ep->lock);
1563		if (ep->state == STATE_EP_ENABLED)
1564			(void) usb_ep_disable (ep->ep);
1565		ep->state = STATE_EP_UNBOUND;
1566		usb_ep_free_request (ep->ep, ep->req);
1567		ep->ep = NULL;
1568		mutex_unlock(&ep->lock);
1569
1570		wake_up (&ep->wait);
1571		put_ep (ep);
1572
1573		/* break link to dcache */
1574		inode_lock(parent);
1575		d_delete (dentry);
1576		dput (dentry);
1577		inode_unlock(parent);
1578
1579		spin_lock_irq (&dev->lock);
1580	}
1581	spin_unlock_irq (&dev->lock);
1582}
1583
1584
1585static struct dentry *
1586gadgetfs_create_file (struct super_block *sb, char const *name,
1587		void *data, const struct file_operations *fops);
1588
1589static int activate_ep_files (struct dev_data *dev)
1590{
1591	struct usb_ep	*ep;
1592	struct ep_data	*data;
1593
1594	gadget_for_each_ep (ep, dev->gadget) {
1595
1596		data = kzalloc(sizeof(*data), GFP_KERNEL);
1597		if (!data)
1598			goto enomem0;
1599		data->state = STATE_EP_DISABLED;
1600		mutex_init(&data->lock);
1601		init_waitqueue_head (&data->wait);
1602
1603		strncpy (data->name, ep->name, sizeof (data->name) - 1);
1604		refcount_set (&data->count, 1);
1605		data->dev = dev;
1606		get_dev (dev);
1607
1608		data->ep = ep;
1609		ep->driver_data = data;
1610
1611		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1612		if (!data->req)
1613			goto enomem1;
1614
1615		data->dentry = gadgetfs_create_file (dev->sb, data->name,
1616				data, &ep_io_operations);
1617		if (!data->dentry)
1618			goto enomem2;
1619		list_add_tail (&data->epfiles, &dev->epfiles);
1620	}
1621	return 0;
1622
1623enomem2:
1624	usb_ep_free_request (ep, data->req);
1625enomem1:
1626	put_dev (dev);
1627	kfree (data);
1628enomem0:
1629	DBG (dev, "%s enomem\n", __func__);
1630	destroy_ep_files (dev);
1631	return -ENOMEM;
1632}
1633
1634static void
1635gadgetfs_unbind (struct usb_gadget *gadget)
1636{
1637	struct dev_data		*dev = get_gadget_data (gadget);
1638
1639	DBG (dev, "%s\n", __func__);
1640
1641	spin_lock_irq (&dev->lock);
1642	dev->state = STATE_DEV_UNBOUND;
1643	while (dev->udc_usage > 0) {
1644		spin_unlock_irq(&dev->lock);
1645		usleep_range(1000, 2000);
1646		spin_lock_irq(&dev->lock);
1647	}
1648	spin_unlock_irq (&dev->lock);
1649
1650	destroy_ep_files (dev);
1651	gadget->ep0->driver_data = NULL;
1652	set_gadget_data (gadget, NULL);
1653
1654	/* we've already been disconnected ... no i/o is active */
1655	if (dev->req)
1656		usb_ep_free_request (gadget->ep0, dev->req);
1657	DBG (dev, "%s done\n", __func__);
1658	put_dev (dev);
1659}
1660
1661static struct dev_data		*the_device;
1662
1663static int gadgetfs_bind(struct usb_gadget *gadget,
1664		struct usb_gadget_driver *driver)
1665{
1666	struct dev_data		*dev = the_device;
1667
1668	if (!dev)
1669		return -ESRCH;
1670	if (0 != strcmp (CHIP, gadget->name)) {
1671		pr_err("%s expected %s controller not %s\n",
1672			shortname, CHIP, gadget->name);
1673		return -ENODEV;
1674	}
1675
1676	set_gadget_data (gadget, dev);
1677	dev->gadget = gadget;
1678	gadget->ep0->driver_data = dev;
1679
1680	/* preallocate control response and buffer */
1681	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1682	if (!dev->req)
1683		goto enomem;
1684	dev->req->context = NULL;
1685	dev->req->complete = epio_complete;
1686
1687	if (activate_ep_files (dev) < 0)
1688		goto enomem;
1689
1690	INFO (dev, "bound to %s driver\n", gadget->name);
1691	spin_lock_irq(&dev->lock);
1692	dev->state = STATE_DEV_UNCONNECTED;
1693	spin_unlock_irq(&dev->lock);
1694	get_dev (dev);
1695	return 0;
1696
1697enomem:
1698	gadgetfs_unbind (gadget);
1699	return -ENOMEM;
1700}
1701
1702static void
1703gadgetfs_disconnect (struct usb_gadget *gadget)
1704{
1705	struct dev_data		*dev = get_gadget_data (gadget);
1706	unsigned long		flags;
1707
1708	spin_lock_irqsave (&dev->lock, flags);
1709	if (dev->state == STATE_DEV_UNCONNECTED)
1710		goto exit;
1711	dev->state = STATE_DEV_UNCONNECTED;
1712
1713	INFO (dev, "disconnected\n");
1714	next_event (dev, GADGETFS_DISCONNECT);
1715	ep0_readable (dev);
1716exit:
1717	spin_unlock_irqrestore (&dev->lock, flags);
1718}
1719
1720static void
1721gadgetfs_suspend (struct usb_gadget *gadget)
1722{
1723	struct dev_data		*dev = get_gadget_data (gadget);
1724	unsigned long		flags;
1725
1726	INFO (dev, "suspended from state %d\n", dev->state);
1727	spin_lock_irqsave(&dev->lock, flags);
1728	switch (dev->state) {
1729	case STATE_DEV_SETUP:		// VERY odd... host died??
1730	case STATE_DEV_CONNECTED:
1731	case STATE_DEV_UNCONNECTED:
1732		next_event (dev, GADGETFS_SUSPEND);
1733		ep0_readable (dev);
1734		fallthrough;
1735	default:
1736		break;
1737	}
1738	spin_unlock_irqrestore(&dev->lock, flags);
1739}
1740
1741static struct usb_gadget_driver gadgetfs_driver = {
1742	.function	= (char *) driver_desc,
1743	.bind		= gadgetfs_bind,
1744	.unbind		= gadgetfs_unbind,
1745	.setup		= gadgetfs_setup,
1746	.reset		= gadgetfs_disconnect,
1747	.disconnect	= gadgetfs_disconnect,
1748	.suspend	= gadgetfs_suspend,
1749
1750	.driver	= {
1751		.name		= shortname,
1752	},
1753};
1754
1755/*----------------------------------------------------------------------*/
1756/* DEVICE INITIALIZATION
1757 *
1758 *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
1759 *     status = write (fd, descriptors, sizeof descriptors)
1760 *
1761 * That write establishes the device configuration, so the kernel can
1762 * bind to the controller ... guaranteeing it can handle enumeration
1763 * at all necessary speeds.  Descriptor order is:
1764 *
1765 * . message tag (u32, host order) ... for now, must be zero; it
1766 *	would change to support features like multi-config devices
1767 * . full/low speed config ... all wTotalLength bytes (with interface,
1768 *	class, altsetting, endpoint, and other descriptors)
1769 * . high speed config ... all descriptors, for high speed operation;
1770 *	this one's optional except for high-speed hardware
1771 * . device descriptor
1772 *
1773 * Endpoints are not yet enabled. Drivers must wait until device
1774 * configuration and interface altsetting changes create
1775 * the need to configure (or unconfigure) them.
1776 *
1777 * After initialization, the device stays active for as long as that
1778 * $CHIP file is open.  Events must then be read from that descriptor,
1779 * such as configuration notifications.
1780 */
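/*
 * For illustration, a hedged sketch of how user space typically builds
 * that first write() buffer (fs_config, hs_config, device_desc and the
 * *_total lengths are placeholders for the driver's own descriptors):
 *
 *	__u32 tag = 0;
 *	char buf [4096], *p = buf;
 *
 *	memcpy (p, &tag, 4);                            p += 4;
 *	memcpy (p, fs_config, fs_total);                p += fs_total;
 *	memcpy (p, hs_config, hs_total);                p += hs_total;  // optional
 *	memcpy (p, &device_desc, USB_DT_DEVICE_SIZE);   p += USB_DT_DEVICE_SIZE;
 *
 *	write (ep0_fd, buf, p - buf);
 */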
1781
1782static int is_valid_config(struct usb_config_descriptor *config,
1783		unsigned int total)
1784{
1785	return config->bDescriptorType == USB_DT_CONFIG
1786		&& config->bLength == USB_DT_CONFIG_SIZE
1787		&& total >= USB_DT_CONFIG_SIZE
1788		&& config->bConfigurationValue != 0
1789		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1790		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1791	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1792	/* FIXME check lengths: walk to end */
1793}
1794
1795static ssize_t
1796dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1797{
1798	struct dev_data		*dev = fd->private_data;
1799	ssize_t			value, length = len;
1800	unsigned		total;
1801	u32			tag;
1802	char			*kbuf;
1803
1804	spin_lock_irq(&dev->lock);
1805	if (dev->state > STATE_DEV_OPENED) {
1806		value = ep0_write(fd, buf, len, ptr);
1807		spin_unlock_irq(&dev->lock);
1808		return value;
1809	}
1810	spin_unlock_irq(&dev->lock);
1811
1812	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1813	    (len > PAGE_SIZE * 4))
1814		return -EINVAL;
1815
1816	/* we might need to change message format someday */
1817	if (copy_from_user (&tag, buf, 4))
1818		return -EFAULT;
1819	if (tag != 0)
1820		return -EINVAL;
1821	buf += 4;
1822	length -= 4;
1823
1824	kbuf = memdup_user(buf, length);
1825	if (IS_ERR(kbuf))
1826		return PTR_ERR(kbuf);
1827
1828	spin_lock_irq (&dev->lock);
1829	value = -EINVAL;
1830	if (dev->buf) {
1831		spin_unlock_irq(&dev->lock);
1832		kfree(kbuf);
1833		return value;
1834	}
1835	dev->buf = kbuf;
1836
1837	/* full or low speed config */
1838	dev->config = (void *) kbuf;
1839	total = le16_to_cpu(dev->config->wTotalLength);
1840	if (!is_valid_config(dev->config, total) ||
1841			total > length - USB_DT_DEVICE_SIZE)
1842		goto fail;
1843	kbuf += total;
1844	length -= total;
1845
1846	/* optional high speed config */
1847	if (kbuf [1] == USB_DT_CONFIG) {
1848		dev->hs_config = (void *) kbuf;
1849		total = le16_to_cpu(dev->hs_config->wTotalLength);
1850		if (!is_valid_config(dev->hs_config, total) ||
1851				total > length - USB_DT_DEVICE_SIZE)
1852			goto fail;
1853		kbuf += total;
1854		length -= total;
1855	} else {
1856		dev->hs_config = NULL;
1857	}
1858
1859	/* could support multiple configs, using another encoding! */
1860
1861	/* device descriptor (tweaked for paranoia) */
1862	if (length != USB_DT_DEVICE_SIZE)
1863		goto fail;
1864	dev->dev = (void *)kbuf;
1865	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1866			|| dev->dev->bDescriptorType != USB_DT_DEVICE
1867			|| dev->dev->bNumConfigurations != 1)
1868		goto fail;
1869	dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1870
1871	/* triggers gadgetfs_bind(); then we can enumerate. */
1872	spin_unlock_irq (&dev->lock);
1873	if (dev->hs_config)
1874		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1875	else
1876		gadgetfs_driver.max_speed = USB_SPEED_FULL;
1877
1878	value = usb_gadget_register_driver(&gadgetfs_driver);
1879	if (value != 0) {
1880		spin_lock_irq(&dev->lock);
1881		goto fail;
1882	} else {
1883		/* at this point "good" hardware has for the first time
 1884		 * let the USB host see us.  alternatively, if users
1885		 * unplug/replug that will clear all the error state.
1886		 *
1887		 * note:  everything running before here was guaranteed
1888		 * to choke driver model style diagnostics.  from here
1889		 * on, they can work ... except in cleanup paths that
1890		 * kick in after the ep0 descriptor is closed.
1891		 */
1892		value = len;
1893		dev->gadget_registered = true;
1894	}
1895	return value;
1896
1897fail:
1898	dev->config = NULL;
1899	dev->hs_config = NULL;
1900	dev->dev = NULL;
1901	spin_unlock_irq (&dev->lock);
1902	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1903	kfree (dev->buf);
1904	dev->buf = NULL;
1905	return value;
1906}
1907
1908static int
1909gadget_dev_open (struct inode *inode, struct file *fd)
1910{
1911	struct dev_data		*dev = inode->i_private;
1912	int			value = -EBUSY;
1913
1914	spin_lock_irq(&dev->lock);
1915	if (dev->state == STATE_DEV_DISABLED) {
1916		dev->ev_next = 0;
1917		dev->state = STATE_DEV_OPENED;
1918		fd->private_data = dev;
1919		get_dev (dev);
1920		value = 0;
1921	}
1922	spin_unlock_irq(&dev->lock);
1923	return value;
1924}
1925
1926static const struct file_operations ep0_operations = {
1927	.llseek =	no_llseek,
1928
1929	.open =		gadget_dev_open,
1930	.read =		ep0_read,
1931	.write =	dev_config,
1932	.fasync =	ep0_fasync,
1933	.poll =		ep0_poll,
1934	.unlocked_ioctl = gadget_dev_ioctl,
1935	.release =	dev_release,
1936};
1937
1938/*----------------------------------------------------------------------*/
1939
1940/* FILESYSTEM AND SUPERBLOCK OPERATIONS
1941 *
1942 * Mounting the filesystem creates a controller file, used first for
1943 * device configuration then later for event monitoring.
1944 */
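
/* Typical setup, for illustration: the filesystem is usually mounted with
 * "mount -t gadgetfs gadgetfs /dev/gadget", or from C roughly:
 *
 *	mount("gadgetfs", "/dev/gadget", "gadgetfs", 0, NULL);
 *
 * after which the directory holds a single $CHIP file named for the UDC.
 */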
1945
1946
1947/* FIXME PAM etc could set this security policy without mount options
1948 * if epfiles inherited ownership and permissions from ep0 ...
1949 */
1950
1951static unsigned default_uid;
1952static unsigned default_gid;
1953static unsigned default_perm = S_IRUSR | S_IWUSR;
1954
1955module_param (default_uid, uint, 0644);
1956module_param (default_gid, uint, 0644);
1957module_param (default_perm, uint, 0644);
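
/* e.g. "modprobe gadgetfs default_uid=1000 default_perm=0660" would let an
 * unprivileged user own the ep0 and endpoint files; the values here are
 * purely illustrative.
 */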
1958
1959
1960static struct inode *
1961gadgetfs_make_inode (struct super_block *sb,
1962		void *data, const struct file_operations *fops,
1963		int mode)
1964{
1965	struct inode *inode = new_inode (sb);
1966
1967	if (inode) {
1968		inode->i_ino = get_next_ino();
1969		inode->i_mode = mode;
1970		inode->i_uid = make_kuid(&init_user_ns, default_uid);
1971		inode->i_gid = make_kgid(&init_user_ns, default_gid);
1972		inode->i_atime = inode->i_mtime = inode->i_ctime
1973				= current_time(inode);
1974		inode->i_private = data;
1975		inode->i_fop = fops;
1976	}
1977	return inode;
1978}
1979
1980/* created in the fs root directory, so non-renamable and non-linkable;
1981 * the inode and dentry stay paired until device reconfig.
1982 */
1983static struct dentry *
1984gadgetfs_create_file (struct super_block *sb, char const *name,
1985		void *data, const struct file_operations *fops)
1986{
1987	struct dentry	*dentry;
1988	struct inode	*inode;
1989
1990	dentry = d_alloc_name(sb->s_root, name);
1991	if (!dentry)
1992		return NULL;
1993
1994	inode = gadgetfs_make_inode (sb, data, fops,
1995			S_IFREG | (default_perm & S_IRWXUGO));
1996	if (!inode) {
1997		dput(dentry);
1998		return NULL;
1999	}
2000	d_add (dentry, inode);
2001	return dentry;
2002}
2003
2004static const struct super_operations gadget_fs_operations = {
2005	.statfs =	simple_statfs,
2006	.drop_inode =	generic_delete_inode,
2007};
2008
2009static int
2010gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
2011{
2012	struct inode	*inode;
2013	struct dev_data	*dev;
2014	int		rc;
2015
2016	mutex_lock(&sb_mutex);
2017
2018	if (the_device) {
2019		rc = -ESRCH;
2020		goto Done;
2021	}
2022
2023	CHIP = usb_get_gadget_udc_name();
2024	if (!CHIP) {
2025		rc = -ENODEV;
2026		goto Done;
2027	}
2028
2029	/* superblock */
2030	sb->s_blocksize = PAGE_SIZE;
2031	sb->s_blocksize_bits = PAGE_SHIFT;
2032	sb->s_magic = GADGETFS_MAGIC;
2033	sb->s_op = &gadget_fs_operations;
2034	sb->s_time_gran = 1;
2035
2036	/* root inode */
2037	inode = gadgetfs_make_inode (sb,
2038			NULL, &simple_dir_operations,
2039			S_IFDIR | S_IRUGO | S_IXUGO);
2040	if (!inode)
2041		goto Enomem;
2042	inode->i_op = &simple_dir_inode_operations;
2043	if (!(sb->s_root = d_make_root (inode)))
2044		goto Enomem;
2045
2046	/* the ep0 file is named after the controller we expect;
2047	 * user mode code can use it for sanity checks, like we do.
2048	 */
2049	dev = dev_new ();
2050	if (!dev)
2051		goto Enomem;
2052
2053	dev->sb = sb;
2054	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2055	if (!dev->dentry) {
2056		put_dev(dev);
2057		goto Enomem;
2058	}
2059
2060	/* other endpoint files are available after hardware setup,
2061	 * from binding to a controller.
2062	 */
2063	the_device = dev;
2064	rc = 0;
2065	goto Done;
2066
2067 Enomem:
2068	kfree(CHIP);
2069	CHIP = NULL;
2070	rc = -ENOMEM;
2071
2072 Done:
2073	mutex_unlock(&sb_mutex);
2074	return rc;
2075}
2076
2077/* "mount -t gadgetfs path /dev/gadget" ends up here */
2078static int gadgetfs_get_tree(struct fs_context *fc)
2079{
2080	return get_tree_single(fc, gadgetfs_fill_super);
2081}
2082
2083static const struct fs_context_operations gadgetfs_context_ops = {
2084	.get_tree	= gadgetfs_get_tree,
2085};
2086
2087static int gadgetfs_init_fs_context(struct fs_context *fc)
2088{
2089	fc->ops = &gadgetfs_context_ops;
2090	return 0;
2091}
2092
2093static void
2094gadgetfs_kill_sb (struct super_block *sb)
2095{
2096	mutex_lock(&sb_mutex);
2097	kill_litter_super (sb);
2098	if (the_device) {
2099		put_dev (the_device);
2100		the_device = NULL;
2101	}
2102	kfree(CHIP);
2103	CHIP = NULL;
2104	mutex_unlock(&sb_mutex);
2105}
2106
2107/*----------------------------------------------------------------------*/
2108
2109static struct file_system_type gadgetfs_type = {
2110	.owner		= THIS_MODULE,
2111	.name		= shortname,
2112	.init_fs_context = gadgetfs_init_fs_context,
2113	.kill_sb	= gadgetfs_kill_sb,
2114};
2115MODULE_ALIAS_FS("gadgetfs");
2116
2117/*----------------------------------------------------------------------*/
2118
2119static int __init gadgetfs_init (void)
2120{
2121	int status;
2122
2123	status = register_filesystem (&gadgetfs_type);
2124	if (status == 0)
2125		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2126			shortname, driver_desc);
2127	return status;
2128}
2129module_init (gadgetfs_init);
2130
2131static void __exit gadgetfs_cleanup (void)
2132{
2133	pr_debug ("unregister %s\n", shortname);
2134	unregister_filesystem (&gadgetfs_type);
2135}
2136module_exit (gadgetfs_cleanup);
2137
v5.9
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * inode.c -- user mode filesystem api for usb gadget controllers
   4 *
   5 * Copyright (C) 2003-2004 David Brownell
   6 * Copyright (C) 2003 Agilent Technologies
   7 */
   8
   9
  10/* #define VERBOSE_DEBUG */
  11
  12#include <linux/init.h>
  13#include <linux/module.h>
  14#include <linux/fs.h>
  15#include <linux/fs_context.h>
  16#include <linux/pagemap.h>
  17#include <linux/uts.h>
  18#include <linux/wait.h>
  19#include <linux/compiler.h>
  20#include <linux/uaccess.h>
  21#include <linux/sched.h>
  22#include <linux/slab.h>
  23#include <linux/poll.h>
  24#include <linux/kthread.h>
  25#include <linux/aio.h>
  26#include <linux/uio.h>
  27#include <linux/refcount.h>
  28#include <linux/delay.h>
  29#include <linux/device.h>
  30#include <linux/moduleparam.h>
  31
  32#include <linux/usb/gadgetfs.h>
  33#include <linux/usb/gadget.h>
  34
  35
  36/*
  37 * The gadgetfs API maps each endpoint to a file descriptor so that you
  38 * can use standard synchronous read/write calls for I/O.  There's some
  39 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
  40 * drivers show how this works in practice.  You can also use AIO to
  41 * eliminate I/O gaps between requests, to help when streaming data.
  42 *
  43 * Key parts that must be USB-specific are protocols defining how the
  44 * read/write operations relate to the hardware state machines.  There
  45 * are two types of files.  One type is for the device, implementing ep0.
  46 * The other type is for each IN or OUT endpoint.  In both cases, the
  47 * user mode driver must configure the hardware before using it.
  48 *
  49 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
  50 *   (by writing configuration and device descriptors).  Afterwards it
  51 *   may serve as a source of device events, used to handle all control
  52 *   requests other than basic enumeration.
  53 *
  54 * - Then, after a SET_CONFIGURATION control request, ep_config() is
  55 *   called when each /dev/gadget/ep* file is configured (by writing
  56 *   endpoint descriptors).  Afterwards these files are used to write()
  57 *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
  58 *   direction" request is issued (like reading an IN endpoint).
  59 *
  60 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
  61 * not possible on all hardware.  For example, precise fault handling with
  62 * respect to data left in endpoint fifos after aborted operations; or
  63 * selective clearing of endpoint halts, to implement SET_INTERFACE.
  64 */
  65
  66#define	DRIVER_DESC	"USB Gadget filesystem"
  67#define	DRIVER_VERSION	"24 Aug 2004"
  68
  69static const char driver_desc [] = DRIVER_DESC;
  70static const char shortname [] = "gadgetfs";
  71
  72MODULE_DESCRIPTION (DRIVER_DESC);
  73MODULE_AUTHOR ("David Brownell");
  74MODULE_LICENSE ("GPL");
  75
  76static int ep_open(struct inode *, struct file *);
  77
  78
  79/*----------------------------------------------------------------------*/
  80
  81#define GADGETFS_MAGIC		0xaee71ee7
  82
  83/* /dev/gadget/$CHIP represents ep0 and the whole device */
  84enum ep0_state {
  85	/* DISABLED is the initial state. */
  86	STATE_DEV_DISABLED = 0,
  87
  88	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
  89	 * ep0/device i/o modes and binding to the controller.  Driver
  90	 * must always write descriptors to initialize the device, then
  91	 * the device becomes UNCONNECTED until enumeration.
  92	 */
  93	STATE_DEV_OPENED,
  94
  95	/* From then on, ep0 fd is in either of two basic modes:
  96	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
  97	 * - SETUP: read/write will transfer control data and succeed;
  98	 *   or if "wrong direction", performs protocol stall
  99	 */
 100	STATE_DEV_UNCONNECTED,
 101	STATE_DEV_CONNECTED,
 102	STATE_DEV_SETUP,
 103
 104	/* UNBOUND means the driver closed ep0, so the device won't be
 105	 * accessible again (DEV_DISABLED) until all fds are closed.
 106	 */
 107	STATE_DEV_UNBOUND,
 108};
 109
 110/* enough for the whole queue: most events invalidate others */
 111#define	N_EVENT			5
 112
 
 
 113struct dev_data {
 114	spinlock_t			lock;
 115	refcount_t			count;
 116	int				udc_usage;
 117	enum ep0_state			state;		/* P: lock */
 118	struct usb_gadgetfs_event	event [N_EVENT];
 119	unsigned			ev_next;
 120	struct fasync_struct		*fasync;
 121	u8				current_config;
 122
 123	/* drivers reading ep0 MUST handle control requests (SETUP)
 124	 * reported that way; else the host will time out.
 125	 */
 126	unsigned			usermode_setup : 1,
 127					setup_in : 1,
 128					setup_can_stall : 1,
 129					setup_out_ready : 1,
 130					setup_out_error : 1,
 131					setup_abort : 1,
 132					gadget_registered : 1;
 133	unsigned			setup_wLength;
 134
 135	/* the rest is basically write-once */
 136	struct usb_config_descriptor	*config, *hs_config;
 137	struct usb_device_descriptor	*dev;
 138	struct usb_request		*req;
 139	struct usb_gadget		*gadget;
 140	struct list_head		epfiles;
 141	void				*buf;
 142	wait_queue_head_t		wait;
 143	struct super_block		*sb;
 144	struct dentry			*dentry;
 145
 146	/* except this scratch i/o buffer for ep0 */
 147	u8				rbuf [256];
 148};
 149
 150static inline void get_dev (struct dev_data *data)
 151{
 152	refcount_inc (&data->count);
 153}
 154
 155static void put_dev (struct dev_data *data)
 156{
 157	if (likely (!refcount_dec_and_test (&data->count)))
 158		return;
 159	/* needs no more cleanup */
 160	BUG_ON (waitqueue_active (&data->wait));
 161	kfree (data);
 162}
 163
 164static struct dev_data *dev_new (void)
 165{
 166	struct dev_data		*dev;
 167
 168	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 169	if (!dev)
 170		return NULL;
 171	dev->state = STATE_DEV_DISABLED;
 172	refcount_set (&dev->count, 1);
 173	spin_lock_init (&dev->lock);
 174	INIT_LIST_HEAD (&dev->epfiles);
 175	init_waitqueue_head (&dev->wait);
 176	return dev;
 177}
 178
 179/*----------------------------------------------------------------------*/
 180
 181/* other /dev/gadget/$ENDPOINT files represent endpoints */
 182enum ep_state {
 183	STATE_EP_DISABLED = 0,
 184	STATE_EP_READY,
 185	STATE_EP_ENABLED,
 186	STATE_EP_UNBOUND,
 187};
 188
 189struct ep_data {
 190	struct mutex			lock;
 191	enum ep_state			state;
 192	refcount_t			count;
 193	struct dev_data			*dev;
 194	/* must hold dev->lock before accessing ep or req */
 195	struct usb_ep			*ep;
 196	struct usb_request		*req;
 197	ssize_t				status;
 198	char				name [16];
 199	struct usb_endpoint_descriptor	desc, hs_desc;
 200	struct list_head		epfiles;
 201	wait_queue_head_t		wait;
 202	struct dentry			*dentry;
 203};
 204
 205static inline void get_ep (struct ep_data *data)
 206{
 207	refcount_inc (&data->count);
 208}
 209
 210static void put_ep (struct ep_data *data)
 211{
 212	if (likely (!refcount_dec_and_test (&data->count)))
 213		return;
 214	put_dev (data->dev);
 215	/* needs no more cleanup */
 216	BUG_ON (!list_empty (&data->epfiles));
 217	BUG_ON (waitqueue_active (&data->wait));
 218	kfree (data);
 219}
 220
 221/*----------------------------------------------------------------------*/
 222
 223/* most "how to use the hardware" policy choices are in userspace:
 224 * mapping endpoint roles (which the driver needs) to the capabilities
 225 * which the usb controller has.  most of those capabilities are exposed
 226 * implicitly, starting with the driver name and then endpoint names.
 227 */
 228
 229static const char *CHIP;
 
 230
 231/*----------------------------------------------------------------------*/
 232
 233/* NOTE:  don't use dev_printk calls before binding to the gadget
 234 * at the end of ep0 configuration, or after unbind.
 235 */
 236
 237/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
 238#define xprintk(d,level,fmt,args...) \
 239	printk(level "%s: " fmt , shortname , ## args)
 240
 241#ifdef DEBUG
 242#define DBG(dev,fmt,args...) \
 243	xprintk(dev , KERN_DEBUG , fmt , ## args)
 244#else
 245#define DBG(dev,fmt,args...) \
 246	do { } while (0)
 247#endif /* DEBUG */
 248
 249#ifdef VERBOSE_DEBUG
 250#define VDEBUG	DBG
 251#else
 252#define VDEBUG(dev,fmt,args...) \
 253	do { } while (0)
 254#endif /* DEBUG */
 255
 256#define ERROR(dev,fmt,args...) \
 257	xprintk(dev , KERN_ERR , fmt , ## args)
 258#define INFO(dev,fmt,args...) \
 259	xprintk(dev , KERN_INFO , fmt , ## args)
 260
 261
 262/*----------------------------------------------------------------------*/
 263
 264/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
 265 *
 266 * After opening, configure non-control endpoints.  Then use normal
 267 * stream read() and write() requests; and maybe ioctl() to get more
 268 * precise FIFO status when recovering from cancellation.
 269 */
 270
 271static void epio_complete (struct usb_ep *ep, struct usb_request *req)
 272{
 273	struct ep_data	*epdata = ep->driver_data;
 274
 275	if (!req->context)
 276		return;
 277	if (req->status)
 278		epdata->status = req->status;
 279	else
 280		epdata->status = req->actual;
 281	complete ((struct completion *)req->context);
 282}
 283
 284/* tasklock endpoint, returning when it's connected.
 285 * still need dev->lock to use epdata->ep.
 286 */
 287static int
 288get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
 289{
 290	int	val;
 291
 292	if (f_flags & O_NONBLOCK) {
 293		if (!mutex_trylock(&epdata->lock))
 294			goto nonblock;
 295		if (epdata->state != STATE_EP_ENABLED &&
 296		    (!is_write || epdata->state != STATE_EP_READY)) {
 297			mutex_unlock(&epdata->lock);
 298nonblock:
 299			val = -EAGAIN;
 300		} else
 301			val = 0;
 302		return val;
 303	}
 304
 305	val = mutex_lock_interruptible(&epdata->lock);
 306	if (val < 0)
 307		return val;
 308
 309	switch (epdata->state) {
 310	case STATE_EP_ENABLED:
 311		return 0;
 312	case STATE_EP_READY:			/* not configured yet */
 313		if (is_write)
 314			return 0;
 315		fallthrough;
 316	case STATE_EP_UNBOUND:			/* clean disconnect */
 317		break;
 318	// case STATE_EP_DISABLED:		/* "can't happen" */
 319	default:				/* error! */
 320		pr_debug ("%s: ep %p not available, state %d\n",
 321				shortname, epdata, epdata->state);
 322	}
 323	mutex_unlock(&epdata->lock);
 324	return -ENODEV;
 325}
 326
 327static ssize_t
 328ep_io (struct ep_data *epdata, void *buf, unsigned len)
 329{
 330	DECLARE_COMPLETION_ONSTACK (done);
 331	int value;
 332
 333	spin_lock_irq (&epdata->dev->lock);
 334	if (likely (epdata->ep != NULL)) {
 335		struct usb_request	*req = epdata->req;
 336
 337		req->context = &done;
 338		req->complete = epio_complete;
 339		req->buf = buf;
 340		req->length = len;
 341		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
 342	} else
 343		value = -ENODEV;
 344	spin_unlock_irq (&epdata->dev->lock);
 345
 346	if (likely (value == 0)) {
 347		value = wait_for_completion_interruptible(&done);
 348		if (value != 0) {
 349			spin_lock_irq (&epdata->dev->lock);
 350			if (likely (epdata->ep != NULL)) {
 351				DBG (epdata->dev, "%s i/o interrupted\n",
 352						epdata->name);
 353				usb_ep_dequeue (epdata->ep, epdata->req);
 354				spin_unlock_irq (&epdata->dev->lock);
 355
 356				wait_for_completion(&done);
 357				if (epdata->status == -ECONNRESET)
 358					epdata->status = -EINTR;
 359			} else {
 360				spin_unlock_irq (&epdata->dev->lock);
 361
 362				DBG (epdata->dev, "endpoint gone\n");
 
 363				epdata->status = -ENODEV;
 364			}
 365		}
 366		return epdata->status;
 367	}
 368	return value;
 369}
 370
 371static int
 372ep_release (struct inode *inode, struct file *fd)
 373{
 374	struct ep_data		*data = fd->private_data;
 375	int value;
 376
 377	value = mutex_lock_interruptible(&data->lock);
 378	if (value < 0)
 379		return value;
 380
 381	/* clean up if this can be reopened */
 382	if (data->state != STATE_EP_UNBOUND) {
 383		data->state = STATE_EP_DISABLED;
 384		data->desc.bDescriptorType = 0;
 385		data->hs_desc.bDescriptorType = 0;
 386		usb_ep_disable(data->ep);
 387	}
 388	mutex_unlock(&data->lock);
 389	put_ep (data);
 390	return 0;
 391}
 392
 393static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
 394{
 395	struct ep_data		*data = fd->private_data;
 396	int			status;
 397
 398	if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
 399		return status;
 400
 401	spin_lock_irq (&data->dev->lock);
 402	if (likely (data->ep != NULL)) {
 403		switch (code) {
 404		case GADGETFS_FIFO_STATUS:
 405			status = usb_ep_fifo_status (data->ep);
 406			break;
 407		case GADGETFS_FIFO_FLUSH:
 408			usb_ep_fifo_flush (data->ep);
 409			break;
 410		case GADGETFS_CLEAR_HALT:
 411			status = usb_ep_clear_halt (data->ep);
 412			break;
 413		default:
 414			status = -ENOTTY;
 415		}
 416	} else
 417		status = -ENODEV;
 418	spin_unlock_irq (&data->dev->lock);
 419	mutex_unlock(&data->lock);
 420	return status;
 421}
 422
 423/*----------------------------------------------------------------------*/
 424
 425/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
 426
 427struct kiocb_priv {
 428	struct usb_request	*req;
 429	struct ep_data		*epdata;
 430	struct kiocb		*iocb;
 431	struct mm_struct	*mm;
 432	struct work_struct	work;
 433	void			*buf;
 434	struct iov_iter		to;
 435	const void		*to_free;
 436	unsigned		actual;
 437};
 438
 439static int ep_aio_cancel(struct kiocb *iocb)
 440{
 441	struct kiocb_priv	*priv = iocb->private;
 442	struct ep_data		*epdata;
 443	int			value;
 444
 445	local_irq_disable();
 446	epdata = priv->epdata;
 447	// spin_lock(&epdata->dev->lock);
 448	if (likely(epdata && epdata->ep && priv->req))
 449		value = usb_ep_dequeue (epdata->ep, priv->req);
 450	else
 451		value = -EINVAL;
 452	// spin_unlock(&epdata->dev->lock);
 453	local_irq_enable();
 454
 455	return value;
 456}
 457
 458static void ep_user_copy_worker(struct work_struct *work)
 459{
 460	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
 461	struct mm_struct *mm = priv->mm;
 462	struct kiocb *iocb = priv->iocb;
 463	size_t ret;
 464
 465	kthread_use_mm(mm);
 466	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
 467	kthread_unuse_mm(mm);
 468	if (!ret)
 469		ret = -EFAULT;
 470
 471	/* completing the iocb can drop the ctx and mm, don't touch mm after */
 472	iocb->ki_complete(iocb, ret, ret);
 473
 474	kfree(priv->buf);
 475	kfree(priv->to_free);
 476	kfree(priv);
 477}
 478
 479static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
 480{
 481	struct kiocb		*iocb = req->context;
 482	struct kiocb_priv	*priv = iocb->private;
 483	struct ep_data		*epdata = priv->epdata;
 484
 485	/* lock against disconnect (and ideally, cancel) */
 486	spin_lock(&epdata->dev->lock);
 487	priv->req = NULL;
 488	priv->epdata = NULL;
 489
 490	/* if this was a write or a read returning no data then we
 491	 * don't need to copy anything to userspace, so we can
 492	 * complete the aio request immediately.
 493	 */
 494	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
 495		kfree(req->buf);
 496		kfree(priv->to_free);
 497		kfree(priv);
 498		iocb->private = NULL;
 499		/* aio_complete() reports bytes-transferred _and_ faults */
 500
 501		iocb->ki_complete(iocb, req->actual ? req->actual : req->status,
 502				req->status);
 503	} else {
 504		/* ep_copy_to_user() won't report both; we hide some faults */
 505		if (unlikely(0 != req->status))
 506			DBG(epdata->dev, "%s fault %d len %d\n",
 507				ep->name, req->status, req->actual);
 508
 509		priv->buf = req->buf;
 510		priv->actual = req->actual;
 511		INIT_WORK(&priv->work, ep_user_copy_worker);
 512		schedule_work(&priv->work);
 513	}
 514
 515	usb_ep_free_request(ep, req);
 516	spin_unlock(&epdata->dev->lock);
 517	put_ep(epdata);
 518}
 519
 520static ssize_t ep_aio(struct kiocb *iocb,
 521		      struct kiocb_priv *priv,
 522		      struct ep_data *epdata,
 523		      char *buf,
 524		      size_t len)
 525{
 526	struct usb_request *req;
 527	ssize_t value;
 528
 529	iocb->private = priv;
 530	priv->iocb = iocb;
 531
 532	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
 533	get_ep(epdata);
 534	priv->epdata = epdata;
 535	priv->actual = 0;
 536	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
 537
 538	/* each kiocb is coupled to one usb_request, but we can't
 539	 * allocate or submit those if the host disconnected.
 540	 */
 541	spin_lock_irq(&epdata->dev->lock);
 542	value = -ENODEV;
 543	if (unlikely(epdata->ep == NULL))
 544		goto fail;
 545
 546	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
 547	value = -ENOMEM;
 548	if (unlikely(!req))
 549		goto fail;
 550
 551	priv->req = req;
 552	req->buf = buf;
 553	req->length = len;
 554	req->complete = ep_aio_complete;
 555	req->context = iocb;
 556	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
 557	if (unlikely(0 != value)) {
 558		usb_ep_free_request(epdata->ep, req);
 559		goto fail;
 560	}
 561	spin_unlock_irq(&epdata->dev->lock);
 562	return -EIOCBQUEUED;
 563
 564fail:
 565	spin_unlock_irq(&epdata->dev->lock);
 566	kfree(priv->to_free);
 567	kfree(priv);
 568	put_ep(epdata);
 569	return value;
 570}
 571
 572static ssize_t
 573ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
 574{
 575	struct file *file = iocb->ki_filp;
 576	struct ep_data *epdata = file->private_data;
 577	size_t len = iov_iter_count(to);
 578	ssize_t value;
 579	char *buf;
 580
 581	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
 582		return value;
 583
 584	/* halt any endpoint by doing a "wrong direction" i/o call */
 585	if (usb_endpoint_dir_in(&epdata->desc)) {
 586		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
 587		    !is_sync_kiocb(iocb)) {
 588			mutex_unlock(&epdata->lock);
 589			return -EINVAL;
 590		}
 591		DBG (epdata->dev, "%s halt\n", epdata->name);
 592		spin_lock_irq(&epdata->dev->lock);
 593		if (likely(epdata->ep != NULL))
 594			usb_ep_set_halt(epdata->ep);
 595		spin_unlock_irq(&epdata->dev->lock);
 596		mutex_unlock(&epdata->lock);
 597		return -EBADMSG;
 598	}
 599
 600	buf = kmalloc(len, GFP_KERNEL);
 601	if (unlikely(!buf)) {
 602		mutex_unlock(&epdata->lock);
 603		return -ENOMEM;
 604	}
 605	if (is_sync_kiocb(iocb)) {
 606		value = ep_io(epdata, buf, len);
 607		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
 608			value = -EFAULT;
 609	} else {
 610		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
 611		value = -ENOMEM;
 612		if (!priv)
 613			goto fail;
 614		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
 615		if (!priv->to_free) {
 616			kfree(priv);
 617			goto fail;
 618		}
 619		value = ep_aio(iocb, priv, epdata, buf, len);
 620		if (value == -EIOCBQUEUED)
 621			buf = NULL;
 622	}
 623fail:
 624	kfree(buf);
 625	mutex_unlock(&epdata->lock);
 626	return value;
 627}
 628
 629static ssize_t ep_config(struct ep_data *, const char *, size_t);
 630
 631static ssize_t
 632ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
 633{
 634	struct file *file = iocb->ki_filp;
 635	struct ep_data *epdata = file->private_data;
 636	size_t len = iov_iter_count(from);
 637	bool configured;
 638	ssize_t value;
 639	char *buf;
 640
 641	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
 642		return value;
 643
 644	configured = epdata->state == STATE_EP_ENABLED;
 645
 646	/* halt any endpoint by doing a "wrong direction" i/o call */
 647	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
 648		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
 649		    !is_sync_kiocb(iocb)) {
 650			mutex_unlock(&epdata->lock);
 651			return -EINVAL;
 652		}
 653		DBG (epdata->dev, "%s halt\n", epdata->name);
 654		spin_lock_irq(&epdata->dev->lock);
 655		if (likely(epdata->ep != NULL))
 656			usb_ep_set_halt(epdata->ep);
 657		spin_unlock_irq(&epdata->dev->lock);
 658		mutex_unlock(&epdata->lock);
 659		return -EBADMSG;
 660	}
 661
 662	buf = kmalloc(len, GFP_KERNEL);
 663	if (unlikely(!buf)) {
 664		mutex_unlock(&epdata->lock);
 665		return -ENOMEM;
 666	}
 667
 668	if (unlikely(!copy_from_iter_full(buf, len, from))) {
 669		value = -EFAULT;
 670		goto out;
 671	}
 672
 673	if (unlikely(!configured)) {
 674		value = ep_config(epdata, buf, len);
 675	} else if (is_sync_kiocb(iocb)) {
 676		value = ep_io(epdata, buf, len);
 677	} else {
 678		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
 679		value = -ENOMEM;
 680		if (priv) {
 681			value = ep_aio(iocb, priv, epdata, buf, len);
 682			if (value == -EIOCBQUEUED)
 683				buf = NULL;
 684		}
 685	}
 686out:
 687	kfree(buf);
 688	mutex_unlock(&epdata->lock);
 689	return value;
 690}
 691
 692/*----------------------------------------------------------------------*/
 693
 694/* used after endpoint configuration */
 695static const struct file_operations ep_io_operations = {
 696	.owner =	THIS_MODULE,
 697
 698	.open =		ep_open,
 699	.release =	ep_release,
 700	.llseek =	no_llseek,
 701	.unlocked_ioctl = ep_ioctl,
 702	.read_iter =	ep_read_iter,
 703	.write_iter =	ep_write_iter,
 704};
 705
 706/* ENDPOINT INITIALIZATION
 707 *
 708 *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
 709 *     status = write (fd, descriptors, sizeof descriptors)
 710 *
 711 * That write establishes the endpoint configuration, configuring
 712 * the controller to process bulk, interrupt, or isochronous transfers
 713 * at the right maxpacket size, and so on.
 714 *
 715 * The descriptors are message type 1, identified by a host order u32
 716 * at the beginning of what's written.  Descriptor order is: full/low
 717 * speed descriptor, then optional high speed descriptor.
 718 */
 719static ssize_t
 720ep_config (struct ep_data *data, const char *buf, size_t len)
 721{
 722	struct usb_ep		*ep;
 723	u32			tag;
 724	int			value, length = len;
 725
 726	if (data->state != STATE_EP_READY) {
 727		value = -EL2HLT;
 728		goto fail;
 729	}
 730
 731	value = len;
 732	if (len < USB_DT_ENDPOINT_SIZE + 4)
 733		goto fail0;
 734
 735	/* we might need to change message format someday */
 736	memcpy(&tag, buf, 4);
 737	if (tag != 1) {
 738		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
 739		goto fail0;
 740	}
 741	buf += 4;
 742	len -= 4;
 743
 744	/* NOTE:  audio endpoint extensions not accepted here;
 745	 * just don't include the extra bytes.
 746	 */
 747
 748	/* full/low speed descriptor, then high speed */
 749	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
 750	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
 751			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
 752		goto fail0;
 753	if (len != USB_DT_ENDPOINT_SIZE) {
 754		if (len != 2 * USB_DT_ENDPOINT_SIZE)
 755			goto fail0;
 756		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
 757			USB_DT_ENDPOINT_SIZE);
 758		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
 759				|| data->hs_desc.bDescriptorType
 760					!= USB_DT_ENDPOINT) {
 761			DBG(data->dev, "config %s, bad hs length or type\n",
 762					data->name);
 763			goto fail0;
 764		}
 765	}
 766
 767	spin_lock_irq (&data->dev->lock);
 768	if (data->dev->state == STATE_DEV_UNBOUND) {
 769		value = -ENOENT;
 770		goto gone;
 771	} else {
 772		ep = data->ep;
 773		if (ep == NULL) {
 774			value = -ENODEV;
 775			goto gone;
 776		}
 777	}
 778	switch (data->dev->gadget->speed) {
 779	case USB_SPEED_LOW:
 780	case USB_SPEED_FULL:
 781		ep->desc = &data->desc;
 782		break;
 783	case USB_SPEED_HIGH:
 784		/* fails if caller didn't provide that descriptor... */
 785		ep->desc = &data->hs_desc;
 786		break;
 787	default:
 788		DBG(data->dev, "unconnected, %s init abandoned\n",
 789				data->name);
 790		value = -EINVAL;
 791		goto gone;
 792	}
 793	value = usb_ep_enable(ep);
 794	if (value == 0) {
 795		data->state = STATE_EP_ENABLED;
 796		value = length;
 797	}
 798gone:
 799	spin_unlock_irq (&data->dev->lock);
 800	if (value < 0) {
 801fail:
 802		data->desc.bDescriptorType = 0;
 803		data->hs_desc.bDescriptorType = 0;
 804	}
 805	return value;
 806fail0:
 807	value = -EINVAL;
 808	goto fail;
 809}
 810
 811static int
 812ep_open (struct inode *inode, struct file *fd)
 813{
 814	struct ep_data		*data = inode->i_private;
 815	int			value = -EBUSY;
 816
 817	if (mutex_lock_interruptible(&data->lock) != 0)
 818		return -EINTR;
 819	spin_lock_irq (&data->dev->lock);
 820	if (data->dev->state == STATE_DEV_UNBOUND)
 821		value = -ENOENT;
 822	else if (data->state == STATE_EP_DISABLED) {
 823		value = 0;
 824		data->state = STATE_EP_READY;
 825		get_ep (data);
 826		fd->private_data = data;
 827		VDEBUG (data->dev, "%s ready\n", data->name);
 828	} else
 829		DBG (data->dev, "%s state %d\n",
 830			data->name, data->state);
 831	spin_unlock_irq (&data->dev->lock);
 832	mutex_unlock(&data->lock);
 833	return value;
 834}
 835
 836/*----------------------------------------------------------------------*/
 837
 838/* EP0 IMPLEMENTATION can be partly in userspace.
 839 *
 840 * Drivers that use this facility receive various events, including
 841 * control requests the kernel doesn't handle.  Drivers that don't
 842 * use this facility may be too simple-minded for real applications.
 843 */
 844
 845static inline void ep0_readable (struct dev_data *dev)
 846{
 847	wake_up (&dev->wait);
 848	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
 849}
 850
 851static void clean_req (struct usb_ep *ep, struct usb_request *req)
 852{
 853	struct dev_data		*dev = ep->driver_data;
 854
 855	if (req->buf != dev->rbuf) {
 856		kfree(req->buf);
 857		req->buf = dev->rbuf;
 858	}
 859	req->complete = epio_complete;
 860	dev->setup_out_ready = 0;
 861}
 862
 863static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
 864{
 865	struct dev_data		*dev = ep->driver_data;
 866	unsigned long		flags;
 867	int			free = 1;
 868
 869	/* for control OUT, data must still get to userspace */
 870	spin_lock_irqsave(&dev->lock, flags);
 871	if (!dev->setup_in) {
 872		dev->setup_out_error = (req->status != 0);
 873		if (!dev->setup_out_error)
 874			free = 0;
 875		dev->setup_out_ready = 1;
 876		ep0_readable (dev);
 877	}
 878
 879	/* clean up as appropriate */
 880	if (free && req->buf != &dev->rbuf)
 881		clean_req (ep, req);
 882	req->complete = epio_complete;
 883	spin_unlock_irqrestore(&dev->lock, flags);
 884}
 885
 886static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
 887{
 888	struct dev_data	*dev = ep->driver_data;
 889
 890	if (dev->setup_out_ready) {
 891		DBG (dev, "ep0 request busy!\n");
 892		return -EBUSY;
 893	}
 894	if (len > sizeof (dev->rbuf))
 895		req->buf = kmalloc(len, GFP_ATOMIC);
 896	if (req->buf == NULL) {
 897		req->buf = dev->rbuf;
 898		return -ENOMEM;
 899	}
 900	req->complete = ep0_complete;
 901	req->length = len;
 902	req->zero = 0;
 903	return 0;
 904}
 905
 906static ssize_t
 907ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
 908{
 909	struct dev_data			*dev = fd->private_data;
 910	ssize_t				retval;
 911	enum ep0_state			state;
 912
 913	spin_lock_irq (&dev->lock);
 914	if (dev->state <= STATE_DEV_OPENED) {
 915		retval = -EINVAL;
 916		goto done;
 917	}
 918
 919	/* report fd mode change before acting on it */
 920	if (dev->setup_abort) {
 921		dev->setup_abort = 0;
 922		retval = -EIDRM;
 923		goto done;
 924	}
 925
 926	/* control DATA stage */
 927	if ((state = dev->state) == STATE_DEV_SETUP) {
 928
 929		if (dev->setup_in) {		/* stall IN */
 930			VDEBUG(dev, "ep0in stall\n");
 931			(void) usb_ep_set_halt (dev->gadget->ep0);
 932			retval = -EL2HLT;
 933			dev->state = STATE_DEV_CONNECTED;
 934
 935		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
 936			struct usb_ep		*ep = dev->gadget->ep0;
 937			struct usb_request	*req = dev->req;
 938
 939			if ((retval = setup_req (ep, req, 0)) == 0) {
 940				++dev->udc_usage;
 941				spin_unlock_irq (&dev->lock);
 942				retval = usb_ep_queue (ep, req, GFP_KERNEL);
 943				spin_lock_irq (&dev->lock);
 944				--dev->udc_usage;
 945			}
 946			dev->state = STATE_DEV_CONNECTED;
 947
 948			/* assume that was SET_CONFIGURATION */
 949			if (dev->current_config) {
 950				unsigned power;
 951
 952				if (gadget_is_dualspeed(dev->gadget)
 953						&& (dev->gadget->speed
 954							== USB_SPEED_HIGH))
 955					power = dev->hs_config->bMaxPower;
 956				else
 957					power = dev->config->bMaxPower;
 958				usb_gadget_vbus_draw(dev->gadget, 2 * power);
 959			}
 960
 961		} else {			/* collect OUT data */
 962			if ((fd->f_flags & O_NONBLOCK) != 0
 963					&& !dev->setup_out_ready) {
 964				retval = -EAGAIN;
 965				goto done;
 966			}
 967			spin_unlock_irq (&dev->lock);
 968			retval = wait_event_interruptible (dev->wait,
 969					dev->setup_out_ready != 0);
 970
 971			/* FIXME state could change from under us */
 972			spin_lock_irq (&dev->lock);
 973			if (retval)
 974				goto done;
 975
 976			if (dev->state != STATE_DEV_SETUP) {
 977				retval = -ECANCELED;
 978				goto done;
 979			}
 980			dev->state = STATE_DEV_CONNECTED;
 981
 982			if (dev->setup_out_error)
 983				retval = -EIO;
 984			else {
 985				len = min (len, (size_t)dev->req->actual);
 986				++dev->udc_usage;
 987				spin_unlock_irq(&dev->lock);
 988				if (copy_to_user (buf, dev->req->buf, len))
 989					retval = -EFAULT;
 990				else
 991					retval = len;
 992				spin_lock_irq(&dev->lock);
 993				--dev->udc_usage;
 994				clean_req (dev->gadget->ep0, dev->req);
 995				/* NOTE userspace can't yet choose to stall */
 996			}
 997		}
 998		goto done;
 999	}
1000
1001	/* else normal: return event data */
1002	if (len < sizeof dev->event [0]) {
1003		retval = -EINVAL;
1004		goto done;
1005	}
1006	len -= len % sizeof (struct usb_gadgetfs_event);
1007	dev->usermode_setup = 1;
1008
1009scan:
1010	/* return queued events right away */
1011	if (dev->ev_next != 0) {
1012		unsigned		i, n;
1013
1014		n = len / sizeof (struct usb_gadgetfs_event);
1015		if (dev->ev_next < n)
1016			n = dev->ev_next;
1017
1018		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
1019		for (i = 0; i < n; i++) {
1020			if (dev->event [i].type == GADGETFS_SETUP) {
1021				dev->state = STATE_DEV_SETUP;
1022				n = i + 1;
1023				break;
1024			}
1025		}
1026		spin_unlock_irq (&dev->lock);
1027		len = n * sizeof (struct usb_gadgetfs_event);
1028		if (copy_to_user (buf, &dev->event, len))
1029			retval = -EFAULT;
1030		else
1031			retval = len;
1032		if (len > 0) {
1033			/* NOTE this doesn't guard against broken drivers;
1034			 * concurrent ep0 readers may lose events.
1035			 */
1036			spin_lock_irq (&dev->lock);
1037			if (dev->ev_next > n) {
1038				memmove(&dev->event[0], &dev->event[n],
1039					sizeof (struct usb_gadgetfs_event)
1040						* (dev->ev_next - n));
1041			}
1042			dev->ev_next -= n;
1043			spin_unlock_irq (&dev->lock);
1044		}
1045		return retval;
1046	}
1047	if (fd->f_flags & O_NONBLOCK) {
1048		retval = -EAGAIN;
1049		goto done;
1050	}
1051
1052	switch (state) {
1053	default:
1054		DBG (dev, "fail %s, state %d\n", __func__, state);
1055		retval = -ESRCH;
1056		break;
1057	case STATE_DEV_UNCONNECTED:
1058	case STATE_DEV_CONNECTED:
1059		spin_unlock_irq (&dev->lock);
1060		DBG (dev, "%s wait\n", __func__);
1061
1062		/* wait for events */
1063		retval = wait_event_interruptible (dev->wait,
1064				dev->ev_next != 0);
1065		if (retval < 0)
1066			return retval;
1067		spin_lock_irq (&dev->lock);
1068		goto scan;
1069	}
1070
1071done:
1072	spin_unlock_irq (&dev->lock);
1073	return retval;
1074}
1075
1076static struct usb_gadgetfs_event *
1077next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1078{
1079	struct usb_gadgetfs_event	*event;
1080	unsigned			i;
1081
1082	switch (type) {
1083	/* these events purge the queue */
1084	case GADGETFS_DISCONNECT:
1085		if (dev->state == STATE_DEV_SETUP)
1086			dev->setup_abort = 1;
1087		fallthrough;
1088	case GADGETFS_CONNECT:
1089		dev->ev_next = 0;
1090		break;
1091	case GADGETFS_SETUP:		/* previous request timed out */
1092	case GADGETFS_SUSPEND:		/* same effect */
1093		/* these events can't be repeated */
1094		for (i = 0; i != dev->ev_next; i++) {
1095			if (dev->event [i].type != type)
1096				continue;
1097			DBG(dev, "discard old event[%d] %d\n", i, type);
1098			dev->ev_next--;
1099			if (i == dev->ev_next)
1100				break;
1101			/* indices start at zero, for simplicity */
1102			memmove (&dev->event [i], &dev->event [i + 1],
1103				sizeof (struct usb_gadgetfs_event)
1104					* (dev->ev_next - i));
1105		}
1106		break;
1107	default:
1108		BUG ();
1109	}
1110	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1111	event = &dev->event [dev->ev_next++];
1112	BUG_ON (dev->ev_next > N_EVENT);
1113	memset (event, 0, sizeof *event);
1114	event->type = type;
1115	return event;
1116}
1117
1118static ssize_t
1119ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1120{
1121	struct dev_data		*dev = fd->private_data;
1122	ssize_t			retval = -ESRCH;
1123
1124	/* report fd mode change before acting on it */
1125	if (dev->setup_abort) {
1126		dev->setup_abort = 0;
1127		retval = -EIDRM;
1128
1129	/* data and/or status stage for control request */
1130	} else if (dev->state == STATE_DEV_SETUP) {
1131
1132		len = min_t(size_t, len, dev->setup_wLength);
1133		if (dev->setup_in) {
1134			retval = setup_req (dev->gadget->ep0, dev->req, len);
1135			if (retval == 0) {
1136				dev->state = STATE_DEV_CONNECTED;
1137				++dev->udc_usage;
1138				spin_unlock_irq (&dev->lock);
1139				if (copy_from_user (dev->req->buf, buf, len))
1140					retval = -EFAULT;
1141				else {
1142					if (len < dev->setup_wLength)
1143						dev->req->zero = 1;
1144					retval = usb_ep_queue (
1145						dev->gadget->ep0, dev->req,
1146						GFP_KERNEL);
1147				}
1148				spin_lock_irq(&dev->lock);
1149				--dev->udc_usage;
1150				if (retval < 0) {
1151					clean_req (dev->gadget->ep0, dev->req);
1152				} else
1153					retval = len;
1154
1155				return retval;
1156			}
1157
1158		/* can stall some OUT transfers */
1159		} else if (dev->setup_can_stall) {
1160			VDEBUG(dev, "ep0out stall\n");
1161			(void) usb_ep_set_halt (dev->gadget->ep0);
1162			retval = -EL2HLT;
1163			dev->state = STATE_DEV_CONNECTED;
1164		} else {
1165			DBG(dev, "bogus ep0out stall!\n");
1166		}
1167	} else
1168		DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1169
1170	return retval;
1171}
1172
1173static int
1174ep0_fasync (int f, struct file *fd, int on)
1175{
1176	struct dev_data		*dev = fd->private_data;
1177	// caller must F_SETOWN before signal delivery happens
1178	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1179	return fasync_helper (f, fd, on, &dev->fasync);
1180}
1181
1182static struct usb_gadget_driver gadgetfs_driver;
1183
1184static int
1185dev_release (struct inode *inode, struct file *fd)
1186{
1187	struct dev_data		*dev = fd->private_data;
1188
1189	/* closing ep0 === shutdown all */
1190
1191	if (dev->gadget_registered) {
1192		usb_gadget_unregister_driver (&gadgetfs_driver);
1193		dev->gadget_registered = false;
1194	}
1195
1196	/* at this point "good" hardware has disconnected the
1197	 * device from USB; the host won't see it any more.
1198	 * alternatively, all host requests will time out.
1199	 */
1200
1201	kfree (dev->buf);
1202	dev->buf = NULL;
1203
1204	/* other endpoints were all decoupled from this device */
1205	spin_lock_irq(&dev->lock);
1206	dev->state = STATE_DEV_DISABLED;
1207	spin_unlock_irq(&dev->lock);
1208
1209	put_dev (dev);
1210	return 0;
1211}
1212
1213static __poll_t
1214ep0_poll (struct file *fd, poll_table *wait)
1215{
1216       struct dev_data         *dev = fd->private_data;
1217       __poll_t                mask = 0;
1218
1219	if (dev->state <= STATE_DEV_OPENED)
1220		return DEFAULT_POLLMASK;
1221
1222	poll_wait(fd, &dev->wait, wait);
1223
1224	spin_lock_irq(&dev->lock);
1225
1226	/* report fd mode change before acting on it */
1227	if (dev->setup_abort) {
1228		dev->setup_abort = 0;
1229		mask = EPOLLHUP;
1230		goto out;
1231	}
1232
1233	if (dev->state == STATE_DEV_SETUP) {
1234		if (dev->setup_in || dev->setup_can_stall)
1235			mask = EPOLLOUT;
1236	} else {
1237		if (dev->ev_next != 0)
1238			mask = EPOLLIN;
1239	}
1240out:
1241	spin_unlock_irq(&dev->lock);
1242	return mask;
1243}
1244
1245static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1246{
1247	struct dev_data		*dev = fd->private_data;
1248	struct usb_gadget	*gadget = dev->gadget;
1249	long ret = -ENOTTY;
1250
1251	spin_lock_irq(&dev->lock);
1252	if (dev->state == STATE_DEV_OPENED ||
1253			dev->state == STATE_DEV_UNBOUND) {
1254		/* Not bound to a UDC */
1255	} else if (gadget->ops->ioctl) {
1256		++dev->udc_usage;
1257		spin_unlock_irq(&dev->lock);
1258
1259		ret = gadget->ops->ioctl (gadget, code, value);
1260
1261		spin_lock_irq(&dev->lock);
1262		--dev->udc_usage;
1263	}
1264	spin_unlock_irq(&dev->lock);
1265
1266	return ret;
1267}
1268
1269/*----------------------------------------------------------------------*/
1270
1271/* The in-kernel gadget driver handles most ep0 issues, in particular
1272 * enumerating the single configuration (as provided from user space).
1273 *
1274 * Unrecognized ep0 requests may be handled in user space.
1275 */
1276
1277static void make_qualifier (struct dev_data *dev)
1278{
1279	struct usb_qualifier_descriptor		qual;
1280	struct usb_device_descriptor		*desc;
1281
1282	qual.bLength = sizeof qual;
1283	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1284	qual.bcdUSB = cpu_to_le16 (0x0200);
1285
1286	desc = dev->dev;
1287	qual.bDeviceClass = desc->bDeviceClass;
1288	qual.bDeviceSubClass = desc->bDeviceSubClass;
1289	qual.bDeviceProtocol = desc->bDeviceProtocol;
1290
1291	/* assumes ep0 uses the same value for both speeds ... */
1292	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1293
1294	qual.bNumConfigurations = 1;
1295	qual.bRESERVED = 0;
1296
1297	memcpy (dev->rbuf, &qual, sizeof qual);
1298}
1299
1300static int
1301config_buf (struct dev_data *dev, u8 type, unsigned index)
1302{
1303	int		len;
1304	int		hs = 0;
1305
1306	/* only one configuration */
1307	if (index > 0)
1308		return -EINVAL;
1309
1310	if (gadget_is_dualspeed(dev->gadget)) {
1311		hs = (dev->gadget->speed == USB_SPEED_HIGH);
1312		if (type == USB_DT_OTHER_SPEED_CONFIG)
1313			hs = !hs;
1314	}
1315	if (hs) {
1316		dev->req->buf = dev->hs_config;
1317		len = le16_to_cpu(dev->hs_config->wTotalLength);
1318	} else {
1319		dev->req->buf = dev->config;
1320		len = le16_to_cpu(dev->config->wTotalLength);
1321	}
1322	((u8 *)dev->req->buf) [1] = type;
1323	return len;
1324}
1325
1326static int
1327gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1328{
1329	struct dev_data			*dev = get_gadget_data (gadget);
1330	struct usb_request		*req = dev->req;
1331	int				value = -EOPNOTSUPP;
1332	struct usb_gadgetfs_event	*event;
1333	u16				w_value = le16_to_cpu(ctrl->wValue);
1334	u16				w_length = le16_to_cpu(ctrl->wLength);
1335
 
 
 
 
 
 
 
 
 
 
 
 
1336	spin_lock (&dev->lock);
1337	dev->setup_abort = 0;
1338	if (dev->state == STATE_DEV_UNCONNECTED) {
1339		if (gadget_is_dualspeed(gadget)
1340				&& gadget->speed == USB_SPEED_HIGH
1341				&& dev->hs_config == NULL) {
1342			spin_unlock(&dev->lock);
1343			ERROR (dev, "no high speed config??\n");
1344			return -EINVAL;
1345		}
1346
1347		dev->state = STATE_DEV_CONNECTED;
1348
1349		INFO (dev, "connected\n");
1350		event = next_event (dev, GADGETFS_CONNECT);
1351		event->u.speed = gadget->speed;
1352		ep0_readable (dev);
1353
1354	/* host may have given up waiting for response.  we can miss control
1355	 * requests handled lower down (device/endpoint status and features);
1356	 * then ep0_{read,write} will report the wrong status. controller
1357	 * driver will have aborted pending i/o.
1358	 */
1359	} else if (dev->state == STATE_DEV_SETUP)
1360		dev->setup_abort = 1;
1361
1362	req->buf = dev->rbuf;
1363	req->context = NULL;
1364	switch (ctrl->bRequest) {
1365
1366	case USB_REQ_GET_DESCRIPTOR:
1367		if (ctrl->bRequestType != USB_DIR_IN)
1368			goto unrecognized;
1369		switch (w_value >> 8) {
1370
1371		case USB_DT_DEVICE:
1372			value = min (w_length, (u16) sizeof *dev->dev);
1373			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1374			req->buf = dev->dev;
1375			break;
1376		case USB_DT_DEVICE_QUALIFIER:
1377			if (!dev->hs_config)
1378				break;
1379			value = min (w_length, (u16)
1380				sizeof (struct usb_qualifier_descriptor));
1381			make_qualifier (dev);
1382			break;
1383		case USB_DT_OTHER_SPEED_CONFIG:
1384		case USB_DT_CONFIG:
1385			value = config_buf (dev,
1386					w_value >> 8,
1387					w_value & 0xff);
1388			if (value >= 0)
1389				value = min (w_length, (u16) value);
1390			break;
1391		case USB_DT_STRING:
1392			goto unrecognized;
1393
1394		default:		// all others are errors
1395			break;
1396		}
1397		break;
1398
1399	/* currently one config, two speeds */
1400	case USB_REQ_SET_CONFIGURATION:
1401		if (ctrl->bRequestType != 0)
1402			goto unrecognized;
1403		if (0 == (u8) w_value) {
1404			value = 0;
1405			dev->current_config = 0;
1406			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1407			// user mode expected to disable endpoints
1408		} else {
1409			u8	config, power;
1410
1411			if (gadget_is_dualspeed(gadget)
1412					&& gadget->speed == USB_SPEED_HIGH) {
1413				config = dev->hs_config->bConfigurationValue;
1414				power = dev->hs_config->bMaxPower;
1415			} else {
1416				config = dev->config->bConfigurationValue;
1417				power = dev->config->bMaxPower;
1418			}
1419
1420			if (config == (u8) w_value) {
1421				value = 0;
1422				dev->current_config = config;
1423				usb_gadget_vbus_draw(gadget, 2 * power);
1424			}
1425		}
1426
1427		/* report SET_CONFIGURATION like any other control request,
1428		 * except that usermode may not stall this.  the next
1429		 * request mustn't be allowed start until this finishes:
1430		 * endpoints and threads set up, etc.
1431		 *
1432		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
1433		 * has bad/racey automagic that prevents synchronizing here.
1434		 * even kernel mode drivers often miss them.
1435		 */
1436		if (value == 0) {
1437			INFO (dev, "configuration #%d\n", dev->current_config);
1438			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1439			if (dev->usermode_setup) {
1440				dev->setup_can_stall = 0;
1441				goto delegate;
1442			}
1443		}
1444		break;
1445
1446#ifndef	CONFIG_USB_PXA25X
1447	/* PXA automagically handles this request too */
1448	case USB_REQ_GET_CONFIGURATION:
1449		if (ctrl->bRequestType != 0x80)
1450			goto unrecognized;
1451		*(u8 *)req->buf = dev->current_config;
1452		value = min (w_length, (u16) 1);
1453		break;
1454#endif
1455
1456	default:
1457unrecognized:
1458		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1459			dev->usermode_setup ? "delegate" : "fail",
1460			ctrl->bRequestType, ctrl->bRequest,
1461			w_value, le16_to_cpu(ctrl->wIndex), w_length);
1462
1463		/* if there's an ep0 reader, don't stall */
1464		if (dev->usermode_setup) {
1465			dev->setup_can_stall = 1;
1466delegate:
1467			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1468						? 1 : 0;
1469			dev->setup_wLength = w_length;
1470			dev->setup_out_ready = 0;
1471			dev->setup_out_error = 0;
1472
1473			/* read DATA stage for OUT right away */
1474			if (unlikely (!dev->setup_in && w_length)) {
1475				value = setup_req (gadget->ep0, dev->req,
1476							w_length);
1477				if (value < 0)
1478					break;
1479
1480				++dev->udc_usage;
1481				spin_unlock (&dev->lock);
1482				value = usb_ep_queue (gadget->ep0, dev->req,
1483							GFP_KERNEL);
1484				spin_lock (&dev->lock);
1485				--dev->udc_usage;
1486				if (value < 0) {
1487					clean_req (gadget->ep0, dev->req);
1488					break;
1489				}
1490
1491				/* we can't currently stall these */
1492				dev->setup_can_stall = 0;
1493			}
1494
1495			/* state changes when reader collects event */
1496			event = next_event (dev, GADGETFS_SETUP);
1497			event->u.setup = *ctrl;
1498			ep0_readable (dev);
1499			spin_unlock (&dev->lock);
1500			return 0;
1501		}
1502	}
1503
1504	/* proceed with data transfer and status phases? */
1505	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1506		req->length = value;
1507		req->zero = value < w_length;
1508
1509		++dev->udc_usage;
1510		spin_unlock (&dev->lock);
1511		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1512		spin_lock(&dev->lock);
1513		--dev->udc_usage;
1514		spin_unlock(&dev->lock);
1515		if (value < 0) {
1516			DBG (dev, "ep_queue --> %d\n", value);
1517			req->status = 0;
1518		}
1519		return value;
1520	}
1521
1522	/* device stalls when value < 0 */
1523	spin_unlock (&dev->lock);
1524	return value;
1525}
1526
1527static void destroy_ep_files (struct dev_data *dev)
1528{
1529	DBG (dev, "%s %d\n", __func__, dev->state);
1530
1531	/* dev->state must prevent interference */
1532	spin_lock_irq (&dev->lock);
1533	while (!list_empty(&dev->epfiles)) {
1534		struct ep_data	*ep;
1535		struct inode	*parent;
1536		struct dentry	*dentry;
1537
1538		/* break link to FS */
1539		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1540		list_del_init (&ep->epfiles);
1541		spin_unlock_irq (&dev->lock);
1542
1543		dentry = ep->dentry;
1544		ep->dentry = NULL;
1545		parent = d_inode(dentry->d_parent);
1546
1547		/* break link to controller */
1548		mutex_lock(&ep->lock);
1549		if (ep->state == STATE_EP_ENABLED)
1550			(void) usb_ep_disable (ep->ep);
1551		ep->state = STATE_EP_UNBOUND;
1552		usb_ep_free_request (ep->ep, ep->req);
1553		ep->ep = NULL;
1554		mutex_unlock(&ep->lock);
1555
1556		wake_up (&ep->wait);
1557		put_ep (ep);
1558
1559		/* break link to dcache */
1560		inode_lock(parent);
1561		d_delete (dentry);
1562		dput (dentry);
1563		inode_unlock(parent);
1564
1565		spin_lock_irq (&dev->lock);
1566	}
1567	spin_unlock_irq (&dev->lock);
1568}
1569
1570
1571static struct dentry *
1572gadgetfs_create_file (struct super_block *sb, char const *name,
1573		void *data, const struct file_operations *fops);
1574
1575static int activate_ep_files (struct dev_data *dev)
1576{
1577	struct usb_ep	*ep;
1578	struct ep_data	*data;
1579
1580	gadget_for_each_ep (ep, dev->gadget) {
1581
1582		data = kzalloc(sizeof(*data), GFP_KERNEL);
1583		if (!data)
1584			goto enomem0;
1585		data->state = STATE_EP_DISABLED;
1586		mutex_init(&data->lock);
1587		init_waitqueue_head (&data->wait);
1588
1589		strncpy (data->name, ep->name, sizeof (data->name) - 1);
1590		refcount_set (&data->count, 1);
1591		data->dev = dev;
1592		get_dev (dev);
1593
1594		data->ep = ep;
1595		ep->driver_data = data;
1596
1597		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1598		if (!data->req)
1599			goto enomem1;
1600
1601		data->dentry = gadgetfs_create_file (dev->sb, data->name,
1602				data, &ep_io_operations);
1603		if (!data->dentry)
1604			goto enomem2;
1605		list_add_tail (&data->epfiles, &dev->epfiles);
1606	}
1607	return 0;
1608
1609enomem2:
1610	usb_ep_free_request (ep, data->req);
1611enomem1:
1612	put_dev (dev);
1613	kfree (data);
1614enomem0:
1615	DBG (dev, "%s enomem\n", __func__);
1616	destroy_ep_files (dev);
1617	return -ENOMEM;
1618}
1619
1620static void
1621gadgetfs_unbind (struct usb_gadget *gadget)
1622{
1623	struct dev_data		*dev = get_gadget_data (gadget);
1624
1625	DBG (dev, "%s\n", __func__);
1626
1627	spin_lock_irq (&dev->lock);
1628	dev->state = STATE_DEV_UNBOUND;
1629	while (dev->udc_usage > 0) {
1630		spin_unlock_irq(&dev->lock);
1631		usleep_range(1000, 2000);
1632		spin_lock_irq(&dev->lock);
1633	}
1634	spin_unlock_irq (&dev->lock);
1635
1636	destroy_ep_files (dev);
1637	gadget->ep0->driver_data = NULL;
1638	set_gadget_data (gadget, NULL);
1639
1640	/* we've already been disconnected ... no i/o is active */
1641	if (dev->req)
1642		usb_ep_free_request (gadget->ep0, dev->req);
1643	DBG (dev, "%s done\n", __func__);
1644	put_dev (dev);
1645}
1646
1647static struct dev_data		*the_device;
1648
1649static int gadgetfs_bind(struct usb_gadget *gadget,
1650		struct usb_gadget_driver *driver)
1651{
1652	struct dev_data		*dev = the_device;
1653
1654	if (!dev)
1655		return -ESRCH;
1656	if (0 != strcmp (CHIP, gadget->name)) {
1657		pr_err("%s expected %s controller not %s\n",
1658			shortname, CHIP, gadget->name);
1659		return -ENODEV;
1660	}
1661
1662	set_gadget_data (gadget, dev);
1663	dev->gadget = gadget;
1664	gadget->ep0->driver_data = dev;
1665
1666	/* preallocate control response and buffer */
1667	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1668	if (!dev->req)
1669		goto enomem;
1670	dev->req->context = NULL;
1671	dev->req->complete = epio_complete;
1672
1673	if (activate_ep_files (dev) < 0)
1674		goto enomem;
1675
1676	INFO (dev, "bound to %s driver\n", gadget->name);
1677	spin_lock_irq(&dev->lock);
1678	dev->state = STATE_DEV_UNCONNECTED;
1679	spin_unlock_irq(&dev->lock);
1680	get_dev (dev);
1681	return 0;
1682
1683enomem:
1684	gadgetfs_unbind (gadget);
1685	return -ENOMEM;
1686}
1687
1688static void
1689gadgetfs_disconnect (struct usb_gadget *gadget)
1690{
1691	struct dev_data		*dev = get_gadget_data (gadget);
1692	unsigned long		flags;
1693
1694	spin_lock_irqsave (&dev->lock, flags);
1695	if (dev->state == STATE_DEV_UNCONNECTED)
1696		goto exit;
1697	dev->state = STATE_DEV_UNCONNECTED;
1698
1699	INFO (dev, "disconnected\n");
1700	next_event (dev, GADGETFS_DISCONNECT);
1701	ep0_readable (dev);
1702exit:
1703	spin_unlock_irqrestore (&dev->lock, flags);
1704}
1705
1706static void
1707gadgetfs_suspend (struct usb_gadget *gadget)
1708{
1709	struct dev_data		*dev = get_gadget_data (gadget);
1710	unsigned long		flags;
1711
1712	INFO (dev, "suspended from state %d\n", dev->state);
1713	spin_lock_irqsave(&dev->lock, flags);
1714	switch (dev->state) {
1715	case STATE_DEV_SETUP:		// VERY odd... host died??
1716	case STATE_DEV_CONNECTED:
1717	case STATE_DEV_UNCONNECTED:
1718		next_event (dev, GADGETFS_SUSPEND);
1719		ep0_readable (dev);
1720		fallthrough;
1721	default:
1722		break;
1723	}
1724	spin_unlock_irqrestore(&dev->lock, flags);
1725}
1726
1727static struct usb_gadget_driver gadgetfs_driver = {
1728	.function	= (char *) driver_desc,
1729	.bind		= gadgetfs_bind,
1730	.unbind		= gadgetfs_unbind,
1731	.setup		= gadgetfs_setup,
1732	.reset		= gadgetfs_disconnect,
1733	.disconnect	= gadgetfs_disconnect,
1734	.suspend	= gadgetfs_suspend,
1735
1736	.driver	= {
1737		.name		= shortname,
1738	},
1739};
1740
1741/*----------------------------------------------------------------------*/
1742/* DEVICE INITIALIZATION
1743 *
1744 *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
1745 *     status = write (fd, descriptors, sizeof descriptors)
1746 *
1747 * That write establishes the device configuration, so the kernel can
1748 * bind to the controller ... guaranteeing it can handle enumeration
1749 * at all necessary speeds.  Descriptor order is:
1750 *
1751 * . message tag (u32, host order) ... for now, must be zero; it
1752 *	would change to support features like multi-config devices
1753 * . full/low speed config ... all wTotalLength bytes (with interface,
1754 *	class, altsetting, endpoint, and other descriptors)
1755 * . high speed config ... all descriptors, for high speed operation;
1756 *	this one's optional except for high-speed hardware
1757 * . device descriptor
1758 *
1759 * Endpoints are not yet enabled. Drivers must wait until device
1760 * configuration and interface altsetting changes create
1761 * the need to configure (or unconfigure) them.
1762 *
1763 * After initialization, the device stays active for as long as that
1764 * $CHIP file is open.  Events must then be read from that descriptor,
1765 * such as configuration notifications.
1766 */
1767
1768static int is_valid_config(struct usb_config_descriptor *config,
1769		unsigned int total)
1770{
1771	return config->bDescriptorType == USB_DT_CONFIG
1772		&& config->bLength == USB_DT_CONFIG_SIZE
1773		&& total >= USB_DT_CONFIG_SIZE
1774		&& config->bConfigurationValue != 0
1775		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1776		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1777	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1778	/* FIXME check lengths: walk to end */
1779}
1780
1781static ssize_t
1782dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1783{
1784	struct dev_data		*dev = fd->private_data;
1785	ssize_t			value, length = len;
1786	unsigned		total;
1787	u32			tag;
1788	char			*kbuf;
1789
1790	spin_lock_irq(&dev->lock);
1791	if (dev->state > STATE_DEV_OPENED) {
1792		value = ep0_write(fd, buf, len, ptr);
1793		spin_unlock_irq(&dev->lock);
1794		return value;
1795	}
1796	spin_unlock_irq(&dev->lock);
1797
1798	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1799	    (len > PAGE_SIZE * 4))
1800		return -EINVAL;
1801
1802	/* we might need to change message format someday */
1803	if (copy_from_user (&tag, buf, 4))
1804		return -EFAULT;
1805	if (tag != 0)
1806		return -EINVAL;
1807	buf += 4;
1808	length -= 4;
1809
1810	kbuf = memdup_user(buf, length);
1811	if (IS_ERR(kbuf))
1812		return PTR_ERR(kbuf);
1813
1814	spin_lock_irq (&dev->lock);
1815	value = -EINVAL;
1816	if (dev->buf) {
1817		kfree(kbuf);
1818		goto fail;
1819	}
1820	dev->buf = kbuf;
1821
1822	/* full or low speed config */
1823	dev->config = (void *) kbuf;
1824	total = le16_to_cpu(dev->config->wTotalLength);
1825	if (!is_valid_config(dev->config, total) ||
1826			total > length - USB_DT_DEVICE_SIZE)
1827		goto fail;
1828	kbuf += total;
1829	length -= total;
1830
1831	/* optional high speed config */
1832	if (kbuf [1] == USB_DT_CONFIG) {
1833		dev->hs_config = (void *) kbuf;
1834		total = le16_to_cpu(dev->hs_config->wTotalLength);
1835		if (!is_valid_config(dev->hs_config, total) ||
1836				total > length - USB_DT_DEVICE_SIZE)
1837			goto fail;
1838		kbuf += total;
1839		length -= total;
1840	} else {
1841		dev->hs_config = NULL;
1842	}
1843
1844	/* could support multiple configs, using another encoding! */
1845
1846	/* device descriptor (tweaked for paranoia) */
1847	if (length != USB_DT_DEVICE_SIZE)
1848		goto fail;
1849	dev->dev = (void *)kbuf;
1850	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1851			|| dev->dev->bDescriptorType != USB_DT_DEVICE
1852			|| dev->dev->bNumConfigurations != 1)
1853		goto fail;
1854	dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1855
1856	/* triggers gadgetfs_bind(); then we can enumerate. */
1857	spin_unlock_irq (&dev->lock);
1858	if (dev->hs_config)
1859		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1860	else
1861		gadgetfs_driver.max_speed = USB_SPEED_FULL;
1862
1863	value = usb_gadget_probe_driver(&gadgetfs_driver);
1864	if (value != 0) {
1865		kfree (dev->buf);
1866		dev->buf = NULL;
1867	} else {
1868		/* at this point "good" hardware has for the first time
1869		 * let the USB host see us.  alternatively, if users
1870		 * unplug/replug that will clear all the error state.
1871		 *
1872		 * note:  everything running before here was guaranteed
1873		 * to choke driver model style diagnostics.  from here
1874		 * on, they can work ... except in cleanup paths that
1875		 * kick in after the ep0 descriptor is closed.
1876		 */
1877		value = len;
1878		dev->gadget_registered = true;
1879	}
1880	return value;
1881
1882fail:
1883	spin_unlock_irq (&dev->lock);
1884	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1885	kfree (dev->buf);
1886	dev->buf = NULL;
1887	return value;
1888}
1889
1890static int
1891dev_open (struct inode *inode, struct file *fd)
1892{
1893	struct dev_data		*dev = inode->i_private;
1894	int			value = -EBUSY;
1895
1896	spin_lock_irq(&dev->lock);
1897	if (dev->state == STATE_DEV_DISABLED) {
1898		dev->ev_next = 0;
1899		dev->state = STATE_DEV_OPENED;
1900		fd->private_data = dev;
1901		get_dev (dev);
1902		value = 0;
1903	}
1904	spin_unlock_irq(&dev->lock);
1905	return value;
1906}
1907
1908static const struct file_operations ep0_operations = {
1909	.llseek =	no_llseek,
1910
1911	.open =		dev_open,
1912	.read =		ep0_read,
1913	.write =	dev_config,
1914	.fasync =	ep0_fasync,
1915	.poll =		ep0_poll,
1916	.unlocked_ioctl = dev_ioctl,
1917	.release =	dev_release,
1918};
1919
1920/*----------------------------------------------------------------------*/
1921
1922/* FILESYSTEM AND SUPERBLOCK OPERATIONS
1923 *
1924 * Mounting the filesystem creates a controller file, used first for
1925 * device configuration then later for event monitoring.
1926 */
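/* For illustration only: a user-space sketch of that mount plus the first
 * open of the controller file.  The /dev/gadget mount point and the
 * "net2280" controller name are assumptions; the file is named after
 * whatever UDC the kernel actually exposes.
 *
 *	#include <sys/mount.h>
 *	#include <fcntl.h>
 *
 *	int open_ep0(void)
 *	{
 *		// equivalent to: mount -t gadgetfs none /dev/gadget
 *		if (mount("none", "/dev/gadget", "gadgetfs", 0, NULL) < 0)
 *			return -1;
 *		return open("/dev/gadget/net2280", O_RDWR);
 *	}
 */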
1927
1928
1929/* FIXME PAM etc could set this security policy without mount options
1930	 * if epfiles inherited ownership and permissions from ep0 ...
1931 */
1932
1933static unsigned default_uid;
1934static unsigned default_gid;
1935static unsigned default_perm = S_IRUSR | S_IWUSR;
1936
1937module_param (default_uid, uint, 0644);
1938module_param (default_gid, uint, 0644);
1939module_param (default_perm, uint, 0644);
1940
1941
1942static struct inode *
1943gadgetfs_make_inode (struct super_block *sb,
1944		void *data, const struct file_operations *fops,
1945		int mode)
1946{
1947	struct inode *inode = new_inode (sb);
1948
1949	if (inode) {
1950		inode->i_ino = get_next_ino();
1951		inode->i_mode = mode;
1952		inode->i_uid = make_kuid(&init_user_ns, default_uid);
1953		inode->i_gid = make_kgid(&init_user_ns, default_gid);
1954		inode->i_atime = inode->i_mtime = inode->i_ctime
1955				= current_time(inode);
1956		inode->i_private = data;
1957		inode->i_fop = fops;
1958	}
1959	return inode;
1960}
1961
1962	/* Files are created in the fs root directory, so they are non-renamable
1963	 * and non-linkable; inode and dentry stay paired until device reconfig.
1964	 */
1965static struct dentry *
1966gadgetfs_create_file (struct super_block *sb, char const *name,
1967		void *data, const struct file_operations *fops)
1968{
1969	struct dentry	*dentry;
1970	struct inode	*inode;
1971
1972	dentry = d_alloc_name(sb->s_root, name);
1973	if (!dentry)
1974		return NULL;
1975
1976	inode = gadgetfs_make_inode (sb, data, fops,
1977			S_IFREG | (default_perm & S_IRWXUGO));
1978	if (!inode) {
1979		dput(dentry);
1980		return NULL;
1981	}
1982	d_add (dentry, inode);
1983	return dentry;
1984}
1985
1986static const struct super_operations gadget_fs_operations = {
1987	.statfs =	simple_statfs,
1988	.drop_inode =	generic_delete_inode,
1989};
1990
1991static int
1992gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
1993{
1994	struct inode	*inode;
1995	struct dev_data	*dev;
1996
1997	if (the_device)
1998		return -ESRCH;
1999
2000	CHIP = usb_get_gadget_udc_name();
2001	if (!CHIP)
2002		return -ENODEV;
2003
2004	/* superblock */
2005	sb->s_blocksize = PAGE_SIZE;
2006	sb->s_blocksize_bits = PAGE_SHIFT;
2007	sb->s_magic = GADGETFS_MAGIC;
2008	sb->s_op = &gadget_fs_operations;
2009	sb->s_time_gran = 1;
2010
2011	/* root inode */
2012	inode = gadgetfs_make_inode (sb,
2013			NULL, &simple_dir_operations,
2014			S_IFDIR | S_IRUGO | S_IXUGO);
2015	if (!inode)
2016		goto Enomem;
2017	inode->i_op = &simple_dir_inode_operations;
2018	if (!(sb->s_root = d_make_root (inode)))
2019		goto Enomem;
2020
2021	/* the ep0 file is named after the controller we expect;
2022	 * user mode code can use it for sanity checks, like we do.
2023	 */
2024	dev = dev_new ();
2025	if (!dev)
2026		goto Enomem;
2027
2028	dev->sb = sb;
2029	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2030	if (!dev->dentry) {
2031		put_dev(dev);
2032		goto Enomem;
2033	}
2034
2035	/* other endpoint files are available after hardware setup,
2036	 * from binding to a controller.
2037	 */
2038	the_device = dev;
2039	return 0;
2040
2041Enomem:
2042	return -ENOMEM;
2043}
2044
2045/* "mount -t gadgetfs path /dev/gadget" ends up here */
2046static int gadgetfs_get_tree(struct fs_context *fc)
2047{
2048	return get_tree_single(fc, gadgetfs_fill_super);
2049}
2050
2051static const struct fs_context_operations gadgetfs_context_ops = {
2052	.get_tree	= gadgetfs_get_tree,
2053};
2054
2055static int gadgetfs_init_fs_context(struct fs_context *fc)
2056{
2057	fc->ops = &gadgetfs_context_ops;
2058	return 0;
2059}
2060
2061static void
2062gadgetfs_kill_sb (struct super_block *sb)
2063{
2064	kill_litter_super (sb);
2065	if (the_device) {
2066		put_dev (the_device);
2067		the_device = NULL;
2068	}
2069	kfree(CHIP);
2070	CHIP = NULL;
2071}
2072
2073/*----------------------------------------------------------------------*/
2074
2075static struct file_system_type gadgetfs_type = {
2076	.owner		= THIS_MODULE,
2077	.name		= shortname,
2078	.init_fs_context = gadgetfs_init_fs_context,
2079	.kill_sb	= gadgetfs_kill_sb,
2080};
2081MODULE_ALIAS_FS("gadgetfs");
2082
2083/*----------------------------------------------------------------------*/
2084
2085static int __init init (void)
2086{
2087	int status;
2088
2089	status = register_filesystem (&gadgetfs_type);
2090	if (status == 0)
2091		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2092			shortname, driver_desc);
2093	return status;
2094}
2095module_init (init);
2096
2097static void __exit cleanup (void)
2098{
2099	pr_debug ("unregister %s\n", shortname);
2100	unregister_filesystem (&gadgetfs_type);
2101}
2102module_exit (cleanup);
2103