// SPDX-License-Identifier: GPL-2.0+
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
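
/*
 * Illustrative sketch (not part of the original file): the calling
 * pattern a function driver typically follows against the gether_*
 * API defined below.  The example_* names are hypothetical; struct
 * gether and QMULT_DEFAULT come from u_ether.h.  Guarded out because
 * this is a sketch, not compiled code.
 */
#if 0
static struct eth_dev *example_ethdev;
static struct gether example_port;	/* endpoints filled in by the function driver */

static int example_bind(struct usb_gadget *g)
{
	u8 host_mac[ETH_ALEN];	/* records the host-side address for descriptors */

	/* NULL address strings request random link-level addresses */
	example_ethdev = gether_setup_name(g, NULL, NULL, host_mac,
					   QMULT_DEFAULT, "usb");
	if (IS_ERR(example_ethdev))
		return PTR_ERR(example_ethdev);
	example_port.ioport = example_ethdev;
	return 0;
}

static int example_set_alt(void)
{
	/* endpoints and descriptors must already match the current speed */
	struct net_device *net = gether_connect(&example_port);

	return IS_ERR(net) ? PTR_ERR(net) : 0;
}

static void example_disable(void)
{
	gether_disconnect(&example_port);
}

static void example_unbind(void)
{
	gether_cleanup(example_ethdev);
}
#endif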

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling.
 */
#define GETHER_MAX_MTU_SIZE 15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
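
/*
 * Worked numbers (illustrative): 15k+52 = 15 * 1024 + 52 = 15412, so the
 * largest frame is 15412 + ETH_HLEN(14) = 15426 bytes, keeping per-frame
 * rx buffer allocations below 16k rather than spilling into 32k blocks.
 */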

struct eth_dev {
	/* lock is held while accessing port_usb */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		qmult;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	bool			no_skb_reserve;
	bool			ifname_set;
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed >= USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
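
/*
 * Worked example (illustrative): with the default qmult of 5, a
 * high-speed or SuperSpeed link queues 5 * DEFAULT_QLEN = 10 requests
 * per direction, while a full-speed link keeps just the two needed
 * for double buffering.
 */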

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt, (d)->net->name, ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev, KERN_DEBUG, fmt, ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev, KERN_ERR, fmt, ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev, KERN_INFO, fmt, ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}
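
/*
 * Illustrative flow: when rx_submit() below fails with -ENOMEM,
 * defer_kevent(dev, WORK_RX_MEMORY) schedules eth_work(), which retries
 * rx_fill() from process context where GFP_KERNEL allocations may sleep
 * until memory becomes available.
 */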

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget *g = dev->gadget;
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;

	if (!out) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOTCONN;
	}

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}
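
	/*
	 * Worked example (illustrative): for a 1500 byte MTU and a
	 * hypothetical 8 byte link header, size is 14 + 1500 + 20 + 8 =
	 * 1542; with a 512 byte bulk maxpacket and the alignment quirk
	 * set, that rounds up to 2048 (four full packets).
	 */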

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		fallthrough;
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		if (skb)
			dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}
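
	/*
	 * Illustrative example: if the host enabled only
	 * USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
	 * a non-broadcast multicast frame is dropped above, while
	 * broadcast and unicast frames continue on to be queued.
	 */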

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it,
	 * the hardware can't use skb buffers, or there's not enough
	 * space for the extra headers we need.
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
	    dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding,
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;
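
	/*
	 * Worked example (illustrative): with a 512 byte bulk maxpacket
	 * and a 1024 byte frame on hardware that can't send ZLPs, length
	 * becomes 1025 so the final short packet terminates the transfer.
	 */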

	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}
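
/*
 * Illustrative inputs get_ether_addr() accepts: "a0:b1:c2:d3:e4:f5",
 * "a0.b1.c2.d3.e4.f5", or bare "a0b1c2d3e4f5".  An invalid or missing
 * string falls back to a random address, and the function returns 1.
 */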

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @dev_addr: NULL, or a string with the device-side link address to use
 * @host_addr: NULL, or a string with the host-side link address to use
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue length multiplier at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);

struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;
	struct eth_dev		*dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);
	pr_warn("using random %s ethernet address\n", "self");
	eth_random_addr(dev->host_mac);
	pr_warn("using random %s ethernet address\n", "host");

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	struct sockaddr sa;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;
	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	} else {
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}
	sa.sa_family = net->type;
	memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
	rtnl_lock();
	status = dev_set_mac_address(net, &sa, NULL);
	rtnl_unlock();
	if (status)
		pr_warn("cannot set self ethernet address: %d\n", status);
	else
		INFO(dev, "MAC %pM\n", dev->dev_mac);

	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
	if (ret + 1 < len) {
		dev_addr[ret++] = '\n';
		dev_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
	if (ret + 1 < len) {
		host_addr[ret++] = '\n';
		host_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	snprintf(host_addr, len, "%pm", dev->host_mac);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	int ret;

	rtnl_lock();
	ret = scnprintf(name, len, "%s\n",
			dev->ifname_set ? net->name : netdev_name(net));
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

int gether_set_ifname(struct net_device *net, const char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	char tmp[IFNAMSIZ];
	const char *p;

	if (name[len - 1] == '\n')
		len--;

	if (len >= sizeof(tmp))
		return -E2BIG;

	strscpy(tmp, name, len + 1);
	if (!dev_valid_name(tmp))
		return -EINVAL;

	/* Require exactly one %d, so binding will not fail with EEXIST. */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
		return -EINVAL;

	strncpy(net->name, tmp, sizeof(net->name));
	dev->ifname_set = true;

	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_ifname);
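
/*
 * Illustrative example: gether_set_ifname(net, "myusb%d\n", 8) accepts
 * the template (the trailing newline is stripped, and there is exactly
 * one %d), while a literal name such as "myusb0" is rejected with
 * -EINVAL.
 */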

/*
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify the returned net_device pointer using IS_ERR().  Unless it
 * holds an error code (a negative errno), the endpoints' driver_data
 * values have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");