/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code, such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

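/* Illustrative lifetime sketch (not part of this file; "my_port" and the
 * call sites are hypothetical).  A function driver built on this layer
 * typically uses the gether_* entry points defined below like so:
 *
 *	status = gether_setup_name(gadget, host_mac, "usb");
 *	...
 *	net = gether_connect(&my_port);		// from set_alt(), irqs blocked
 *	...
 *	gether_disconnect(&my_port);		// from disable()
 *	...
 *	gether_cleanup();			// at unbind time
 */
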
#define UETH__VERSION	"29-May-2008"

struct eth_dev {
	/* lock is held while accessing port_usb
	 * or updating its backlink port_usb->ioport
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */


#ifdef CONFIG_USB_GADGET_DUALSPEED

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");

#else	/* full speed (low speed doesn't do bulk) */
#define qmult		1
#endif

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
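
/* Worked example: with the default qmult of 5, a dual-speed gadget keeps
 * up to qmult * DEFAULT_QLEN == 10 requests in flight per direction at
 * high/super speed, but only DEFAULT_QLEN == 2 at full speed.
 */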

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}
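
/* So e.g. "ip link set usb0 mtu 1400" works only while the USB link is
 * down; on a live link it fails with -EBUSY, since the peer would have
 * no way to learn about the new MTU.
 */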

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev	*dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof p->driver);
	strlcpy(p->version, UETH__VERSION, sizeof p->version);
	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
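
/* With these ops in place, "ethtool -i usb0" on the gadget side reports
 * driver "g_ether", this file's version string, the UDC name in the
 * firmware-version field, and the gadget device name as bus-info.
 */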

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;


	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

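	/* Worked example (hedged, typical numbers): with a 1500 byte MTU,
	 * no function-specific header, and a 512 byte high speed bulk
	 * maxpacket, size is 14 + 1500 + 20 = 1534 bytes, rounded up to
	 * the next maxpacket multiple: 1536, i.e. three full packets.
	 */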
	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}
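
/* e.g. prealloc(list, ep, 10) on a list already holding 12 requests
 * frees the 2 extras; on a list holding 4 it allocates 6 more, and a
 * partial allocation still returns 0, so the link can limp along with
 * a shallower queue.
 */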

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it,
	 * or the hardware can't use skb buffers,
	 * or there's not enough space for the extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* Use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * And some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

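	/* Here e.g. a 512 byte frame on a 512 byte high speed bulk
	 * endpoint is queued as 513 bytes when the hardware can't write
	 * zlps: the short final packet still marks end-of-transfer, and
	 * the receiver's rx path ignores the pad byte.
	 */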
	req->length = length;

	/* throttle high/super speed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
				     dev->gadget->speed == USB_SPEED_SUPER)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");

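/* Hedged usage example (the module name depends on which gadget driver
 * pulls this file in; g_ether is the common case).  Both parameters take
 * six hex bytes separated by ':' or '.':
 *
 *	modprobe g_ether dev_addr=12:34:56:78:9a:bc \
 *			 host_addr=12:34:56:78:9a:bd
 *
 * An absent or invalid address falls back to a random one, as
 * get_ether_addr() below shows.
 */
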
static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	random_ether_addr(dev_addr);
	return 1;
}

static struct eth_dev *the_dev;

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with this network link
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
		const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	SET_ETHTOOL_OPS(net, &ops);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		the_dev = dev;

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return status;
}

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup_name().
 */
void gether_cleanup(void)
{
	if (!the_dev)
		return;

	unregister_netdev(the_dev->net);
	flush_work_sync(&the_dev->work);
	free_netdev(the_dev->net);

	the_dev = NULL;
}


/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify the returned net_device pointer using IS_ERR().  Unless it
 * holds an error code (a negative errno), the endpoints' driver_data
 * values have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = the_dev;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		link->ioport = dev;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
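
/* Illustrative caller sketch (hedged; "my_port" and the surrounding
 * function are hypothetical).  A function driver's set_alt() handler,
 * with irqs blocked, would typically do:
 *
 *	struct net_device *net = gether_connect(&my_port);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *
 * On success the endpoints are live, carrier is on, and packets can
 * flow until gether_disconnect() is called.
 */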

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->driver_data = NULL;
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->ioport = NULL;
	spin_unlock(&dev->lock);
}