// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 */

#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <init.h>
#include <irq_kern.h>
#include <irq_user.h>
#include "mconsole_kern.h"
#include <net_kern.h>
#include <net_user.h>

#define DRIVER_NAME "uml-netdev"

static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);

/*
 * The drop_skb is used when we can't allocate an skb. The
 * packet is read into drop_skb in order to get the data off the
 * connection to the host.
 * It is reallocated whenever a maximum packet size is seen which is
 * larger than any seen before. update_drop_skb is called from
 * eth_configure when a new interface is added.
 */
static DEFINE_SPINLOCK(drop_lock);
static struct sk_buff *drop_skb;
static int drop_max;

static int update_drop_skb(int max)
{
	struct sk_buff *new;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&drop_lock, flags);

	if (max <= drop_max)
		goto out;

	err = -ENOMEM;
	new = dev_alloc_skb(max);
	if (new == NULL)
		goto out;

	skb_put(new, max);

	kfree_skb(drop_skb);
	drop_skb = new;
	drop_max = max;
	err = 0;
out:
	spin_unlock_irqrestore(&drop_lock, flags);

	return err;
}

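/*
 * Read one packet from the host file descriptor into a freshly allocated
 * skb and hand it to the stack.  Returns the number of bytes read, 0 when
 * the packet had to be dropped for lack of memory, or a negative error
 * from the transport's read op.
 */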
static int uml_net_rx(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	int pkt_len;
	struct sk_buff *skb;

	/* If we can't allocate memory, try again next round. */
	skb = dev_alloc_skb(lp->max_packet);
	if (skb == NULL) {
		drop_skb->dev = dev;
		/* Read a packet into drop_skb and don't do anything with it. */
		(*lp->read)(lp->fd, drop_skb, lp);
		dev->stats.rx_dropped++;
		return 0;
	}

	skb->dev = dev;
	skb_put(skb, lp->max_packet);
	skb_reset_mac_header(skb);
	pkt_len = (*lp->read)(lp->fd, skb, lp);

	if (pkt_len > 0) {
		skb_trim(skb, pkt_len);
		skb->protocol = (*lp->protocol)(skb);

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;
		netif_rx(skb);
		return pkt_len;
	}

	kfree_skb(skb);
	return pkt_len;
}

static void uml_dev_close(struct work_struct *work)
{
	struct uml_net_private *lp =
		container_of(work, struct uml_net_private, work);
	dev_close(lp->dev);
}

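/*
 * SIGIO-driven interrupt handler for the host-side descriptor: drain all
 * pending packets and, on a fatal read error, defer the dev_close() to a
 * workqueue since it cannot be called from interrupt context.
 */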
static irqreturn_t uml_net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct uml_net_private *lp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return IRQ_NONE;

	spin_lock(&lp->lock);
	while ((err = uml_net_rx(dev)) > 0) ;
	if (err < 0) {
		printk(KERN_ERR
		       "Device '%s' read returned %d, shutting it down\n",
		       dev->name, err);
		/* dev_close can't be called in interrupt context, and takes
		 * again lp->lock.
		 * And dev_close() can be safely called multiple times on the
		 * same device, since it tests for (dev->flags & IFF_UP). So
		 * there's no harm in delaying the device shutdown.
		 * Furthermore, the workqueue will not re-enqueue an already
		 * enqueued work item. */
		schedule_work(&lp->work);
		goto out;
	}
out:
	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}

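/*
 * Open the host-side transport, attach the read IRQ and drain anything the
 * host has already queued, so that SIGIO delivery can start from a clean
 * state.
 */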
static int uml_net_open(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	int err;

	if (lp->fd >= 0) {
		err = -ENXIO;
		goto out;
	}

	lp->fd = (*lp->open)(&lp->user);
	if (lp->fd < 0) {
		err = lp->fd;
		goto out;
	}

	err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
			     IRQF_SHARED, dev->name, dev);
	if (err < 0) {
		printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
		err = -ENETUNREACH;
		goto out_close;
	}

	netif_start_queue(dev);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here. In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 */
	while ((err = uml_net_rx(dev)) > 0) ;

	spin_lock(&opened_lock);
	list_add(&lp->list, &opened);
	spin_unlock(&opened_lock);

	return 0;
out_close:
	if (lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;
out:
	return err;
}

static int uml_net_close(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	um_free_irq(dev->irq, dev);
	if (lp->close != NULL)
		(*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;

	spin_lock(&opened_lock);
	list_del(&lp->list);
	spin_unlock(&opened_lock);

	return 0;
}

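/*
 * Transmit path: hand the frame to the transport's write op under lp->lock.
 * A zero-length write is accounted as a drop, any other short write is only
 * logged; the queue is restarted immediately because there is no real
 * TX-completion interrupt.
 */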
static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	unsigned long flags;
	int len;

	netif_stop_queue(dev);

	spin_lock_irqsave(&lp->lock, flags);

	len = (*lp->write)(lp->fd, skb, lp);
	skb_tx_timestamp(skb);

	if (len == skb->len) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		netif_trans_update(dev);
		netif_start_queue(dev);

		/* this is normally done in the interrupt when tx finishes */
		netif_wake_queue(dev);
	}
	else if (len == 0) {
		netif_start_queue(dev);
		dev->stats.tx_dropped++;
	}
	else {
		netif_start_queue(dev);
		printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

static void uml_net_set_multicast_list(struct net_device *dev)
{
	return;
}

static void uml_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netif_trans_update(dev);
	netif_wake_queue(dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void uml_net_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	uml_net_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void uml_net_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
}

static const struct ethtool_ops uml_net_ethtool_ops = {
	.get_drvinfo	= uml_net_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
};

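/*
 * Parse an "xx:xx:xx:xx:xx:xx" MAC string from the command line and assign
 * it to the device, falling back to a random locally administered address
 * when the string is missing, malformed, multicast or otherwise invalid.
 */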
void uml_net_setup_etheraddr(struct net_device *dev, char *str)
{
	u8 addr[ETH_ALEN];
	char *end;
	int i;

	if (str == NULL)
		goto random;

	for (i = 0; i < 6; i++) {
		addr[i] = simple_strtoul(str, &end, 16);
		if ((end == str) ||
		    ((*end != ':') && (*end != ',') && (*end != '\0'))) {
			printk(KERN_ERR
			       "setup_etheraddr: failed to parse '%s' "
			       "as an ethernet address\n", str);
			goto random;
		}
		str = end + 1;
	}
	if (is_multicast_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign a multicast ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_valid_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign an invalid ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_local_ether_addr(addr)) {
		printk(KERN_WARNING
		       "Warning: Assigning a globally valid ethernet "
		       "address to a device\n");
		printk(KERN_WARNING "You should set the 2nd rightmost bit in "
		       "the first byte of the MAC,\n");
		printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4],
		       addr[5]);
	}
	eth_hw_addr_set(dev, addr);
	return;

random:
	printk(KERN_INFO
	       "Choosing a random ethernet address for device %s\n", dev->name);
	eth_hw_addr_random(dev);
}

static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

static struct platform_driver uml_net_driver = {
	.driver = {
		.name = DRIVER_NAME,
	},
};

static void net_device_release(struct device *dev)
{
	struct uml_net *device = dev_get_drvdata(dev);
	struct net_device *netdev = device->dev;
	struct uml_net_private *lp = netdev_priv(netdev);

	if (lp->remove != NULL)
		(*lp->remove)(&lp->user);
	list_del(&device->list);
	kfree(device);
	free_netdev(netdev);
}

static const struct net_device_ops uml_netdev_ops = {
	.ndo_open		= uml_net_open,
	.ndo_stop		= uml_net_close,
	.ndo_start_xmit		= uml_net_start_xmit,
	.ndo_set_rx_mode	= uml_net_set_multicast_list,
	.ndo_tx_timeout		= uml_net_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= uml_net_poll_controller,
#endif
};

/*
 * Ensures that platform_driver_register is called only once by
 * eth_configure. Will be set in an initcall.
 */
static int driver_registered;

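/*
 * Create and register one UML network interface: allocate the uml_net
 * bookkeeping structure and the etherdev, register the backing platform
 * device, let the transport fill in its private data and finally register
 * the netdevice.  Failures unwind through the out_* labels below.
 */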
static void eth_configure(int n, void *init, char *mac,
			  struct transport *transport, gfp_t gfp_mask)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;
	int err, size;

	size = transport->private_size + sizeof(struct uml_net_private);

	device = kzalloc(sizeof(*device), gfp_mask);
	if (device == NULL) {
		printk(KERN_ERR "eth_configure failed to allocate struct "
		       "uml_net\n");
		return;
	}

	dev = alloc_etherdev(size);
	if (dev == NULL) {
		printk(KERN_ERR "eth_configure: failed to allocate struct "
		       "net_device for eth%d\n", n);
		goto out_free_device;
	}

	INIT_LIST_HEAD(&device->list);
	device->index = n;

	/* If this name ends up conflicting with an existing registered
	 * netdevice, that is OK, register_netdev{,ice}() will notice this
	 * and fail.
	 */
	snprintf(dev->name, sizeof(dev->name), "eth%d", n);

	uml_net_setup_etheraddr(dev, mac);

	printk(KERN_INFO "Netdevice %d (%pM) : ", n, dev->dev_addr);

	lp = netdev_priv(dev);
	/* This points to the transport private data. It's still clear, but we
	 * must memset it to 0 *now*. Let's help the drivers. */
	memset(lp, 0, size);
	INIT_WORK(&lp->work, uml_dev_close);

	/* sysfs register */
	if (!driver_registered) {
		platform_driver_register(&uml_net_driver);
		driver_registered = 1;
	}
	device->pdev.id = n;
	device->pdev.name = DRIVER_NAME;
	device->pdev.dev.release = net_device_release;
	dev_set_drvdata(&device->pdev.dev, device);
	if (platform_device_register(&device->pdev))
		goto out_free_netdev;
	SET_NETDEV_DEV(dev, &device->pdev.dev);

	device->dev = dev;

	/*
	 * These just fill in a data structure, so there's no failure
	 * to be worried about.
	 */
	(*transport->kern->init)(dev, init);

	*lp = ((struct uml_net_private)
		{ .list			= LIST_HEAD_INIT(lp->list),
		  .dev			= dev,
		  .fd			= -1,
		  .mac			= { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
		  .max_packet		= transport->user->max_packet,
		  .protocol		= transport->kern->protocol,
		  .open			= transport->user->open,
		  .close		= transport->user->close,
		  .remove		= transport->user->remove,
		  .read			= transport->kern->read,
		  .write		= transport->kern->write,
		  .add_address		= transport->user->add_address,
		  .delete_address	= transport->user->delete_address });

	spin_lock_init(&lp->lock);
	memcpy(lp->mac, dev->dev_addr, sizeof(lp->mac));

	if ((transport->user->init != NULL) &&
	    ((*transport->user->init)(&lp->user, dev) != 0))
		goto out_unregister;

	dev->mtu = transport->user->mtu;
	dev->netdev_ops = &uml_netdev_ops;
	dev->ethtool_ops = &uml_net_ethtool_ops;
	dev->watchdog_timeo = (HZ >> 1);
	dev->irq = UM_ETH_IRQ;

	err = update_drop_skb(lp->max_packet);
	if (err)
		goto out_undo_user_init;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err)
		goto out_undo_user_init;

	spin_lock(&devices_lock);
	list_add(&device->list, &devices);
	spin_unlock(&devices_lock);

	return;

out_undo_user_init:
	if (transport->user->remove != NULL)
		(*transport->user->remove)(&lp->user);
out_unregister:
	platform_device_unregister(&device->pdev);
	return; /* platform_device_unregister frees dev and device */
out_free_netdev:
	free_netdev(dev);
out_free_device:
	kfree(device);
}

static struct uml_net *find_device(int n)
{
	struct uml_net *device;
	struct list_head *ele;

	spin_lock(&devices_lock);
	list_for_each(ele, &devices) {
		device = list_entry(ele, struct uml_net, list);
		if (device->index == n)
			goto out;
	}
	device = NULL;
 out:
	spin_unlock(&devices_lock);
	return device;
}

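/*
 * Parse the "<n>=<transport spec>" form of an eth= option: return the
 * device index through index_out and the text after the '=' through
 * str_out.
 */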
static int eth_parse(char *str, int *index_out, char **str_out,
		     char **error_out)
{
	char *end;
	int n, err = -EINVAL;

	n = simple_strtoul(str, &end, 0);
	if (end == str) {
		*error_out = "Bad device number";
		return err;
	}

	str = end;
	if (*str != '=') {
		*error_out = "Expected '=' after device number";
		return err;
	}

	str++;
	if (find_device(n)) {
		*error_out = "Device already configured";
		return err;
	}

	*index_out = n;
	*str_out = str;
	return 0;
}

struct eth_init {
	struct list_head list;
	char *init;
	int index;
};

static DEFINE_SPINLOCK(transports_lock);
static LIST_HEAD(transports);

/* Filled in during early boot */
static LIST_HEAD(eth_cmd_line);

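/*
 * Check whether a command-line specification names this transport.  On a
 * match, allocate the transport's setup structure and let the transport
 * parse the remaining options; if that parse fails, *init_out is left NULL
 * but the string still counts as matched.
 */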
static int check_transport(struct transport *transport, char *eth, int n,
			   void **init_out, char **mac_out, gfp_t gfp_mask)
{
	int len;

	len = strlen(transport->name);
	if (strncmp(eth, transport->name, len))
		return 0;

	eth += len;
	if (*eth == ',')
		eth++;
	else if (*eth != '\0')
		return 0;

	*init_out = kmalloc(transport->setup_size, gfp_mask);
	if (*init_out == NULL)
		return 1;

	if (!transport->setup(eth, mac_out, *init_out)) {
		kfree(*init_out);
		*init_out = NULL;
	}
	return 1;
}

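/*
 * Called by each transport as it initialises.  Any "ethN=" command-line
 * entries that were waiting for this transport are configured now and
 * removed from eth_cmd_line.
 */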
void register_transport(struct transport *new)
{
	struct list_head *ele, *next;
	struct eth_init *eth;
	void *init;
	char *mac = NULL;
	int match;

	spin_lock(&transports_lock);
	BUG_ON(!list_empty(&new->list));
	list_add(&new->list, &transports);
	spin_unlock(&transports_lock);

	list_for_each_safe(ele, next, &eth_cmd_line) {
		eth = list_entry(ele, struct eth_init, list);
		match = check_transport(new, eth->init, eth->index, &init,
					&mac, GFP_KERNEL);
		if (!match)
			continue;
		else if (init != NULL) {
			eth_configure(eth->index, init, mac, new, GFP_KERNEL);
			kfree(init);
		}
		list_del(&eth->list);
	}
}

static int eth_setup_common(char *str, int index)
{
	struct list_head *ele;
	struct transport *transport;
	void *init;
	char *mac = NULL;
	int found = 0;

	spin_lock(&transports_lock);
	list_for_each(ele, &transports) {
		transport = list_entry(ele, struct transport, list);
		if (!check_transport(transport, str, index, &init,
					&mac, GFP_ATOMIC))
			continue;
		if (init != NULL) {
			eth_configure(index, init, mac, transport, GFP_ATOMIC);
			kfree(init);
		}
		found = 1;
		break;
	}

	spin_unlock(&transports_lock);
	return found;
}

static int __init eth_setup(char *str)
{
	struct eth_init *new;
	char *error;
	int n, err;

	err = eth_parse(str, &n, &str, &error);
	if (err) {
		printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n",
		       str, error);
		return 1;
	}

	new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
	if (!new)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*new));

	INIT_LIST_HEAD(&new->list);
	new->index = n;
	new->init = str;

	list_add_tail(&new->list, &eth_cmd_line);
	return 1;
}

__setup("eth", eth_setup);
__uml_help(eth_setup,
"eth[0-9]+=<transport>,<options>\n"
"    Configure a network device.\n\n"
);
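/*
 * Example (assuming the TUN/TAP transport is built in):
 *
 *	eth0=tuntap,,,192.168.0.254
 *
 * would attach eth0 to a host tap device with 192.168.0.254 as the
 * host-side address.  The option syntax after the transport name is
 * defined by each transport.
 */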

static int net_config(char *str, char **error_out)
{
	int n, err;

	err = eth_parse(str, &n, &str, error_out);
	if (err)
		return err;

	/* This string is broken up and the pieces used by the underlying
	 * driver. So, it is freed only if eth_setup_common fails.
	 */
	str = kstrdup(str, GFP_KERNEL);
	if (str == NULL) {
		*error_out = "net_config failed to strdup string";
		return -ENOMEM;
	}
	err = !eth_setup_common(str, n);
	if (err)
		kfree(str);
	return err;
}

static int net_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*start_out = n;
	*end_out = n;
	*str = end;
	return n;
}

static int net_remove(int n, char **error_out)
{
	struct uml_net *device;
	struct net_device *dev;
	struct uml_net_private *lp;

	device = find_device(n);
	if (device == NULL)
		return -ENODEV;

	dev = device->dev;
	lp = netdev_priv(dev);
	if (lp->fd > 0)
		return -EBUSY;
	unregister_netdev(dev);
	platform_device_unregister(&device->pdev);

	return 0;
}

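/*
 * mconsole "eth" device: lets the management console add, identify and
 * remove network interfaces at runtime.
 */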
static struct mc_device net_mc = {
	.list		= LIST_HEAD_INIT(net_mc.list),
	.name		= "eth",
	.config		= net_config,
	.get_config	= NULL,
	.id		= net_id,
	.remove		= net_remove,
};

#ifdef CONFIG_INET
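/*
 * Forward IPv4 address add/remove events on our devices to the transport's
 * add_address/delete_address hooks so the host side can track them.
 */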
static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
			      void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct uml_net_private *lp;
	void (*proc)(unsigned char *, unsigned char *, void *);
	unsigned char addr_buf[4], netmask_buf[4];

	if (dev->netdev_ops->ndo_open != uml_net_open)
		return NOTIFY_DONE;

	lp = netdev_priv(dev);

	proc = NULL;
	switch (event) {
	case NETDEV_UP:
		proc = lp->add_address;
		break;
	case NETDEV_DOWN:
		proc = lp->delete_address;
		break;
	}
	if (proc != NULL) {
		memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
		memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
		(*proc)(addr_buf, netmask_buf, &lp->user);
	}
	return NOTIFY_DONE;
}

/* uml_net_init shouldn't be called twice on two CPUs at the same time */
static struct notifier_block uml_inetaddr_notifier = {
	.notifier_call		= uml_inetaddr_event,
};

static void inet_register(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;
	struct in_device *ip;
	struct in_ifaddr *in;

	register_inetaddr_notifier(&uml_inetaddr_notifier);

	/* Devices may have been opened already, so the uml_inetaddr_notifier
	 * didn't get a chance to run for them. This fakes it so that
	 * addresses which have already been set up get handled properly.
	 */
	spin_lock(&opened_lock);
	list_for_each(ele, &opened) {
		lp = list_entry(ele, struct uml_net_private, list);
		ip = lp->dev->ip_ptr;
		if (ip == NULL)
			continue;
		in = ip->ifa_list;
		while (in != NULL) {
			uml_inetaddr_event(NULL, NETDEV_UP, in);
			in = in->ifa_next;
		}
	}
	spin_unlock(&opened_lock);
}
#else
static inline void inet_register(void)
{
}
#endif

static int uml_net_init(void)
{
	mconsole_register_dev(&net_mc);
	inet_register();
	return 0;
}

__initcall(uml_net_init);

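/*
 * Exitcall: on UML shutdown, close every open transport descriptor and let
 * the transports clean up their host-side state.
 */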
static void close_devices(void)
{
	struct list_head *ele;
	struct uml_net_private *lp;

	spin_lock(&opened_lock);
	list_for_each(ele, &opened) {
		lp = list_entry(ele, struct uml_net_private, list);
		um_free_irq(lp->dev->irq, lp->dev);
		if ((lp->close != NULL) && (lp->fd >= 0))
			(*lp->close)(lp->fd, &lp->user);
		if (lp->remove != NULL)
			(*lp->remove)(&lp->user);
	}
	spin_unlock(&opened_lock);
}

__uml_exitcall(close_devices);

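/*
 * Invoke cb for each IPv4 address/netmask pair configured on the device;
 * intended for the transports' user-side code.
 */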
void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
					void *),
		    void *arg)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	unsigned char address[4], netmask[4];

	if (ip == NULL) return;
	in = ip->ifa_list;
	while (in != NULL) {
		memcpy(address, &in->ifa_address, sizeof(address));
		memcpy(netmask, &in->ifa_mask, sizeof(netmask));
		(*cb)(address, netmask, arg);
		in = in->ifa_next;
	}
}

int dev_netmask(void *d, void *m)
{
	struct net_device *dev = d;
	struct in_device *ip = dev->ip_ptr;
	struct in_ifaddr *in;
	__be32 *mask_out = m;

	if (ip == NULL)
		return 1;

	in = ip->ifa_list;
	if (in == NULL)
		return 1;

	*mask_out = in->ifa_mask;
	return 0;
}

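/*
 * Allocate/free a one-page scratch buffer; *len_out reports the usable
 * size, or 0 if the allocation failed.
 */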
void *get_output_buffer(int *len_out)
{
	void *ret;

	ret = (void *) __get_free_pages(GFP_KERNEL, 0);
	if (ret) *len_out = PAGE_SIZE;
	else *len_out = 0;
	return ret;
}

void free_output_buffer(void *buffer)
{
	free_pages((unsigned long) buffer, 0);
}

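/*
 * Shared option parsing for the TAP-style transports: split an
 * "interface,mac,gateway" specification and reject trailing garbage.
 */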
int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
		     char **gate_addr)
{
	char *remain;

	remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
	if (remain != NULL) {
		printk(KERN_ERR "tap_setup_common - Extra garbage on "
		       "specification : '%s'\n", remain);
		return 1;
	}

	return 0;
}

unsigned short eth_protocol(struct sk_buff *skb)
{
	return eth_type_trans(skb, skb->dev);
}