// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

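/*
 * netpoll_srcu lets netpoll_poll_disable() observe a stable npinfo
 * pointer; __netpoll_cleanup() waits for such readers to drain via
 * synchronize_srcu() before tearing npinfo down.
 */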
DEFINE_STATIC_SRCU(netpoll_srcu);

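/* Delay between transmit retries, in microseconds */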
#define USEC_PER_POLL 50

#define MAX_SKB_SIZE                                    \
        (sizeof(struct ethhdr) +                        \
         sizeof(struct iphdr) +                         \
         sizeof(struct udphdr) +                        \
         MAX_UDP_CHUNK)

static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)                           \
        pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)                            \
        pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)                         \
        pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

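/*
 * Hand one skb directly to the driver on the given tx queue. The
 * caller holds the queue's xmit lock; if the device cannot offload
 * the VLAN tag, it is pushed into the packet data first.
 */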
static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
                              struct netdev_queue *txq)
{
        int status = NETDEV_TX_OK;
        netdev_features_t features;

        features = netif_skb_features(skb);

        if (skb_vlan_tag_present(skb) &&
            !vlan_hw_offload_capable(features, skb->vlan_proto)) {
                skb = __vlan_hwaccel_push_inside(skb);
                if (unlikely(!skb)) {
                        /* This is actually a packet drop, but we
                         * don't want the code that calls this
                         * function to try and operate on a NULL skb.
                         */
                        goto out;
                }
        }

        status = netdev_start_xmit(skb, dev, txq, false);

out:
        return status;
}

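/*
 * Deferred transmit path: drain skbs that netpoll_send_skb_on_dev()
 * could not send immediately. If the queue is still frozen or the
 * driver is busy, the skb is requeued and the work rescheduled.
 */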
static void queue_process(struct work_struct *work)
{
        struct netpoll_info *npinfo =
                container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
                struct netdev_queue *txq;
                unsigned int q_index;

                if (!netif_device_present(dev) || !netif_running(dev)) {
                        kfree_skb(skb);
                        continue;
                }

                local_irq_save(flags);
                /* check if skb->queue_mapping is still valid */
                q_index = skb_get_queue_mapping(skb);
                if (unlikely(q_index >= dev->real_num_tx_queues)) {
                        q_index = q_index % dev->real_num_tx_queues;
                        skb_set_queue_mapping(skb, q_index);
                }
                txq = netdev_get_tx_queue(dev, q_index);
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
                    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
                        skb_queue_head(&npinfo->txq, skb);
                        HARD_TX_UNLOCK(dev, txq);
                        local_irq_restore(flags);

                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
                HARD_TX_UNLOCK(dev, txq);
                local_irq_restore(flags);
        }
}

static void poll_one_napi(struct napi_struct *napi)
{
        int work;

        /* If we set this bit but see that it has already been set,
         * that indicates that napi has been disabled and we need
         * to abort this operation.
         */
        if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
                return;

        /* We explicitly pass the polling call a budget of 0 to
         * indicate that we are clearing the Tx path only.
         */
        work = napi->poll(napi, 0);
        WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
        trace_napi_poll(napi, work, 0);

        clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

static void poll_napi(struct net_device *dev)
{
        struct napi_struct *napi;
        int cpu = smp_processor_id();

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
                        poll_one_napi(napi);
                        smp_store_release(&napi->poll_owner, -1);
                }
        }
}

void netpoll_poll_dev(struct net_device *dev)
{
        struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
        const struct net_device_ops *ops;

        /* Don't do any rx activity if the dev_lock mutex is held;
         * the dev_open/close paths use this to block netpoll activity
         * while changing device state.
         */
        if (!ni || down_trylock(&ni->dev_lock))
                return;

        if (!netif_running(dev)) {
                up(&ni->dev_lock);
                return;
        }

        ops = dev->netdev_ops;
        if (ops->ndo_poll_controller)
                ops->ndo_poll_controller(dev);

        poll_napi(dev);

        up(&ni->dev_lock);

        zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

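/*
 * netpoll_poll_disable()/netpoll_poll_enable() bracket device state
 * changes: taking dev_lock here keeps netpoll_poll_dev() out while
 * dev_open()/dev_close() reconfigure the device.
 */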
void netpoll_poll_disable(struct net_device *dev)
{
        struct netpoll_info *ni;
        int idx;

        might_sleep();
        idx = srcu_read_lock(&netpoll_srcu);
        ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
        if (ni)
                down(&ni->dev_lock);
        srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
        struct netpoll_info *ni;

        rcu_read_lock();
        ni = rcu_dereference(dev->npinfo);
        if (ni)
                up(&ni->dev_lock);
        rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);

static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_pool.lock, flags);
        while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                __skb_queue_tail(&skb_pool, skb);
        }
        spin_unlock_irqrestore(&skb_pool.lock, flags);
}

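/*
 * Free skbs sitting on this CPU's completion queue. Netpoll may run
 * with IRQs disabled, so the softirq that normally frees them might
 * never get a chance to; reclaiming them here keeps memory available
 * for find_skb().
 */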
static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if (!skb_irq_freeable(skb)) {
                                refcount_set(&skb->users, 1);
                                dev_kfree_skb_any(skb); /* put this one back */
                        } else {
                                __kfree_skb(skb);
                        }
                }
        }

        put_cpu_var(softnet_data);
}

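/*
 * Allocate an skb for a netpoll message: try a fresh atomic
 * allocation, fall back to the preallocated pool, and as a last
 * resort poll the device a few times to flush out tx completions.
 */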
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        zap_completion_queue();
        refill_skbs();
repeat:

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                skb = skb_dequeue(&skb_pool);

        if (!skb) {
                if (++count < 10) {
                        netpoll_poll_dev(np->dev);
                        goto repeat;
                }
                return NULL;
        }

        refcount_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

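/*
 * Return nonzero when this CPU is already inside ->poll() for one of
 * the device's NAPI contexts, to avoid recursing into the driver.
 */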
static int netpoll_owner_active(struct net_device *dev)
{
        struct napi_struct *napi;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner == smp_processor_id())
                        return 1;
        }
        return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                             struct net_device *dev)
{
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo;

        lockdep_assert_irqs_disabled();

        npinfo = rcu_dereference_bh(np->dev->npinfo);
        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                dev_kfree_skb_irq(skb);
                return;
        }

        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;

                txq = netdev_core_pick_tx(dev, skb, NULL);

                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (HARD_TX_TRYLOCK(dev, txq)) {
                                if (!netif_xmit_stopped(txq))
                                        status = netpoll_start_xmit(skb, dev, txq);

                                HARD_TX_UNLOCK(dev, txq);

                                if (dev_xmit_complete(status))
                                        break;
                        }

                        /* tickle the device, maybe there is some cleanup */
                        netpoll_poll_dev(np->dev);

                        udelay(USEC_PER_POLL);
                }

                WARN_ONCE(!irqs_disabled(),
                          "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
                          dev->name, dev->netdev_ops->ndo_start_xmit);
        }

        if (!dev_xmit_complete(status)) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work, 0);
        }
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

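/*
 * Build a UDP packet around msg and hand it to netpoll_send_skb().
 * Headers are constructed back to front: the payload is copied in
 * first, then the UDP, IPv4/IPv6, and Ethernet headers are pushed on.
 */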
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;
        static atomic_t ip_ident;
        struct ipv6hdr *ip6h;

        WARN_ON_ONCE(!irqs_disabled());

        udp_len = len + sizeof(*udph);
        if (np->ipv6)
                ip_len = udp_len + sizeof(*ip6h);
        else
                ip_len = udp_len + sizeof(*iph);

        total_len = ip_len + LL_RESERVED_SPACE(np->dev);

        skb = find_skb(np, total_len + np->dev->needed_tailroom,
                       total_len - len);
        if (!skb)
                return;

        skb_copy_to_linear_data(skb, msg, len);
        skb_put(skb, len);

        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);

        if (np->ipv6) {
                udph->check = 0;
                udph->check = csum_ipv6_magic(&np->local_ip.in6,
                                              &np->remote_ip.in6,
                                              udp_len, IPPROTO_UDP,
                                              csum_partial(udph, udp_len, 0));
                if (udph->check == 0)
                        udph->check = CSUM_MANGLED_0;

                skb_push(skb, sizeof(*ip6h));
                skb_reset_network_header(skb);
                ip6h = ipv6_hdr(skb);

                /* ip6h->version = 6; ip6h->priority = 0; */
                put_unaligned(0x60, (unsigned char *)ip6h);
                ip6h->flow_lbl[0] = 0;
                ip6h->flow_lbl[1] = 0;
                ip6h->flow_lbl[2] = 0;

                ip6h->payload_len = htons(sizeof(struct udphdr) + len);
                ip6h->nexthdr = IPPROTO_UDP;
                ip6h->hop_limit = 32;
                ip6h->saddr = np->local_ip.in6;
                ip6h->daddr = np->remote_ip.in6;

                eth = skb_push(skb, ETH_HLEN);
                skb_reset_mac_header(skb);
                skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
        } else {
                udph->check = 0;
                udph->check = csum_tcpudp_magic(np->local_ip.ip,
                                                np->remote_ip.ip,
                                                udp_len, IPPROTO_UDP,
                                                csum_partial(udph, udp_len, 0));
                if (udph->check == 0)
                        udph->check = CSUM_MANGLED_0;

                skb_push(skb, sizeof(*iph));
                skb_reset_network_header(skb);
                iph = ip_hdr(skb);

                /* iph->version = 4; iph->ihl = 5; */
                put_unaligned(0x45, (unsigned char *)iph);
                iph->tos = 0;
                put_unaligned(htons(ip_len), &(iph->tot_len));
                iph->id = htons(atomic_inc_return(&ip_ident));
                iph->frag_off = 0;
                iph->ttl = 64;
                iph->protocol = IPPROTO_UDP;
                iph->check = 0;
                put_unaligned(np->local_ip.ip, &(iph->saddr));
                put_unaligned(np->remote_ip.ip, &(iph->daddr));
                iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

                eth = skb_push(skb, ETH_HLEN);
                skb_reset_mac_header(skb);
                skb->protocol = eth->h_proto = htons(ETH_P_IP);
        }

        ether_addr_copy(eth->h_source, np->dev->dev_addr);
        ether_addr_copy(eth->h_dest, np->remote_mac);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

void netpoll_print_options(struct netpoll *np)
{
        np_info(np, "local port %d\n", np->local_port);
        if (np->ipv6)
                np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
        else
                np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
        np_info(np, "interface '%s'\n", np->dev_name);
        np_info(np, "remote port %d\n", np->remote_port);
        if (np->ipv6)
                np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
        else
                np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
        np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

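/*
 * Parse an IP address literal: returns 0 for IPv4, 1 for IPv6,
 * and -1 if the string is neither (or IPv6 support is compiled out).
 */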
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
        const char *end;

        if (!strchr(str, ':') &&
            in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
                if (!*end)
                        return 0;
        }
        if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
                if (!*end)
                        return 1;
#else
                return -1;
#endif
        }
        return -1;
}

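/*
 * Parse a netconsole-style configuration string of the form
 *
 *   src-port@src-ip/dev-name,dst-port@dst-ip/dst-mac
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55".
 * Fields other than the destination IP may be omitted, leaving
 * their delimiters in place, to keep the corresponding default.
 */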
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;
        int ipv6;
        bool ipversion_set = false;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                if (kstrtou16(cur, 10, &np->local_port))
                        goto parse_failed;
                cur = delim;
        }
        cur++;

        if (*cur != '/') {
                ipversion_set = true;
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
                if (ipv6 < 0)
                        goto parse_failed;
                else
                        np->ipv6 = (bool)ipv6;
                cur = delim;
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                if (*cur == ' ' || *cur == '\t')
                        np_info(np, "warning: whitespace is not allowed\n");
                if (kstrtou16(cur, 10, &np->remote_port))
                        goto parse_failed;
                cur = delim;
        }
        cur++;

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
        if (ipv6 < 0)
                goto parse_failed;
        else if (ipversion_set && np->ipv6 != (bool)ipv6)
                goto parse_failed;
        else
                np->ipv6 = (bool)ipv6;
        cur = delim + 1;

        if (*cur != 0) {
                /* MAC address */
                if (!mac_pton(cur, np->remote_mac))
                        goto parse_failed;
        }

        netpoll_print_options(np);

        return 0;

 parse_failed:
        np_info(np, "couldn't parse config at '%s'!\n", cur);
        return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

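/*
 * Attach np to an already-resolved, running device; the caller holds
 * RTNL. netpoll_setup() below is the full-service variant that looks
 * the device up by name, brings it up, and picks a local address
 * before ending up here.
 */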
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
        struct netpoll_info *npinfo;
        const struct net_device_ops *ops;
        int err;

        np->dev = ndev;
        strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

        if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
                np_err(np, "%s doesn't support polling, aborting\n",
                       np->dev_name);
                err = -ENOTSUPP;
                goto out;
        }

        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto out;
                }

                sema_init(&npinfo->dev_lock, 1);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

                refcount_set(&npinfo->refcnt, 1);

                ops = np->dev->netdev_ops;
                if (ops->ndo_netpoll_setup) {
                        err = ops->ndo_netpoll_setup(ndev, npinfo);
                        if (err)
                                goto free_npinfo;
                }
        } else {
                npinfo = rtnl_dereference(ndev->npinfo);
                refcount_inc(&npinfo->refcnt);
        }

        npinfo->netpoll = np;

        /* last thing to do is link it to the net device structure */
        rcu_assign_pointer(ndev->npinfo, npinfo);

        return 0;

free_npinfo:
        kfree(npinfo);
out:
        return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        int err;

        rtnl_lock();
        if (np->dev_name[0]) {
                struct net *net = current->nsproxy->net_ns;

                ndev = __dev_get_by_name(net, np->dev_name);
        }
        if (!ndev) {
                np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
                err = -ENODEV;
                goto unlock;
        }
        dev_hold(ndev);

        if (netdev_master_upper_dev_get(ndev)) {
                np_err(np, "%s is a slave device, aborting\n", np->dev_name);
                err = -EBUSY;
                goto put;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

                err = dev_open(ndev, NULL);

                if (err) {
                        np_err(np, "failed to open %s\n", ndev->name);
                        goto put;
                }

                rtnl_unlock();
                atleast = jiffies + HZ/10;
                atmost = jiffies + carrier_timeout * HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                np_notice(np, "timeout waiting for carrier\n");
                                break;
                        }
                        msleep(1);
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */
                if (time_before(jiffies, atleast)) {
                        np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
                        msleep(4000);
                }
                rtnl_lock();
        }

        if (!np->local_ip.ip) {
                if (!np->ipv6) {
                        const struct in_ifaddr *ifa;

                        in_dev = __in_dev_get_rtnl(ndev);
                        if (!in_dev)
                                goto put_noaddr;

                        ifa = rtnl_dereference(in_dev->ifa_list);
                        if (!ifa) {
put_noaddr:
                                np_err(np, "no IP address for %s, aborting\n",
                                       np->dev_name);
                                err = -EDESTADDRREQ;
                                goto put;
                        }

                        np->local_ip.ip = ifa->ifa_local;
                        np_info(np, "local IP %pI4\n", &np->local_ip.ip);
                } else {
#if IS_ENABLED(CONFIG_IPV6)
                        struct inet6_dev *idev;

                        err = -EDESTADDRREQ;
                        idev = __in6_dev_get(ndev);
                        if (idev) {
                                struct inet6_ifaddr *ifp;

                                read_lock_bh(&idev->lock);
                                list_for_each_entry(ifp, &idev->addr_list, if_list) {
                                        if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
                                            !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
                                                continue;
                                        np->local_ip.in6 = ifp->addr;
                                        err = 0;
                                        break;
                                }
                                read_unlock_bh(&idev->lock);
                        }
                        if (err) {
                                np_err(np, "no IPv6 address for %s, aborting\n",
                                       np->dev_name);
                                goto put;
                        } else
                                np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
                        np_err(np, "IPv6 is not supported on %s, aborting\n",
                               np->dev_name);
                        err = -EINVAL;
                        goto put;
#endif
                }
        }

        /* fill up the skb queue */
        refill_skbs();

        err = __netpoll_setup(np, ndev);
        if (err)
                goto put;

        rtnl_unlock();
        return 0;

put:
        dev_put(ndev);
unlock:
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL(netpoll_setup);

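/* One-time initialization of the shared skb pool */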
static int __init netpoll_init(void)
{
        skb_queue_head_init(&skb_pool);
        return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
        struct netpoll_info *npinfo =
                        container_of(rcu_head, struct netpoll_info, rcu);

        skb_queue_purge(&npinfo->txq);

        /* we can't call cancel_delayed_work_sync here, as we are in softirq */
        cancel_delayed_work(&npinfo->tx_work);

        /* clean after last, unfinished work */
        __skb_queue_purge(&npinfo->txq);
        /* now cancel it again */
        cancel_delayed_work(&npinfo->tx_work);
        kfree(npinfo);
}

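/*
 * Detach np from its device. synchronize_srcu() waits out any
 * netpoll_poll_disable() readers before the npinfo reference is
 * dropped; the final put defers the actual free to an RCU callback.
 */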
void __netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;

        npinfo = rtnl_dereference(np->dev->npinfo);
        if (!npinfo)
                return;

        synchronize_srcu(&netpoll_srcu);

        if (refcount_dec_and_test(&npinfo->refcnt)) {
                const struct net_device_ops *ops;

                ops = np->dev->netdev_ops;
                if (ops->ndo_netpoll_cleanup)
                        ops->ndo_netpoll_cleanup(np->dev);

                RCU_INIT_POINTER(np->dev->npinfo, NULL);
                call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
        } else
                RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void __netpoll_free(struct netpoll *np)
{
        ASSERT_RTNL();

        /* Wait for transmitting packets to finish before freeing. */
        synchronize_rcu();
        __netpoll_cleanup(np);
        kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);

void netpoll_cleanup(struct netpoll *np)
{
        rtnl_lock();
        if (!np->dev)
                goto out;
        __netpoll_cleanup(np);
        dev_put(np->dev);
        np->dev = NULL;
out:
        rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);