// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
#include <linux/kconfig.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL 50

#define MAX_SKB_SIZE					\
	(sizeof(struct ethhdr) +			\
	 sizeof(struct iphdr) +				\
	 sizeof(struct udphdr) +			\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct netdev_queue *txq)
{
	netdev_tx_t status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}

static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}

void netpoll_poll_dev(struct net_device *dev)
{
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	const struct net_device_ops *ops;

	/* Don't do any rx activity if the dev_lock semaphore is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (!ni || down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);

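/*
 * Usage note (a sketch, not logic belonging to this file): the core
 * device-state paths pair these two calls to fence netpoll off while a
 * device changes state, as __dev_open() and __dev_close_many() in
 * net/core/dev.c do:
 *
 *	netpoll_poll_disable(dev);	// may sleep; waits out any poller
 *	// ... bring the interface up or down ...
 *	netpoll_poll_enable(dev);	// releases dev_lock, polling resumes
 */
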
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				refcount_set(&skb->users, 1);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	netdev_tx_t status = NETDEV_TX_BUSY;
	struct net_device *dev;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	dev = np->dev;
	npinfo = rcu_dereference_bh(dev->npinfo);

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return NET_XMIT_DROP;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_core_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (dev_xmit_complete(status))
					break;
			}

			/* tickle the device, maybe there is some cleanup to do */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (!dev_xmit_complete(status)) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
	return NETDEV_TX_OK;
}

netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;
	netdev_tx_t ret;

	if (unlikely(!np)) {
		dev_kfree_skb_irq(skb);
		ret = NET_XMIT_DROP;
	} else {
		local_irq_save(flags);
		ret = __netpoll_send_skb(np, skb);
		local_irq_restore(flags);
	}
	return ret;
}
EXPORT_SYMBOL(netpoll_send_skb);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		*(unsigned char *)ip6h = 0x60;
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		*(unsigned char *)iph = 0x45;
		iph->tos = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

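/*
 * Example (a hedged sketch, not an excerpt from an in-tree user): a
 * client such as netconsole fills in a struct netpoll, runs the setup
 * once, and can then emit datagrams from almost any context, including
 * with IRQs disabled. The names "my_np" and "my_send_line" below are
 * hypothetical; addresses would normally come from
 * netpoll_parse_options() rather than being hardcoded, and callers
 * keep len within MAX_UDP_CHUNK so the OOM-pool skbs always suffice:
 *
 *	static struct netpoll my_np = {
 *		.name		= "my_console",
 *		.dev_name	= "eth0",
 *		.local_port	= 6665,
 *		.remote_port	= 6666,
 *		.remote_mac	= {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	static void my_send_line(const char *msg, int len)
 *	{
 *		netpoll_send_udp(&my_np, msg, len);
 *	}
 */
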
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

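/*
 * The option string accepted above follows the netconsole format
 * documented in Documentation/networking/netconsole.rst:
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * A hedged usage sketch (addresses are made up); note the buffer must
 * be writable, since parsing replaces the delimiters with NULs:
 *
 *	char opt[] = "6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55";
 *
 *	if (netpoll_parse_options(&np, opt))
 *		return -EINVAL;
 */
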
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;

		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev, NULL);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			const struct in_ifaddr *ifa;

			in_dev = __in_dev_get_rtnl(ndev);
			if (!in_dev)
				goto put_noaddr;

			ifa = rtnl_dereference(in_dev->ifa_list);
			if (!ifa) {
put_noaddr:
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = ifa->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported on %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

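/*
 * Lifecycle sketch (hypothetical client, modeled loosely on
 * drivers/net/netconsole.c): parse and set up once at module init,
 * clean up at exit. "my_np" and "config" are assumed names.
 *
 *	static int __init my_client_init(void)
 *	{
 *		int err;
 *
 *		if (netpoll_parse_options(&my_np, config))
 *			return -EINVAL;
 *		err = netpoll_setup(&my_np);	// takes rtnl_lock(), may sleep
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 *
 *	static void __exit my_client_exit(void)
 *	{
 *		netpoll_cleanup(&my_np);	// releases the device reference
 *	}
 */
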
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
		container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void __netpoll_free(struct netpoll *np)
{
	ASSERT_RTNL();

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu();
	__netpoll_cleanup(np);
	kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);