// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003 Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
#include <linux/kconfig.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct netdev_queue *txq)
{
	netdev_tx_t status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}

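/* Drain npinfo->txq, the queue of skbs that netpoll could not transmit
 * immediately. Frames whose device has vanished or gone down are
 * dropped; if the tx queue is frozen/stopped or the driver rejects a
 * frame, it is requeued and this work is rescheduled for later.
 */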
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}

static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

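/* Poll every NAPI instance on the device whose poll ownership we can
 * claim; the cmpxchg() on poll_owner keeps us from recursing into a
 * poll that is already running on this or another CPU.
 */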
static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}

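/* Manually pump the device: run its ndo_poll_controller() hook and its
 * NAPI handlers so tx completions can be reaped even while interrupts
 * are disabled.
 */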
void netpoll_poll_dev(struct net_device *dev)
{
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	const struct net_device_ops *ops;

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (!ni || down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);

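/* Top the preallocated pool back up to MAX_SKBS so an skb is likely to
 * be available even when alloc_skb() fails under memory pressure.
 */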
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

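/* Reclaim this CPU's tx completion queue by hand: netpoll may run with
 * IRQs disabled, so the NET_TX softirq that normally frees these skbs
 * cannot be relied upon here.
 */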
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				refcount_set(&skb->users, 1);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

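/* Get an skb for a message: try a fresh atomic allocation first, fall
 * back to the preallocated pool, and as a last resort poll the device
 * a few times in the hope that it releases completed buffers.
 */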
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

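/* Return 1 if any NAPI instance of this device is currently being
 * polled by this CPU, i.e. transmitting now would recurse into ->poll().
 */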
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	netdev_tx_t status = NETDEV_TX_BUSY;
	struct net_device *dev;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	dev = np->dev;
	npinfo = rcu_dereference_bh(dev->npinfo);

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return NET_XMIT_DROP;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_core_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (dev_xmit_complete(status))
					break;

			}

			/* tickle the device, in case there is some tx cleanup to do */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);

	}

	if (!dev_xmit_complete(status)) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
	return NETDEV_TX_OK;
}

netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;
	netdev_tx_t ret;

	if (unlikely(!np)) {
		dev_kfree_skb_irq(skb);
		ret = NET_XMIT_DROP;
	} else {
		local_irq_save(flags);
		ret = __netpoll_send_skb(np, skb);
		local_irq_restore(flags);
	}
	return ret;
}
EXPORT_SYMBOL(netpoll_send_skb);

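/* Build an Ethernet/IPv4-or-IPv6/UDP frame around @msg entirely by
 * hand (no routing and no neighbour resolution; the remote MAC comes
 * from the netpoll config) and push it out via netpoll_send_skb().
 */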
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		*(unsigned char *)ip6h = 0x60;
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		*(unsigned char *)iph = 0x45;
		iph->tos = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

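/* Parse an IP address string into @addr. Returns 0 for IPv4, 1 for
 * IPv6 (when CONFIG_IPV6 is enabled), and -1 on failure.
 */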
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

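/* Parse a netconsole-style configuration string of the form
 * [src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr],
 * where a bracketed part may be omitted to keep its default.
 */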
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

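/* Attach @np to @ndev, allocating and linking a netpoll_info if this
 * is the device's first netpoll client. Caller must hold RTNL.
 */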
int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

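/* Full setup path: resolve the device by name, bring it up and wait
 * for carrier if needed, pick a local IP address when none was given,
 * then hand off to __netpoll_setup() under RTNL.
 */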
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;

		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev, NULL);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			const struct in_ifaddr *ifa;

			in_dev = __in_dev_get_rtnl(ndev);
			if (!in_dev)
				goto put_noaddr;

			ifa = rtnl_dereference(in_dev->ifa_list);
			if (!ifa) {
put_noaddr:
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = ifa->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported on %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);

	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);

	kfree(npinfo);
}

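/* Detach @np from its device. Waits for in-flight netpoll_poll_disable()
 * sections via SRCU, and frees the shared netpoll_info after an RCU
 * grace period once the last reference drops. Caller must hold RTNL.
 */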
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void __netpoll_free(struct netpoll *np)
{
	ASSERT_RTNL();

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu();
	__netpoll_cleanup(np);
	kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);