/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE \
	(sizeof(struct ethhdr) + \
	 sizeof(struct iphdr) + \
	 sizeof(struct udphdr) + \
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...) \
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...) \
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...) \
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

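/*
 * Flush the netpoll TX backlog from a workqueue.  Each queued skb is
 * retried against the driver's ndo_start_xmit(); if the queue is frozen
 * or stopped, or the driver won't take the packet, it is requeued and
 * we try again a little later (HZ/10).
 */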
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * Where communication is bi-directional, reading only one message at
 * a time can lead to packets being dropped by the network adapter,
 * forcing superfluous retries and possibly timeouts. Thus, we set our
 * budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

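/*
 * Poll a device "by hand": have the driver reap completions and feed
 * received packets via ndo_poll_controller(), service any NAPI contexts,
 * forward a bonding slave's queued ARPs to its master, then drain the
 * pending ARP queue and the softnet completion queue.
 */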
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->flags & IFF_SLAVE) {
		if (dev->npinfo) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;
			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
			}
		}
	}

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

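/*
 * Allocate an skb for transmission, falling back to the emergency pool
 * and, if even that is empty, polling the device a few times in the
 * hope that it frees up memory.  Returns NULL if nothing could be had.
 */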
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

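/*
 * Transmit an skb directly, bypassing the normal stack.  With interrupts
 * disabled we spin on the TX lock for up to one clock tick; if the
 * device stays busy (or the TX backlog is non-empty, to preserve message
 * ordering), the skb is deferred to the txq work queue instead.
 */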
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle the device; maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

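/*
 * Build a UDP/IPv4/Ethernet frame around @msg entirely by hand (the
 * normal protocol stack may be unusable when this runs) and push it
 * out through netpoll_send_skb().
 */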
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = udp_len + sizeof(*iph);
	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

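/*
 * Answer an ARP request aimed at one of our netpoll-managed addresses.
 * The reply is built by hand and sent through netpoll_send_skb(), since
 * the normal ARP machinery may be trapped along with the rest of the
 * stack when this runs.
 */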
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection of whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No ARP on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		hlen = LL_RESERVED_SPACE(np->dev);
		tlen = np->dev->needed_tailroom;
		send_skb = find_skb(np, size + hlen + tlen, hlen);
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

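/*
 * Called from the driver receive path while netpoll is active.  Returns
 * 1 if the packet was consumed (it matched a netpoll client, or we are
 * trapped and dropping everything), 0 to hand it back to the stack.
 */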
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			    (char *)(uh+1),
			    ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	np_info(np, "local IP %pI4\n", &np->local_ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	np_info(np, "remote IP %pI4\n", &np->remote_ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

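/*
 * Parse a netpoll configuration string of the form
 *
 *   [src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * (the same syntax netconsole uses).  Omitted fields keep whatever
 * values @np already holds.  Returns 0 on success, -1 on a malformed
 * string.
 */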
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

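/*
 * Attach @np to its (already resolved and running) device: allocate the
 * per-device netpoll_info on first use, register the rx hook if one was
 * supplied, and finally publish npinfo on the device.  netpoll_setup()
 * calls this with the RTNL lock held.
 */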
int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			np_err(np, "no IP address for %s, aborting\n",
			       np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		np_info(np, "local IP %pI4\n", &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

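/*
 * Detach @np from its device: unhook the rx path and, when the last
 * reference to the shared netpoll_info is dropped, unpublish it from
 * the device, wait out any NAPI readers, and free the queued skbs and
 * the structure itself.  netpoll_cleanup() calls this under RTNL.
 */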
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_delayed_work_sync(&npinfo->tx_work);

		/* clean after last, unfinished work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);