v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
#include <linux/kconfig.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)
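
/*
 * Editor's note (not part of the original source): with the usual header
 * sizes (14-byte Ethernet header, 20-byte IPv4 header without options,
 * 8-byte UDP header), MAX_SKB_SIZE works out to
 * 14 + 20 + 8 + 1460 = 1502 bytes, i.e. each pooled skb can carry one
 * full MAX_UDP_CHUNK payload behind a complete set of headers.
 */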

static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct netdev_queue *txq)
{
	netdev_tx_t status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}
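
/*
 * Editor's note (not part of the original source): on a frozen/stopped
 * queue or a driver rejection, queue_process() requeues the skb at the
 * head of npinfo->txq and reschedules itself HZ/10 jiffies (about
 * 100 ms) later, so queued messages are retried in order rather than
 * dropped.
 */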

static int netif_local_xmit_active(struct net_device *dev)
{
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
			return 1;
	}

	return 0;
}

static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}
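
/*
 * Editor's note (not part of the original source): the cmpxchg() on
 * napi->poll_owner acts as a trylock; the NAPI instance is claimed only
 * when no CPU owns it (poll_owner == -1), which prevents re-entering a
 * driver's ->poll() that is already running on another CPU, and
 * recursing on this one.
 */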

void netpoll_poll_dev(struct net_device *dev)
{
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	const struct net_device_ops *ops;

	/* Don't do any rx activity if the dev_lock semaphore is held;
	 * the dev_open/close paths use it to block netpoll activity
	 * while changing device state.
	 */
	if (!ni || down_trylock(&ni->dev_lock))
		return;

	/* Some drivers will take the same locks in poll and xmit,
	 * we can't poll if local CPU is already in xmit.
	 */
	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;
	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;
	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				refcount_set(&skb->users, 1);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
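
/*
 * Editor's note (not part of the original source): drivers called with
 * IRQs off free skbs with dev_kfree_skb_irq(), which only queues them
 * on this CPU's softnet completion list for the Tx softirq. Since that
 * softirq may never get to run while netpoll is busy-polling,
 * zap_completion_queue() drains the list inline so the memory becomes
 * available again (e.g. for refill_skbs()).
 */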

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
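
/*
 * Editor's note (not part of the original source): find_skb() prefers a
 * fresh GFP_ATOMIC allocation and falls back to the preallocated pool;
 * if both fail, it polls the device (giving the driver a chance to
 * complete and free in-flight Tx buffers) and retries up to ten times
 * before giving up.
 */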

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	netdev_tx_t status = NETDEV_TX_BUSY;
	struct net_device *dev;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	dev = np->dev;
	npinfo = rcu_dereference_bh(dev->npinfo);

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return NET_XMIT_DROP;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_core_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (dev_xmit_complete(status))
					break;

			}

			/* tickle the device, maybe there is some cleanup to do */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
			dev->name, dev->netdev_ops->ndo_start_xmit);

	}

	if (!dev_xmit_complete(status)) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
	return NETDEV_TX_OK;
}

netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;
	netdev_tx_t ret;

	if (unlikely(!np)) {
		dev_kfree_skb_irq(skb);
		ret = NET_XMIT_DROP;
	} else {
		local_irq_save(flags);
		ret = __netpoll_send_skb(np, skb);
		local_irq_restore(flags);
	}
	return ret;
}
EXPORT_SYMBOL(netpoll_send_skb);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		*(unsigned char *)ip6h = 0x60;
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		*(unsigned char *)iph = 0x45;
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
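
/*
 * Editor's note (not part of the original source): netpoll_send_udp()
 * builds the frame back to front: the payload is copied first, then
 * skb_push() prepends the UDP header, the IPv4 or IPv6 header, and
 * finally the Ethernet header, giving (for IPv4) the wire layout
 *
 *   | ethhdr (14) | iphdr (20) | udphdr (8) | msg (len bytes) |
 *
 * Checksums are filled in by hand because this frame bypasses the
 * normal protocol output paths.
 */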

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strscpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
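
/*
 * Editor's note (not part of the original source): the option string
 * parsed above follows the netconsole convention
 *
 *   [src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * so a sketch of a complete configuration would be, for example,
 *
 *   6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55
 *
 * Every field except the target IP may be left empty to keep its
 * default.
 */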

int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strscpy(np->dev_name, ndev->name, IFNAMSIZ);

	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	netdev_hold(ndev, &np->dev_tracker, GFP_KERNEL);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev, NULL);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			const struct in_ifaddr *ifa;

			in_dev = __in_dev_get_rtnl(ndev);
			if (!in_dev)
				goto put_noaddr;

			ifa = rtnl_dereference(in_dev->ifa_list);
			if (!ifa) {
put_noaddr:
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = ifa->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;
	rtnl_unlock();
	return 0;

put:
	netdev_put(ndev, &np->dev_tracker);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
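
/*
 * Editor's note, an illustrative usage sketch rather than part of the
 * original source (field names are taken from this file; the values
 * are made up): a client such as netconsole fills in a struct netpoll
 * and hands it to netpoll_setup():
 *
 *	static struct netpoll np = {
 *		.name        = "netconsole",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *	};
 *
 *	netpoll_parse_options(&np, config);	// or fill fields by hand
 *	if (netpoll_setup(&np))
 *		return;				// setup failed
 *	netpoll_send_udp(&np, msg, len);
 */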

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void __netpoll_free(struct netpoll *np)
{
	ASSERT_RTNL();

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu();
	__netpoll_cleanup(np);
	kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	netdev_put(np->dev, &np->dev_tracker);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);
v3.5.6
 
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;

	if (!dev || !netif_running(dev))
		return;

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller)
		return;

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	if (dev->flags & IFF_SLAVE) {
		if (dev->npinfo) {
			struct net_device *bond_dev = dev->master;
			struct sk_buff *skb;
			while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
			}
		}
	}

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;
		unsigned long flags;

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;

			}

			/* tickle the device, maybe there is some cleanup to do */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			"netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
			dev->name, ops->ndo_start_xmit);

		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = udp_len + sizeof(*iph);
	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(np->local_ip,
					np->remote_ip,
					udp_len, IPPROTO_UDP,
					csum_partial(udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(np->local_ip, &(iph->saddr));
	put_unaligned(np->remote_ip, &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);

static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection to see whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* If we actually cared about dst hw addr,
	   it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (tip != np->local_ip)
			continue;

		hlen = LL_RESERVED_SPACE(np->dev);
		tlen = np->dev->needed_tailroom;
		send_skb = find_skb(np, size + hlen + tlen, hlen);
		if (!send_skb)
			continue;

		skb_reset_network_header(send_skb);
		arp = (struct arphdr *) skb_put(send_skb, size);
		send_skb->dev = skb->dev;
		send_skb->protocol = htons(ETH_P_ARP);

		/* Fill the device header for the ARP frame */
		if (dev_hard_header(send_skb, skb->dev, ptype,
				    sha, np->dev->dev_addr,
				    send_skb->len) < 0) {
			kfree_skb(send_skb);
			continue;
		}

		/*
		 * Fill out the arp protocol part.
		 *
		 * we only support ethernet device type,
		 * which (according to RFC 1390) should
		 * always equal 1 (Ethernet).
		 */

		arp->ar_hrd = htons(np->dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		arp->ar_hln = np->dev->addr_len;
		arp->ar_pln = 4;
		arp->ar_op = htons(type);

		arp_ptr = (unsigned char *)(arp + 1);
		memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &tip, 4);
		arp_ptr += 4;
		memcpy(arp_ptr, sha, np->dev->addr_len);
		arp_ptr += np->dev->addr_len;
		memcpy(arp_ptr, &sip, 4);

		netpoll_send_skb(np, send_skb);

		/* If there are several rx_hooks for the same address,
		   we're fine by sending a single reply */
		break;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	iph = (struct iphdr *)skb->data;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;

	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->local_ip && np->local_ip != iph->daddr)
			continue;
		if (np->remote_ip && np->remote_ip != iph->saddr)
			continue;
		if (np->local_port && np->local_port != ntohs(uh->dest))
			continue;

		np->rx_hook(np, ntohs(uh->source),
			       (char *)(uh+1),
			       ulen - sizeof(struct udphdr));
		hits++;
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
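
/*
 * Editor's note (not part of the original source): __netpoll_rx()
 * accepts a packet only after a chain of sanity checks (Ethernet
 * device, IPv4 ethertype, not PACKET_OTHERHOST, unshared skb, valid IP
 * header and checksum, UDP protocol, consistent lengths, valid UDP
 * checksum) and then hands the payload to the rx_hook of every
 * registered netpoll whose local/remote IP and local port filters
 * match. While "trapped" is non-zero, packets failing these tests are
 * dropped instead of being passed up the stack.
 */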

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	np_info(np, "local IP %pI4\n", &np->local_ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	np_info(np, "remote IP %pI4\n", &np->remote_ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = in_aton(cur);
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = in_aton(cur);
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);

int __netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = np->dev;
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		return -ENODEV;
	}

	if (ndev->master) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			np_err(np, "no IP address for %s, aborting\n",
			       np->dev_name);
			err = -EDESTADDRREQ;
			goto put;
		}

		np->local_ip = in_dev->ifa_list->ifa_local;
		rcu_read_unlock();
		np_info(np, "local IP %pI4\n", &np->local_ip);
	}

	np->dev = ndev;

	/* fill up the skb queue */
	refill_skbs();

	rtnl_lock();
	err = __netpoll_setup(np);
	rtnl_unlock();

	if (err)
		goto put;

	return 0;

put:
	dev_put(ndev);
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	npinfo = np->dev->npinfo;
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);

		/* avoid racing with NAPI reading npinfo */
		synchronize_rcu_bh();

		skb_queue_purge(&npinfo->arp_tx);
		skb_queue_purge(&npinfo->txq);
		cancel_delayed_work_sync(&npinfo->tx_work);

		/* clean after last, unfinished work */
		__skb_queue_purge(&npinfo->txq);
		kfree(npinfo);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);