// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
#include <linux/kconfig.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)

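/*
 * With a 14-byte Ethernet header, a 20-byte IPv4 header and an 8-byte
 * UDP header, MAX_SKB_SIZE comes to 14 + 20 + 8 + 1460 = 1502 bytes,
 * so every pool skb can carry one full MAX_UDP_CHUNK of payload behind
 * IPv4/UDP headers.
 */
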
static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct netdev_queue *txq)
{
	netdev_tx_t status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}
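
/*
 * If the device queue is frozen or the driver rejects the skb,
 * queue_process() puts the skb back at the head of npinfo->txq (so
 * messages stay in order) and retries a tenth of a second later
 * instead of busy-waiting with IRQs off.
 */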

static int netif_local_xmit_active(struct net_device *dev)
{
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
			return 1;
	}

	return 0;
}

static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}
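
/*
 * poll_napi() claims each NAPI instance by atomically swapping
 * poll_owner from -1 to the local CPU id; whichever context loses the
 * cmpxchg simply skips that instance, so netpoll never recurses into a
 * ->poll() that is already running elsewhere.
 */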

void netpoll_poll_dev(struct net_device *dev)
{
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	const struct net_device_ops *ops;

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (!ni || down_trylock(&ni->dev_lock))
		return;

	/* Some drivers will take the same locks in poll and xmit,
	 * we can't poll if the local CPU is already in xmit.
	 */
	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
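
/*
 * netpoll_poll_disable()/netpoll_poll_enable() bracket device state
 * changes: dev_open()/dev_close() take ni->dev_lock so that
 * netpoll_poll_dev(), which only down_trylock()s it, backs off while
 * the device is in flux. The SRCU read lock keeps npinfo itself alive
 * across the (possibly sleeping) down().
 */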

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				refcount_set(&skb->users, 1);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
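
/*
 * find_skb() tries a fresh GFP_ATOMIC allocation first and only falls
 * back to the preallocated pool; if both fail, it polls the device (up
 * to ten times) in the hope that completed transmits free enough
 * memory for a retry.
 */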

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	netdev_tx_t status = NETDEV_TX_BUSY;
	struct net_device *dev;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	dev = np->dev;
	npinfo = rcu_dereference_bh(dev->npinfo);

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return NET_XMIT_DROP;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_core_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (dev_xmit_complete(status))
					break;

			}

			/* tickle the device, maybe there is some cleanup to do */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
			dev->name, dev->netdev_ops->ndo_start_xmit);

	}

	if (!dev_xmit_complete(status)) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
	return NETDEV_TX_OK;
}

netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;
	netdev_tx_t ret;

	if (unlikely(!np)) {
		dev_kfree_skb_irq(skb);
		ret = NET_XMIT_DROP;
	} else {
		local_irq_save(flags);
		ret = __netpoll_send_skb(np, skb);
		local_irq_restore(flags);
	}
	return ret;
}
EXPORT_SYMBOL(netpoll_send_skb);
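
/*
 * The retry loop in __netpoll_send_skb() spins for at most one clock
 * tick: jiffies_to_usecs(1) / USEC_PER_POLL attempts, 50us apart (20
 * attempts at HZ=1000, for example). If the packet still hasn't gone
 * out, it is deferred to the queue_process() worker above rather than
 * blocking the (possibly printk) caller any longer.
 */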

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		*(unsigned char *)ip6h = 0x60;
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		*(unsigned char *)iph = 0x45;
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
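
/*
 * netpoll_send_udp() builds the frame back to front: the payload is
 * copied first, then the UDP, IP (v4 or v6) and Ethernet headers are
 * each skb_push()ed in front of it, which is why find_skb() reserves
 * total_len - len bytes of headroom. A computed UDP checksum of zero
 * is folded to CSUM_MANGLED_0, since a literal zero in the header
 * means "no checksum was computed".
 */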

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strscpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
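
/*
 * The config string parsed above follows the familiar netconsole
 * syntax:
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * e.g. "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55". Empty
 * fields keep their defaults, which is why each block above starts by
 * testing for the next delimiter before parsing a value.
 */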

int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strscpy(np->dev_name, ndev->name, IFNAMSIZ);

	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
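
/*
 * A device carries at most one netpoll_info, shared by every netpoll
 * client attached to it; npinfo->refcnt counts the clients, and the
 * last __netpoll_cleanup() call below releases it via RCU.
 */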

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	netdev_hold(ndev, &np->dev_tracker, GFP_KERNEL);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev, NULL);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			const struct in_ifaddr *ifa;

			in_dev = __in_dev_get_rtnl(ndev);
			if (!in_dev)
				goto put_noaddr;

			ifa = rtnl_dereference(in_dev->ifa_list);
			if (!ifa) {
put_noaddr:
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = ifa->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;
	rtnl_unlock();
	return 0;

put:
	netdev_put(ndev, &np->dev_tracker);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

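/*
 * Putting the pieces together, a minimal client looks roughly like the
 * sketch below (illustrative only; netconsole is the in-tree reference
 * user, and the device, addresses and message here are made up):
 *
 *	static struct netpoll np = { .name = "mytool" };
 *	char opt[] = "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55";
 *	unsigned long flags;
 *
 *	if (netpoll_parse_options(&np, opt) || netpoll_setup(&np))
 *		goto fail;
 *
 *	local_irq_save(flags);
 *	netpoll_send_udp(&np, msg, strlen(msg));
 *	local_irq_restore(flags);
 *
 *	netpoll_cleanup(&np);
 *
 * netpoll_parse_options() modifies the string in place, hence the
 * writable buffer, and outside of PREEMPT_RT netpoll_send_udp()
 * expects to be called with IRQs off (see the WARN_ON_ONCE above).
 */
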
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void __netpoll_free(struct netpoll *np)
{
	ASSERT_RTNL();

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu();
	__netpoll_cleanup(np);
	kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	netdev_put(np->dev, &np->dev_tracker);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);