net/core/netpoll.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Common framework for low-level network console, dump, and debugger code
  4 *
  5 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
  6 *
  7 * based on the netconsole code from:
  8 *
  9 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 10 * Copyright (C) 2002  Red Hat, Inc.
 11 */
 12
 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 14
 15#include <linux/moduleparam.h>
 16#include <linux/kernel.h>
 17#include <linux/netdevice.h>
 18#include <linux/etherdevice.h>
 19#include <linux/string.h>
 20#include <linux/if_arp.h>
 21#include <linux/inetdevice.h>
 22#include <linux/inet.h>
 23#include <linux/interrupt.h>
 24#include <linux/netpoll.h>
 25#include <linux/sched.h>
 26#include <linux/delay.h>
 27#include <linux/rcupdate.h>
 28#include <linux/workqueue.h>
 29#include <linux/slab.h>
 30#include <linux/export.h>
 31#include <linux/if_vlan.h>
 32#include <net/tcp.h>
 33#include <net/udp.h>
 34#include <net/addrconf.h>
 35#include <net/ndisc.h>
 36#include <net/ip6_checksum.h>
 37#include <linux/unaligned.h>
 38#include <trace/events/napi.h>
 39#include <linux/kconfig.h>
 40
 41/*
 42 * We maintain a small pool of fully-sized skbs, to make sure the
 43 * message gets out even in extreme OOM situations.
 44 */
 45
 46#define MAX_UDP_CHUNK 1460
 47#define MAX_SKBS 32
 48#define USEC_PER_POLL	50
 49
 50#define MAX_SKB_SIZE							\
 51	(sizeof(struct ethhdr) +					\
 52	 sizeof(struct iphdr) +						\
 53	 sizeof(struct udphdr) +					\
 54	 MAX_UDP_CHUNK)
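/* Worked out for plain Ethernet (a sketch; no VLAN tag or IP options
 * assumed): 14 (ethhdr) + 20 (iphdr) + 8 (udphdr) + 1460 = 1502 bytes
 * per pooled skb.
 */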
 55
 56static void zap_completion_queue(void);
 57
 58static unsigned int carrier_timeout = 4;
 59module_param(carrier_timeout, uint, 0644);
 60
 61#define np_info(np, fmt, ...)				\
 62	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
 63#define np_err(np, fmt, ...)				\
 64	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
 65#define np_notice(np, fmt, ...)				\
 66	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
 67
 68static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
 69				      struct net_device *dev,
 70				      struct netdev_queue *txq)
 71{
 72	netdev_tx_t status = NETDEV_TX_OK;
 73	netdev_features_t features;
 74
 75	features = netif_skb_features(skb);
 76
 77	if (skb_vlan_tag_present(skb) &&
 78	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
 79		skb = __vlan_hwaccel_push_inside(skb);
 80		if (unlikely(!skb)) {
 81			/* This is actually a packet drop, but we
 82			 * don't want the code that calls this
 83			 * function to try and operate on a NULL skb.
 84			 */
 85			goto out;
 86		}
 87	}
 88
 89	status = netdev_start_xmit(skb, dev, txq, false);
 90
 91out:
 92	return status;
 93}
 94
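/* Drain npinfo->txq from workqueue context; if the device queue is
 * frozen/stopped or the transmit fails, requeue the skb and retry later.
 */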
 95static void queue_process(struct work_struct *work)
 96{
 97	struct netpoll_info *npinfo =
 98		container_of(work, struct netpoll_info, tx_work.work);
 99	struct sk_buff *skb;
100	unsigned long flags;
101
102	while ((skb = skb_dequeue(&npinfo->txq))) {
103		struct net_device *dev = skb->dev;
104		struct netdev_queue *txq;
105		unsigned int q_index;
106
107		if (!netif_device_present(dev) || !netif_running(dev)) {
108			kfree_skb(skb);
109			continue;
110		}
111
112		local_irq_save(flags);
113		/* check if skb->queue_mapping is still valid */
114		q_index = skb_get_queue_mapping(skb);
115		if (unlikely(q_index >= dev->real_num_tx_queues)) {
116			q_index = q_index % dev->real_num_tx_queues;
117			skb_set_queue_mapping(skb, q_index);
118		}
119		txq = netdev_get_tx_queue(dev, q_index);
120		HARD_TX_LOCK(dev, txq, smp_processor_id());
121		if (netif_xmit_frozen_or_stopped(txq) ||
122		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
123			skb_queue_head(&npinfo->txq, skb);
124			HARD_TX_UNLOCK(dev, txq);
125			local_irq_restore(flags);
126
127			schedule_delayed_work(&npinfo->tx_work, HZ/10);
128			return;
129		}
130		HARD_TX_UNLOCK(dev, txq);
131		local_irq_restore(flags);
132	}
133}
134
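/* Returns nonzero if the local CPU already holds the xmit lock of any of
 * dev's TX queues, i.e. we are inside xmit and must not poll recursively.
 */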
135static int netif_local_xmit_active(struct net_device *dev)
136{
137	int i;
138
139	for (i = 0; i < dev->num_tx_queues; i++) {
140		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
141
142		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
143			return 1;
144	}
145
146	return 0;
147}
148
149static void poll_one_napi(struct napi_struct *napi)
150{
151	int work;
152
153	/* If we set this bit but see that it has already been set,
154	 * that indicates that napi has been disabled and we need
155	 * to abort this operation
156	 */
157	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
158		return;
159
160	/* We explicitly pass the polling call a budget of 0 to
161	 * indicate that we are clearing the Tx path only.
162	 */
163	work = napi->poll(napi, 0);
164	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
165	trace_napi_poll(napi, work, 0);
166
167	clear_bit(NAPI_STATE_NPSVC, &napi->state);
168}
169
170static void poll_napi(struct net_device *dev)
171{
172	struct napi_struct *napi;
173	int cpu = smp_processor_id();
174
175	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
176		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
177			poll_one_napi(napi);
178			smp_store_release(&napi->poll_owner, -1);
179		}
180	}
181}
182
183void netpoll_poll_dev(struct net_device *dev)
184{
185	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
186	const struct net_device_ops *ops;
187
 188	/* Don't do any rx activity if the dev_lock mutex is held;
 189	 * the dev_open/close paths use this to block netpoll activity
 190	 * while changing device state.
 191	 */
192	if (!ni || down_trylock(&ni->dev_lock))
193		return;
194
195	/* Some drivers will take the same locks in poll and xmit,
196	 * we can't poll if local CPU is already in xmit.
197	 */
198	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
199		up(&ni->dev_lock);
200		return;
201	}
202
203	ops = dev->netdev_ops;
204	if (ops->ndo_poll_controller)
205		ops->ndo_poll_controller(dev);
206
207	poll_napi(dev);
208
209	up(&ni->dev_lock);
210
211	zap_completion_queue();
212}
213EXPORT_SYMBOL(netpoll_poll_dev);
214
215void netpoll_poll_disable(struct net_device *dev)
216{
217	struct netpoll_info *ni;
218
219	might_sleep();
220	ni = rtnl_dereference(dev->npinfo);
221	if (ni)
222		down(&ni->dev_lock);
223}
224
225void netpoll_poll_enable(struct net_device *dev)
226{
227	struct netpoll_info *ni;
228
229	ni = rtnl_dereference(dev->npinfo);
230	if (ni)
231		up(&ni->dev_lock);
232}
233
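/* Top up the per-netpoll skb pool to MAX_SKBS with GFP_ATOMIC
 * allocations, so a message can still go out under memory pressure.
 */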
234static void refill_skbs(struct netpoll *np)
235{
236	struct sk_buff_head *skb_pool;
237	struct sk_buff *skb;
238	unsigned long flags;
239
240	skb_pool = &np->skb_pool;
241
242	spin_lock_irqsave(&skb_pool->lock, flags);
243	while (skb_pool->qlen < MAX_SKBS) {
244		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
245		if (!skb)
246			break;
247
248		__skb_queue_tail(skb_pool, skb);
249	}
250	spin_unlock_irqrestore(&skb_pool->lock, flags);
251}
252
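/* Reclaim skbs parked on this CPU's softnet completion queue: free the
 * IRQ-freeable ones directly, hand the rest back via dev_kfree_skb_any().
 */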
253static void zap_completion_queue(void)
254{
255	unsigned long flags;
256	struct softnet_data *sd = &get_cpu_var(softnet_data);
257
258	if (sd->completion_queue) {
259		struct sk_buff *clist;
260
261		local_irq_save(flags);
262		clist = sd->completion_queue;
263		sd->completion_queue = NULL;
264		local_irq_restore(flags);
265
266		while (clist != NULL) {
267			struct sk_buff *skb = clist;
268			clist = clist->next;
269			if (!skb_irq_freeable(skb)) {
270				refcount_set(&skb->users, 1);
271				dev_kfree_skb_any(skb); /* put this one back */
272			} else {
273				__kfree_skb(skb);
274			}
275		}
276	}
277
278	put_cpu_var(softnet_data);
279}
280
281static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
282{
283	int count = 0;
284	struct sk_buff *skb;
285
286	zap_completion_queue();
287	refill_skbs(np);
288repeat:
289
290	skb = alloc_skb(len, GFP_ATOMIC);
291	if (!skb)
292		skb = skb_dequeue(&np->skb_pool);
293
294	if (!skb) {
295		if (++count < 10) {
296			netpoll_poll_dev(np->dev);
297			goto repeat;
298		}
299		return NULL;
300	}
301
302	refcount_set(&skb->users, 1);
303	skb_reserve(skb, reserve);
304	return skb;
305}
306
307static int netpoll_owner_active(struct net_device *dev)
308{
309	struct napi_struct *napi;
310
311	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
312		if (READ_ONCE(napi->poll_owner) == smp_processor_id())
313			return 1;
314	}
315	return 0;
316}
317
318/* call with IRQ disabled */
319static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
320{
321	netdev_tx_t status = NETDEV_TX_BUSY;
322	struct net_device *dev;
323	unsigned long tries;
324	/* It is up to the caller to keep npinfo alive. */
325	struct netpoll_info *npinfo;
326
327	lockdep_assert_irqs_disabled();
328
329	dev = np->dev;
330	npinfo = rcu_dereference_bh(dev->npinfo);
331
332	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
333		dev_kfree_skb_irq(skb);
334		return NET_XMIT_DROP;
335	}
336
337	/* don't get messages out of order, and no recursion */
338	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
339		struct netdev_queue *txq;
340
341		txq = netdev_core_pick_tx(dev, skb, NULL);
342
343		/* try until next clock tick */
344		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
345		     tries > 0; --tries) {
346			if (HARD_TX_TRYLOCK(dev, txq)) {
347				if (!netif_xmit_stopped(txq))
348					status = netpoll_start_xmit(skb, dev, txq);
349
350				HARD_TX_UNLOCK(dev, txq);
351
352				if (dev_xmit_complete(status))
353					break;
354
355			}
356
 357			/* tickle the device, maybe there is some cleanup */
358			netpoll_poll_dev(np->dev);
359
360			udelay(USEC_PER_POLL);
361		}
362
363		WARN_ONCE(!irqs_disabled(),
364			"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
365			dev->name, dev->netdev_ops->ndo_start_xmit);
366
367	}
368
369	if (!dev_xmit_complete(status)) {
370		skb_queue_tail(&npinfo->txq, skb);
 371		schedule_delayed_work(&npinfo->tx_work, 0);
372	}
373	return NETDEV_TX_OK;
374}
375
376netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
377{
378	unsigned long flags;
379	netdev_tx_t ret;
380
381	if (unlikely(!np)) {
382		dev_kfree_skb_irq(skb);
383		ret = NET_XMIT_DROP;
384	} else {
385		local_irq_save(flags);
386		ret = __netpoll_send_skb(np, skb);
387		local_irq_restore(flags);
388	}
389	return ret;
390}
391EXPORT_SYMBOL(netpoll_send_skb);
392
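/* Build a UDP datagram by pushing headers in reverse order (UDP, then
 * IPv4/IPv6, then Ethernet) onto an skb from find_skb(), and hand the
 * result to netpoll_send_skb().
 */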
393void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
394{
395	int total_len, ip_len, udp_len;
396	struct sk_buff *skb;
397	struct udphdr *udph;
398	struct iphdr *iph;
399	struct ethhdr *eth;
400	static atomic_t ip_ident;
401	struct ipv6hdr *ip6h;
402
403	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
404		WARN_ON_ONCE(!irqs_disabled());
405
406	udp_len = len + sizeof(*udph);
407	if (np->ipv6)
408		ip_len = udp_len + sizeof(*ip6h);
409	else
410		ip_len = udp_len + sizeof(*iph);
411
412	total_len = ip_len + LL_RESERVED_SPACE(np->dev);
413
414	skb = find_skb(np, total_len + np->dev->needed_tailroom,
415		       total_len - len);
416	if (!skb)
417		return;
418
419	skb_copy_to_linear_data(skb, msg, len);
420	skb_put(skb, len);
421
422	skb_push(skb, sizeof(*udph));
423	skb_reset_transport_header(skb);
424	udph = udp_hdr(skb);
425	udph->source = htons(np->local_port);
426	udph->dest = htons(np->remote_port);
427	udph->len = htons(udp_len);
428
429	if (np->ipv6) {
430		udph->check = 0;
431		udph->check = csum_ipv6_magic(&np->local_ip.in6,
432					      &np->remote_ip.in6,
433					      udp_len, IPPROTO_UDP,
434					      csum_partial(udph, udp_len, 0));
435		if (udph->check == 0)
436			udph->check = CSUM_MANGLED_0;
437
438		skb_push(skb, sizeof(*ip6h));
439		skb_reset_network_header(skb);
440		ip6h = ipv6_hdr(skb);
441
442		/* ip6h->version = 6; ip6h->priority = 0; */
443		*(unsigned char *)ip6h = 0x60;
444		ip6h->flow_lbl[0] = 0;
445		ip6h->flow_lbl[1] = 0;
446		ip6h->flow_lbl[2] = 0;
447
448		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
449		ip6h->nexthdr = IPPROTO_UDP;
450		ip6h->hop_limit = 32;
451		ip6h->saddr = np->local_ip.in6;
452		ip6h->daddr = np->remote_ip.in6;
453
454		eth = skb_push(skb, ETH_HLEN);
455		skb_reset_mac_header(skb);
456		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
457	} else {
458		udph->check = 0;
459		udph->check = csum_tcpudp_magic(np->local_ip.ip,
460						np->remote_ip.ip,
461						udp_len, IPPROTO_UDP,
462						csum_partial(udph, udp_len, 0));
463		if (udph->check == 0)
464			udph->check = CSUM_MANGLED_0;
465
466		skb_push(skb, sizeof(*iph));
467		skb_reset_network_header(skb);
468		iph = ip_hdr(skb);
469
470		/* iph->version = 4; iph->ihl = 5; */
471		*(unsigned char *)iph = 0x45;
472		iph->tos      = 0;
473		put_unaligned(htons(ip_len), &(iph->tot_len));
474		iph->id       = htons(atomic_inc_return(&ip_ident));
475		iph->frag_off = 0;
476		iph->ttl      = 64;
477		iph->protocol = IPPROTO_UDP;
478		iph->check    = 0;
479		put_unaligned(np->local_ip.ip, &(iph->saddr));
480		put_unaligned(np->remote_ip.ip, &(iph->daddr));
481		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);
482
483		eth = skb_push(skb, ETH_HLEN);
484		skb_reset_mac_header(skb);
485		skb->protocol = eth->h_proto = htons(ETH_P_IP);
486	}
487
488	ether_addr_copy(eth->h_source, np->dev->dev_addr);
489	ether_addr_copy(eth->h_dest, np->remote_mac);
490
491	skb->dev = np->dev;
492
493	netpoll_send_skb(np, skb);
494}
495EXPORT_SYMBOL(netpoll_send_udp);
496
497void netpoll_print_options(struct netpoll *np)
498{
499	np_info(np, "local port %d\n", np->local_port);
500	if (np->ipv6)
501		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
502	else
503		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
504	np_info(np, "interface '%s'\n", np->dev_name);
505	np_info(np, "remote port %d\n", np->remote_port);
506	if (np->ipv6)
507		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
508	else
509		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
510	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
511}
512EXPORT_SYMBOL(netpoll_print_options);
513
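/* Parse @str into @addr; returns 0 for an IPv4 address, 1 for IPv6
 * (when CONFIG_IPV6 is enabled), and -1 if the string is neither.
 */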
514static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
515{
516	const char *end;
517
518	if (!strchr(str, ':') &&
519	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
520		if (!*end)
521			return 0;
522	}
523	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
524#if IS_ENABLED(CONFIG_IPV6)
525		if (!*end)
526			return 1;
527#else
528		return -1;
529#endif
530	}
531	return -1;
532}
533
534static void skb_pool_flush(struct netpoll *np)
535{
536	struct sk_buff_head *skb_pool;
537
538	skb_pool = &np->skb_pool;
539	skb_queue_purge_reason(skb_pool, SKB_CONSUMED);
540}
541
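/* Parse a netconsole-style configuration string; fields may be left
 * empty to keep their defaults, e.g.:
 *
 *   [src-port]@[src-ip]/[dev-name],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *   6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 */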
542int netpoll_parse_options(struct netpoll *np, char *opt)
543{
 544	char *cur = opt, *delim;
545	int ipv6;
546	bool ipversion_set = false;
547
548	if (*cur != '@') {
549		if ((delim = strchr(cur, '@')) == NULL)
550			goto parse_failed;
551		*delim = 0;
552		if (kstrtou16(cur, 10, &np->local_port))
553			goto parse_failed;
554		cur = delim;
555	}
556	cur++;
557
558	if (*cur != '/') {
559		ipversion_set = true;
560		if ((delim = strchr(cur, '/')) == NULL)
561			goto parse_failed;
562		*delim = 0;
563		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
564		if (ipv6 < 0)
565			goto parse_failed;
566		else
567			np->ipv6 = (bool)ipv6;
568		cur = delim;
569	}
570	cur++;
571
572	if (*cur != ',') {
573		/* parse out dev name */
574		if ((delim = strchr(cur, ',')) == NULL)
575			goto parse_failed;
576		*delim = 0;
577		strscpy(np->dev_name, cur, sizeof(np->dev_name));
578		cur = delim;
579	}
580	cur++;
581
582	if (*cur != '@') {
583		/* dst port */
584		if ((delim = strchr(cur, '@')) == NULL)
585			goto parse_failed;
586		*delim = 0;
587		if (*cur == ' ' || *cur == '\t')
588			np_info(np, "warning: whitespace is not allowed\n");
589		if (kstrtou16(cur, 10, &np->remote_port))
590			goto parse_failed;
591		cur = delim;
592	}
593	cur++;
594
595	/* dst ip */
596	if ((delim = strchr(cur, '/')) == NULL)
597		goto parse_failed;
598	*delim = 0;
599	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
600	if (ipv6 < 0)
601		goto parse_failed;
602	else if (ipversion_set && np->ipv6 != (bool)ipv6)
603		goto parse_failed;
604	else
605		np->ipv6 = (bool)ipv6;
606	cur = delim + 1;
607
608	if (*cur != 0) {
609		/* MAC address */
610		if (!mac_pton(cur, np->remote_mac))
611			goto parse_failed;
612	}
613
614	netpoll_print_options(np);
615
616	return 0;
617
618 parse_failed:
619	np_info(np, "couldn't parse config at '%s'!\n", cur);
620	return -1;
621}
622EXPORT_SYMBOL(netpoll_parse_options);
623
624int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
625{
626	struct netpoll_info *npinfo;
627	const struct net_device_ops *ops;
628	int err;
629
630	skb_queue_head_init(&np->skb_pool);
631
632	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
633		np_err(np, "%s doesn't support polling, aborting\n",
634		       ndev->name);
635		err = -ENOTSUPP;
636		goto out;
637	}
638
639	if (!rcu_access_pointer(ndev->npinfo)) {
640		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
641		if (!npinfo) {
642			err = -ENOMEM;
643			goto out;
644		}
645
646		sema_init(&npinfo->dev_lock, 1);
647		skb_queue_head_init(&npinfo->txq);
648		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
649
650		refcount_set(&npinfo->refcnt, 1);
651
652		ops = ndev->netdev_ops;
653		if (ops->ndo_netpoll_setup) {
654			err = ops->ndo_netpoll_setup(ndev);
655			if (err)
656				goto free_npinfo;
657		}
658	} else {
659		npinfo = rtnl_dereference(ndev->npinfo);
660		refcount_inc(&npinfo->refcnt);
661	}
662
663	np->dev = ndev;
664	strscpy(np->dev_name, ndev->name, IFNAMSIZ);
665	npinfo->netpoll = np;
666
667	/* fill up the skb queue */
668	refill_skbs(np);
669
670	/* last thing to do is link it to the net device structure */
671	rcu_assign_pointer(ndev->npinfo, npinfo);
672
673	return 0;
674
675free_npinfo:
676	kfree(npinfo);
677out:
678	return err;
679}
680EXPORT_SYMBOL_GPL(__netpoll_setup);
681
682int netpoll_setup(struct netpoll *np)
683{
684	struct net_device *ndev = NULL;
685	bool ip_overwritten = false;
686	struct in_device *in_dev;
687	int err;
688
689	rtnl_lock();
690	if (np->dev_name[0]) {
691		struct net *net = current->nsproxy->net_ns;
692		ndev = __dev_get_by_name(net, np->dev_name);
693	}
694	if (!ndev) {
695		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
696		err = -ENODEV;
697		goto unlock;
698	}
699	netdev_hold(ndev, &np->dev_tracker, GFP_KERNEL);
700
701	if (netdev_master_upper_dev_get(ndev)) {
702		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
703		err = -EBUSY;
704		goto put;
705	}
706
707	if (!netif_running(ndev)) {
708		unsigned long atmost;
709
710		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
711
712		err = dev_open(ndev, NULL);
713
714		if (err) {
715			np_err(np, "failed to open %s\n", ndev->name);
716			goto put;
717		}
718
719		rtnl_unlock();
720		atmost = jiffies + carrier_timeout * HZ;
721		while (!netif_carrier_ok(ndev)) {
722			if (time_after(jiffies, atmost)) {
723				np_notice(np, "timeout waiting for carrier\n");
724				break;
725			}
726			msleep(1);
727		}
728
729		rtnl_lock();
730	}
731
732	if (!np->local_ip.ip) {
733		if (!np->ipv6) {
734			const struct in_ifaddr *ifa;
735
736			in_dev = __in_dev_get_rtnl(ndev);
737			if (!in_dev)
738				goto put_noaddr;
739
740			ifa = rtnl_dereference(in_dev->ifa_list);
741			if (!ifa) {
742put_noaddr:
743				np_err(np, "no IP address for %s, aborting\n",
744				       np->dev_name);
745				err = -EDESTADDRREQ;
746				goto put;
747			}
748
749			np->local_ip.ip = ifa->ifa_local;
750			ip_overwritten = true;
751			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
752		} else {
753#if IS_ENABLED(CONFIG_IPV6)
754			struct inet6_dev *idev;
755
756			err = -EDESTADDRREQ;
757			idev = __in6_dev_get(ndev);
758			if (idev) {
759				struct inet6_ifaddr *ifp;
760
761				read_lock_bh(&idev->lock);
762				list_for_each_entry(ifp, &idev->addr_list, if_list) {
763					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
764					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
765						continue;
766					np->local_ip.in6 = ifp->addr;
767					ip_overwritten = true;
768					err = 0;
769					break;
770				}
771				read_unlock_bh(&idev->lock);
772			}
773			if (err) {
774				np_err(np, "no IPv6 address for %s, aborting\n",
775				       np->dev_name);
776				goto put;
777			} else
778				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
779#else
 780			np_err(np, "IPv6 is not supported on %s, aborting\n",
781			       np->dev_name);
782			err = -EINVAL;
783			goto put;
784#endif
785		}
786	}
787
788	err = __netpoll_setup(np, ndev);
789	if (err)
790		goto flush;
791	rtnl_unlock();
792	return 0;
793
794flush:
795	skb_pool_flush(np);
796put:
797	DEBUG_NET_WARN_ON_ONCE(np->dev);
798	if (ip_overwritten)
799		memset(&np->local_ip, 0, sizeof(np->local_ip));
800	netdev_put(ndev, &np->dev_tracker);
801unlock:
802	rtnl_unlock();
803	return err;
804}
805EXPORT_SYMBOL(netpoll_setup);
806
807static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
808{
809	struct netpoll_info *npinfo =
810			container_of(rcu_head, struct netpoll_info, rcu);
811
812	skb_queue_purge(&npinfo->txq);
813
814	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
815	cancel_delayed_work(&npinfo->tx_work);
816
817	/* clean after last, unfinished work */
818	__skb_queue_purge(&npinfo->txq);
819	/* now cancel it again */
820	cancel_delayed_work(&npinfo->tx_work);
821	kfree(npinfo);
822}
823
824void __netpoll_cleanup(struct netpoll *np)
825{
826	struct netpoll_info *npinfo;
827
828	npinfo = rtnl_dereference(np->dev->npinfo);
829	if (!npinfo)
830		return;
831
832	if (refcount_dec_and_test(&npinfo->refcnt)) {
833		const struct net_device_ops *ops;
834
835		ops = np->dev->netdev_ops;
836		if (ops->ndo_netpoll_cleanup)
837			ops->ndo_netpoll_cleanup(np->dev);
838
839		RCU_INIT_POINTER(np->dev->npinfo, NULL);
840		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
841	} else
842		RCU_INIT_POINTER(np->dev->npinfo, NULL);
843
844	skb_pool_flush(np);
845}
846EXPORT_SYMBOL_GPL(__netpoll_cleanup);
847
848void __netpoll_free(struct netpoll *np)
849{
850	ASSERT_RTNL();
851
852	/* Wait for transmitting packets to finish before freeing. */
853	synchronize_rcu();
854	__netpoll_cleanup(np);
855	kfree(np);
856}
857EXPORT_SYMBOL_GPL(__netpoll_free);
858
859void do_netpoll_cleanup(struct netpoll *np)
860{
861	__netpoll_cleanup(np);
862	netdev_put(np->dev, &np->dev_tracker);
863	np->dev = NULL;
864}
865EXPORT_SYMBOL(do_netpoll_cleanup);
866
867void netpoll_cleanup(struct netpoll *np)
868{
869	rtnl_lock();
870	if (!np->dev)
871		goto out;
872	do_netpoll_cleanup(np);
873out:
874	rtnl_unlock();
875}
876EXPORT_SYMBOL(netpoll_cleanup);
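For context, a minimal sketch of how a netconsole-like client might drive the API above; the module name, device, addresses, and ports are illustrative assumptions, not values taken from this file.

/* Hypothetical netpoll client (illustrative only). */
#include <linux/module.h>
#include <linux/netpoll.h>

static struct netpoll my_np = {
	.name = "mylog",	/* used by the np_info()/np_err() macros */
};

static int __init mylog_init(void)
{
	/* "src-port@src-ip/dev,dst-port@dst-ip/dst-mac" (made-up values) */
	char opt[] = "6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55";
	unsigned long flags;
	int err;

	err = netpoll_parse_options(&my_np, opt);	/* modifies opt in place */
	if (err)
		return err;

	err = netpoll_setup(&my_np);	/* grabs and, if needed, opens the device */
	if (err)
		return err;

	/* netpoll_send_udp() expects interrupts off (see the WARN above) */
	local_irq_save(flags);
	netpoll_send_udp(&my_np, "hello\n", 6);
	local_irq_restore(flags);
	return 0;
}

static void __exit mylog_exit(void)
{
	netpoll_cleanup(&my_np);	/* drops the device reference */
}

module_init(mylog_init);
module_exit(mylog_exit);
MODULE_LICENSE("GPL");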
net/core/netpoll.c (v6.9.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Common framework for low-level network console, dump, and debugger code
  4 *
  5 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
  6 *
  7 * based on the netconsole code from:
  8 *
  9 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 10 * Copyright (C) 2002  Red Hat, Inc.
 11 */
 12
 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 14
 15#include <linux/moduleparam.h>
 16#include <linux/kernel.h>
 17#include <linux/netdevice.h>
 18#include <linux/etherdevice.h>
 19#include <linux/string.h>
 20#include <linux/if_arp.h>
 21#include <linux/inetdevice.h>
 22#include <linux/inet.h>
 23#include <linux/interrupt.h>
 24#include <linux/netpoll.h>
 25#include <linux/sched.h>
 26#include <linux/delay.h>
 27#include <linux/rcupdate.h>
 28#include <linux/workqueue.h>
 29#include <linux/slab.h>
 30#include <linux/export.h>
 31#include <linux/if_vlan.h>
 32#include <net/tcp.h>
 33#include <net/udp.h>
 34#include <net/addrconf.h>
 35#include <net/ndisc.h>
 36#include <net/ip6_checksum.h>
 37#include <asm/unaligned.h>
 38#include <trace/events/napi.h>
 39#include <linux/kconfig.h>
 40
 41/*
 42 * We maintain a small pool of fully-sized skbs, to make sure the
 43 * message gets out even in extreme OOM situations.
 44 */
 45
 46#define MAX_UDP_CHUNK 1460
 47#define MAX_SKBS 32
 48
 49static struct sk_buff_head skb_pool;
 50
 51DEFINE_STATIC_SRCU(netpoll_srcu);
 52
 53#define USEC_PER_POLL	50
 54
 55#define MAX_SKB_SIZE							\
 56	(sizeof(struct ethhdr) +					\
 57	 sizeof(struct iphdr) +						\
 58	 sizeof(struct udphdr) +					\
 59	 MAX_UDP_CHUNK)
 60
 61static void zap_completion_queue(void);
 62
 63static unsigned int carrier_timeout = 4;
 64module_param(carrier_timeout, uint, 0644);
 65
 66#define np_info(np, fmt, ...)				\
 67	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
 68#define np_err(np, fmt, ...)				\
 69	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
 70#define np_notice(np, fmt, ...)				\
 71	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
 72
 73static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
 74				      struct net_device *dev,
 75				      struct netdev_queue *txq)
 76{
 77	netdev_tx_t status = NETDEV_TX_OK;
 78	netdev_features_t features;
 79
 80	features = netif_skb_features(skb);
 81
 82	if (skb_vlan_tag_present(skb) &&
 83	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
 84		skb = __vlan_hwaccel_push_inside(skb);
 85		if (unlikely(!skb)) {
 86			/* This is actually a packet drop, but we
 87			 * don't want the code that calls this
 88			 * function to try and operate on a NULL skb.
 89			 */
 90			goto out;
 91		}
 92	}
 93
 94	status = netdev_start_xmit(skb, dev, txq, false);
 95
 96out:
 97	return status;
 98}
 99
100static void queue_process(struct work_struct *work)
101{
102	struct netpoll_info *npinfo =
103		container_of(work, struct netpoll_info, tx_work.work);
104	struct sk_buff *skb;
105	unsigned long flags;
106
107	while ((skb = skb_dequeue(&npinfo->txq))) {
108		struct net_device *dev = skb->dev;
109		struct netdev_queue *txq;
110		unsigned int q_index;
111
112		if (!netif_device_present(dev) || !netif_running(dev)) {
113			kfree_skb(skb);
114			continue;
115		}
116
117		local_irq_save(flags);
118		/* check if skb->queue_mapping is still valid */
119		q_index = skb_get_queue_mapping(skb);
120		if (unlikely(q_index >= dev->real_num_tx_queues)) {
121			q_index = q_index % dev->real_num_tx_queues;
122			skb_set_queue_mapping(skb, q_index);
123		}
124		txq = netdev_get_tx_queue(dev, q_index);
125		HARD_TX_LOCK(dev, txq, smp_processor_id());
126		if (netif_xmit_frozen_or_stopped(txq) ||
127		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
128			skb_queue_head(&npinfo->txq, skb);
129			HARD_TX_UNLOCK(dev, txq);
130			local_irq_restore(flags);
131
132			schedule_delayed_work(&npinfo->tx_work, HZ/10);
133			return;
134		}
135		HARD_TX_UNLOCK(dev, txq);
136		local_irq_restore(flags);
137	}
138}
139
140static int netif_local_xmit_active(struct net_device *dev)
141{
142	int i;
143
144	for (i = 0; i < dev->num_tx_queues; i++) {
145		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
146
147		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
148			return 1;
149	}
150
151	return 0;
152}
153
154static void poll_one_napi(struct napi_struct *napi)
155{
156	int work;
157
158	/* If we set this bit but see that it has already been set,
159	 * that indicates that napi has been disabled and we need
160	 * to abort this operation
161	 */
162	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
163		return;
164
 165	/* We explicitly pass the polling call a budget of 0 to
166	 * indicate that we are clearing the Tx path only.
167	 */
168	work = napi->poll(napi, 0);
169	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
170	trace_napi_poll(napi, work, 0);
171
172	clear_bit(NAPI_STATE_NPSVC, &napi->state);
173}
174
175static void poll_napi(struct net_device *dev)
176{
177	struct napi_struct *napi;
178	int cpu = smp_processor_id();
179
180	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
181		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
182			poll_one_napi(napi);
183			smp_store_release(&napi->poll_owner, -1);
184		}
185	}
186}
187
188void netpoll_poll_dev(struct net_device *dev)
189{
190	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
191	const struct net_device_ops *ops;
192
 193	/* Don't do any rx activity if the dev_lock mutex is held;
 194	 * the dev_open/close paths use this to block netpoll activity
 195	 * while changing device state.
 196	 */
197	if (!ni || down_trylock(&ni->dev_lock))
198		return;
199
200	/* Some drivers will take the same locks in poll and xmit,
201	 * we can't poll if local CPU is already in xmit.
202	 */
203	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
204		up(&ni->dev_lock);
205		return;
206	}
207
208	ops = dev->netdev_ops;
209	if (ops->ndo_poll_controller)
210		ops->ndo_poll_controller(dev);
211
212	poll_napi(dev);
213
214	up(&ni->dev_lock);
215
216	zap_completion_queue();
217}
218EXPORT_SYMBOL(netpoll_poll_dev);
219
220void netpoll_poll_disable(struct net_device *dev)
221{
222	struct netpoll_info *ni;
223	int idx;
224	might_sleep();
225	idx = srcu_read_lock(&netpoll_srcu);
226	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
227	if (ni)
228		down(&ni->dev_lock);
229	srcu_read_unlock(&netpoll_srcu, idx);
230}
231EXPORT_SYMBOL(netpoll_poll_disable);
232
233void netpoll_poll_enable(struct net_device *dev)
234{
235	struct netpoll_info *ni;
236	rcu_read_lock();
237	ni = rcu_dereference(dev->npinfo);
238	if (ni)
239		up(&ni->dev_lock);
240	rcu_read_unlock();
241}
242EXPORT_SYMBOL(netpoll_poll_enable);
243
244static void refill_skbs(void)
245{
246	struct sk_buff *skb;
247	unsigned long flags;
248
249	spin_lock_irqsave(&skb_pool.lock, flags);
250	while (skb_pool.qlen < MAX_SKBS) {
251		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
252		if (!skb)
253			break;
254
255		__skb_queue_tail(&skb_pool, skb);
256	}
257	spin_unlock_irqrestore(&skb_pool.lock, flags);
258}
259
260static void zap_completion_queue(void)
261{
262	unsigned long flags;
263	struct softnet_data *sd = &get_cpu_var(softnet_data);
264
265	if (sd->completion_queue) {
266		struct sk_buff *clist;
267
268		local_irq_save(flags);
269		clist = sd->completion_queue;
270		sd->completion_queue = NULL;
271		local_irq_restore(flags);
272
273		while (clist != NULL) {
274			struct sk_buff *skb = clist;
275			clist = clist->next;
276			if (!skb_irq_freeable(skb)) {
277				refcount_set(&skb->users, 1);
278				dev_kfree_skb_any(skb); /* put this one back */
279			} else {
280				__kfree_skb(skb);
281			}
282		}
283	}
284
285	put_cpu_var(softnet_data);
286}
287
288static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
289{
290	int count = 0;
291	struct sk_buff *skb;
292
293	zap_completion_queue();
294	refill_skbs();
295repeat:
296
297	skb = alloc_skb(len, GFP_ATOMIC);
298	if (!skb)
299		skb = skb_dequeue(&skb_pool);
300
301	if (!skb) {
302		if (++count < 10) {
303			netpoll_poll_dev(np->dev);
304			goto repeat;
305		}
306		return NULL;
307	}
308
309	refcount_set(&skb->users, 1);
310	skb_reserve(skb, reserve);
311	return skb;
312}
313
314static int netpoll_owner_active(struct net_device *dev)
315{
316	struct napi_struct *napi;
317
318	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
319		if (napi->poll_owner == smp_processor_id())
320			return 1;
321	}
322	return 0;
323}
324
325/* call with IRQ disabled */
326static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
327{
328	netdev_tx_t status = NETDEV_TX_BUSY;
329	struct net_device *dev;
330	unsigned long tries;
331	/* It is up to the caller to keep npinfo alive. */
332	struct netpoll_info *npinfo;
333
334	lockdep_assert_irqs_disabled();
335
336	dev = np->dev;
337	npinfo = rcu_dereference_bh(dev->npinfo);
338
339	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
340		dev_kfree_skb_irq(skb);
341		return NET_XMIT_DROP;
342	}
343
344	/* don't get messages out of order, and no recursion */
345	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
346		struct netdev_queue *txq;
347
348		txq = netdev_core_pick_tx(dev, skb, NULL);
349
350		/* try until next clock tick */
351		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
352		     tries > 0; --tries) {
353			if (HARD_TX_TRYLOCK(dev, txq)) {
354				if (!netif_xmit_stopped(txq))
355					status = netpoll_start_xmit(skb, dev, txq);
356
357				HARD_TX_UNLOCK(dev, txq);
358
359				if (dev_xmit_complete(status))
360					break;
361
362			}
363
 364			/* tickle the device, maybe there is some cleanup */
365			netpoll_poll_dev(np->dev);
366
367			udelay(USEC_PER_POLL);
368		}
369
370		WARN_ONCE(!irqs_disabled(),
371			"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
372			dev->name, dev->netdev_ops->ndo_start_xmit);
373
374	}
375
376	if (!dev_xmit_complete(status)) {
377		skb_queue_tail(&npinfo->txq, skb);
 378		schedule_delayed_work(&npinfo->tx_work, 0);
379	}
380	return NETDEV_TX_OK;
381}
382
383netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
384{
385	unsigned long flags;
386	netdev_tx_t ret;
387
388	if (unlikely(!np)) {
389		dev_kfree_skb_irq(skb);
390		ret = NET_XMIT_DROP;
391	} else {
392		local_irq_save(flags);
393		ret = __netpoll_send_skb(np, skb);
394		local_irq_restore(flags);
395	}
396	return ret;
397}
398EXPORT_SYMBOL(netpoll_send_skb);
399
400void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
401{
402	int total_len, ip_len, udp_len;
403	struct sk_buff *skb;
404	struct udphdr *udph;
405	struct iphdr *iph;
406	struct ethhdr *eth;
407	static atomic_t ip_ident;
408	struct ipv6hdr *ip6h;
409
410	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
411		WARN_ON_ONCE(!irqs_disabled());
412
413	udp_len = len + sizeof(*udph);
414	if (np->ipv6)
415		ip_len = udp_len + sizeof(*ip6h);
416	else
417		ip_len = udp_len + sizeof(*iph);
418
419	total_len = ip_len + LL_RESERVED_SPACE(np->dev);
420
421	skb = find_skb(np, total_len + np->dev->needed_tailroom,
422		       total_len - len);
423	if (!skb)
424		return;
425
426	skb_copy_to_linear_data(skb, msg, len);
427	skb_put(skb, len);
428
429	skb_push(skb, sizeof(*udph));
430	skb_reset_transport_header(skb);
431	udph = udp_hdr(skb);
432	udph->source = htons(np->local_port);
433	udph->dest = htons(np->remote_port);
434	udph->len = htons(udp_len);
435
436	if (np->ipv6) {
437		udph->check = 0;
438		udph->check = csum_ipv6_magic(&np->local_ip.in6,
439					      &np->remote_ip.in6,
440					      udp_len, IPPROTO_UDP,
441					      csum_partial(udph, udp_len, 0));
442		if (udph->check == 0)
443			udph->check = CSUM_MANGLED_0;
444
445		skb_push(skb, sizeof(*ip6h));
446		skb_reset_network_header(skb);
447		ip6h = ipv6_hdr(skb);
448
449		/* ip6h->version = 6; ip6h->priority = 0; */
450		*(unsigned char *)ip6h = 0x60;
451		ip6h->flow_lbl[0] = 0;
452		ip6h->flow_lbl[1] = 0;
453		ip6h->flow_lbl[2] = 0;
454
455		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
456		ip6h->nexthdr = IPPROTO_UDP;
457		ip6h->hop_limit = 32;
458		ip6h->saddr = np->local_ip.in6;
459		ip6h->daddr = np->remote_ip.in6;
460
461		eth = skb_push(skb, ETH_HLEN);
462		skb_reset_mac_header(skb);
463		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
464	} else {
465		udph->check = 0;
466		udph->check = csum_tcpudp_magic(np->local_ip.ip,
467						np->remote_ip.ip,
468						udp_len, IPPROTO_UDP,
469						csum_partial(udph, udp_len, 0));
470		if (udph->check == 0)
471			udph->check = CSUM_MANGLED_0;
472
473		skb_push(skb, sizeof(*iph));
474		skb_reset_network_header(skb);
475		iph = ip_hdr(skb);
476
477		/* iph->version = 4; iph->ihl = 5; */
478		*(unsigned char *)iph = 0x45;
479		iph->tos      = 0;
480		put_unaligned(htons(ip_len), &(iph->tot_len));
481		iph->id       = htons(atomic_inc_return(&ip_ident));
482		iph->frag_off = 0;
483		iph->ttl      = 64;
484		iph->protocol = IPPROTO_UDP;
485		iph->check    = 0;
486		put_unaligned(np->local_ip.ip, &(iph->saddr));
487		put_unaligned(np->remote_ip.ip, &(iph->daddr));
488		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);
489
490		eth = skb_push(skb, ETH_HLEN);
491		skb_reset_mac_header(skb);
492		skb->protocol = eth->h_proto = htons(ETH_P_IP);
493	}
494
495	ether_addr_copy(eth->h_source, np->dev->dev_addr);
496	ether_addr_copy(eth->h_dest, np->remote_mac);
497
498	skb->dev = np->dev;
499
500	netpoll_send_skb(np, skb);
501}
502EXPORT_SYMBOL(netpoll_send_udp);
503
504void netpoll_print_options(struct netpoll *np)
505{
506	np_info(np, "local port %d\n", np->local_port);
507	if (np->ipv6)
508		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
509	else
510		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
511	np_info(np, "interface '%s'\n", np->dev_name);
512	np_info(np, "remote port %d\n", np->remote_port);
513	if (np->ipv6)
514		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
515	else
516		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
517	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
518}
519EXPORT_SYMBOL(netpoll_print_options);
520
521static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
522{
523	const char *end;
524
525	if (!strchr(str, ':') &&
526	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
527		if (!*end)
528			return 0;
529	}
530	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
531#if IS_ENABLED(CONFIG_IPV6)
532		if (!*end)
533			return 1;
534#else
535		return -1;
536#endif
537	}
538	return -1;
539}
540
541int netpoll_parse_options(struct netpoll *np, char *opt)
542{
 543	char *cur = opt, *delim;
544	int ipv6;
545	bool ipversion_set = false;
546
547	if (*cur != '@') {
548		if ((delim = strchr(cur, '@')) == NULL)
549			goto parse_failed;
550		*delim = 0;
551		if (kstrtou16(cur, 10, &np->local_port))
552			goto parse_failed;
553		cur = delim;
554	}
555	cur++;
556
557	if (*cur != '/') {
558		ipversion_set = true;
559		if ((delim = strchr(cur, '/')) == NULL)
560			goto parse_failed;
561		*delim = 0;
562		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
563		if (ipv6 < 0)
564			goto parse_failed;
565		else
566			np->ipv6 = (bool)ipv6;
567		cur = delim;
568	}
569	cur++;
570
571	if (*cur != ',') {
572		/* parse out dev name */
573		if ((delim = strchr(cur, ',')) == NULL)
574			goto parse_failed;
575		*delim = 0;
576		strscpy(np->dev_name, cur, sizeof(np->dev_name));
577		cur = delim;
578	}
579	cur++;
580
581	if (*cur != '@') {
582		/* dst port */
583		if ((delim = strchr(cur, '@')) == NULL)
584			goto parse_failed;
585		*delim = 0;
586		if (*cur == ' ' || *cur == '\t')
587			np_info(np, "warning: whitespace is not allowed\n");
588		if (kstrtou16(cur, 10, &np->remote_port))
589			goto parse_failed;
590		cur = delim;
591	}
592	cur++;
593
594	/* dst ip */
595	if ((delim = strchr(cur, '/')) == NULL)
596		goto parse_failed;
597	*delim = 0;
598	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
599	if (ipv6 < 0)
600		goto parse_failed;
601	else if (ipversion_set && np->ipv6 != (bool)ipv6)
602		goto parse_failed;
603	else
604		np->ipv6 = (bool)ipv6;
605	cur = delim + 1;
606
607	if (*cur != 0) {
608		/* MAC address */
609		if (!mac_pton(cur, np->remote_mac))
610			goto parse_failed;
611	}
612
613	netpoll_print_options(np);
614
615	return 0;
616
617 parse_failed:
618	np_info(np, "couldn't parse config at '%s'!\n", cur);
619	return -1;
620}
621EXPORT_SYMBOL(netpoll_parse_options);
622
623int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
624{
625	struct netpoll_info *npinfo;
626	const struct net_device_ops *ops;
627	int err;
628
629	np->dev = ndev;
630	strscpy(np->dev_name, ndev->name, IFNAMSIZ);
631
632	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
633		np_err(np, "%s doesn't support polling, aborting\n",
634		       np->dev_name);
635		err = -ENOTSUPP;
636		goto out;
637	}
638
639	if (!ndev->npinfo) {
640		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
641		if (!npinfo) {
642			err = -ENOMEM;
643			goto out;
644		}
645
646		sema_init(&npinfo->dev_lock, 1);
647		skb_queue_head_init(&npinfo->txq);
648		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
649
650		refcount_set(&npinfo->refcnt, 1);
651
652		ops = np->dev->netdev_ops;
653		if (ops->ndo_netpoll_setup) {
654			err = ops->ndo_netpoll_setup(ndev, npinfo);
655			if (err)
656				goto free_npinfo;
657		}
658	} else {
659		npinfo = rtnl_dereference(ndev->npinfo);
660		refcount_inc(&npinfo->refcnt);
661	}
662
663	npinfo->netpoll = np;
664
665	/* last thing to do is link it to the net device structure */
666	rcu_assign_pointer(ndev->npinfo, npinfo);
667
668	return 0;
669
670free_npinfo:
671	kfree(npinfo);
672out:
673	return err;
674}
675EXPORT_SYMBOL_GPL(__netpoll_setup);
676
677int netpoll_setup(struct netpoll *np)
678{
679	struct net_device *ndev = NULL;
680	struct in_device *in_dev;
681	int err;
682
683	rtnl_lock();
684	if (np->dev_name[0]) {
685		struct net *net = current->nsproxy->net_ns;
686		ndev = __dev_get_by_name(net, np->dev_name);
687	}
688	if (!ndev) {
689		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
690		err = -ENODEV;
691		goto unlock;
692	}
693	netdev_hold(ndev, &np->dev_tracker, GFP_KERNEL);
694
695	if (netdev_master_upper_dev_get(ndev)) {
696		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
697		err = -EBUSY;
698		goto put;
699	}
700
701	if (!netif_running(ndev)) {
702		unsigned long atmost;
703
704		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
705
706		err = dev_open(ndev, NULL);
707
708		if (err) {
709			np_err(np, "failed to open %s\n", ndev->name);
710			goto put;
711		}
712
713		rtnl_unlock();
714		atmost = jiffies + carrier_timeout * HZ;
715		while (!netif_carrier_ok(ndev)) {
716			if (time_after(jiffies, atmost)) {
717				np_notice(np, "timeout waiting for carrier\n");
718				break;
719			}
720			msleep(1);
721		}
722
723		rtnl_lock();
724	}
725
726	if (!np->local_ip.ip) {
727		if (!np->ipv6) {
728			const struct in_ifaddr *ifa;
729
730			in_dev = __in_dev_get_rtnl(ndev);
731			if (!in_dev)
732				goto put_noaddr;
733
734			ifa = rtnl_dereference(in_dev->ifa_list);
735			if (!ifa) {
736put_noaddr:
737				np_err(np, "no IP address for %s, aborting\n",
738				       np->dev_name);
739				err = -EDESTADDRREQ;
740				goto put;
741			}
742
743			np->local_ip.ip = ifa->ifa_local;
744			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
745		} else {
746#if IS_ENABLED(CONFIG_IPV6)
747			struct inet6_dev *idev;
748
749			err = -EDESTADDRREQ;
750			idev = __in6_dev_get(ndev);
751			if (idev) {
752				struct inet6_ifaddr *ifp;
753
754				read_lock_bh(&idev->lock);
755				list_for_each_entry(ifp, &idev->addr_list, if_list) {
756					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
757					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
758						continue;
759					np->local_ip.in6 = ifp->addr;
760					err = 0;
761					break;
762				}
763				read_unlock_bh(&idev->lock);
764			}
765			if (err) {
766				np_err(np, "no IPv6 address for %s, aborting\n",
767				       np->dev_name);
768				goto put;
769			} else
770				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
771#else
 772			np_err(np, "IPv6 is not supported on %s, aborting\n",
773			       np->dev_name);
774			err = -EINVAL;
775			goto put;
776#endif
777		}
778	}
779
780	/* fill up the skb queue */
781	refill_skbs();
782
783	err = __netpoll_setup(np, ndev);
784	if (err)
785		goto put;
786	rtnl_unlock();
787	return 0;
788
789put:
790	netdev_put(ndev, &np->dev_tracker);
791unlock:
792	rtnl_unlock();
793	return err;
794}
795EXPORT_SYMBOL(netpoll_setup);
796
797static int __init netpoll_init(void)
798{
799	skb_queue_head_init(&skb_pool);
800	return 0;
801}
802core_initcall(netpoll_init);
803
804static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
805{
806	struct netpoll_info *npinfo =
807			container_of(rcu_head, struct netpoll_info, rcu);
808
809	skb_queue_purge(&npinfo->txq);
810
811	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
812	cancel_delayed_work(&npinfo->tx_work);
813
814	/* clean after last, unfinished work */
815	__skb_queue_purge(&npinfo->txq);
816	/* now cancel it again */
817	cancel_delayed_work(&npinfo->tx_work);
818	kfree(npinfo);
819}
820
821void __netpoll_cleanup(struct netpoll *np)
822{
823	struct netpoll_info *npinfo;
824
825	npinfo = rtnl_dereference(np->dev->npinfo);
826	if (!npinfo)
827		return;
828
829	synchronize_srcu(&netpoll_srcu);
830
831	if (refcount_dec_and_test(&npinfo->refcnt)) {
832		const struct net_device_ops *ops;
833
834		ops = np->dev->netdev_ops;
835		if (ops->ndo_netpoll_cleanup)
836			ops->ndo_netpoll_cleanup(np->dev);
837
838		RCU_INIT_POINTER(np->dev->npinfo, NULL);
839		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
840	} else
841		RCU_INIT_POINTER(np->dev->npinfo, NULL);
842}
843EXPORT_SYMBOL_GPL(__netpoll_cleanup);
844
845void __netpoll_free(struct netpoll *np)
846{
847	ASSERT_RTNL();
848
849	/* Wait for transmitting packets to finish before freeing. */
850	synchronize_rcu();
851	__netpoll_cleanup(np);
852	kfree(np);
853}
854EXPORT_SYMBOL_GPL(__netpoll_free);
855
856void netpoll_cleanup(struct netpoll *np)
857{
858	rtnl_lock();
859	if (!np->dev)
860		goto out;
861	__netpoll_cleanup(np);
862	netdev_put(np->dev, &np->dev_tracker);
863	np->dev = NULL;
864out:
865	rtnl_unlock();
866}
867EXPORT_SYMBOL(netpoll_cleanup);