v5.14.15
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBFL_ZEROCOPY_ENABLE as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}

int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
	if (rc)
		napi_schedule(&queue->napi);
	return rc;
}

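/* The interrupt handlers below use Xen's late-EOI scheme: each one
 * first marks an EOI as pending in queue->eoi_pending and then looks
 * for real work. If there is none, the pending bit is cleared again
 * and xen_irq_lateeoi() is issued immediately with
 * XEN_EOI_FLAG_SPURIOUS, telling the event channel core that the
 * event carried no work.
 */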
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_tx_interrupt(queue)) {
		atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

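/* NAPI poll handler: drains up to @budget transmit requests from the
 * guest. If less than the full budget was used, polling completes and
 * event notifications are re-enabled, unless the queue is currently
 * rate-limited (in which case the credit timer reschedules it).
 */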
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, so we pretend there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = xenvif_have_rx_work(queue, false);
	if (rc)
		xenvif_kick_thread(queue);
	return rc;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_rx_interrupt(queue)) {
		atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

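/* Combined handler, used when the frontend did not negotiate
 * feature-split-event-channels and a single event channel carries
 * both Tx and Rx notifications (see xenvif_connect_data() below).
 */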
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;
	bool has_rx, has_tx;

	old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
	WARN(old, "Interrupt while EOI pending\n");

	has_tx = xenvif_handle_tx_interrupt(queue);
	has_rx = xenvif_handle_rx_interrupt(queue);

	if (!has_rx && !has_tx) {
		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

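/* Queue selection, in decreasing order of preference:
 *  - no hash algorithm configured: fall back to the core
 *    netdev_pick_tx() and fold the result onto the real queue count;
 *  - hash configured but the mapping table is empty: use the raw skb
 *    hash modulo the real queue count;
 *  - otherwise: look the hash up in the currently selected mapping
 *    table.
 */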
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
	unsigned int num_queues;

	/* If queues are not set up internally, always return 0
	 * as the packet is going to be dropped anyway. */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return netdev_pick_tx(dev, skb, NULL) %
		       dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[vif->hash.mapping_sel]
				[skb_get_hash_raw(skb) % size];
}

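/* Note: from the backend's point of view, "transmitting" a packet on
 * this netdev means delivering it to the guest, so the skb is queued
 * on the xenvif Rx machinery and the per-queue kthread is kicked to
 * push it across the shared ring.
 */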
static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	rcu_read_unlock();

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

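/* Note the ordering in xenvif_up()/xenvif_down(): on the way up NAPI
 * is enabled before the IRQs; on the way down the IRQs are disabled
 * first, then NAPI, and finally any pending credit timer is
 * cancelled.
 */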
static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;

		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;

			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}

	rcu_read_unlock();
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	vif->xdp_headroom = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

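/* Per-queue initialisation. Credit starts out effectively unlimited
 * (~0UL bytes per window) until rate-limiting parameters are
 * configured elsewhere, and the pending ring is populated with every
 * free slot index.
 */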
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so
	 * you had better enable it. The long-term solution would be to
	 * use just a bunch of valid page descriptors, without dependency
	 * on ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

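/* Connect the control ring: map the single grant page, attach the
 * back ring, sanity-check the producer/consumer indices published by
 * the frontend, and bind the control event channel to a threaded IRQ
 * handler. Everything is unwound in reverse order on failure.
 */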
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	struct xenbus_device *xendev = xenvif_to_xenbus_device(vif);
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xendev, &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	rsp_prod = READ_ONCE(shared->rsp_prod);
	req_prod = READ_ONCE(shared->req_prod);

	BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
		goto err_unmap;

	err = bind_interdomain_evtchn_to_irq_lateeoi(xendev, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xendev, vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

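/* Tear a queue down in the reverse order of xenvif_connect_data():
 * stop the kthreads first (dropping the extra reference held on the
 * guest-rx task), remove the NAPI instance, unbind the IRQ(s) and
 * finally unmap the shared data rings.
 */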
static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
	if (queue->task) {
		kthread_stop(queue->task);
		put_task_struct(queue->task);
		queue->task = NULL;
	}

	if (queue->dealloc_task) {
		kthread_stop(queue->dealloc_task);
		queue->dealloc_task = NULL;
	}

	if (queue->napi.poll) {
		netif_napi_del(&queue->napi);
		queue->napi.poll = NULL;
	}

	if (queue->tx_irq) {
		unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq == queue->rx_irq)
			queue->rx_irq = 0;
		queue->tx_irq = 0;
	}

	if (queue->rx_irq) {
		unbind_from_irqhandler(queue->rx_irq, queue);
		queue->rx_irq = 0;
	}

	xenvif_unmap_frontend_data_rings(queue);
}

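/* Connect a queue's data path: map the Tx/Rx shared rings, start the
 * guest-rx and dealloc kthreads, and bind either one shared event
 * channel or split Tx/Rx channels depending on what the frontend
 * negotiated. The IRQs are left disabled here; xenvif_up() enables
 * them once the interface is brought up.
 */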
int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
	struct task_struct *task;
	int err;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	queue->stalled = true;

	task = kthread_run(xenvif_kthread_guest_rx, queue,
			   "%s-guest-rx", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->task = task;
	/*
	 * Take a reference to the task in order to prevent it from being freed
	 * if the thread function returns before kthread_stop is called.
	 */
	get_task_struct(task);

	task = kthread_run(xenvif_dealloc_kthread, queue,
			   "%s-dealloc", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->dealloc_task = task;

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	return 0;

kthread_err:
	pr_warn("Could not allocate kthread for %s\n", queue->name);
	err = PTR_ERR(task);
err:
	xenvif_disconnect_queue(queue);
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		xenvif_disconnect_queue(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

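/* Final teardown. Freeing the netdev before the queues is safe
 * because the queue array is allocated separately and released with
 * vfree() below, after each queue has been deinitialised.
 */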
void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}