v5.9
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
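/* A worked example under common assumptions: with 4 KiB pages,
 * XEN_NETIF_RX_RING_SIZE works out to 256 entries, so the cap above is
 * 256/2 * 4096 = 512 KiB per queue; the exact figure depends on
 * PAGE_SIZE and the ring layout of the build.
 */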

/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}
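
/* A minimal sketch of the intended pairing (hypothetical call sites, for
 * illustration only -- the real callers live in netback.c): prepare is
 * invoked once per zerocopy skb before it is handed to the stack, and
 * complete runs from the zerocopy destructor, so inflight_packets
 * balances back to zero once every fragment has been copied or freed:
 *
 *	xenvif_skb_zerocopy_prepare(queue, skb);   // inflight_packets++
 *	netif_receive_skb(skb);                    // stack may keep frags
 *	...
 *	// later, from xenvif_zerocopy_callback():
 *	xenvif_skb_zerocopy_complete(queue);       // inflight_packets--,
 *	                                           // wakes the dealloc thread
 */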

int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue; we pretend there is nothing to do
	 * for this vif so that it is descheduled from NAPI. The
	 * interface will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}
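
/* A note on the NAPI contract relied on above (core kernel behaviour, not
 * specific to this driver): returning fewer than "budget" tells the core
 * the queue is drained, and napi_complete_done() takes the queue off the
 * poll list, so event delivery must be re-armed (here via
 * xenvif_napi_schedule_or_enable_events()); returning the full budget
 * keeps the queue scheduled and xenvif_poll() runs again without another
 * event.
 */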

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
	unsigned int num_queues;

	/* If queues are not set up internally, always return 0,
	 * as the packet is going to be dropped anyway. */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return netdev_pick_tx(dev, skb, NULL) %
		       dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[vif->hash.mapping_sel]
				[skb_get_hash_raw(skb) % size];
}
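
/* The final lookup above is effectively double-buffered: the frontend's
 * hash is reduced modulo the table size and indexes one of two mapping
 * arrays, with mapping_sel selecting the active one so the control path
 * can publish a new table atomically. (This reading is inferred from the
 * layout here; see the hash mapping code in hash.c.)
 */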

static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	rcu_read_unlock();

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}
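
/* Worked numbers, assuming the usual constants: with scatter-gather the
 * ceiling is ETH_MAX_MTU - VLAN_ETH_HLEN = 65535 - 18 = 65517 bytes;
 * without it a frame must fit in a single slot, so the limit falls back
 * to ETH_DATA_LEN (1500).
 */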

static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets that were
	 * never freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;
			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}

	rcu_read_unlock();
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	vif->xdp_headroom = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;
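	/* The two statements above build FE:FF:FF:FF:FF:FF in place:
	 * eth_broadcast_addr() writes FF:FF:FF:FF:FF:FF, and clearing
	 * bit 0 of the first octet (0xff & ~0x01 = 0xfe) drops the
	 * group/multicast bit, yielding the largest unicast address as
	 * the comment above describes.
	 */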

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * had better enable it. The long-term solution would be to use just
	 * a bunch of valid page descriptors, without dependency on ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	rsp_prod = READ_ONCE(shared->rsp_prod);
	req_prod = READ_ONCE(shared->req_prod);

	BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
		goto err_unmap;

	err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
				vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
	if (queue->task) {
		kthread_stop(queue->task);
		queue->task = NULL;
	}

	if (queue->dealloc_task) {
		kthread_stop(queue->dealloc_task);
		queue->dealloc_task = NULL;
	}

	if (queue->napi.poll) {
		netif_napi_del(&queue->napi);
		queue->napi.poll = NULL;
	}

	if (queue->tx_irq) {
		unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq == queue->rx_irq)
			queue->rx_irq = 0;
		queue->tx_irq = 0;
	}

	if (queue->rx_irq) {
		unbind_from_irqhandler(queue->rx_irq, queue);
		queue->rx_irq = 0;
	}

	xenvif_unmap_frontend_data_rings(queue);
}

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	queue->stalled = true;

	task = kthread_run(xenvif_kthread_guest_rx, queue,
			   "%s-guest-rx", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->task = task;

	task = kthread_run(xenvif_dealloc_kthread, queue,
			   "%s-dealloc", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->dealloc_task = task;

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}
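
	/* Which branch runs is negotiated via the xenstore key
	 * feature-split-event-channels (per the Xen netif protocol): when
	 * the frontend advertises it, Tx and Rx notifications arrive on
	 * separate event channels, each bound to its own IRQ; otherwise a
	 * single channel carries both and xenvif_interrupt() fans out to
	 * the Tx and Rx handlers.
	 */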

	return 0;

kthread_err:
	pr_warn("Could not allocate kthread for %s\n", queue->name);
	err = PTR_ERR(task);
err:
	xenvif_disconnect_queue(queue);
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		xenvif_disconnect_queue(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}
v4.6
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}

int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue; we pretend there is nothing to do
	 * for this vif so that it is descheduled from NAPI. The
	 * interface will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete(napi);
		xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets that were
	 * never freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = vif->num_queues;
	int i;
	unsigned int queue_index;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;
			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_timeout.function = xenvif_tx_credit_callback;
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * had better enable it. The long-term solution would be to use just
	 * a bunch of valid page descriptors, without dependency on ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	queue->stalled = true;

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;
	get_task_struct(task);

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_rings(queue);
	netif_napi_del(&queue->napi);
err:
	module_put(THIS_MODULE);
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

void xenvif_disconnect(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		netif_napi_del(&queue->napi);

		if (queue->task) {
			kthread_stop(queue->task);
			put_task_struct(queue->task);
			queue->task = NULL;
		}

		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
		}

		xenvif_unmap_frontend_rings(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}