/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64

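/* A vif can be scheduled for work only while its net_device is up and
 * the carrier is on (the carrier is turned on by xenvif_connect()).
 */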
int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif *vif = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
                napi_schedule(&vif->napi);

        return IRQ_HANDLED;
}

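/* NAPI poll handler: process up to @budget TX requests posted by the
 * frontend. TX work runs here in NAPI context; RX work is done by the
 * kthread woken from xenvif_rx_interrupt().
 */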
static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif *vif = container_of(napi, struct xenvif, napi);
        int work_done;

        /* This vif is rogue, so pretend there is nothing to do for it
         * in order to deschedule it from NAPI. The interface will be
         * turned off in thread context later.
         */
        if (unlikely(vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(vif, budget);

        if (work_done < budget) {
                napi_complete(napi);
                xenvif_napi_schedule_or_enable_events(vif);
        }

        return work_done;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif *vif = dev_id;

        xenvif_kick_thread(vif);

        return IRQ_HANDLED;
}

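/* Combined handler, used when the frontend shares a single event
 * channel for TX and RX (feature-split-event-channels == 0).
 */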
static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

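/* Timer callback armed by xenvif_start_xmit() when the ring is too
 * full to take a packet: purge the internal rx_queue and restart the
 * stopped netdev queue so the stack can make progress again.
 */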
static void xenvif_wake_queue(unsigned long data)
{
        struct xenvif *vif = (struct xenvif *)data;

        if (netif_queue_stopped(vif->dev)) {
                netdev_err(vif->dev, "draining TX queue\n");
                vif->rx_queue_purge = true;
                xenvif_kick_thread(vif);
                netif_wake_queue(vif->dev);
        }
}

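/* Packets arriving here from the local stack are destined for the
 * guest; in netback terms the frontend receives them, hence they are
 * queued on the vif's rx_queue.
 */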
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        int min_slots_needed;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if vif is not ready */
        if (vif->task == NULL ||
            vif->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        /* At best we'll need one slot for the header and one for each
         * frag.
         */
        min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;

        /* If the skb is GSO then we'll also need an extra slot for the
         * metadata.
         */
        if (skb_is_gso(skb))
                min_slots_needed++;

        /* If the skb can't possibly fit in the remaining slots
         * then turn off the queue to give the ring a chance to
         * drain.
         */
        if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
                vif->wake_queue.function = xenvif_wake_queue;
                vif->wake_queue.data = (unsigned long)vif;
                xenvif_stop_queue(vif);
                mod_timer(&vif->wake_queue,
                          jiffies + rx_drain_timeout_jiffies);
        }

        skb_queue_tail(&vif->rx_queue, skb);
        xenvif_kick_thread(vif);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        return &vif->dev->stats;
}

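/* Enable NAPI and the frontend's interrupt(s), then check for any
 * work that arrived while they were disabled. Called once both the
 * device is running and the carrier is on.
 */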
static void xenvif_up(struct xenvif *vif)
{
        napi_enable(&vif->napi);
        enable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                enable_irq(vif->rx_irq);
        xenvif_napi_schedule_or_enable_events(vif);
}

static void xenvif_down(struct xenvif *vif)
{
        napi_disable(&vif->napi);
        disable_irq(vif->tx_irq);
        if (vif->tx_irq != vif->rx_irq)
                disable_irq(vif->rx_irq);
        del_timer_sync(&vif->credit_timeout);
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_up(vif);
        netif_start_queue(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (netif_carrier_ok(dev))
                xenvif_down(vif);
        netif_stop_queue(dev);
        return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS value.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        void *vif = netdev_priv(dev);
        int i;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset);
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};
        int i;

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
                                     MAX_GRANT_COPY_OPS);
        if (vif->grant_copy_op == NULL) {
                pr_warn("Could not allocate grant copy space for %s\n", name);
                free_netdev(dev);
                return ERR_PTR(-ENOMEM);
        }

        vif->domid = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;

        vif->disabled = false;

        vif->credit_bytes = vif->remaining_credit = ~0UL;
        vif->credit_usec = 0UL;
        init_timer(&vif->credit_timeout);
        vif->credit_window_start = get_jiffies_64();

        init_timer(&vif->wake_queue);

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        skb_queue_head_init(&vif->rx_queue);
        skb_queue_head_init(&vif->tx_queue);

        vif->pending_cons = 0;
        vif->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; i++)
                vif->pending_ring[i] = i;
        spin_lock_init(&vif->callback_lock);
        spin_lock_init(&vif->response_lock);
        /* If ballooning is disabled, this will consume real memory, so you
         * had better enable it. The long-term solution would be to use just a
         * bunch of valid page descriptors, without dependency on ballooning.
         */
        err = alloc_xenballooned_pages(MAX_PENDING_REQS,
                                       vif->mmap_pages,
                                       false);
        if (err) {
                netdev_err(dev, "Could not reserve mmap_pages\n");
                /* Release what was allocated above, matching the other
                 * error paths in this function.
                 */
                vfree(vif->grant_copy_op);
                free_netdev(dev);
                return ERR_PTR(-ENOMEM);
        }
        for (i = 0; i < MAX_PENDING_REQS; i++) {
                vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        memset(dev->dev_addr, 0xFF, ETH_ALEN);
        dev->dev_addr[0] &= ~0x01;

        netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}

int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
                   unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(vif->tx_irq);
        BUG_ON(vif->task);
        BUG_ON(vif->dealloc_task);

        err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&vif->wq);
        init_waitqueue_head(&vif->dealloc_wq);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        vif->dev->name, vif);
                if (err < 0)
                        goto err_unmap;
                vif->tx_irq = vif->rx_irq = err;
                disable_irq(vif->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(vif->tx_irq_name, sizeof(vif->tx_irq_name),
                         "%s-tx", vif->dev->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        vif->tx_irq_name, vif);
                if (err < 0)
                        goto err_unmap;
                vif->tx_irq = err;
                disable_irq(vif->tx_irq);

                snprintf(vif->rx_irq_name, sizeof(vif->rx_irq_name),
                         "%s-rx", vif->dev->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        vif->rx_irq_name, vif);
                if (err < 0)
                        goto err_tx_unbind;
                vif->rx_irq = err;
                disable_irq(vif->rx_irq);
        }

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)vif, "%s-guest-rx", vif->dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }

        vif->task = task;

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)vif, "%s-dealloc", vif->dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }

        vif->dealloc_task = task;

        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        netif_carrier_on(vif->dev);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();

        wake_up_process(vif->task);
        wake_up_process(vif->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(vif->rx_irq, vif);
        vif->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(vif->tx_irq, vif);
        vif->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_rings(vif);
err:
        module_put(THIS_MODULE);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        netif_carrier_off(dev); /* discard queued packets */
        if (netif_running(dev))
                xenvif_down(vif);
        rtnl_unlock();
}

void xenvif_disconnect(struct xenvif *vif)
{
        if (netif_carrier_ok(vif->dev))
                xenvif_carrier_off(vif);

        if (vif->task) {
                del_timer_sync(&vif->wake_queue);
                kthread_stop(vif->task);
                vif->task = NULL;
        }

        if (vif->dealloc_task) {
                kthread_stop(vif->dealloc_task);
                vif->dealloc_task = NULL;
        }

        if (vif->tx_irq) {
                if (vif->tx_irq == vif->rx_irq)
                        unbind_from_irqhandler(vif->tx_irq, vif);
                else {
                        unbind_from_irqhandler(vif->tx_irq, vif);
                        unbind_from_irqhandler(vif->rx_irq, vif);
                }
                vif->tx_irq = 0;
        }

        xenvif_unmap_frontend_rings(vif);
}

void xenvif_free(struct xenvif *vif)
{
        int i, unmap_timeout = 0;
        /* Here we want to avoid timeout messages if an skb can be legitimately
         * stuck somewhere else. Realistically this could be another vif's
         * internal or QDisc queue. That other vif also has this
         * rx_drain_timeout_msecs timeout, but the timer only ditches the
         * internal queue. After that, the QDisc queue can put in worst case
         * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that other vif's
         * internal queue, so we need several rounds of such timeouts until we
         * can be sure that no other vif still holds skbs from us. We are not
         * sending more skbs, so newly stuck packets are not interesting
         * for us here.
         */
        unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
                DIV_ROUND_UP(XENVIF_QUEUE_LENGTH,
                             (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));

        for (i = 0; i < MAX_PENDING_REQS; ++i) {
                if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
                        unmap_timeout++;
                        schedule_timeout(msecs_to_jiffies(1000));
                        if (unmap_timeout > worst_case_skb_lifetime &&
                            net_ratelimit())
                                netdev_err(vif->dev,
                                           "Page still granted! Index: %x\n",
                                           i);
                        /* If there are still unmapped pages, reset the loop to
                         * start checking again. We shouldn't exit here until
                         * dealloc thread and NAPI instance release all the
                         * pages. If a kernel bug causes the skbs to stall
                         * somewhere, the interface cannot be brought down
                         * properly.
                         */
                        i = -1;
                }
        }

        free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);

        netif_napi_del(&vif->napi);

        unregister_netdev(vif->dev);

        vfree(vif->grant_copy_op);
        free_netdev(vif->dev);

        module_put(THIS_MODULE);
}
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBTX_DEV_ZEROCOPY as well as to
 * increase the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb)
{
        skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
        atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
        atomic_dec(&queue->inflight_packets);

        /* Wake the dealloc thread _after_ decrementing inflight_packets so
         * that if kthread_stop() has already been called, the dealloc thread
         * does not wait forever with nothing to wake it.
         */
        wake_up(&queue->dealloc_wq);
}

int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) &&
                test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
                !vif->disabled;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
                napi_schedule(&queue->napi);

        return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
        int work_done;

        /* This vif is rogue, so pretend there is nothing to do for it
         * in order to deschedule it from NAPI. The interface will be
         * turned off in thread context later.
         */
        if (unlikely(queue->vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                /* If the queue is rate-limited, it shall be
                 * rescheduled in the timer callback.
                 */
                if (likely(!queue->rate_limited))
                        xenvif_napi_schedule_or_enable_events(queue);
        }

        return work_done;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        xenvif_kick_thread(queue);

        return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

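/* Per-queue wrappers for the netdev TX queue state: queue->id maps
 * 1:1 onto the netdev's TX queue index.
 */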
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

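/* Pick the TX queue for an skb: use the hash mapping configured by the
 * frontend when a hash algorithm is set, otherwise fall back to the
 * core's default queue selection.
 */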
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
                               struct net_device *sb_dev)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int size = vif->hash.size;
        unsigned int num_queues;

        /* If queues are not set up internally, always return 0, as
         * the packet is going to be dropped anyway. */
        num_queues = READ_ONCE(vif->num_queues);
        if (num_queues < 1)
                return 0;

        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                return netdev_pick_tx(dev, skb, NULL) %
                       dev->real_num_tx_queues;

        xenvif_set_skb_hash(vif, skb);

        if (size == 0)
                return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

        return vif->hash.mapping[vif->hash.mapping_sel]
                                [skb_get_hash_raw(skb) % size];
}

static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues;
        u16 index;
        struct xenvif_rx_cb *cb;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if queues are not set up.
         * This handler should be called inside an RCU read section
         * so we don't need to enter it here explicitly.
         */
        num_queues = READ_ONCE(vif->num_queues);
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
                struct ethhdr *eth = (struct ethhdr *)skb->data;

                if (!xenvif_mcast_match(vif, eth->h_dest))
                        goto drop;
        }

        cb = XENVIF_RX_CB(skb);
        cb->expires = jiffies + vif->drain_timeout;

        /* If there is no hash algorithm configured then make sure there
         * is no hash information in the socket buffer; otherwise it
         * would be incorrectly forwarded to the frontend.
         */
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                skb_clear_hash(skb);

        xenvif_rx_queue_tail(queue, skb);
        xenvif_kick_thread(queue);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

237
238static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
239{
240 struct xenvif *vif = netdev_priv(dev);
241 struct xenvif_queue *queue = NULL;
242 unsigned int num_queues;
243 u64 rx_bytes = 0;
244 u64 rx_packets = 0;
245 u64 tx_bytes = 0;
246 u64 tx_packets = 0;
247 unsigned int index;
248
249 rcu_read_lock();
250 num_queues = READ_ONCE(vif->num_queues);
251
252 /* Aggregate tx and rx stats from each queue */
253 for (index = 0; index < num_queues; ++index) {
254 queue = &vif->queues[index];
255 rx_bytes += queue->stats.rx_bytes;
256 rx_packets += queue->stats.rx_packets;
257 tx_bytes += queue->stats.tx_bytes;
258 tx_packets += queue->stats.tx_packets;
259 }
260
261 rcu_read_unlock();
262
263 vif->dev->stats.rx_bytes = rx_bytes;
264 vif->dev->stats.rx_packets = rx_packets;
265 vif->dev->stats.tx_bytes = tx_bytes;
266 vif->dev->stats.tx_packets = tx_packets;
267
268 return &vif->dev->stats;
269}
270
271static void xenvif_up(struct xenvif *vif)
272{
273 struct xenvif_queue *queue = NULL;
274 unsigned int num_queues = vif->num_queues;
275 unsigned int queue_index;
276
277 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
278 queue = &vif->queues[queue_index];
279 napi_enable(&queue->napi);
280 enable_irq(queue->tx_irq);
281 if (queue->tx_irq != queue->rx_irq)
282 enable_irq(queue->rx_irq);
283 xenvif_napi_schedule_or_enable_events(queue);
284 }
285}
286
287static void xenvif_down(struct xenvif *vif)
288{
289 struct xenvif_queue *queue = NULL;
290 unsigned int num_queues = vif->num_queues;
291 unsigned int queue_index;
292
293 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
294 queue = &vif->queues[queue_index];
295 disable_irq(queue->tx_irq);
296 if (queue->tx_irq != queue->rx_irq)
297 disable_irq(queue->rx_irq);
298 napi_disable(&queue->napi);
299 del_timer_sync(&queue->credit_timeout);
300 }
301}
302
303static int xenvif_open(struct net_device *dev)
304{
305 struct xenvif *vif = netdev_priv(dev);
306 if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
307 xenvif_up(vif);
308 netif_tx_start_all_queues(dev);
309 return 0;
310}
311
312static int xenvif_close(struct net_device *dev)
313{
314 struct xenvif *vif = netdev_priv(dev);
315 if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
316 xenvif_down(vif);
317 netif_tx_stop_all_queues(dev);
318 return 0;
319}
320
321static int xenvif_change_mtu(struct net_device *dev, int mtu)
322{
323 struct xenvif *vif = netdev_priv(dev);
324 int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;
325
326 if (mtu > max)
327 return -EINVAL;
328 dev->mtu = mtu;
329 return 0;
330}
331
332static netdev_features_t xenvif_fix_features(struct net_device *dev,
333 netdev_features_t features)
334{
335 struct xenvif *vif = netdev_priv(dev);
336
337 if (!vif->can_sg)
338 features &= ~NETIF_F_SG;
339 if (~(vif->gso_mask) & GSO_BIT(TCPV4))
340 features &= ~NETIF_F_TSO;
341 if (~(vif->gso_mask) & GSO_BIT(TCPV6))
342 features &= ~NETIF_F_TSO6;
343 if (!vif->ip_csum)
344 features &= ~NETIF_F_IP_CSUM;
345 if (!vif->ipv6_csum)
346 features &= ~NETIF_F_IPV6_CSUM;
347
348 return features;
349}
350
static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif_stats, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif_stats, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif_stats, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS value.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif_stats, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int num_queues;
        int i;
        unsigned int queue_index;

        rcu_read_lock();
        num_queues = READ_ONCE(vif->num_queues);

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;
                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                        void *vif_stats = &vif->queues[queue_index].stats;
                        accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
                }
                data[i] = accum;
        }

        rcu_read_unlock();
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_select_queue = xenvif_select_queue,
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        /* Allocate a netdev with the max. supported number of queues.
         * When the guest selects the desired number, it will be updated
         * via netif_set_real_num_*_queues().
         */
        dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
                              ether_setup, xenvif_max_queues);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->domid = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;
        vif->disabled = false;
        vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
        vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

        /* Start out with no queues. */
        vif->queues = NULL;
        vif->num_queues = 0;

        vif->xdp_headroom = 0;

        spin_lock_init(&vif->lock);
        INIT_LIST_HEAD(&vif->fe_mcast_addr);

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        dev->ethtool_ops = &xenvif_ethtool_ops;

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        eth_broadcast_addr(dev->dev_addr);
        dev->dev_addr[0] &= ~0x01;

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}

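/* One-time initialisation of a queue's credit scheduler, skb queues,
 * pending ring and grant pages. Reversed by xenvif_deinit_queue().
 */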
int xenvif_init_queue(struct xenvif_queue *queue)
{
        int err, i;

        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec = 0UL;
        timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
        queue->credit_window_start = get_jiffies_64();

        queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);

        queue->pending_cons = 0;
        queue->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; ++i)
                queue->pending_ring[i] = i;

        spin_lock_init(&queue->callback_lock);
        spin_lock_init(&queue->response_lock);

        /* If ballooning is disabled, this will consume real memory, so you
         * had better enable it. The long-term solution would be to use just a
         * bunch of valid page descriptors, without dependency on ballooning.
         */
        err = gnttab_alloc_pages(MAX_PENDING_REQS,
                                 queue->mmap_pages);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PENDING_REQS; i++) {
                queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          { { .ctx = NULL,
                              .desc = i } } };
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        set_bit(VIF_STATUS_CONNECTED, &vif->status);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
}

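/* Map the control ring shared with the frontend and bind its event
 * channel to a threaded IRQ; this ring is used among other things for
 * hash configuration (see xenvif_init_hash()).
 */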
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
                        unsigned int evtchn)
{
        struct net_device *dev = vif->dev;
        void *addr;
        struct xen_netif_ctrl_sring *shared;
        RING_IDX rsp_prod, req_prod;
        int err;

        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
                                     &ring_ref, 1, &addr);
        if (err)
                goto err;

        shared = (struct xen_netif_ctrl_sring *)addr;
        rsp_prod = READ_ONCE(shared->rsp_prod);
        req_prod = READ_ONCE(shared->req_prod);

        BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);

        err = -EIO;
        if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
                goto err_unmap;

        err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
        if (err < 0)
                goto err_unmap;

        vif->ctrl_irq = err;

        xenvif_init_hash(vif);

        err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
                                   IRQF_ONESHOT, "xen-netback-ctrl", vif);
        if (err) {
                pr_warn("Could not setup irq handler for %s\n", dev->name);
                goto err_deinit;
        }

        return 0;

err_deinit:
        xenvif_deinit_hash(vif);
        unbind_from_irqhandler(vif->ctrl_irq, vif);
        vif->ctrl_irq = 0;

err_unmap:
        xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                vif->ctrl.sring);
        vif->ctrl.sring = NULL;

err:
        return err;
}

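/* Tear down a single queue: stop its kthreads, remove its NAPI
 * instance, unbind its IRQ(s) and unmap the shared data rings. Each
 * step is guarded, so this is safe on a partially connected queue.
 */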
static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
        if (queue->task) {
                kthread_stop(queue->task);
                queue->task = NULL;
        }

        if (queue->dealloc_task) {
                kthread_stop(queue->dealloc_task);
                queue->dealloc_task = NULL;
        }

        if (queue->napi.poll) {
                netif_napi_del(&queue->napi);
                queue->napi.poll = NULL;
        }

        if (queue->tx_irq) {
                unbind_from_irqhandler(queue->tx_irq, queue);
                if (queue->tx_irq == queue->rx_irq)
                        queue->rx_irq = 0;
                queue->tx_irq = 0;
        }

        if (queue->rx_irq) {
                unbind_from_irqhandler(queue->rx_irq, queue);
                queue->rx_irq = 0;
        }

        xenvif_unmap_frontend_data_rings(queue);
}

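/* Map the TX/RX data rings for a queue, start its guest-rx and
 * dealloc kthreads and bind the frontend's event channel(s). The IRQs
 * are left disabled here until xenvif_up() enables them.
 */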
int xenvif_connect_data(struct xenvif_queue *queue,
                        unsigned long tx_ring_ref,
                        unsigned long rx_ring_ref,
                        unsigned int tx_evtchn,
                        unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err;

        BUG_ON(queue->tx_irq);
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);

        err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
                                             rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);
        atomic_set(&queue->inflight_packets, 0);

        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
                       XENVIF_NAPI_WEIGHT);

        queue->stalled = true;

        task = kthread_run(xenvif_kthread_guest_rx, queue,
                           "%s-guest-rx", queue->name);
        if (IS_ERR(task))
                goto kthread_err;
        queue->task = task;

        task = kthread_run(xenvif_dealloc_kthread, queue,
                           "%s-dealloc", queue->name);
        if (IS_ERR(task))
                goto kthread_err;
        queue->dealloc_task = task;

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
                        goto err;
                queue->tx_irq = queue->rx_irq = err;
                disable_irq(queue->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
                        goto err;
                queue->tx_irq = err;
                disable_irq(queue->tx_irq);

                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
                        goto err;
                queue->rx_irq = err;
                disable_irq(queue->rx_irq);
        }

        return 0;

kthread_err:
        pr_warn("Could not allocate kthread for %s\n", queue->name);
        err = PTR_ERR(task);
err:
        xenvif_disconnect_queue(queue);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
                netif_carrier_off(dev); /* discard queued packets */
                if (netif_running(dev))
                        xenvif_down(vif);
        }
        rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        xenvif_carrier_off(vif);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];

                xenvif_disconnect_queue(queue);
        }

        xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
        if (vif->ctrl_irq) {
                xenvif_deinit_hash(vif);
                unbind_from_irqhandler(vif->ctrl_irq, vif);
                vif->ctrl_irq = 0;
        }

        if (vif->ctrl.sring) {
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                        vif->ctrl.sring);
                vif->ctrl.sring = NULL;
        }
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
        gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
        struct xenvif_queue *queues = vif->queues;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        unregister_netdev(vif->dev);
        free_netdev(vif->dev);

        for (queue_index = 0; queue_index < num_queues; ++queue_index)
                xenvif_deinit_queue(&queues[queue_index]);
        vfree(queues);

        module_put(THIS_MODULE);
}