1/*
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
4 * by any operating system that implements a compatible front end. A
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
7 *
8 * Copyright (c) 2002-2005, K A Fraser
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35#include "common.h"
36
37#include <linux/kthread.h>
38#include <linux/if_vlan.h>
39#include <linux/udp.h>
40#include <linux/highmem.h>
41#include <linux/skbuff_ref.h>
42
43#include <net/tcp.h>
44
45#include <xen/xen.h>
46#include <xen/events.h>
47#include <xen/interface/memory.h>
48#include <xen/page.h>
49
50#include <asm/xen/hypercall.h>
51
52/* Provide an option to disable split event channels at load time as
53 * event channels are a limited resource. Split event channels are
54 * enabled by default.
55 */
56bool separate_tx_rx_irq = true;
57module_param(separate_tx_rx_irq, bool, 0644);
58
59/* The time that packets can stay on the guest Rx internal queue
60 * before they are dropped.
61 */
62unsigned int rx_drain_timeout_msecs = 10000;
63module_param(rx_drain_timeout_msecs, uint, 0444);
64
65/* The length of time before the frontend is considered unresponsive
66 * because it isn't providing Rx slots.
67 */
68unsigned int rx_stall_timeout_msecs = 60000;
69module_param(rx_stall_timeout_msecs, uint, 0444);
70
71#define MAX_QUEUES_DEFAULT 8
72unsigned int xenvif_max_queues;
73module_param_named(max_queues, xenvif_max_queues, uint, 0644);
74MODULE_PARM_DESC(max_queues,
75 "Maximum number of queues per virtual interface");
76
77/*
78 * This is the maximum number of slots an skb can have. If a guest sends
79 * an skb which exceeds this limit it is considered malicious.
80 */
81#define FATAL_SKB_SLOTS_DEFAULT 20
82static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
83module_param(fatal_skb_slots, uint, 0444);
84
85/* The amount to copy out of the first guest Tx slot into the skb's
86 * linear area. If the first slot has more data, it will be mapped
87 * and put into the first frag.
88 *
89 * This is sized to avoid pulling headers from the frags for most
90 * TCP/IP packets.
91 */
92#define XEN_NETBACK_TX_COPY_LEN 128
93
94/* This is the maximum number of flows in the hash cache. */
95#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
96unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
97module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
98MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
99
100/* This module parameter tells whether data destined for xen-netfront must
101 * be placed at the XDP_PACKET_HEADROOM offset that is needed for XDP
102 * processing
103 */
104bool provides_xdp_headroom = true;
105module_param(provides_xdp_headroom, bool, 0644);
106
107static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
108 s8 status);
109
110static void make_tx_response(struct xenvif_queue *queue,
111 const struct xen_netif_tx_request *txp,
112 unsigned int extra_count,
113 s8 status);
114
115static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
116
117static inline int tx_work_todo(struct xenvif_queue *queue);
118
119static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
120 u16 idx)
121{
122 return page_to_pfn(queue->mmap_pages[idx]);
123}
124
125static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
126 u16 idx)
127{
128 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
129}
130
131#define callback_param(vif, pending_idx) \
132 (vif->pending_tx_info[pending_idx].callback_struct)
133
134/* Find the containing queue's structure from a pointer into its pending_tx_info array
135 */
136static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
137{
138 u16 pending_idx = ubuf->desc;
139 struct pending_tx_info *temp =
140 container_of(ubuf, struct pending_tx_info, callback_struct);
141 return container_of(temp - pending_idx,
142 struct xenvif_queue,
143 pending_tx_info[0]);
144}
145
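/* While a Tx slot is in flight, its pending ring index is stashed in the
 * frag's offset field; xenvif_fill_frags() later overwrites the frag with
 * the real page, offset and size.
 */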
146static u16 frag_get_pending_idx(skb_frag_t *frag)
147{
148 return (u16)skb_frag_off(frag);
149}
150
151static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
152{
153 skb_frag_off_set(frag, pending_idx);
154}
155
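/* Wrap a free-running index onto the fixed-size pending ring; this relies
 * on MAX_PENDING_REQS being a power of two.
 */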
156static inline pending_ring_idx_t pending_index(unsigned i)
157{
158 return i & (MAX_PENDING_REQS-1);
159}
160
161void xenvif_kick_thread(struct xenvif_queue *queue)
162{
163 wake_up(&queue->wq);
164}
165
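/* If the frontend has queued further Tx requests, reschedule NAPI;
 * otherwise signal end-of-interrupt (lateeoi) for any pending Tx/common
 * event so the event channel can fire again.
 */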
166void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
167{
168 int more_to_do;
169
170 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
171
172 if (more_to_do)
173 napi_schedule(&queue->napi);
174 else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
175 &queue->eoi_pending) &
176 (NETBK_TX_EOI | NETBK_COMMON_EOI))
177 xen_irq_lateeoi(queue->tx_irq, 0);
178}
179
180static void tx_add_credit(struct xenvif_queue *queue)
181{
182 unsigned long max_burst, max_credit;
183
184 /*
185 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
186 * Otherwise the interface can seize up due to insufficient credit.
187 */
188 max_burst = max(131072UL, queue->credit_bytes);
189
190 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
191 max_credit = queue->remaining_credit + queue->credit_bytes;
192 if (max_credit < queue->remaining_credit)
193 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
194
195 queue->remaining_credit = min(max_credit, max_burst);
196 queue->rate_limited = false;
197}
198
199void xenvif_tx_credit_callback(struct timer_list *t)
200{
201 struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
202 tx_add_credit(queue);
203 xenvif_napi_schedule_or_enable_events(queue);
204}
205
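/* Answer every remaining slot of a bad request with an error response and
 * advance the request consumer past them.
 */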
206static void xenvif_tx_err(struct xenvif_queue *queue,
207 struct xen_netif_tx_request *txp,
208 unsigned int extra_count, RING_IDX end)
209{
210 RING_IDX cons = queue->tx.req_cons;
211
212 do {
213 make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
214 if (cons == end)
215 break;
216 RING_COPY_REQUEST(&queue->tx, cons++, txp);
217 extra_count = 0; /* only the first frag can have extras */
218 } while (1);
219 queue->tx.req_cons = cons;
220}
221
222static void xenvif_fatal_tx_err(struct xenvif *vif)
223{
224 netdev_err(vif->dev, "fatal error; disabling device\n");
225 vif->disabled = true;
226 /* Disable the vif from queue 0's kthread */
227 if (vif->num_queues)
228 xenvif_kick_thread(&vif->queues[0]);
229}
230
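/* Walk the chain of slots that follows @first when XEN_NETTXF_more_data is
 * set, copying them into @txp. Returns the number of extra slots consumed
 * or a negative error; serious violations also disable the vif via
 * xenvif_fatal_tx_err().
 */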
231static int xenvif_count_requests(struct xenvif_queue *queue,
232 struct xen_netif_tx_request *first,
233 unsigned int extra_count,
234 struct xen_netif_tx_request *txp,
235 int work_to_do)
236{
237 RING_IDX cons = queue->tx.req_cons;
238 int slots = 0;
239 int drop_err = 0;
240 int more_data;
241
242 if (!(first->flags & XEN_NETTXF_more_data))
243 return 0;
244
245 do {
246 struct xen_netif_tx_request dropped_tx = { 0 };
247
248 if (slots >= work_to_do) {
249 netdev_err(queue->vif->dev,
250 "Asked for %d slots but exceeds this limit\n",
251 work_to_do);
252 xenvif_fatal_tx_err(queue->vif);
253 return -ENODATA;
254 }
255
256 /* This guest is really using too many slots and
257 * is considered malicious.
258 */
259 if (unlikely(slots >= fatal_skb_slots)) {
260 netdev_err(queue->vif->dev,
261 "Malicious frontend using %d slots, threshold %u\n",
262 slots, fatal_skb_slots);
263 xenvif_fatal_tx_err(queue->vif);
264 return -E2BIG;
265 }
266
267 /* The Xen network protocol had an implicit dependency on
268 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
269 * the historical MAX_SKB_FRAGS value 18 to honor the
270 * same behavior as before. Any packet using more than
271 * 18 slots but fewer than fatal_skb_slots slots is
272 * dropped.
273 */
274 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
275 if (net_ratelimit())
276 netdev_dbg(queue->vif->dev,
277 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
278 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
279 drop_err = -E2BIG;
280 }
281
282 if (drop_err)
283 txp = &dropped_tx;
284
285 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
286
287 /* If the guest submitted a frame >= 64 KiB then
288 * first->size overflowed and following slots will
289 * appear to be larger than the frame.
290 *
291 * This cannot be a fatal error as there are buggy
292 * frontends that do this.
293 *
294 * Consume all slots and drop the packet.
295 */
296 if (!drop_err && txp->size > first->size) {
297 if (net_ratelimit())
298 netdev_dbg(queue->vif->dev,
299 "Invalid tx request, slot size %u > remaining size %u\n",
300 txp->size, first->size);
301 drop_err = -EIO;
302 }
303
304 first->size -= txp->size;
305 slots++;
306
307 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
308 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
309 txp->offset, txp->size);
310 xenvif_fatal_tx_err(queue->vif);
311 return -EINVAL;
312 }
313
314 more_data = txp->flags & XEN_NETTXF_more_data;
315
316 if (!drop_err)
317 txp++;
318
319 } while (more_data);
320
321 if (drop_err) {
322 xenvif_tx_err(queue, first, extra_count, cons + slots);
323 return drop_err;
324 }
325
326 return slots;
327}
328
329
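/* Per-skb control data kept in skb->cb: the pending indices of the grant
 * copies used for the linear area, how many there are, and a bitmask of
 * copies that had to be split across a local page boundary.
 */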
330struct xenvif_tx_cb {
331 u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
332 u8 copy_count;
333 u32 split_mask;
334};
335
336#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
337#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
338#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
339
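/* Prepare a grant-map operation for one Tx slot and record the originating
 * request so that a response can be sent when the slot is released.
 */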
340static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
341 u16 pending_idx,
342 struct xen_netif_tx_request *txp,
343 unsigned int extra_count,
344 struct gnttab_map_grant_ref *mop)
345{
346 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
347 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
348 GNTMAP_host_map | GNTMAP_readonly,
349 txp->gref, queue->vif->domid);
350
351 memcpy(&queue->pending_tx_info[pending_idx].req, txp,
352 sizeof(*txp));
353 queue->pending_tx_info[pending_idx].extra_count = extra_count;
354}
355
356static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
357{
358 struct sk_buff *skb =
359 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
360 GFP_ATOMIC | __GFP_NOWARN);
361
362 BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
363 if (unlikely(skb == NULL))
364 return NULL;
365
366 /* Packets passed to netif_rx() must have some headroom. */
367 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
368
369 /* Initialize it here to avoid later surprises */
370 skb_shinfo(skb)->destructor_arg = NULL;
371
372 return skb;
373}
374
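/* Turn the Tx requests of one packet into grant operations: copy ops for
 * the data_len bytes that go into the skb's linear area, and map ops for
 * the remaining slots as frags, spilling into the frag_list skb @nskb when
 * more than MAX_SKB_FRAGS slots are left.
 */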
375static void xenvif_get_requests(struct xenvif_queue *queue,
376 struct sk_buff *skb,
377 struct xen_netif_tx_request *first,
378 struct xen_netif_tx_request *txfrags,
379 unsigned *copy_ops,
380 unsigned *map_ops,
381 unsigned int frag_overflow,
382 struct sk_buff *nskb,
383 unsigned int extra_count,
384 unsigned int data_len)
385{
386 struct skb_shared_info *shinfo = skb_shinfo(skb);
387 skb_frag_t *frags = shinfo->frags;
388 u16 pending_idx;
389 pending_ring_idx_t index;
390 unsigned int nr_slots;
391 struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
392 struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
393 struct xen_netif_tx_request *txp = first;
394
395 nr_slots = shinfo->nr_frags + frag_overflow + 1;
396
397 copy_count(skb) = 0;
398 XENVIF_TX_CB(skb)->split_mask = 0;
399
400 /* Create copy ops for exactly data_len bytes into the skb head. */
401 __skb_put(skb, data_len);
402 while (data_len > 0) {
403 int amount = data_len > txp->size ? txp->size : data_len;
404 bool split = false;
405
406 cop->source.u.ref = txp->gref;
407 cop->source.domid = queue->vif->domid;
408 cop->source.offset = txp->offset;
409
410 cop->dest.domid = DOMID_SELF;
411 cop->dest.offset = (offset_in_page(skb->data +
412 skb_headlen(skb) -
413 data_len)) & ~XEN_PAGE_MASK;
414 cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
415 - data_len);
416
417 /* Don't cross local page boundary! */
418 if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
419 amount = XEN_PAGE_SIZE - cop->dest.offset;
420 XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
421 split = true;
422 }
423
424 cop->len = amount;
425 cop->flags = GNTCOPY_source_gref;
426
427 index = pending_index(queue->pending_cons);
428 pending_idx = queue->pending_ring[index];
429 callback_param(queue, pending_idx).ctx = NULL;
430 copy_pending_idx(skb, copy_count(skb)) = pending_idx;
431 if (!split)
432 copy_count(skb)++;
433
434 cop++;
435 data_len -= amount;
436
437 if (amount == txp->size) {
438 /* The copy op covered the full tx_request */
439
440 memcpy(&queue->pending_tx_info[pending_idx].req,
441 txp, sizeof(*txp));
442 queue->pending_tx_info[pending_idx].extra_count =
443 (txp == first) ? extra_count : 0;
444
445 if (txp == first)
446 txp = txfrags;
447 else
448 txp++;
449 queue->pending_cons++;
450 nr_slots--;
451 } else {
452 /* The copy op partially covered the tx_request.
453 * The remainder will be mapped or copied in the next
454 * iteration.
455 */
456 txp->offset += amount;
457 txp->size -= amount;
458 }
459 }
460
461 for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
462 nr_slots--) {
463 if (unlikely(!txp->size)) {
464 make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
465 ++txp;
466 continue;
467 }
468
469 index = pending_index(queue->pending_cons++);
470 pending_idx = queue->pending_ring[index];
471 xenvif_tx_create_map_op(queue, pending_idx, txp,
472 txp == first ? extra_count : 0, gop);
473 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
474 ++shinfo->nr_frags;
475 ++gop;
476
477 if (txp == first)
478 txp = txfrags;
479 else
480 txp++;
481 }
482
483 if (nr_slots > 0) {
484
485 shinfo = skb_shinfo(nskb);
486 frags = shinfo->frags;
487
488 for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
489 if (unlikely(!txp->size)) {
490 make_tx_response(queue, txp, 0,
491 XEN_NETIF_RSP_OKAY);
492 continue;
493 }
494
495 index = pending_index(queue->pending_cons++);
496 pending_idx = queue->pending_ring[index];
497 xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
498 gop);
499 frag_set_pending_idx(&frags[shinfo->nr_frags],
500 pending_idx);
501 ++shinfo->nr_frags;
502 ++gop;
503 }
504
505 if (shinfo->nr_frags) {
506 skb_shinfo(skb)->frag_list = nskb;
507 nskb = NULL;
508 }
509 }
510
511 if (nskb) {
512 /* A frag_list skb was allocated but it is no longer needed
513 * because enough slots were converted to copy ops above or some
514 * were empty.
515 */
516 kfree_skb(nskb);
517 }
518
519 (*copy_ops) = cop - queue->tx_copy_ops;
520 (*map_ops) = gop - queue->tx_map_ops;
521}
522
523static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
524 u16 pending_idx,
525 grant_handle_t handle)
526{
527 if (unlikely(queue->grant_tx_handle[pending_idx] !=
528 NETBACK_INVALID_HANDLE)) {
529 netdev_err(queue->vif->dev,
530 "Trying to overwrite active handle! pending_idx: 0x%x\n",
531 pending_idx);
532 BUG();
533 }
534 queue->grant_tx_handle[pending_idx] = handle;
535}
536
537static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
538 u16 pending_idx)
539{
540 if (unlikely(queue->grant_tx_handle[pending_idx] ==
541 NETBACK_INVALID_HANDLE)) {
542 netdev_err(queue->vif->dev,
543 "Trying to unmap invalid handle! pending_idx: 0x%x\n",
544 pending_idx);
545 BUG();
546 }
547 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
548}
549
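/* Check the completion status of the grant copy (header) and grant map
 * (frag) operations for @skb, releasing or unmapping slots as needed.
 * Returns 0 on success or the first error encountered.
 */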
550static int xenvif_tx_check_gop(struct xenvif_queue *queue,
551 struct sk_buff *skb,
552 struct gnttab_map_grant_ref **gopp_map,
553 struct gnttab_copy **gopp_copy)
554{
555 struct gnttab_map_grant_ref *gop_map = *gopp_map;
556 u16 pending_idx;
557 /* This always points to the shinfo of the skb being checked, which
558 * could be either the first or the one on the frag_list
559 */
560 struct skb_shared_info *shinfo = skb_shinfo(skb);
561 /* If this is non-NULL, we are currently checking the frag_list skb, and
562 * this points to the shinfo of the first one
563 */
564 struct skb_shared_info *first_shinfo = NULL;
565 int nr_frags = shinfo->nr_frags;
566 const bool sharedslot = nr_frags &&
567 frag_get_pending_idx(&shinfo->frags[0]) ==
568 copy_pending_idx(skb, copy_count(skb) - 1);
569 int i, err = 0;
570
571 for (i = 0; i < copy_count(skb); i++) {
572 int newerr;
573
574 /* Check status of header. */
575 pending_idx = copy_pending_idx(skb, i);
576
577 newerr = (*gopp_copy)->status;
578
579 /* Split copies need to be handled together. */
580 if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
581 (*gopp_copy)++;
582 if (!newerr)
583 newerr = (*gopp_copy)->status;
584 }
585 if (likely(!newerr)) {
586 /* The first frag might still have this slot mapped */
587 if (i < copy_count(skb) - 1 || !sharedslot)
588 xenvif_idx_release(queue, pending_idx,
589 XEN_NETIF_RSP_OKAY);
590 } else {
591 err = newerr;
592 if (net_ratelimit())
593 netdev_dbg(queue->vif->dev,
594 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
595 (*gopp_copy)->status,
596 pending_idx,
597 (*gopp_copy)->source.u.ref);
598 /* The first frag might still have this slot mapped */
599 if (i < copy_count(skb) - 1 || !sharedslot)
600 xenvif_idx_release(queue, pending_idx,
601 XEN_NETIF_RSP_ERROR);
602 }
603 (*gopp_copy)++;
604 }
605
606check_frags:
607 for (i = 0; i < nr_frags; i++, gop_map++) {
608 int j, newerr;
609
610 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
611
612 /* Check error status: if okay then remember grant handle. */
613 newerr = gop_map->status;
614
615 if (likely(!newerr)) {
616 xenvif_grant_handle_set(queue,
617 pending_idx,
618 gop_map->handle);
619 /* Had a previous error? Invalidate this fragment. */
620 if (unlikely(err)) {
621 xenvif_idx_unmap(queue, pending_idx);
622 /* If the mapping of the first frag was OK, but
623 * the header's copy failed, and they are
624 * sharing a slot, send an error
625 */
626 if (i == 0 && !first_shinfo && sharedslot)
627 xenvif_idx_release(queue, pending_idx,
628 XEN_NETIF_RSP_ERROR);
629 else
630 xenvif_idx_release(queue, pending_idx,
631 XEN_NETIF_RSP_OKAY);
632 }
633 continue;
634 }
635
636 /* Error on this fragment: respond to client with an error. */
637 if (net_ratelimit())
638 netdev_dbg(queue->vif->dev,
639 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
640 i,
641 gop_map->status,
642 pending_idx,
643 gop_map->ref);
644
645 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
646
647 /* Not the first error? Preceding frags already invalidated. */
648 if (err)
649 continue;
650
651 /* Invalidate preceding fragments of this skb. */
652 for (j = 0; j < i; j++) {
653 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
654 xenvif_idx_unmap(queue, pending_idx);
655 xenvif_idx_release(queue, pending_idx,
656 XEN_NETIF_RSP_OKAY);
657 }
658
659 /* And if we found the error while checking the frag_list, unmap
660 * the first skb's frags
661 */
662 if (first_shinfo) {
663 for (j = 0; j < first_shinfo->nr_frags; j++) {
664 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
665 xenvif_idx_unmap(queue, pending_idx);
666 xenvif_idx_release(queue, pending_idx,
667 XEN_NETIF_RSP_OKAY);
668 }
669 }
670
671 /* Remember the error: invalidate all subsequent fragments. */
672 err = newerr;
673 }
674
675 if (skb_has_frag_list(skb) && !first_shinfo) {
676 first_shinfo = shinfo;
677 shinfo = skb_shinfo(shinfo->frag_list);
678 nr_frags = shinfo->nr_frags;
679
680 goto check_frags;
681 }
682
683 *gopp_map = gop_map;
684 return err;
685}
686
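/* Replace the pending-index placeholders in the skb's frags with the
 * grant-mapped pages and chain the zerocopy callback structures together.
 */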
687static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
688{
689 struct skb_shared_info *shinfo = skb_shinfo(skb);
690 int nr_frags = shinfo->nr_frags;
691 int i;
692 u16 prev_pending_idx = INVALID_PENDING_IDX;
693
694 for (i = 0; i < nr_frags; i++) {
695 skb_frag_t *frag = shinfo->frags + i;
696 struct xen_netif_tx_request *txp;
697 struct page *page;
698 u16 pending_idx;
699
700 pending_idx = frag_get_pending_idx(frag);
701
702 /* If this is not the first frag, chain it to the previous one */
703 if (prev_pending_idx == INVALID_PENDING_IDX)
704 skb_shinfo(skb)->destructor_arg =
705 &callback_param(queue, pending_idx);
706 else
707 callback_param(queue, prev_pending_idx).ctx =
708 &callback_param(queue, pending_idx);
709
710 callback_param(queue, pending_idx).ctx = NULL;
711 prev_pending_idx = pending_idx;
712
713 txp = &queue->pending_tx_info[pending_idx].req;
714 page = virt_to_page((void *)idx_to_kaddr(queue, pending_idx));
715 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
716 skb->len += txp->size;
717 skb->data_len += txp->size;
718 skb->truesize += txp->size;
719
720 /* Take an extra reference to offset network stack's put_page */
721 get_page(queue->mmap_pages[pending_idx]);
722 }
723}
724
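/* Consume the chain of extra-info slots that follows a Tx request. Returns
 * the remaining work_to_do, or a negative error (fatal for the vif) if the
 * chain is malformed.
 */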
725static int xenvif_get_extras(struct xenvif_queue *queue,
726 struct xen_netif_extra_info *extras,
727 unsigned int *extra_count,
728 int work_to_do)
729{
730 struct xen_netif_extra_info extra;
731 RING_IDX cons = queue->tx.req_cons;
732
733 do {
734 if (unlikely(work_to_do-- <= 0)) {
735 netdev_err(queue->vif->dev, "Missing extra info\n");
736 xenvif_fatal_tx_err(queue->vif);
737 return -EBADR;
738 }
739
740 RING_COPY_REQUEST(&queue->tx, cons, &extra);
741
742 queue->tx.req_cons = ++cons;
743 (*extra_count)++;
744
745 if (unlikely(!extra.type ||
746 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
747 netdev_err(queue->vif->dev,
748 "Invalid extra type: %d\n", extra.type);
749 xenvif_fatal_tx_err(queue->vif);
750 return -EINVAL;
751 }
752
753 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
754 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
755
756 return work_to_do;
757}
758
759static int xenvif_set_skb_gso(struct xenvif *vif,
760 struct sk_buff *skb,
761 struct xen_netif_extra_info *gso)
762{
763 if (!gso->u.gso.size) {
764 netdev_err(vif->dev, "GSO size must not be zero.\n");
765 xenvif_fatal_tx_err(vif);
766 return -EINVAL;
767 }
768
769 switch (gso->u.gso.type) {
770 case XEN_NETIF_GSO_TYPE_TCPV4:
771 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
772 break;
773 case XEN_NETIF_GSO_TYPE_TCPV6:
774 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
775 break;
776 default:
777 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
778 xenvif_fatal_tx_err(vif);
779 return -EINVAL;
780 }
781
782 skb_shinfo(skb)->gso_size = gso->u.gso.size;
783 /* gso_segs will be calculated later */
784
785 return 0;
786}
787
788static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
789{
790 bool recalculate_partial_csum = false;
791
792 /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
793 * peers can fail to set NETRXF_csum_blank when sending a GSO
794 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
795 * recalculate the partial checksum.
796 */
797 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
798 queue->stats.rx_gso_checksum_fixup++;
799 skb->ip_summed = CHECKSUM_PARTIAL;
800 recalculate_partial_csum = true;
801 }
802
803 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
804 if (skb->ip_summed != CHECKSUM_PARTIAL)
805 return 0;
806
807 return skb_checksum_setup(skb, recalculate_partial_csum);
808}
809
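/* Credit-based rate limiting: replenish credit if the window has elapsed,
 * and return true (arming the credit timer) if there is still not enough
 * credit to send @size bytes now.
 */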
810static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
811{
812 u64 now = get_jiffies_64();
813 u64 next_credit = queue->credit_window_start +
814 msecs_to_jiffies(queue->credit_usec / 1000);
815
816 /* Timer could already be pending in rare cases. */
817 if (timer_pending(&queue->credit_timeout)) {
818 queue->rate_limited = true;
819 return true;
820 }
821
822 /* Passed the point where we can replenish credit? */
823 if (time_after_eq64(now, next_credit)) {
824 queue->credit_window_start = now;
825 tx_add_credit(queue);
826 }
827
828 /* Still too big to send right now? Set a callback. */
829 if (size > queue->remaining_credit) {
830 mod_timer(&queue->credit_timeout,
831 next_credit);
832 queue->credit_window_start = next_credit;
833 queue->rate_limited = true;
834
835 return true;
836 }
837
838 return false;
839}
840
841/* No locking is required in xenvif_mcast_add/del() as they are
842 * only ever invoked from NAPI poll. An RCU list is used because
843 * xenvif_mcast_match() is called asynchronously, during start_xmit.
844 */
845
846static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
847{
848 struct xenvif_mcast_addr *mcast;
849
850 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
851 if (net_ratelimit())
852 netdev_err(vif->dev,
853 "Too many multicast addresses\n");
854 return -ENOSPC;
855 }
856
857 mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
858 if (!mcast)
859 return -ENOMEM;
860
861 ether_addr_copy(mcast->addr, addr);
862 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
863 vif->fe_mcast_count++;
864
865 return 0;
866}
867
868static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
869{
870 struct xenvif_mcast_addr *mcast;
871
872 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
873 if (ether_addr_equal(addr, mcast->addr)) {
874 --vif->fe_mcast_count;
875 list_del_rcu(&mcast->entry);
876 kfree_rcu(mcast, rcu);
877 break;
878 }
879 }
880}
881
882bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
883{
884 struct xenvif_mcast_addr *mcast;
885
886 rcu_read_lock();
887 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
888 if (ether_addr_equal(addr, mcast->addr)) {
889 rcu_read_unlock();
890 return true;
891 }
892 }
893 rcu_read_unlock();
894
895 return false;
896}
897
898void xenvif_mcast_addr_list_free(struct xenvif *vif)
899{
900 /* No need for locking or RCU here. NAPI poll and TX queue
901 * are stopped.
902 */
903 while (!list_empty(&vif->fe_mcast_addr)) {
904 struct xenvif_mcast_addr *mcast;
905
906 mcast = list_first_entry(&vif->fe_mcast_addr,
907 struct xenvif_mcast_addr,
908 entry);
909 --vif->fe_mcast_count;
910 list_del(&mcast->entry);
911 kfree(mcast);
912 }
913}
914
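/* Pull Tx requests off the shared ring (up to @budget packets), validate
 * them, allocate skbs and build the grant copy/map operations needed to
 * bring the packet data over from the frontend.
 */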
915static void xenvif_tx_build_gops(struct xenvif_queue *queue,
916 int budget,
917 unsigned *copy_ops,
918 unsigned *map_ops)
919{
920 struct sk_buff *skb, *nskb;
921 int ret;
922 unsigned int frag_overflow;
923
924 while (skb_queue_len(&queue->tx_queue) < budget) {
925 struct xen_netif_tx_request txreq;
926 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
927 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
928 unsigned int extra_count;
929 RING_IDX idx;
930 int work_to_do;
931 unsigned int data_len;
932
933 if (queue->tx.sring->req_prod - queue->tx.req_cons >
934 XEN_NETIF_TX_RING_SIZE) {
935 netdev_err(queue->vif->dev,
936 "Impossible number of requests. "
937 "req_prod %d, req_cons %d, size %ld\n",
938 queue->tx.sring->req_prod, queue->tx.req_cons,
939 XEN_NETIF_TX_RING_SIZE);
940 xenvif_fatal_tx_err(queue->vif);
941 break;
942 }
943
944 work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
945 if (!work_to_do)
946 break;
947
948 idx = queue->tx.req_cons;
949 rmb(); /* Ensure that we see the request before we copy it. */
950 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
951
952 /* Credit-based scheduling. */
953 if (txreq.size > queue->remaining_credit &&
954 tx_credit_exceeded(queue, txreq.size))
955 break;
956
957 queue->remaining_credit -= txreq.size;
958
959 work_to_do--;
960 queue->tx.req_cons = ++idx;
961
962 memset(extras, 0, sizeof(extras));
963 extra_count = 0;
964 if (txreq.flags & XEN_NETTXF_extra_info) {
965 work_to_do = xenvif_get_extras(queue, extras,
966 &extra_count,
967 work_to_do);
968 idx = queue->tx.req_cons;
969 if (unlikely(work_to_do < 0))
970 break;
971 }
972
973 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
974 struct xen_netif_extra_info *extra;
975
976 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
977 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
978
979 make_tx_response(queue, &txreq, extra_count,
980 (ret == 0) ?
981 XEN_NETIF_RSP_OKAY :
982 XEN_NETIF_RSP_ERROR);
983 continue;
984 }
985
986 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
987 struct xen_netif_extra_info *extra;
988
989 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
990 xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
991
992 make_tx_response(queue, &txreq, extra_count,
993 XEN_NETIF_RSP_OKAY);
994 continue;
995 }
996
997 data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
998 XEN_NETBACK_TX_COPY_LEN : txreq.size;
999
1000 ret = xenvif_count_requests(queue, &txreq, extra_count,
1001 txfrags, work_to_do);
1002
1003 if (unlikely(ret < 0))
1004 break;
1005
1006 idx += ret;
1007
1008 if (unlikely(txreq.size < ETH_HLEN)) {
1009 netdev_dbg(queue->vif->dev,
1010 "Bad packet size: %d\n", txreq.size);
1011 xenvif_tx_err(queue, &txreq, extra_count, idx);
1012 break;
1013 }
1014
1015 /* The payload mustn't cross a page boundary, as it mustn't be fragmented. */
1016 if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
1017 netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
1018 txreq.offset, txreq.size);
1019 xenvif_fatal_tx_err(queue->vif);
1020 break;
1021 }
1022
1023 if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
1024 data_len = txreq.size;
1025
1026 skb = xenvif_alloc_skb(data_len);
1027 if (unlikely(skb == NULL)) {
1028 netdev_dbg(queue->vif->dev,
1029 "Can't allocate a skb in start_xmit.\n");
1030 xenvif_tx_err(queue, &txreq, extra_count, idx);
1031 break;
1032 }
1033
1034 skb_shinfo(skb)->nr_frags = ret;
1035 /* At this point shinfo->nr_frags is in fact the number of
1036 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1037 */
1038 frag_overflow = 0;
1039 nskb = NULL;
1040 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1041 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1042 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1043 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1044 nskb = xenvif_alloc_skb(0);
1045 if (unlikely(nskb == NULL)) {
1046 skb_shinfo(skb)->nr_frags = 0;
1047 kfree_skb(skb);
1048 xenvif_tx_err(queue, &txreq, extra_count, idx);
1049 if (net_ratelimit())
1050 netdev_err(queue->vif->dev,
1051 "Can't allocate the frag_list skb.\n");
1052 break;
1053 }
1054 }
1055
1056 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1057 struct xen_netif_extra_info *gso;
1058 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1059
1060 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1061 /* Failure in xenvif_set_skb_gso is fatal. */
1062 skb_shinfo(skb)->nr_frags = 0;
1063 kfree_skb(skb);
1064 kfree_skb(nskb);
1065 break;
1066 }
1067 }
1068
1069 if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
1070 struct xen_netif_extra_info *extra;
1071 enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
1072
1073 extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
1074
1075 switch (extra->u.hash.type) {
1076 case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
1077 case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
1078 type = PKT_HASH_TYPE_L3;
1079 break;
1080
1081 case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
1082 case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
1083 type = PKT_HASH_TYPE_L4;
1084 break;
1085
1086 default:
1087 break;
1088 }
1089
1090 if (type != PKT_HASH_TYPE_NONE)
1091 skb_set_hash(skb,
1092 *(u32 *)extra->u.hash.value,
1093 type);
1094 }
1095
1096 xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
1097 map_ops, frag_overflow, nskb, extra_count,
1098 data_len);
1099
1100 __skb_queue_tail(&queue->tx_queue, skb);
1101
1102 queue->tx.req_cons = idx;
1103 }
1104
1105 return;
1106}
1107
1108/* Consolidate an skb with a frag_list into a brand new one with local pages
1109 * on its frags. Returns 0 on success, or -ENOMEM if new pages can't be allocated.
1110 */
1111static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1112{
1113 unsigned int offset = skb_headlen(skb);
1114 skb_frag_t frags[MAX_SKB_FRAGS];
1115 int i, f;
1116 struct ubuf_info *uarg;
1117 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1118
1119 queue->stats.tx_zerocopy_sent += 2;
1120 queue->stats.tx_frag_overflow++;
1121
1122 xenvif_fill_frags(queue, nskb);
1123 /* Subtract the frags' size; we will correct it later */
1124 skb->truesize -= skb->data_len;
1125 skb->len += nskb->len;
1126 skb->data_len += nskb->len;
1127
1128 /* create a brand new frags array and coalesce there */
1129 for (i = 0; offset < skb->len; i++) {
1130 struct page *page;
1131 unsigned int len;
1132
1133 BUG_ON(i >= MAX_SKB_FRAGS);
1134 page = alloc_page(GFP_ATOMIC);
1135 if (!page) {
1136 int j;
1137 skb->truesize += skb->data_len;
1138 for (j = 0; j < i; j++)
1139 put_page(skb_frag_page(&frags[j]));
1140 return -ENOMEM;
1141 }
1142
1143 if (offset + PAGE_SIZE < skb->len)
1144 len = PAGE_SIZE;
1145 else
1146 len = skb->len - offset;
1147 if (skb_copy_bits(skb, offset, page_address(page), len))
1148 BUG();
1149
1150 offset += len;
1151 skb_frag_fill_page_desc(&frags[i], page, 0, len);
1152 }
1153
1154 /* Release all the original (foreign) frags. */
1155 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1156 skb_frag_unref(skb, f);
1157 uarg = skb_shinfo(skb)->destructor_arg;
1158 /* increase inflight counter to offset decrement in callback */
1159 atomic_inc(&queue->inflight_packets);
1160 uarg->ops->complete(NULL, uarg, true);
1161 skb_shinfo(skb)->destructor_arg = NULL;
1162
1163 /* Fill the skb with the new (local) frags. */
1164 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1165 skb_shinfo(skb)->nr_frags = i;
1166 skb->truesize += i * PAGE_SIZE;
1167
1168 return 0;
1169}
1170
1171static int xenvif_tx_submit(struct xenvif_queue *queue)
1172{
1173 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1174 struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1175 struct sk_buff *skb;
1176 int work_done = 0;
1177
1178 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1179 struct xen_netif_tx_request *txp;
1180 u16 pending_idx;
1181
1182 pending_idx = copy_pending_idx(skb, 0);
1183 txp = &queue->pending_tx_info[pending_idx].req;
1184
1185 /* Check the remap error code. */
1186 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1187 /* If there was an error, xenvif_tx_check_gop is
1188 * expected to release all the frags which were mapped,
1189 * so kfree_skb shouldn't do it again
1190 */
1191 skb_shinfo(skb)->nr_frags = 0;
1192 if (skb_has_frag_list(skb)) {
1193 struct sk_buff *nskb =
1194 skb_shinfo(skb)->frag_list;
1195 skb_shinfo(nskb)->nr_frags = 0;
1196 }
1197 kfree_skb(skb);
1198 continue;
1199 }
1200
1201 if (txp->flags & XEN_NETTXF_csum_blank)
1202 skb->ip_summed = CHECKSUM_PARTIAL;
1203 else if (txp->flags & XEN_NETTXF_data_validated)
1204 skb->ip_summed = CHECKSUM_UNNECESSARY;
1205
1206 xenvif_fill_frags(queue, skb);
1207
1208 if (unlikely(skb_has_frag_list(skb))) {
1209 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1210 xenvif_skb_zerocopy_prepare(queue, nskb);
1211 if (xenvif_handle_frag_list(queue, skb)) {
1212 if (net_ratelimit())
1213 netdev_err(queue->vif->dev,
1214 "Not enough memory to consolidate frag_list!\n");
1215 xenvif_skb_zerocopy_prepare(queue, skb);
1216 kfree_skb(skb);
1217 continue;
1218 }
1219 /* Copied all the bits from the frag list -- free it. */
1220 skb_frag_list_init(skb);
1221 kfree_skb(nskb);
1222 }
1223
1224 skb->dev = queue->vif->dev;
1225 skb->protocol = eth_type_trans(skb, skb->dev);
1226 skb_reset_network_header(skb);
1227
1228 if (checksum_setup(queue, skb)) {
1229 netdev_dbg(queue->vif->dev,
1230 "Can't setup checksum in net_tx_action\n");
1231 /* We have to set this flag to trigger the callback */
1232 if (skb_shinfo(skb)->destructor_arg)
1233 xenvif_skb_zerocopy_prepare(queue, skb);
1234 kfree_skb(skb);
1235 continue;
1236 }
1237
1238 skb_probe_transport_header(skb);
1239
1240 /* If the packet is GSO then we will have just set up the
1241 * transport header offset in checksum_setup so it's now
1242 * straightforward to calculate gso_segs.
1243 */
1244 if (skb_is_gso(skb)) {
1245 int mss, hdrlen;
1246
1247 /* GSO implies having the L4 header. */
1248 WARN_ON_ONCE(!skb_transport_header_was_set(skb));
1249 if (unlikely(!skb_transport_header_was_set(skb))) {
1250 kfree_skb(skb);
1251 continue;
1252 }
1253
1254 mss = skb_shinfo(skb)->gso_size;
1255 hdrlen = skb_tcp_all_headers(skb);
1256
1257 skb_shinfo(skb)->gso_segs =
1258 DIV_ROUND_UP(skb->len - hdrlen, mss);
1259 }
1260
1261 queue->stats.rx_bytes += skb->len;
1262 queue->stats.rx_packets++;
1263
1264 work_done++;
1265
1266 /* Set this flag right before netif_receive_skb, otherwise
1267 * someone might think this packet already left netback, and
1268 * do a skb_copy_ubufs while we are still in control of the
1269 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
1270 */
1271 if (skb_shinfo(skb)->destructor_arg) {
1272 xenvif_skb_zerocopy_prepare(queue, skb);
1273 queue->stats.tx_zerocopy_sent++;
1274 }
1275
1276 netif_receive_skb(skb);
1277 }
1278
1279 return work_done;
1280}
1281
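/* Zerocopy completion callback: push every pending index in the ubuf chain
 * onto the dealloc ring for the dealloc thread to unmap, and update the
 * zerocopy statistics.
 */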
1282static void xenvif_zerocopy_callback(struct sk_buff *skb,
1283 struct ubuf_info *ubuf_base,
1284 bool zerocopy_success)
1285{
1286 unsigned long flags;
1287 pending_ring_idx_t index;
1288 struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
1289 struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1290
1291 /* This is the only place where we grab this lock, to protect callbacks
1292 * from each other.
1293 */
1294 spin_lock_irqsave(&queue->callback_lock, flags);
1295 do {
1296 u16 pending_idx = ubuf->desc;
1297 ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
1298 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1299 MAX_PENDING_REQS);
1300 index = pending_index(queue->dealloc_prod);
1301 queue->dealloc_ring[index] = pending_idx;
1302 /* Sync with xenvif_tx_dealloc_action:
1303 * insert idx then incr producer.
1304 */
1305 smp_wmb();
1306 queue->dealloc_prod++;
1307 } while (ubuf);
1308 spin_unlock_irqrestore(&queue->callback_lock, flags);
1309
1310 if (likely(zerocopy_success))
1311 queue->stats.tx_zerocopy_success++;
1312 else
1313 queue->stats.tx_zerocopy_fail++;
1314 xenvif_skb_zerocopy_complete(queue);
1315}
1316
1317const struct ubuf_info_ops xenvif_ubuf_ops = {
1318 .complete = xenvif_zerocopy_callback,
1319};
1320
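/* Unmap the grants whose pending indices were queued on the dealloc ring
 * by the zerocopy callback, then release those slots back to the pending
 * ring.
 */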
1321static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1322{
1323 struct gnttab_unmap_grant_ref *gop;
1324 pending_ring_idx_t dc, dp;
1325 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1326 unsigned int i = 0;
1327
1328 dc = queue->dealloc_cons;
1329 gop = queue->tx_unmap_ops;
1330
1331 /* Free up any grants we have finished using */
1332 do {
1333 dp = queue->dealloc_prod;
1334
1335 /* Ensure we see all indices enqueued by all
1336 * xenvif_zerocopy_callback() calls.
1337 */
1338 smp_rmb();
1339
1340 while (dc != dp) {
1341 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1342 pending_idx =
1343 queue->dealloc_ring[pending_index(dc++)];
1344
1345 pending_idx_release[gop - queue->tx_unmap_ops] =
1346 pending_idx;
1347 queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1348 queue->mmap_pages[pending_idx];
1349 gnttab_set_unmap_op(gop,
1350 idx_to_kaddr(queue, pending_idx),
1351 GNTMAP_host_map,
1352 queue->grant_tx_handle[pending_idx]);
1353 xenvif_grant_handle_reset(queue, pending_idx);
1354 ++gop;
1355 }
1356
1357 } while (dp != queue->dealloc_prod);
1358
1359 queue->dealloc_cons = dc;
1360
1361 if (gop - queue->tx_unmap_ops > 0) {
1362 int ret;
1363 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1364 NULL,
1365 queue->pages_to_unmap,
1366 gop - queue->tx_unmap_ops);
1367 if (ret) {
1368 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1369 gop - queue->tx_unmap_ops, ret);
1370 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1371 if (gop[i].status != GNTST_okay)
1372 netdev_err(queue->vif->dev,
1373 " host_addr: 0x%llx handle: 0x%x status: %d\n",
1374 gop[i].host_addr,
1375 gop[i].handle,
1376 gop[i].status);
1377 }
1378 BUG();
1379 }
1380 }
1381
1382 for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1383 xenvif_idx_release(queue, pending_idx_release[i],
1384 XEN_NETIF_RSP_OKAY);
1385}
1386
1387
1388/* Called after netfront has transmitted */
1389int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1390{
1391 unsigned nr_mops = 0, nr_cops = 0;
1392 int work_done, ret;
1393
1394 if (unlikely(!tx_work_todo(queue)))
1395 return 0;
1396
1397 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1398
1399 if (nr_cops == 0)
1400 return 0;
1401
1402 gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1403 if (nr_mops != 0) {
1404 ret = gnttab_map_refs(queue->tx_map_ops,
1405 NULL,
1406 queue->pages_to_map,
1407 nr_mops);
1408 if (ret) {
1409 unsigned int i;
1410
1411 netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
1412 nr_mops, ret);
1413 for (i = 0; i < nr_mops; ++i)
1414 WARN_ON_ONCE(queue->tx_map_ops[i].status ==
1415 GNTST_okay);
1416 }
1417 }
1418
1419 work_done = xenvif_tx_submit(queue);
1420
1421 return work_done;
1422}
1423
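/* Write a Tx response (plus NULL responses for any extra-info slots) onto
 * the shared ring; callers hold response_lock and push the responses.
 */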
1424static void _make_tx_response(struct xenvif_queue *queue,
1425 const struct xen_netif_tx_request *txp,
1426 unsigned int extra_count,
1427 s8 status)
1428{
1429 RING_IDX i = queue->tx.rsp_prod_pvt;
1430 struct xen_netif_tx_response *resp;
1431
1432 resp = RING_GET_RESPONSE(&queue->tx, i);
1433 resp->id = txp->id;
1434 resp->status = status;
1435
1436 while (extra_count-- != 0)
1437 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1438
1439 queue->tx.rsp_prod_pvt = ++i;
1440}
1441
1442static void push_tx_responses(struct xenvif_queue *queue)
1443{
1444 int notify;
1445
1446 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1447 if (notify)
1448 notify_remote_via_irq(queue->tx_irq);
1449}
1450
1451static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1452 s8 status)
1453{
1454 struct pending_tx_info *pending_tx_info;
1455 pending_ring_idx_t index;
1456 unsigned long flags;
1457
1458 pending_tx_info = &queue->pending_tx_info[pending_idx];
1459
1460 spin_lock_irqsave(&queue->response_lock, flags);
1461
1462 _make_tx_response(queue, &pending_tx_info->req,
1463 pending_tx_info->extra_count, status);
1464
1465 /* Release the pending index before pushing the Tx response so
1466 * it's available before a new Tx request is pushed by the
1467 * frontend.
1468 */
1469 index = pending_index(queue->pending_prod++);
1470 queue->pending_ring[index] = pending_idx;
1471
1472 push_tx_responses(queue);
1473
1474 spin_unlock_irqrestore(&queue->response_lock, flags);
1475}
1476
1477static void make_tx_response(struct xenvif_queue *queue,
1478 const struct xen_netif_tx_request *txp,
1479 unsigned int extra_count,
1480 s8 status)
1481{
1482 unsigned long flags;
1483
1484 spin_lock_irqsave(&queue->response_lock, flags);
1485
1486 _make_tx_response(queue, txp, extra_count, status);
1487 push_tx_responses(queue);
1488
1489 spin_unlock_irqrestore(&queue->response_lock, flags);
1490}
1491
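/* Synchronously unmap the grant backing a single pending slot; an unmap
 * failure here indicates a bug and is fatal.
 */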
1492static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1493{
1494 int ret;
1495 struct gnttab_unmap_grant_ref tx_unmap_op;
1496
1497 gnttab_set_unmap_op(&tx_unmap_op,
1498 idx_to_kaddr(queue, pending_idx),
1499 GNTMAP_host_map,
1500 queue->grant_tx_handle[pending_idx]);
1501 xenvif_grant_handle_reset(queue, pending_idx);
1502
1503 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1504 &queue->mmap_pages[pending_idx], 1);
1505 if (ret) {
1506 netdev_err(queue->vif->dev,
1507 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1508 ret,
1509 pending_idx,
1510 tx_unmap_op.host_addr,
1511 tx_unmap_op.handle,
1512 tx_unmap_op.status);
1513 BUG();
1514 }
1515}
1516
1517static inline int tx_work_todo(struct xenvif_queue *queue)
1518{
1519 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1520 return 1;
1521
1522 return 0;
1523}
1524
1525static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1526{
1527 return queue->dealloc_cons != queue->dealloc_prod;
1528}
1529
1530void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
1531{
1532 if (queue->tx.sring)
1533 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1534 queue->tx.sring);
1535 if (queue->rx.sring)
1536 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1537 queue->rx.sring);
1538}
1539
1540int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
1541 grant_ref_t tx_ring_ref,
1542 grant_ref_t rx_ring_ref)
1543{
1544 void *addr;
1545 struct xen_netif_tx_sring *txs;
1546 struct xen_netif_rx_sring *rxs;
1547 RING_IDX rsp_prod, req_prod;
1548 int err;
1549
1550 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1551 &tx_ring_ref, 1, &addr);
1552 if (err)
1553 goto err;
1554
1555 txs = (struct xen_netif_tx_sring *)addr;
1556 rsp_prod = READ_ONCE(txs->rsp_prod);
1557 req_prod = READ_ONCE(txs->req_prod);
1558
1559 BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
1560
1561 err = -EIO;
1562 if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
1563 goto err;
1564
1565 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1566 &rx_ring_ref, 1, &addr);
1567 if (err)
1568 goto err;
1569
1570 rxs = (struct xen_netif_rx_sring *)addr;
1571 rsp_prod = READ_ONCE(rxs->rsp_prod);
1572 req_prod = READ_ONCE(rxs->req_prod);
1573
1574 BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
1575
1576 err = -EIO;
1577 if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
1578 goto err;
1579
1580 return 0;
1581
1582err:
1583 xenvif_unmap_frontend_data_rings(queue);
1584 return err;
1585}
1586
1587static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1588{
1589 /* Dealloc thread must remain running until all inflight
1590 * packets complete.
1591 */
1592 return kthread_should_stop() &&
1593 !atomic_read(&queue->inflight_packets);
1594}
1595
1596int xenvif_dealloc_kthread(void *data)
1597{
1598 struct xenvif_queue *queue = data;
1599
1600 for (;;) {
1601 wait_event_interruptible(queue->dealloc_wq,
1602 tx_dealloc_work_todo(queue) ||
1603 xenvif_dealloc_kthread_should_stop(queue));
1604 if (xenvif_dealloc_kthread_should_stop(queue))
1605 break;
1606
1607 xenvif_tx_dealloc_action(queue);
1608 cond_resched();
1609 }
1610
1611 /* Unmap anything remaining */
1612 if (tx_dealloc_work_todo(queue))
1613 xenvif_tx_dealloc_action(queue);
1614
1615 return 0;
1616}
1617
1618static void make_ctrl_response(struct xenvif *vif,
1619 const struct xen_netif_ctrl_request *req,
1620 u32 status, u32 data)
1621{
1622 RING_IDX idx = vif->ctrl.rsp_prod_pvt;
1623 struct xen_netif_ctrl_response rsp = {
1624 .id = req->id,
1625 .type = req->type,
1626 .status = status,
1627 .data = data,
1628 };
1629
1630 *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
1631 vif->ctrl.rsp_prod_pvt = ++idx;
1632}
1633
1634static void push_ctrl_response(struct xenvif *vif)
1635{
1636 int notify;
1637
1638 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
1639 if (notify)
1640 notify_remote_via_irq(vif->ctrl_irq);
1641}
1642
1643static void process_ctrl_request(struct xenvif *vif,
1644 const struct xen_netif_ctrl_request *req)
1645{
1646 u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
1647 u32 data = 0;
1648
1649 switch (req->type) {
1650 case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
1651 status = xenvif_set_hash_alg(vif, req->data[0]);
1652 break;
1653
1654 case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
1655 status = xenvif_get_hash_flags(vif, &data);
1656 break;
1657
1658 case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
1659 status = xenvif_set_hash_flags(vif, req->data[0]);
1660 break;
1661
1662 case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
1663 status = xenvif_set_hash_key(vif, req->data[0],
1664 req->data[1]);
1665 break;
1666
1667 case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
1668 status = XEN_NETIF_CTRL_STATUS_SUCCESS;
1669 data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
1670 break;
1671
1672 case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
1673 status = xenvif_set_hash_mapping_size(vif,
1674 req->data[0]);
1675 break;
1676
1677 case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
1678 status = xenvif_set_hash_mapping(vif, req->data[0],
1679 req->data[1],
1680 req->data[2]);
1681 break;
1682
1683 default:
1684 break;
1685 }
1686
1687 make_ctrl_response(vif, req, status, data);
1688 push_ctrl_response(vif);
1689}
1690
1691static void xenvif_ctrl_action(struct xenvif *vif)
1692{
1693 for (;;) {
1694 RING_IDX req_prod, req_cons;
1695
1696 req_prod = vif->ctrl.sring->req_prod;
1697 req_cons = vif->ctrl.req_cons;
1698
1699 /* Make sure we can see requests before we process them. */
1700 rmb();
1701
1702 if (req_cons == req_prod)
1703 break;
1704
1705 while (req_cons != req_prod) {
1706 struct xen_netif_ctrl_request req;
1707
1708 RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
1709 req_cons++;
1710
1711 process_ctrl_request(vif, &req);
1712 }
1713
1714 vif->ctrl.req_cons = req_cons;
1715 vif->ctrl.sring->req_event = req_cons + 1;
1716 }
1717}
1718
1719static bool xenvif_ctrl_work_todo(struct xenvif *vif)
1720{
1721 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
1722 return true;
1723
1724 return false;
1725}
1726
1727irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
1728{
1729 struct xenvif *vif = data;
1730 unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
1731
1732 while (xenvif_ctrl_work_todo(vif)) {
1733 xenvif_ctrl_action(vif);
1734 eoi_flag = 0;
1735 }
1736
1737 xen_irq_lateeoi(irq, eoi_flag);
1738
1739 return IRQ_HANDLED;
1740}
1741
1742static int __init netback_init(void)
1743{
1744 int rc = 0;
1745
1746 if (!xen_domain())
1747 return -ENODEV;
1748
1749 /* Allow as many queues as there are CPUs, but at most 8, if the user
1750 * has not specified a value.
1751 */
1752 if (xenvif_max_queues == 0)
1753 xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
1754 num_online_cpus());
1755
1756 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1757 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1758 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1759 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1760 }
1761
1762 rc = xenvif_xenbus_init();
1763 if (rc)
1764 goto failed_init;
1765
1766#ifdef CONFIG_DEBUG_FS
1767 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
1768#endif /* CONFIG_DEBUG_FS */
1769
1770 return 0;
1771
1772failed_init:
1773 return rc;
1774}
1775
1776module_init(netback_init);
1777
1778static void __exit netback_fini(void)
1779{
1780#ifdef CONFIG_DEBUG_FS
1781 debugfs_remove_recursive(xen_netback_dbg_root);
1782#endif /* CONFIG_DEBUG_FS */
1783 xenvif_xenbus_fini();
1784}
1785module_exit(netback_fini);
1786
1787MODULE_DESCRIPTION("Xen backend network device module");
1788MODULE_LICENSE("Dual BSD/GPL");
1789MODULE_ALIAS("xen-backend:vif");
1/*
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
4 * by any operating system that implements a compatible front end. A
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
7 *
8 * Copyright (c) 2002-2005, K A Fraser
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35#include "common.h"
36
37#include <linux/kthread.h>
38#include <linux/if_vlan.h>
39#include <linux/udp.h>
40#include <linux/highmem.h>
41
42#include <net/tcp.h>
43
44#include <xen/xen.h>
45#include <xen/events.h>
46#include <xen/interface/memory.h>
47#include <xen/page.h>
48
49#include <asm/xen/hypercall.h>
50
51/* Provide an option to disable split event channels at load time as
52 * event channels are limited resource. Split event channels are
53 * enabled by default.
54 */
55bool separate_tx_rx_irq = true;
56module_param(separate_tx_rx_irq, bool, 0644);
57
58/* The time that packets can stay on the guest Rx internal queue
59 * before they are dropped.
60 */
61unsigned int rx_drain_timeout_msecs = 10000;
62module_param(rx_drain_timeout_msecs, uint, 0444);
63
64/* The length of time before the frontend is considered unresponsive
65 * because it isn't providing Rx slots.
66 */
67unsigned int rx_stall_timeout_msecs = 60000;
68module_param(rx_stall_timeout_msecs, uint, 0444);
69
70#define MAX_QUEUES_DEFAULT 8
71unsigned int xenvif_max_queues;
72module_param_named(max_queues, xenvif_max_queues, uint, 0644);
73MODULE_PARM_DESC(max_queues,
74 "Maximum number of queues per virtual interface");
75
76/*
77 * This is the maximum slots a skb can have. If a guest sends a skb
78 * which exceeds this limit it is considered malicious.
79 */
80#define FATAL_SKB_SLOTS_DEFAULT 20
81static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
82module_param(fatal_skb_slots, uint, 0444);
83
84/* The amount to copy out of the first guest Tx slot into the skb's
85 * linear area. If the first slot has more data, it will be mapped
86 * and put into the first frag.
87 *
88 * This is sized to avoid pulling headers from the frags for most
89 * TCP/IP packets.
90 */
91#define XEN_NETBACK_TX_COPY_LEN 128
92
93/* This is the maximum number of flows in the hash cache. */
94#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
95unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
96module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
97MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
98
99static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
100 u8 status);
101
102static void make_tx_response(struct xenvif_queue *queue,
103 struct xen_netif_tx_request *txp,
104 unsigned int extra_count,
105 s8 st);
106static void push_tx_responses(struct xenvif_queue *queue);
107
108static inline int tx_work_todo(struct xenvif_queue *queue);
109
110static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
111 u16 idx)
112{
113 return page_to_pfn(queue->mmap_pages[idx]);
114}
115
116static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
117 u16 idx)
118{
119 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
120}
121
122#define callback_param(vif, pending_idx) \
123 (vif->pending_tx_info[pending_idx].callback_struct)
124
125/* Find the containing VIF's structure from a pointer in pending_tx_info array
126 */
127static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
128{
129 u16 pending_idx = ubuf->desc;
130 struct pending_tx_info *temp =
131 container_of(ubuf, struct pending_tx_info, callback_struct);
132 return container_of(temp - pending_idx,
133 struct xenvif_queue,
134 pending_tx_info[0]);
135}
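
/* Illustrative note (pointer arithmetic only, no new code): the
 * callback_struct lives inside queue->pending_tx_info[pending_idx]
 * and ubuf->desc stores that pending_idx, so
 *
 *     temp               == &queue->pending_tx_info[pending_idx]
 *     temp - pending_idx == &queue->pending_tx_info[0]
 *
 * and container_of() on the latter yields the owning queue.
 */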
136
137static u16 frag_get_pending_idx(skb_frag_t *frag)
138{
139 return (u16)frag->page_offset;
140}
141
142static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
143{
144 frag->page_offset = pending_idx;
145}
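
/* Note: while a packet is being assembled, a frag's page_offset field
 * is temporarily repurposed to stash the pending ring index; it only
 * becomes a real page offset once xenvif_fill_frags() installs the
 * page, offset and size from the corresponding tx request.
 */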
146
147static inline pending_ring_idx_t pending_index(unsigned i)
148{
149 return i & (MAX_PENDING_REQS-1);
150}
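
/* Illustrative example, assuming MAX_PENDING_REQS is a power of two
 * (which the mask above requires): with MAX_PENDING_REQS == 256,
 * pending_index(255) == 255 and pending_index(256) == 0, so producer
 * and consumer counters can grow without bound while the index stays
 * inside the ring.
 */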
151
152void xenvif_kick_thread(struct xenvif_queue *queue)
153{
154 wake_up(&queue->wq);
155}
156
157void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
158{
159 int more_to_do;
160
161 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
162
163 if (more_to_do)
164 napi_schedule(&queue->napi);
165}
166
167static void tx_add_credit(struct xenvif_queue *queue)
168{
169 unsigned long max_burst, max_credit;
170
171 /*
172 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
173 * Otherwise the interface can seize up due to insufficient credit.
174 */
175 max_burst = max(131072UL, queue->credit_bytes);
176
177 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
178 max_credit = queue->remaining_credit + queue->credit_bytes;
179 if (max_credit < queue->remaining_credit)
180 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
181
182 queue->remaining_credit = min(max_credit, max_burst);
183 queue->rate_limited = false;
184}
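
/* Worked example (illustrative numbers only): with credit_bytes ==
 * 65536 and remaining_credit == 1000, a refill gives
 *
 *     max_burst        = max(131072, 65536)   = 131072
 *     max_credit       = 1000 + 65536         = 66536
 *     remaining_credit = min(66536, 131072)   = 66536
 *
 * i.e. unused credit carries over but is capped at the 128kB burst
 * allowance.
 */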
185
186void xenvif_tx_credit_callback(struct timer_list *t)
187{
188 struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
189 tx_add_credit(queue);
190 xenvif_napi_schedule_or_enable_events(queue);
191}
192
193static void xenvif_tx_err(struct xenvif_queue *queue,
194 struct xen_netif_tx_request *txp,
195 unsigned int extra_count, RING_IDX end)
196{
197 RING_IDX cons = queue->tx.req_cons;
198 unsigned long flags;
199
200 do {
201 spin_lock_irqsave(&queue->response_lock, flags);
202 make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
203 push_tx_responses(queue);
204 spin_unlock_irqrestore(&queue->response_lock, flags);
205 if (cons == end)
206 break;
207 RING_COPY_REQUEST(&queue->tx, cons++, txp);
208 extra_count = 0; /* only the first frag can have extras */
209 } while (1);
210 queue->tx.req_cons = cons;
211}
212
213static void xenvif_fatal_tx_err(struct xenvif *vif)
214{
215 netdev_err(vif->dev, "fatal error; disabling device\n");
216 vif->disabled = true;
217 /* Disable the vif from queue 0's kthread */
218 if (vif->num_queues)
219 xenvif_kick_thread(&vif->queues[0]);
220}
221
222static int xenvif_count_requests(struct xenvif_queue *queue,
223 struct xen_netif_tx_request *first,
224 unsigned int extra_count,
225 struct xen_netif_tx_request *txp,
226 int work_to_do)
227{
228 RING_IDX cons = queue->tx.req_cons;
229 int slots = 0;
230 int drop_err = 0;
231 int more_data;
232
233 if (!(first->flags & XEN_NETTXF_more_data))
234 return 0;
235
236 do {
237 struct xen_netif_tx_request dropped_tx = { 0 };
238
239 if (slots >= work_to_do) {
240 netdev_err(queue->vif->dev,
241 "Asked for %d slots but exceeds this limit\n",
242 work_to_do);
243 xenvif_fatal_tx_err(queue->vif);
244 return -ENODATA;
245 }
246
247		/* This guest is really using too many slots and is
248		 * considered malicious.
249		 */
250 if (unlikely(slots >= fatal_skb_slots)) {
251 netdev_err(queue->vif->dev,
252 "Malicious frontend using %d slots, threshold %u\n",
253 slots, fatal_skb_slots);
254 xenvif_fatal_tx_err(queue->vif);
255 return -E2BIG;
256 }
257
258		/* The Xen network protocol had an implicit dependency on
259		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
260		 * the historical MAX_SKB_FRAGS value 18 to honor the
261		 * same behavior as before. Any packet using more than
262		 * 18 slots but fewer than fatal_skb_slots slots is
263		 * dropped.
264		 */
265 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
266 if (net_ratelimit())
267 netdev_dbg(queue->vif->dev,
268 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
269 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
270 drop_err = -E2BIG;
271 }
272
273 if (drop_err)
274 txp = &dropped_tx;
275
276 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
277
278 /* If the guest submitted a frame >= 64 KiB then
279 * first->size overflowed and following slots will
280 * appear to be larger than the frame.
281 *
282		 * This cannot be a fatal error as there are buggy
283 * frontends that do this.
284 *
285 * Consume all slots and drop the packet.
286 */
287 if (!drop_err && txp->size > first->size) {
288 if (net_ratelimit())
289 netdev_dbg(queue->vif->dev,
290 "Invalid tx request, slot size %u > remaining size %u\n",
291 txp->size, first->size);
292 drop_err = -EIO;
293 }
294
295 first->size -= txp->size;
296 slots++;
297
298 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
299 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
300 txp->offset, txp->size);
301 xenvif_fatal_tx_err(queue->vif);
302 return -EINVAL;
303 }
304
305 more_data = txp->flags & XEN_NETTXF_more_data;
306
307 if (!drop_err)
308 txp++;
309
310 } while (more_data);
311
312 if (drop_err) {
313 xenvif_tx_err(queue, first, extra_count, cons + slots);
314 return drop_err;
315 }
316
317 return slots;
318}
319
320
321struct xenvif_tx_cb {
322 u16 pending_idx;
323};
324
325#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
326
327static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
328 u16 pending_idx,
329 struct xen_netif_tx_request *txp,
330 unsigned int extra_count,
331 struct gnttab_map_grant_ref *mop)
332{
333 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
334 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
335 GNTMAP_host_map | GNTMAP_readonly,
336 txp->gref, queue->vif->domid);
337
338 memcpy(&queue->pending_tx_info[pending_idx].req, txp,
339 sizeof(*txp));
340 queue->pending_tx_info[pending_idx].extra_count = extra_count;
341}
342
343static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
344{
345 struct sk_buff *skb =
346 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
347 GFP_ATOMIC | __GFP_NOWARN);
348 if (unlikely(skb == NULL))
349 return NULL;
350
351 /* Packets passed to netif_rx() must have some headroom. */
352 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
353
354 /* Initialize it here to avoid later surprises */
355 skb_shinfo(skb)->destructor_arg = NULL;
356
357 return skb;
358}
359
360static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
361 struct sk_buff *skb,
362 struct xen_netif_tx_request *txp,
363 struct gnttab_map_grant_ref *gop,
364 unsigned int frag_overflow,
365 struct sk_buff *nskb)
366{
367 struct skb_shared_info *shinfo = skb_shinfo(skb);
368 skb_frag_t *frags = shinfo->frags;
369 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
370 int start;
371 pending_ring_idx_t index;
372 unsigned int nr_slots;
373
374 nr_slots = shinfo->nr_frags;
375
376	/* Skip the first skb fragment if it is on the same page as the header fragment. */
377 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
378
379 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
380 shinfo->nr_frags++, txp++, gop++) {
381 index = pending_index(queue->pending_cons++);
382 pending_idx = queue->pending_ring[index];
383 xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
384 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
385 }
386
387 if (frag_overflow) {
388
389 shinfo = skb_shinfo(nskb);
390 frags = shinfo->frags;
391
392 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
393 shinfo->nr_frags++, txp++, gop++) {
394 index = pending_index(queue->pending_cons++);
395 pending_idx = queue->pending_ring[index];
396 xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
397 gop);
398 frag_set_pending_idx(&frags[shinfo->nr_frags],
399 pending_idx);
400 }
401
402 skb_shinfo(skb)->frag_list = nskb;
403 }
404
405 return gop;
406}
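
/* Note: each remaining tx slot above becomes one grant-map operation
 * and one frag.  A packet using more than MAX_SKB_FRAGS slots
 * overflows into nskb, whose frags hold the excess slots and which is
 * attached as the skb's frag_list; xenvif_handle_frag_list() later
 * coalesces it back into local pages.
 */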
407
408static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
409 u16 pending_idx,
410 grant_handle_t handle)
411{
412 if (unlikely(queue->grant_tx_handle[pending_idx] !=
413 NETBACK_INVALID_HANDLE)) {
414 netdev_err(queue->vif->dev,
415 "Trying to overwrite active handle! pending_idx: 0x%x\n",
416 pending_idx);
417 BUG();
418 }
419 queue->grant_tx_handle[pending_idx] = handle;
420}
421
422static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
423 u16 pending_idx)
424{
425 if (unlikely(queue->grant_tx_handle[pending_idx] ==
426 NETBACK_INVALID_HANDLE)) {
427 netdev_err(queue->vif->dev,
428 "Trying to unmap invalid handle! pending_idx: 0x%x\n",
429 pending_idx);
430 BUG();
431 }
432 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
433}
434
435static int xenvif_tx_check_gop(struct xenvif_queue *queue,
436 struct sk_buff *skb,
437 struct gnttab_map_grant_ref **gopp_map,
438 struct gnttab_copy **gopp_copy)
439{
440 struct gnttab_map_grant_ref *gop_map = *gopp_map;
441 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
442 /* This always points to the shinfo of the skb being checked, which
443 * could be either the first or the one on the frag_list
444 */
445 struct skb_shared_info *shinfo = skb_shinfo(skb);
446 /* If this is non-NULL, we are currently checking the frag_list skb, and
447 * this points to the shinfo of the first one
448 */
449 struct skb_shared_info *first_shinfo = NULL;
450 int nr_frags = shinfo->nr_frags;
451 const bool sharedslot = nr_frags &&
452 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
453 int i, err;
454
455 /* Check status of header. */
456 err = (*gopp_copy)->status;
457 if (unlikely(err)) {
458 if (net_ratelimit())
459 netdev_dbg(queue->vif->dev,
460 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
461 (*gopp_copy)->status,
462 pending_idx,
463 (*gopp_copy)->source.u.ref);
464 /* The first frag might still have this slot mapped */
465 if (!sharedslot)
466 xenvif_idx_release(queue, pending_idx,
467 XEN_NETIF_RSP_ERROR);
468 }
469 (*gopp_copy)++;
470
471check_frags:
472 for (i = 0; i < nr_frags; i++, gop_map++) {
473 int j, newerr;
474
475 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
476
477 /* Check error status: if okay then remember grant handle. */
478 newerr = gop_map->status;
479
480 if (likely(!newerr)) {
481 xenvif_grant_handle_set(queue,
482 pending_idx,
483 gop_map->handle);
484 /* Had a previous error? Invalidate this fragment. */
485 if (unlikely(err)) {
486 xenvif_idx_unmap(queue, pending_idx);
487 /* If the mapping of the first frag was OK, but
488 * the header's copy failed, and they are
489 * sharing a slot, send an error
490 */
491 if (i == 0 && sharedslot)
492 xenvif_idx_release(queue, pending_idx,
493 XEN_NETIF_RSP_ERROR);
494 else
495 xenvif_idx_release(queue, pending_idx,
496 XEN_NETIF_RSP_OKAY);
497 }
498 continue;
499 }
500
501 /* Error on this fragment: respond to client with an error. */
502 if (net_ratelimit())
503 netdev_dbg(queue->vif->dev,
504 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
505 i,
506 gop_map->status,
507 pending_idx,
508 gop_map->ref);
509
510 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
511
512 /* Not the first error? Preceding frags already invalidated. */
513 if (err)
514 continue;
515
516		/* First error: if the header hasn't shared a slot with the
517 * first frag, release it as well.
518 */
519 if (!sharedslot)
520 xenvif_idx_release(queue,
521 XENVIF_TX_CB(skb)->pending_idx,
522 XEN_NETIF_RSP_OKAY);
523
524 /* Invalidate preceding fragments of this skb. */
525 for (j = 0; j < i; j++) {
526 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
527 xenvif_idx_unmap(queue, pending_idx);
528 xenvif_idx_release(queue, pending_idx,
529 XEN_NETIF_RSP_OKAY);
530 }
531
532 /* And if we found the error while checking the frag_list, unmap
533 * the first skb's frags
534 */
535 if (first_shinfo) {
536 for (j = 0; j < first_shinfo->nr_frags; j++) {
537 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
538 xenvif_idx_unmap(queue, pending_idx);
539 xenvif_idx_release(queue, pending_idx,
540 XEN_NETIF_RSP_OKAY);
541 }
542 }
543
544 /* Remember the error: invalidate all subsequent fragments. */
545 err = newerr;
546 }
547
548 if (skb_has_frag_list(skb) && !first_shinfo) {
549 first_shinfo = skb_shinfo(skb);
550 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
551 nr_frags = shinfo->nr_frags;
552
553 goto check_frags;
554 }
555
556 *gopp_map = gop_map;
557 return err;
558}
559
560static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
561{
562 struct skb_shared_info *shinfo = skb_shinfo(skb);
563 int nr_frags = shinfo->nr_frags;
564 int i;
565 u16 prev_pending_idx = INVALID_PENDING_IDX;
566
567 for (i = 0; i < nr_frags; i++) {
568 skb_frag_t *frag = shinfo->frags + i;
569 struct xen_netif_tx_request *txp;
570 struct page *page;
571 u16 pending_idx;
572
573 pending_idx = frag_get_pending_idx(frag);
574
575		/* If this is not the first frag, chain it to the previous. */
576 if (prev_pending_idx == INVALID_PENDING_IDX)
577 skb_shinfo(skb)->destructor_arg =
578 &callback_param(queue, pending_idx);
579 else
580 callback_param(queue, prev_pending_idx).ctx =
581 &callback_param(queue, pending_idx);
582
583 callback_param(queue, pending_idx).ctx = NULL;
584 prev_pending_idx = pending_idx;
585
586 txp = &queue->pending_tx_info[pending_idx].req;
587 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
588 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
589 skb->len += txp->size;
590 skb->data_len += txp->size;
591 skb->truesize += txp->size;
592
593		/* Take an extra reference to offset the network stack's put_page() */
594 get_page(queue->mmap_pages[pending_idx]);
595 }
596}
597
598static int xenvif_get_extras(struct xenvif_queue *queue,
599 struct xen_netif_extra_info *extras,
600 unsigned int *extra_count,
601 int work_to_do)
602{
603 struct xen_netif_extra_info extra;
604 RING_IDX cons = queue->tx.req_cons;
605
606 do {
607 if (unlikely(work_to_do-- <= 0)) {
608 netdev_err(queue->vif->dev, "Missing extra info\n");
609 xenvif_fatal_tx_err(queue->vif);
610 return -EBADR;
611 }
612
613 RING_COPY_REQUEST(&queue->tx, cons, &extra);
614
615 queue->tx.req_cons = ++cons;
616 (*extra_count)++;
617
618 if (unlikely(!extra.type ||
619 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
620 netdev_err(queue->vif->dev,
621 "Invalid extra type: %d\n", extra.type);
622 xenvif_fatal_tx_err(queue->vif);
623 return -EINVAL;
624 }
625
626 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
627 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
628
629 return work_to_do;
630}
631
632static int xenvif_set_skb_gso(struct xenvif *vif,
633 struct sk_buff *skb,
634 struct xen_netif_extra_info *gso)
635{
636 if (!gso->u.gso.size) {
637 netdev_err(vif->dev, "GSO size must not be zero.\n");
638 xenvif_fatal_tx_err(vif);
639 return -EINVAL;
640 }
641
642 switch (gso->u.gso.type) {
643 case XEN_NETIF_GSO_TYPE_TCPV4:
644 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
645 break;
646 case XEN_NETIF_GSO_TYPE_TCPV6:
647 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
648 break;
649 default:
650 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
651 xenvif_fatal_tx_err(vif);
652 return -EINVAL;
653 }
654
655 skb_shinfo(skb)->gso_size = gso->u.gso.size;
656 /* gso_segs will be calculated later */
657
658 return 0;
659}
660
661static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
662{
663 bool recalculate_partial_csum = false;
664
665 /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
666 * peers can fail to set NETRXF_csum_blank when sending a GSO
667 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
668 * recalculate the partial checksum.
669 */
670 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
671 queue->stats.rx_gso_checksum_fixup++;
672 skb->ip_summed = CHECKSUM_PARTIAL;
673 recalculate_partial_csum = true;
674 }
675
676 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
677 if (skb->ip_summed != CHECKSUM_PARTIAL)
678 return 0;
679
680 return skb_checksum_setup(skb, recalculate_partial_csum);
681}
682
683static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
684{
685 u64 now = get_jiffies_64();
686 u64 next_credit = queue->credit_window_start +
687 msecs_to_jiffies(queue->credit_usec / 1000);
688
689 /* Timer could already be pending in rare cases. */
690 if (timer_pending(&queue->credit_timeout)) {
691 queue->rate_limited = true;
692 return true;
693 }
694
695 /* Passed the point where we can replenish credit? */
696 if (time_after_eq64(now, next_credit)) {
697 queue->credit_window_start = now;
698 tx_add_credit(queue);
699 }
700
701 /* Still too big to send right now? Set a callback. */
702 if (size > queue->remaining_credit) {
703 mod_timer(&queue->credit_timeout,
704 next_credit);
705 queue->credit_window_start = next_credit;
706 queue->rate_limited = true;
707
708 return true;
709 }
710
711 return false;
712}
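
/* Illustrative timeline (made-up numbers): with credit_bytes == 10000
 * and credit_usec == 1000000, a queue may send 10000 bytes per second.
 * When a request does not fit into remaining_credit mid-window, the
 * credit_timeout timer is armed for the start of the next window;
 * xenvif_tx_credit_callback() then refills the credit and reschedules
 * NAPI so processing resumes.
 */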
713
714/* No locking is required in xenvif_mcast_add/del() as they are
715 * only ever invoked from NAPI poll. An RCU list is used because
716 * xenvif_mcast_match() is called asynchronously, during start_xmit.
717 */
718
719static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
720{
721 struct xenvif_mcast_addr *mcast;
722
723 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
724 if (net_ratelimit())
725 netdev_err(vif->dev,
726 "Too many multicast addresses\n");
727 return -ENOSPC;
728 }
729
730 mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
731 if (!mcast)
732 return -ENOMEM;
733
734 ether_addr_copy(mcast->addr, addr);
735 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
736 vif->fe_mcast_count++;
737
738 return 0;
739}
740
741static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
742{
743 struct xenvif_mcast_addr *mcast;
744
745 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
746 if (ether_addr_equal(addr, mcast->addr)) {
747 --vif->fe_mcast_count;
748 list_del_rcu(&mcast->entry);
749 kfree_rcu(mcast, rcu);
750 break;
751 }
752 }
753}
754
755bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
756{
757 struct xenvif_mcast_addr *mcast;
758
759 rcu_read_lock();
760 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
761 if (ether_addr_equal(addr, mcast->addr)) {
762 rcu_read_unlock();
763 return true;
764 }
765 }
766 rcu_read_unlock();
767
768 return false;
769}
770
771void xenvif_mcast_addr_list_free(struct xenvif *vif)
772{
773 /* No need for locking or RCU here. NAPI poll and TX queue
774 * are stopped.
775 */
776 while (!list_empty(&vif->fe_mcast_addr)) {
777 struct xenvif_mcast_addr *mcast;
778
779 mcast = list_first_entry(&vif->fe_mcast_addr,
780 struct xenvif_mcast_addr,
781 entry);
782 --vif->fe_mcast_count;
783 list_del(&mcast->entry);
784 kfree(mcast);
785 }
786}
787
788static void xenvif_tx_build_gops(struct xenvif_queue *queue,
789 int budget,
790 unsigned *copy_ops,
791 unsigned *map_ops)
792{
793 struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
794 struct sk_buff *skb, *nskb;
795 int ret;
796 unsigned int frag_overflow;
797
798 while (skb_queue_len(&queue->tx_queue) < budget) {
799 struct xen_netif_tx_request txreq;
800 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
801 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
802 unsigned int extra_count;
803 u16 pending_idx;
804 RING_IDX idx;
805 int work_to_do;
806 unsigned int data_len;
807 pending_ring_idx_t index;
808
809 if (queue->tx.sring->req_prod - queue->tx.req_cons >
810 XEN_NETIF_TX_RING_SIZE) {
811 netdev_err(queue->vif->dev,
812 "Impossible number of requests. "
813 "req_prod %d, req_cons %d, size %ld\n",
814 queue->tx.sring->req_prod, queue->tx.req_cons,
815 XEN_NETIF_TX_RING_SIZE);
816 xenvif_fatal_tx_err(queue->vif);
817 break;
818 }
819
820 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
821 if (!work_to_do)
822 break;
823
824 idx = queue->tx.req_cons;
825 rmb(); /* Ensure that we see the request before we copy it. */
826 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
827
828 /* Credit-based scheduling. */
829 if (txreq.size > queue->remaining_credit &&
830 tx_credit_exceeded(queue, txreq.size))
831 break;
832
833 queue->remaining_credit -= txreq.size;
834
835 work_to_do--;
836 queue->tx.req_cons = ++idx;
837
838 memset(extras, 0, sizeof(extras));
839 extra_count = 0;
840 if (txreq.flags & XEN_NETTXF_extra_info) {
841 work_to_do = xenvif_get_extras(queue, extras,
842 &extra_count,
843 work_to_do);
844 idx = queue->tx.req_cons;
845 if (unlikely(work_to_do < 0))
846 break;
847 }
848
849 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
850 struct xen_netif_extra_info *extra;
851
852 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
853 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
854
855 make_tx_response(queue, &txreq, extra_count,
856 (ret == 0) ?
857 XEN_NETIF_RSP_OKAY :
858 XEN_NETIF_RSP_ERROR);
859 push_tx_responses(queue);
860 continue;
861 }
862
863 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
864 struct xen_netif_extra_info *extra;
865
866 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
867 xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
868
869 make_tx_response(queue, &txreq, extra_count,
870 XEN_NETIF_RSP_OKAY);
871 push_tx_responses(queue);
872 continue;
873 }
874
875 ret = xenvif_count_requests(queue, &txreq, extra_count,
876 txfrags, work_to_do);
877 if (unlikely(ret < 0))
878 break;
879
880 idx += ret;
881
882 if (unlikely(txreq.size < ETH_HLEN)) {
883 netdev_dbg(queue->vif->dev,
884 "Bad packet size: %d\n", txreq.size);
885 xenvif_tx_err(queue, &txreq, extra_count, idx);
886 break;
887 }
888
889		/* The payload must not cross a page boundary, as it cannot be fragmented. */
890 if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
891 netdev_err(queue->vif->dev,
892 "txreq.offset: %u, size: %u, end: %lu\n",
893 txreq.offset, txreq.size,
894 (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
895 xenvif_fatal_tx_err(queue->vif);
896 break;
897 }
898
899 index = pending_index(queue->pending_cons);
900 pending_idx = queue->pending_ring[index];
901
902 data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
903 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
904 XEN_NETBACK_TX_COPY_LEN : txreq.size;
905
906 skb = xenvif_alloc_skb(data_len);
907 if (unlikely(skb == NULL)) {
908 netdev_dbg(queue->vif->dev,
909 "Can't allocate a skb in start_xmit.\n");
910 xenvif_tx_err(queue, &txreq, extra_count, idx);
911 break;
912 }
913
914 skb_shinfo(skb)->nr_frags = ret;
915 if (data_len < txreq.size)
916 skb_shinfo(skb)->nr_frags++;
917 /* At this point shinfo->nr_frags is in fact the number of
918 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
919 */
920 frag_overflow = 0;
921 nskb = NULL;
922 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
923 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
924 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
925 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
926 nskb = xenvif_alloc_skb(0);
927 if (unlikely(nskb == NULL)) {
928 kfree_skb(skb);
929 xenvif_tx_err(queue, &txreq, extra_count, idx);
930 if (net_ratelimit())
931 netdev_err(queue->vif->dev,
932 "Can't allocate the frag_list skb.\n");
933 break;
934 }
935 }
936
937 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
938 struct xen_netif_extra_info *gso;
939 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
940
941 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
942 /* Failure in xenvif_set_skb_gso is fatal. */
943 kfree_skb(skb);
944 kfree_skb(nskb);
945 break;
946 }
947 }
948
949 if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
950 struct xen_netif_extra_info *extra;
951 enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
952
953 extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
954
955 switch (extra->u.hash.type) {
956 case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
957 case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
958 type = PKT_HASH_TYPE_L3;
959 break;
960
961 case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
962 case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
963 type = PKT_HASH_TYPE_L4;
964 break;
965
966 default:
967 break;
968 }
969
970 if (type != PKT_HASH_TYPE_NONE)
971 skb_set_hash(skb,
972 *(u32 *)extra->u.hash.value,
973 type);
974 }
975
976 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
977
978 __skb_put(skb, data_len);
979 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
980 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
981 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
982
983 queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
984 virt_to_gfn(skb->data);
985 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
986 queue->tx_copy_ops[*copy_ops].dest.offset =
987 offset_in_page(skb->data) & ~XEN_PAGE_MASK;
988
989 queue->tx_copy_ops[*copy_ops].len = data_len;
990 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
991
992 (*copy_ops)++;
993
994 if (data_len < txreq.size) {
995 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
996 pending_idx);
997 xenvif_tx_create_map_op(queue, pending_idx, &txreq,
998 extra_count, gop);
999 gop++;
1000 } else {
1001 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1002 INVALID_PENDING_IDX);
1003 memcpy(&queue->pending_tx_info[pending_idx].req,
1004 &txreq, sizeof(txreq));
1005 queue->pending_tx_info[pending_idx].extra_count =
1006 extra_count;
1007 }
1008
1009 queue->pending_cons++;
1010
1011 gop = xenvif_get_requests(queue, skb, txfrags, gop,
1012 frag_overflow, nskb);
1013
1014 __skb_queue_tail(&queue->tx_queue, skb);
1015
1016 queue->tx.req_cons = idx;
1017
1018 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1019 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1020 break;
1021 }
1022
1023 (*map_ops) = gop - queue->tx_map_ops;
1024 return;
1025}
1026
1027/* Consolidate an skb with a frag_list into a brand new one with local pages on
1028 * its frags. Returns 0, or -ENOMEM if new pages cannot be allocated.
1029 */
1030static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1031{
1032 unsigned int offset = skb_headlen(skb);
1033 skb_frag_t frags[MAX_SKB_FRAGS];
1034 int i, f;
1035 struct ubuf_info *uarg;
1036 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1037
1038 queue->stats.tx_zerocopy_sent += 2;
1039 queue->stats.tx_frag_overflow++;
1040
1041 xenvif_fill_frags(queue, nskb);
1042	/* Subtract the frags' size; we will correct it later */
1043 skb->truesize -= skb->data_len;
1044 skb->len += nskb->len;
1045 skb->data_len += nskb->len;
1046
1047 /* create a brand new frags array and coalesce there */
1048 for (i = 0; offset < skb->len; i++) {
1049 struct page *page;
1050 unsigned int len;
1051
1052 BUG_ON(i >= MAX_SKB_FRAGS);
1053 page = alloc_page(GFP_ATOMIC);
1054 if (!page) {
1055 int j;
1056 skb->truesize += skb->data_len;
1057 for (j = 0; j < i; j++)
1058 put_page(frags[j].page.p);
1059 return -ENOMEM;
1060 }
1061
1062 if (offset + PAGE_SIZE < skb->len)
1063 len = PAGE_SIZE;
1064 else
1065 len = skb->len - offset;
1066 if (skb_copy_bits(skb, offset, page_address(page), len))
1067 BUG();
1068
1069 offset += len;
1070 frags[i].page.p = page;
1071 frags[i].page_offset = 0;
1072 skb_frag_size_set(&frags[i], len);
1073 }
1074
1075 /* Copied all the bits from the frag list -- free it. */
1076 skb_frag_list_init(skb);
1077 xenvif_skb_zerocopy_prepare(queue, nskb);
1078 kfree_skb(nskb);
1079
1080 /* Release all the original (foreign) frags. */
1081 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1082 skb_frag_unref(skb, f);
1083 uarg = skb_shinfo(skb)->destructor_arg;
1084	/* Increase the inflight counter to offset the decrement in the callback */
1085 atomic_inc(&queue->inflight_packets);
1086 uarg->callback(uarg, true);
1087 skb_shinfo(skb)->destructor_arg = NULL;
1088
1089 /* Fill the skb with the new (local) frags. */
1090 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1091 skb_shinfo(skb)->nr_frags = i;
1092 skb->truesize += i * PAGE_SIZE;
1093
1094 return 0;
1095}
1096
1097static int xenvif_tx_submit(struct xenvif_queue *queue)
1098{
1099 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1100 struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1101 struct sk_buff *skb;
1102 int work_done = 0;
1103
1104 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1105 struct xen_netif_tx_request *txp;
1106 u16 pending_idx;
1107 unsigned data_len;
1108
1109 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1110 txp = &queue->pending_tx_info[pending_idx].req;
1111
1112 /* Check the remap error code. */
1113 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1114 /* If there was an error, xenvif_tx_check_gop is
1115 * expected to release all the frags which were mapped,
1116 * so kfree_skb shouldn't do it again
1117 */
1118 skb_shinfo(skb)->nr_frags = 0;
1119 if (skb_has_frag_list(skb)) {
1120 struct sk_buff *nskb =
1121 skb_shinfo(skb)->frag_list;
1122 skb_shinfo(nskb)->nr_frags = 0;
1123 }
1124 kfree_skb(skb);
1125 continue;
1126 }
1127
1128 data_len = skb->len;
1129 callback_param(queue, pending_idx).ctx = NULL;
1130 if (data_len < txp->size) {
1131 /* Append the packet payload as a fragment. */
1132 txp->offset += data_len;
1133 txp->size -= data_len;
1134 } else {
1135 /* Schedule a response immediately. */
1136 xenvif_idx_release(queue, pending_idx,
1137 XEN_NETIF_RSP_OKAY);
1138 }
1139
1140 if (txp->flags & XEN_NETTXF_csum_blank)
1141 skb->ip_summed = CHECKSUM_PARTIAL;
1142 else if (txp->flags & XEN_NETTXF_data_validated)
1143 skb->ip_summed = CHECKSUM_UNNECESSARY;
1144
1145 xenvif_fill_frags(queue, skb);
1146
1147 if (unlikely(skb_has_frag_list(skb))) {
1148 if (xenvif_handle_frag_list(queue, skb)) {
1149 if (net_ratelimit())
1150 netdev_err(queue->vif->dev,
1151 "Not enough memory to consolidate frag_list!\n");
1152 xenvif_skb_zerocopy_prepare(queue, skb);
1153 kfree_skb(skb);
1154 continue;
1155 }
1156 }
1157
1158 skb->dev = queue->vif->dev;
1159 skb->protocol = eth_type_trans(skb, skb->dev);
1160 skb_reset_network_header(skb);
1161
1162 if (checksum_setup(queue, skb)) {
1163 netdev_dbg(queue->vif->dev,
1164 "Can't setup checksum in net_tx_action\n");
1165 /* We have to set this flag to trigger the callback */
1166 if (skb_shinfo(skb)->destructor_arg)
1167 xenvif_skb_zerocopy_prepare(queue, skb);
1168 kfree_skb(skb);
1169 continue;
1170 }
1171
1172 skb_probe_transport_header(skb, 0);
1173
1174 /* If the packet is GSO then we will have just set up the
1175 * transport header offset in checksum_setup so it's now
1176 * straightforward to calculate gso_segs.
1177 */
1178 if (skb_is_gso(skb)) {
1179 int mss = skb_shinfo(skb)->gso_size;
1180 int hdrlen = skb_transport_header(skb) -
1181 skb_mac_header(skb) +
1182 tcp_hdrlen(skb);
1183
1184 skb_shinfo(skb)->gso_segs =
1185 DIV_ROUND_UP(skb->len - hdrlen, mss);
1186 }
1187
1188 queue->stats.rx_bytes += skb->len;
1189 queue->stats.rx_packets++;
1190
1191 work_done++;
1192
1193		/* Set this flag right before netif_receive_skb, otherwise
1194		 * someone might think this packet has already left netback, and
1195		 * do a skb_copy_ubufs while we are still in control of the
1196		 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
1197 */
1198 if (skb_shinfo(skb)->destructor_arg) {
1199 xenvif_skb_zerocopy_prepare(queue, skb);
1200 queue->stats.tx_zerocopy_sent++;
1201 }
1202
1203 netif_receive_skb(skb);
1204 }
1205
1206 return work_done;
1207}
1208
1209void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1210{
1211 unsigned long flags;
1212 pending_ring_idx_t index;
1213 struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1214
1215 /* This is the only place where we grab this lock, to protect callbacks
1216 * from each other.
1217 */
1218 spin_lock_irqsave(&queue->callback_lock, flags);
1219 do {
1220 u16 pending_idx = ubuf->desc;
1221 ubuf = (struct ubuf_info *) ubuf->ctx;
1222 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1223 MAX_PENDING_REQS);
1224 index = pending_index(queue->dealloc_prod);
1225 queue->dealloc_ring[index] = pending_idx;
1226 /* Sync with xenvif_tx_dealloc_action:
1227 * insert idx then incr producer.
1228 */
1229 smp_wmb();
1230 queue->dealloc_prod++;
1231 } while (ubuf);
1232 spin_unlock_irqrestore(&queue->callback_lock, flags);
1233
1234 if (likely(zerocopy_success))
1235 queue->stats.tx_zerocopy_success++;
1236 else
1237 queue->stats.tx_zerocopy_fail++;
1238 xenvif_skb_zerocopy_complete(queue);
1239}
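
/* Note: the loop above relies on the chaining set up in
 * xenvif_fill_frags(): each callback_struct's ctx points at the
 * callback_struct of the next pending slot of the same packet (NULL
 * on the last one), so a single callback queues every slot of the skb
 * for deallocation.
 */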
1240
1241static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1242{
1243 struct gnttab_unmap_grant_ref *gop;
1244 pending_ring_idx_t dc, dp;
1245 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1246 unsigned int i = 0;
1247
1248 dc = queue->dealloc_cons;
1249 gop = queue->tx_unmap_ops;
1250
1251 /* Free up any grants we have finished using */
1252 do {
1253 dp = queue->dealloc_prod;
1254
1255 /* Ensure we see all indices enqueued by all
1256 * xenvif_zerocopy_callback().
1257 */
1258 smp_rmb();
1259
1260 while (dc != dp) {
1261 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1262 pending_idx =
1263 queue->dealloc_ring[pending_index(dc++)];
1264
1265 pending_idx_release[gop - queue->tx_unmap_ops] =
1266 pending_idx;
1267 queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1268 queue->mmap_pages[pending_idx];
1269 gnttab_set_unmap_op(gop,
1270 idx_to_kaddr(queue, pending_idx),
1271 GNTMAP_host_map,
1272 queue->grant_tx_handle[pending_idx]);
1273 xenvif_grant_handle_reset(queue, pending_idx);
1274 ++gop;
1275 }
1276
1277 } while (dp != queue->dealloc_prod);
1278
1279 queue->dealloc_cons = dc;
1280
1281 if (gop - queue->tx_unmap_ops > 0) {
1282 int ret;
1283 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1284 NULL,
1285 queue->pages_to_unmap,
1286 gop - queue->tx_unmap_ops);
1287 if (ret) {
1288 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1289 gop - queue->tx_unmap_ops, ret);
1290 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1291 if (gop[i].status != GNTST_okay)
1292 netdev_err(queue->vif->dev,
1293 " host_addr: 0x%llx handle: 0x%x status: %d\n",
1294 gop[i].host_addr,
1295 gop[i].handle,
1296 gop[i].status);
1297 }
1298 BUG();
1299 }
1300 }
1301
1302 for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1303 xenvif_idx_release(queue, pending_idx_release[i],
1304 XEN_NETIF_RSP_OKAY);
1305}
1306
1307
1308/* Called after netfront has transmitted */
1309int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1310{
1311 unsigned nr_mops, nr_cops = 0;
1312 int work_done, ret;
1313
1314 if (unlikely(!tx_work_todo(queue)))
1315 return 0;
1316
1317 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1318
1319 if (nr_cops == 0)
1320 return 0;
1321
1322 gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1323 if (nr_mops != 0) {
1324 ret = gnttab_map_refs(queue->tx_map_ops,
1325 NULL,
1326 queue->pages_to_map,
1327 nr_mops);
1328 BUG_ON(ret);
1329 }
1330
1331 work_done = xenvif_tx_submit(queue);
1332
1333 return work_done;
1334}
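
/* Note: this NAPI path only issues the batched grant copy/map
 * hypercalls and feeds the resulting skbs to the stack.  Unmapping the
 * granted frags happens later, from the per-queue dealloc kthread
 * (xenvif_dealloc_kthread), once xenvif_zerocopy_callback() has queued
 * the pending indices on the dealloc ring.
 */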
1335
1336static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1337 u8 status)
1338{
1339 struct pending_tx_info *pending_tx_info;
1340 pending_ring_idx_t index;
1341 unsigned long flags;
1342
1343 pending_tx_info = &queue->pending_tx_info[pending_idx];
1344
1345 spin_lock_irqsave(&queue->response_lock, flags);
1346
1347 make_tx_response(queue, &pending_tx_info->req,
1348 pending_tx_info->extra_count, status);
1349
1350	/* Release the pending index before pushing the Tx response so
1351	 * it's available before a new Tx request is pushed by the
1352 * frontend.
1353 */
1354 index = pending_index(queue->pending_prod++);
1355 queue->pending_ring[index] = pending_idx;
1356
1357 push_tx_responses(queue);
1358
1359 spin_unlock_irqrestore(&queue->response_lock, flags);
1360}
1361
1362
1363static void make_tx_response(struct xenvif_queue *queue,
1364 struct xen_netif_tx_request *txp,
1365 unsigned int extra_count,
1366 s8 st)
1367{
1368 RING_IDX i = queue->tx.rsp_prod_pvt;
1369 struct xen_netif_tx_response *resp;
1370
1371 resp = RING_GET_RESPONSE(&queue->tx, i);
1372 resp->id = txp->id;
1373 resp->status = st;
1374
1375 while (extra_count-- != 0)
1376 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1377
1378 queue->tx.rsp_prod_pvt = ++i;
1379}
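
/* Note: a request that carried N extra_info segments consumed N + 1
 * ring slots, so N XEN_NETIF_RSP_NULL responses are emitted after the
 * real one to keep the response producer in step with the request
 * consumer.
 */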
1380
1381static void push_tx_responses(struct xenvif_queue *queue)
1382{
1383 int notify;
1384
1385 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1386 if (notify)
1387 notify_remote_via_irq(queue->tx_irq);
1388}
1389
1390void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1391{
1392 int ret;
1393 struct gnttab_unmap_grant_ref tx_unmap_op;
1394
1395 gnttab_set_unmap_op(&tx_unmap_op,
1396 idx_to_kaddr(queue, pending_idx),
1397 GNTMAP_host_map,
1398 queue->grant_tx_handle[pending_idx]);
1399 xenvif_grant_handle_reset(queue, pending_idx);
1400
1401 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1402 &queue->mmap_pages[pending_idx], 1);
1403 if (ret) {
1404 netdev_err(queue->vif->dev,
1405 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1406 ret,
1407 pending_idx,
1408 tx_unmap_op.host_addr,
1409 tx_unmap_op.handle,
1410 tx_unmap_op.status);
1411 BUG();
1412 }
1413}
1414
1415static inline int tx_work_todo(struct xenvif_queue *queue)
1416{
1417 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1418 return 1;
1419
1420 return 0;
1421}
1422
1423static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1424{
1425 return queue->dealloc_cons != queue->dealloc_prod;
1426}
1427
1428void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
1429{
1430 if (queue->tx.sring)
1431 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1432 queue->tx.sring);
1433 if (queue->rx.sring)
1434 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1435 queue->rx.sring);
1436}
1437
1438int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
1439 grant_ref_t tx_ring_ref,
1440 grant_ref_t rx_ring_ref)
1441{
1442 void *addr;
1443 struct xen_netif_tx_sring *txs;
1444 struct xen_netif_rx_sring *rxs;
1445
1446 int err = -ENOMEM;
1447
1448 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1449 &tx_ring_ref, 1, &addr);
1450 if (err)
1451 goto err;
1452
1453 txs = (struct xen_netif_tx_sring *)addr;
1454 BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1455
1456 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1457 &rx_ring_ref, 1, &addr);
1458 if (err)
1459 goto err;
1460
1461 rxs = (struct xen_netif_rx_sring *)addr;
1462 BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1463
1464 return 0;
1465
1466err:
1467 xenvif_unmap_frontend_data_rings(queue);
1468 return err;
1469}
1470
1471static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1472{
1473 /* Dealloc thread must remain running until all inflight
1474 * packets complete.
1475 */
1476 return kthread_should_stop() &&
1477 !atomic_read(&queue->inflight_packets);
1478}
1479
1480int xenvif_dealloc_kthread(void *data)
1481{
1482 struct xenvif_queue *queue = data;
1483
1484 for (;;) {
1485 wait_event_interruptible(queue->dealloc_wq,
1486 tx_dealloc_work_todo(queue) ||
1487 xenvif_dealloc_kthread_should_stop(queue));
1488 if (xenvif_dealloc_kthread_should_stop(queue))
1489 break;
1490
1491 xenvif_tx_dealloc_action(queue);
1492 cond_resched();
1493 }
1494
1495	/* Unmap anything remaining. */
1496 if (tx_dealloc_work_todo(queue))
1497 xenvif_tx_dealloc_action(queue);
1498
1499 return 0;
1500}
1501
1502static void make_ctrl_response(struct xenvif *vif,
1503 const struct xen_netif_ctrl_request *req,
1504 u32 status, u32 data)
1505{
1506 RING_IDX idx = vif->ctrl.rsp_prod_pvt;
1507 struct xen_netif_ctrl_response rsp = {
1508 .id = req->id,
1509 .type = req->type,
1510 .status = status,
1511 .data = data,
1512 };
1513
1514 *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
1515 vif->ctrl.rsp_prod_pvt = ++idx;
1516}
1517
1518static void push_ctrl_response(struct xenvif *vif)
1519{
1520 int notify;
1521
1522 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
1523 if (notify)
1524 notify_remote_via_irq(vif->ctrl_irq);
1525}
1526
1527static void process_ctrl_request(struct xenvif *vif,
1528 const struct xen_netif_ctrl_request *req)
1529{
1530 u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
1531 u32 data = 0;
1532
1533 switch (req->type) {
1534 case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
1535 status = xenvif_set_hash_alg(vif, req->data[0]);
1536 break;
1537
1538 case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
1539 status = xenvif_get_hash_flags(vif, &data);
1540 break;
1541
1542 case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
1543 status = xenvif_set_hash_flags(vif, req->data[0]);
1544 break;
1545
1546 case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
1547 status = xenvif_set_hash_key(vif, req->data[0],
1548 req->data[1]);
1549 break;
1550
1551 case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
1552 status = XEN_NETIF_CTRL_STATUS_SUCCESS;
1553 data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
1554 break;
1555
1556 case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
1557 status = xenvif_set_hash_mapping_size(vif,
1558 req->data[0]);
1559 break;
1560
1561 case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
1562 status = xenvif_set_hash_mapping(vif, req->data[0],
1563 req->data[1],
1564 req->data[2]);
1565 break;
1566
1567 default:
1568 break;
1569 }
1570
1571 make_ctrl_response(vif, req, status, data);
1572 push_ctrl_response(vif);
1573}
1574
1575static void xenvif_ctrl_action(struct xenvif *vif)
1576{
1577 for (;;) {
1578 RING_IDX req_prod, req_cons;
1579
1580 req_prod = vif->ctrl.sring->req_prod;
1581 req_cons = vif->ctrl.req_cons;
1582
1583 /* Make sure we can see requests before we process them. */
1584 rmb();
1585
1586 if (req_cons == req_prod)
1587 break;
1588
1589 while (req_cons != req_prod) {
1590 struct xen_netif_ctrl_request req;
1591
1592 RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
1593 req_cons++;
1594
1595 process_ctrl_request(vif, &req);
1596 }
1597
1598 vif->ctrl.req_cons = req_cons;
1599 vif->ctrl.sring->req_event = req_cons + 1;
1600 }
1601}
1602
1603static bool xenvif_ctrl_work_todo(struct xenvif *vif)
1604{
1605 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
1606 return 1;
1607
1608 return 0;
1609}
1610
1611irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
1612{
1613 struct xenvif *vif = data;
1614
1615 while (xenvif_ctrl_work_todo(vif))
1616 xenvif_ctrl_action(vif);
1617
1618 return IRQ_HANDLED;
1619}
1620
1621static int __init netback_init(void)
1622{
1623 int rc = 0;
1624
1625 if (!xen_domain())
1626 return -ENODEV;
1627
1628	/* Allow as many queues as there are CPUs, but at most
1629	 * MAX_QUEUES_DEFAULT (8), if the user has not specified a value.
1630 */
1631 if (xenvif_max_queues == 0)
1632 xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
1633 num_online_cpus());
1634
1635 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1636 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1637 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1638 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1639 }
1640
1641 rc = xenvif_xenbus_init();
1642 if (rc)
1643 goto failed_init;
1644
1645#ifdef CONFIG_DEBUG_FS
1646 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
1647 if (IS_ERR_OR_NULL(xen_netback_dbg_root))
1648 pr_warn("Init of debugfs returned %ld!\n",
1649 PTR_ERR(xen_netback_dbg_root));
1650#endif /* CONFIG_DEBUG_FS */
1651
1652 return 0;
1653
1654failed_init:
1655 return rc;
1656}
1657
1658module_init(netback_init);
1659
1660static void __exit netback_fini(void)
1661{
1662#ifdef CONFIG_DEBUG_FS
1663 if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
1664 debugfs_remove_recursive(xen_netback_dbg_root);
1665#endif /* CONFIG_DEBUG_FS */
1666 xenvif_xenbus_fini();
1667}
1668module_exit(netback_fini);
1669
1670MODULE_LICENSE("Dual BSD/GPL");
1671MODULE_ALIAS("xen-backend:vif");