1/*
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
4 * by any operating system that implements a compatible front end. A
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
7 *
8 * Copyright (c) 2002-2005, K A Fraser
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35#include "common.h"
36
37#include <linux/kthread.h>
38#include <linux/if_vlan.h>
39#include <linux/udp.h>
40#include <linux/highmem.h>
41
42#include <net/tcp.h>
43
44#include <xen/xen.h>
45#include <xen/events.h>
46#include <xen/interface/memory.h>
47#include <xen/page.h>
48
49#include <asm/xen/hypercall.h>
50
51/* Provide an option to disable split event channels at load time as
 52 * event channels are a limited resource. Split event channels are
53 * enabled by default.
54 */
55bool separate_tx_rx_irq = true;
56module_param(separate_tx_rx_irq, bool, 0644);
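/* Illustrative usage (assumption, not taken from this file): split event
 * channels can be turned off when the backend domain is short on event
 * channels, either at load time:
 *
 *   modprobe xen-netback separate_tx_rx_irq=0
 *
 * or, for subsequently connected interfaces, at runtime:
 *
 *   echo 0 > /sys/module/xen_netback/parameters/separate_tx_rx_irq
 */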
57
58/* The time that packets can stay on the guest Rx internal queue
59 * before they are dropped.
60 */
61unsigned int rx_drain_timeout_msecs = 10000;
62module_param(rx_drain_timeout_msecs, uint, 0444);
63
64/* The length of time before the frontend is considered unresponsive
65 * because it isn't providing Rx slots.
66 */
67unsigned int rx_stall_timeout_msecs = 60000;
68module_param(rx_stall_timeout_msecs, uint, 0444);
69
70#define MAX_QUEUES_DEFAULT 8
71unsigned int xenvif_max_queues;
72module_param_named(max_queues, xenvif_max_queues, uint, 0644);
73MODULE_PARM_DESC(max_queues,
74 "Maximum number of queues per virtual interface");
75
76/*
 77 * This is the maximum number of slots an skb can have. If a guest sends an skb
 78 * which exceeds this limit, it is considered malicious.
79 */
80#define FATAL_SKB_SLOTS_DEFAULT 20
81static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
82module_param(fatal_skb_slots, uint, 0444);
83
84/* The amount to copy out of the first guest Tx slot into the skb's
85 * linear area. If the first slot has more data, it will be mapped
86 * and put into the first frag.
87 *
88 * This is sized to avoid pulling headers from the frags for most
89 * TCP/IP packets.
90 */
91#define XEN_NETBACK_TX_COPY_LEN 128
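/* Worked example (illustrative only): for a 1514-byte frame whose first Tx
 * slot carries the whole packet, the first XEN_NETBACK_TX_COPY_LEN (128)
 * bytes -- enough for typical Ethernet/IP/TCP headers -- are grant-copied
 * into the skb's linear area, while the remaining 1386 bytes of that slot
 * are grant-mapped and attached as frag 0 in xenvif_tx_build_gops().
 */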
92
93/* This is the maximum number of flows in the hash cache. */
94#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
95unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
96module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
97MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
98
 99/* This module parameter indicates that data for xen-netfront
 100 * must be placed at the XDP_PACKET_HEADROOM offset
 101 * required for XDP processing.
102 */
103bool provides_xdp_headroom = true;
104module_param(provides_xdp_headroom, bool, 0644);
105
106static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
107 u8 status);
108
109static void make_tx_response(struct xenvif_queue *queue,
110 struct xen_netif_tx_request *txp,
111 unsigned int extra_count,
112 s8 st);
113static void push_tx_responses(struct xenvif_queue *queue);
114
115static inline int tx_work_todo(struct xenvif_queue *queue);
116
117static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
118 u16 idx)
119{
120 return page_to_pfn(queue->mmap_pages[idx]);
121}
122
123static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
124 u16 idx)
125{
126 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
127}
128
129#define callback_param(vif, pending_idx) \
130 (vif->pending_tx_info[pending_idx].callback_struct)
131
 132/* Find the containing queue's structure from a pointer in the pending_tx_info array
133 */
134static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
135{
136 u16 pending_idx = ubuf->desc;
137 struct pending_tx_info *temp =
138 container_of(ubuf, struct pending_tx_info, callback_struct);
139 return container_of(temp - pending_idx,
140 struct xenvif_queue,
141 pending_tx_info[0]);
142}
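/* Sketch of the arithmetic above (illustrative, not driver code): ubuf->desc
 * is the pending_idx, which is also the index of the enclosing element in
 * pending_tx_info[], so:
 *
 *   struct pending_tx_info *info =
 *           container_of(ubuf, struct pending_tx_info, callback_struct);
 *   struct pending_tx_info *base = info - ubuf->desc;  // &pending_tx_info[0]
 *   struct xenvif_queue *q =
 *           container_of(base, struct xenvif_queue, pending_tx_info[0]);
 */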
143
144static u16 frag_get_pending_idx(skb_frag_t *frag)
145{
146 return (u16)skb_frag_off(frag);
147}
148
149static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
150{
151 skb_frag_off_set(frag, pending_idx);
152}
153
154static inline pending_ring_idx_t pending_index(unsigned i)
155{
156 return i & (MAX_PENDING_REQS-1);
157}
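/* Note: the mask above is equivalent to "i % MAX_PENDING_REQS" only because
 * MAX_PENDING_REQS is a power of two; this keeps free-running producer and
 * consumer counters cheap to fold into ring slots.  E.g. assuming
 * MAX_PENDING_REQS == 256:
 *
 *   pending_index(255) == 255, pending_index(256) == 0, pending_index(257) == 1
 */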
158
159void xenvif_kick_thread(struct xenvif_queue *queue)
160{
161 wake_up(&queue->wq);
162}
163
164void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
165{
166 int more_to_do;
167
168 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
169
170 if (more_to_do)
171 napi_schedule(&queue->napi);
172 else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
173 &queue->eoi_pending) &
174 (NETBK_TX_EOI | NETBK_COMMON_EOI))
175 xen_irq_lateeoi(queue->tx_irq, 0);
176}
177
178static void tx_add_credit(struct xenvif_queue *queue)
179{
180 unsigned long max_burst, max_credit;
181
182 /*
183 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
184 * Otherwise the interface can seize up due to insufficient credit.
185 */
186 max_burst = max(131072UL, queue->credit_bytes);
187
188 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
189 max_credit = queue->remaining_credit + queue->credit_bytes;
190 if (max_credit < queue->remaining_credit)
191 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
192
193 queue->remaining_credit = min(max_credit, max_burst);
194 queue->rate_limited = false;
195}
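/* Worked example (assumed values, for illustration): with credit_bytes =
 * 10000 and remaining_credit = 500, max_burst = max(131072, 10000) = 131072
 * and max_credit = 500 + 10000 = 10500, so remaining_credit becomes
 * min(10500, 131072) = 10500.  Only an addition that wraps is clamped to
 * ULONG_MAX before the min().
 */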
196
197void xenvif_tx_credit_callback(struct timer_list *t)
198{
199 struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
200 tx_add_credit(queue);
201 xenvif_napi_schedule_or_enable_events(queue);
202}
203
204static void xenvif_tx_err(struct xenvif_queue *queue,
205 struct xen_netif_tx_request *txp,
206 unsigned int extra_count, RING_IDX end)
207{
208 RING_IDX cons = queue->tx.req_cons;
209 unsigned long flags;
210
211 do {
212 spin_lock_irqsave(&queue->response_lock, flags);
213 make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
214 push_tx_responses(queue);
215 spin_unlock_irqrestore(&queue->response_lock, flags);
216 if (cons == end)
217 break;
218 RING_COPY_REQUEST(&queue->tx, cons++, txp);
219 extra_count = 0; /* only the first frag can have extras */
220 } while (1);
221 queue->tx.req_cons = cons;
222}
223
224static void xenvif_fatal_tx_err(struct xenvif *vif)
225{
226 netdev_err(vif->dev, "fatal error; disabling device\n");
227 vif->disabled = true;
228 /* Disable the vif from queue 0's kthread */
229 if (vif->num_queues)
230 xenvif_kick_thread(&vif->queues[0]);
231}
232
233static int xenvif_count_requests(struct xenvif_queue *queue,
234 struct xen_netif_tx_request *first,
235 unsigned int extra_count,
236 struct xen_netif_tx_request *txp,
237 int work_to_do)
238{
239 RING_IDX cons = queue->tx.req_cons;
240 int slots = 0;
241 int drop_err = 0;
242 int more_data;
243
244 if (!(first->flags & XEN_NETTXF_more_data))
245 return 0;
246
247 do {
248 struct xen_netif_tx_request dropped_tx = { 0 };
249
250 if (slots >= work_to_do) {
251 netdev_err(queue->vif->dev,
252 "Asked for %d slots but exceeds this limit\n",
253 work_to_do);
254 xenvif_fatal_tx_err(queue->vif);
255 return -ENODATA;
256 }
257
 258 /* This guest is really using too many slots and is
 259 * considered malicious.
260 */
261 if (unlikely(slots >= fatal_skb_slots)) {
262 netdev_err(queue->vif->dev,
263 "Malicious frontend using %d slots, threshold %u\n",
264 slots, fatal_skb_slots);
265 xenvif_fatal_tx_err(queue->vif);
266 return -E2BIG;
267 }
268
 269 /* The Xen network protocol had an implicit dependency on
 270 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
 271 * the historical MAX_SKB_FRAGS value 18 to honor the
 272 * same behavior as before. Any packet using more than
 273 * 18 slots but fewer than fatal_skb_slots slots is
 274 * dropped.
275 */
276 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
277 if (net_ratelimit())
278 netdev_dbg(queue->vif->dev,
279 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
280 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
281 drop_err = -E2BIG;
282 }
283
284 if (drop_err)
285 txp = &dropped_tx;
286
287 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
288
289 /* If the guest submitted a frame >= 64 KiB then
290 * first->size overflowed and following slots will
291 * appear to be larger than the frame.
292 *
 293 * This cannot be a fatal error as there are buggy
294 * frontends that do this.
295 *
296 * Consume all slots and drop the packet.
297 */
298 if (!drop_err && txp->size > first->size) {
299 if (net_ratelimit())
300 netdev_dbg(queue->vif->dev,
301 "Invalid tx request, slot size %u > remaining size %u\n",
302 txp->size, first->size);
303 drop_err = -EIO;
304 }
305
306 first->size -= txp->size;
307 slots++;
308
309 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
310 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
311 txp->offset, txp->size);
312 xenvif_fatal_tx_err(queue->vif);
313 return -EINVAL;
314 }
315
316 more_data = txp->flags & XEN_NETTXF_more_data;
317
318 if (!drop_err)
319 txp++;
320
321 } while (more_data);
322
323 if (drop_err) {
324 xenvif_tx_err(queue, first, extra_count, cons + slots);
325 return drop_err;
326 }
327
328 return slots;
329}
330
331
332struct xenvif_tx_cb {
333 u16 pending_idx;
334};
335
336#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
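/* Illustrative note: skb->cb is the 48-byte scratch area owned by the current
 * holder of the skb; netback only stashes the pending ring index of the slot
 * backing the linear area there, e.g.
 *
 *   XENVIF_TX_CB(skb)->pending_idx = pending_idx;   // in xenvif_tx_build_gops()
 *   pending_idx = XENVIF_TX_CB(skb)->pending_idx;   // in xenvif_tx_submit()
 */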
337
338static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
339 u16 pending_idx,
340 struct xen_netif_tx_request *txp,
341 unsigned int extra_count,
342 struct gnttab_map_grant_ref *mop)
343{
344 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
345 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
346 GNTMAP_host_map | GNTMAP_readonly,
347 txp->gref, queue->vif->domid);
348
349 memcpy(&queue->pending_tx_info[pending_idx].req, txp,
350 sizeof(*txp));
351 queue->pending_tx_info[pending_idx].extra_count = extra_count;
352}
353
354static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
355{
356 struct sk_buff *skb =
357 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
358 GFP_ATOMIC | __GFP_NOWARN);
359 if (unlikely(skb == NULL))
360 return NULL;
361
362 /* Packets passed to netif_rx() must have some headroom. */
363 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
364
365 /* Initialize it here to avoid later surprises */
366 skb_shinfo(skb)->destructor_arg = NULL;
367
368 return skb;
369}
370
371static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
372 struct sk_buff *skb,
373 struct xen_netif_tx_request *txp,
374 struct gnttab_map_grant_ref *gop,
375 unsigned int frag_overflow,
376 struct sk_buff *nskb)
377{
378 struct skb_shared_info *shinfo = skb_shinfo(skb);
379 skb_frag_t *frags = shinfo->frags;
380 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
381 int start;
382 pending_ring_idx_t index;
383 unsigned int nr_slots;
384
385 nr_slots = shinfo->nr_frags;
386
 387 /* Skip the first skb fragment if it is on the same page as the header fragment. */
388 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
389
390 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
391 shinfo->nr_frags++, txp++, gop++) {
392 index = pending_index(queue->pending_cons++);
393 pending_idx = queue->pending_ring[index];
394 xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
395 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
396 }
397
398 if (frag_overflow) {
399
400 shinfo = skb_shinfo(nskb);
401 frags = shinfo->frags;
402
403 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
404 shinfo->nr_frags++, txp++, gop++) {
405 index = pending_index(queue->pending_cons++);
406 pending_idx = queue->pending_ring[index];
407 xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
408 gop);
409 frag_set_pending_idx(&frags[shinfo->nr_frags],
410 pending_idx);
411 }
412
413 skb_shinfo(skb)->frag_list = nskb;
414 }
415
416 return gop;
417}
418
419static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
420 u16 pending_idx,
421 grant_handle_t handle)
422{
423 if (unlikely(queue->grant_tx_handle[pending_idx] !=
424 NETBACK_INVALID_HANDLE)) {
425 netdev_err(queue->vif->dev,
426 "Trying to overwrite active handle! pending_idx: 0x%x\n",
427 pending_idx);
428 BUG();
429 }
430 queue->grant_tx_handle[pending_idx] = handle;
431}
432
433static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
434 u16 pending_idx)
435{
436 if (unlikely(queue->grant_tx_handle[pending_idx] ==
437 NETBACK_INVALID_HANDLE)) {
438 netdev_err(queue->vif->dev,
439 "Trying to unmap invalid handle! pending_idx: 0x%x\n",
440 pending_idx);
441 BUG();
442 }
443 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
444}
445
446static int xenvif_tx_check_gop(struct xenvif_queue *queue,
447 struct sk_buff *skb,
448 struct gnttab_map_grant_ref **gopp_map,
449 struct gnttab_copy **gopp_copy)
450{
451 struct gnttab_map_grant_ref *gop_map = *gopp_map;
452 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
453 /* This always points to the shinfo of the skb being checked, which
454 * could be either the first or the one on the frag_list
455 */
456 struct skb_shared_info *shinfo = skb_shinfo(skb);
457 /* If this is non-NULL, we are currently checking the frag_list skb, and
458 * this points to the shinfo of the first one
459 */
460 struct skb_shared_info *first_shinfo = NULL;
461 int nr_frags = shinfo->nr_frags;
462 const bool sharedslot = nr_frags &&
463 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
464 int i, err;
465
466 /* Check status of header. */
467 err = (*gopp_copy)->status;
468 if (unlikely(err)) {
469 if (net_ratelimit())
470 netdev_dbg(queue->vif->dev,
471 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
472 (*gopp_copy)->status,
473 pending_idx,
474 (*gopp_copy)->source.u.ref);
475 /* The first frag might still have this slot mapped */
476 if (!sharedslot)
477 xenvif_idx_release(queue, pending_idx,
478 XEN_NETIF_RSP_ERROR);
479 }
480 (*gopp_copy)++;
481
482check_frags:
483 for (i = 0; i < nr_frags; i++, gop_map++) {
484 int j, newerr;
485
486 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
487
488 /* Check error status: if okay then remember grant handle. */
489 newerr = gop_map->status;
490
491 if (likely(!newerr)) {
492 xenvif_grant_handle_set(queue,
493 pending_idx,
494 gop_map->handle);
495 /* Had a previous error? Invalidate this fragment. */
496 if (unlikely(err)) {
497 xenvif_idx_unmap(queue, pending_idx);
498 /* If the mapping of the first frag was OK, but
499 * the header's copy failed, and they are
500 * sharing a slot, send an error
501 */
502 if (i == 0 && !first_shinfo && sharedslot)
503 xenvif_idx_release(queue, pending_idx,
504 XEN_NETIF_RSP_ERROR);
505 else
506 xenvif_idx_release(queue, pending_idx,
507 XEN_NETIF_RSP_OKAY);
508 }
509 continue;
510 }
511
512 /* Error on this fragment: respond to client with an error. */
513 if (net_ratelimit())
514 netdev_dbg(queue->vif->dev,
515 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
516 i,
517 gop_map->status,
518 pending_idx,
519 gop_map->ref);
520
521 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
522
523 /* Not the first error? Preceding frags already invalidated. */
524 if (err)
525 continue;
526
 527 /* First error: if the header hasn't shared a slot with the
528 * first frag, release it as well.
529 */
530 if (!sharedslot)
531 xenvif_idx_release(queue,
532 XENVIF_TX_CB(skb)->pending_idx,
533 XEN_NETIF_RSP_OKAY);
534
535 /* Invalidate preceding fragments of this skb. */
536 for (j = 0; j < i; j++) {
537 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
538 xenvif_idx_unmap(queue, pending_idx);
539 xenvif_idx_release(queue, pending_idx,
540 XEN_NETIF_RSP_OKAY);
541 }
542
543 /* And if we found the error while checking the frag_list, unmap
544 * the first skb's frags
545 */
546 if (first_shinfo) {
547 for (j = 0; j < first_shinfo->nr_frags; j++) {
548 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
549 xenvif_idx_unmap(queue, pending_idx);
550 xenvif_idx_release(queue, pending_idx,
551 XEN_NETIF_RSP_OKAY);
552 }
553 }
554
555 /* Remember the error: invalidate all subsequent fragments. */
556 err = newerr;
557 }
558
559 if (skb_has_frag_list(skb) && !first_shinfo) {
560 first_shinfo = shinfo;
561 shinfo = skb_shinfo(shinfo->frag_list);
562 nr_frags = shinfo->nr_frags;
563
564 goto check_frags;
565 }
566
567 *gopp_map = gop_map;
568 return err;
569}
570
571static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
572{
573 struct skb_shared_info *shinfo = skb_shinfo(skb);
574 int nr_frags = shinfo->nr_frags;
575 int i;
576 u16 prev_pending_idx = INVALID_PENDING_IDX;
577
578 for (i = 0; i < nr_frags; i++) {
579 skb_frag_t *frag = shinfo->frags + i;
580 struct xen_netif_tx_request *txp;
581 struct page *page;
582 u16 pending_idx;
583
584 pending_idx = frag_get_pending_idx(frag);
585
 586 /* If this is not the first frag, chain it to the previous one. */
587 if (prev_pending_idx == INVALID_PENDING_IDX)
588 skb_shinfo(skb)->destructor_arg =
589 &callback_param(queue, pending_idx);
590 else
591 callback_param(queue, prev_pending_idx).ctx =
592 &callback_param(queue, pending_idx);
593
594 callback_param(queue, pending_idx).ctx = NULL;
595 prev_pending_idx = pending_idx;
596
597 txp = &queue->pending_tx_info[pending_idx].req;
598 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
599 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
600 skb->len += txp->size;
601 skb->data_len += txp->size;
602 skb->truesize += txp->size;
603
 604 /* Take an extra reference to offset the network stack's put_page() */
605 get_page(queue->mmap_pages[pending_idx]);
606 }
607}
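/* Illustrative result of the chaining above for a three-frag skb with pending
 * indices a, b and c:
 *
 *   skb_shinfo(skb)->destructor_arg --> callback_param(queue, a)
 *   callback_param(queue, a).ctx    --> callback_param(queue, b)
 *   callback_param(queue, b).ctx    --> callback_param(queue, c)
 *   callback_param(queue, c).ctx    --> NULL
 *
 * xenvif_zerocopy_callback() later walks this ->ctx chain and queues each
 * pending index on the dealloc ring.
 */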
608
609static int xenvif_get_extras(struct xenvif_queue *queue,
610 struct xen_netif_extra_info *extras,
611 unsigned int *extra_count,
612 int work_to_do)
613{
614 struct xen_netif_extra_info extra;
615 RING_IDX cons = queue->tx.req_cons;
616
617 do {
618 if (unlikely(work_to_do-- <= 0)) {
619 netdev_err(queue->vif->dev, "Missing extra info\n");
620 xenvif_fatal_tx_err(queue->vif);
621 return -EBADR;
622 }
623
624 RING_COPY_REQUEST(&queue->tx, cons, &extra);
625
626 queue->tx.req_cons = ++cons;
627 (*extra_count)++;
628
629 if (unlikely(!extra.type ||
630 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
631 netdev_err(queue->vif->dev,
632 "Invalid extra type: %d\n", extra.type);
633 xenvif_fatal_tx_err(queue->vif);
634 return -EINVAL;
635 }
636
637 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
638 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
639
640 return work_to_do;
641}
642
643static int xenvif_set_skb_gso(struct xenvif *vif,
644 struct sk_buff *skb,
645 struct xen_netif_extra_info *gso)
646{
647 if (!gso->u.gso.size) {
648 netdev_err(vif->dev, "GSO size must not be zero.\n");
649 xenvif_fatal_tx_err(vif);
650 return -EINVAL;
651 }
652
653 switch (gso->u.gso.type) {
654 case XEN_NETIF_GSO_TYPE_TCPV4:
655 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
656 break;
657 case XEN_NETIF_GSO_TYPE_TCPV6:
658 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
659 break;
660 default:
661 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
662 xenvif_fatal_tx_err(vif);
663 return -EINVAL;
664 }
665
666 skb_shinfo(skb)->gso_size = gso->u.gso.size;
667 /* gso_segs will be calculated later */
668
669 return 0;
670}
671
672static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
673{
674 bool recalculate_partial_csum = false;
675
676 /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
677 * peers can fail to set NETRXF_csum_blank when sending a GSO
678 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
679 * recalculate the partial checksum.
680 */
681 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
682 queue->stats.rx_gso_checksum_fixup++;
683 skb->ip_summed = CHECKSUM_PARTIAL;
684 recalculate_partial_csum = true;
685 }
686
687 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
688 if (skb->ip_summed != CHECKSUM_PARTIAL)
689 return 0;
690
691 return skb_checksum_setup(skb, recalculate_partial_csum);
692}
693
694static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
695{
696 u64 now = get_jiffies_64();
697 u64 next_credit = queue->credit_window_start +
698 msecs_to_jiffies(queue->credit_usec / 1000);
699
700 /* Timer could already be pending in rare cases. */
701 if (timer_pending(&queue->credit_timeout)) {
702 queue->rate_limited = true;
703 return true;
704 }
705
706 /* Passed the point where we can replenish credit? */
707 if (time_after_eq64(now, next_credit)) {
708 queue->credit_window_start = now;
709 tx_add_credit(queue);
710 }
711
712 /* Still too big to send right now? Set a callback. */
713 if (size > queue->remaining_credit) {
714 mod_timer(&queue->credit_timeout,
715 next_credit);
716 queue->credit_window_start = next_credit;
717 queue->rate_limited = true;
718
719 return true;
720 }
721
722 return false;
723}
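/* Illustrative example (assumed configuration): with credit_bytes = 100000
 * and credit_usec = 10000 the guest may send at most 100000 bytes per 10 ms
 * window (roughly 10 MB/s sustained).  A request larger than the remaining
 * credit arms credit_timeout for the start of the next window, and
 * xenvif_tx_credit_callback() then refills the credit and re-schedules NAPI.
 */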
724
725/* No locking is required in xenvif_mcast_add/del() as they are
726 * only ever invoked from NAPI poll. An RCU list is used because
727 * xenvif_mcast_match() is called asynchronously, during start_xmit.
728 */
729
730static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
731{
732 struct xenvif_mcast_addr *mcast;
733
734 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
735 if (net_ratelimit())
736 netdev_err(vif->dev,
737 "Too many multicast addresses\n");
738 return -ENOSPC;
739 }
740
741 mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
742 if (!mcast)
743 return -ENOMEM;
744
745 ether_addr_copy(mcast->addr, addr);
746 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
747 vif->fe_mcast_count++;
748
749 return 0;
750}
751
752static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
753{
754 struct xenvif_mcast_addr *mcast;
755
756 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
757 if (ether_addr_equal(addr, mcast->addr)) {
758 --vif->fe_mcast_count;
759 list_del_rcu(&mcast->entry);
760 kfree_rcu(mcast, rcu);
761 break;
762 }
763 }
764}
765
766bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
767{
768 struct xenvif_mcast_addr *mcast;
769
770 rcu_read_lock();
771 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
772 if (ether_addr_equal(addr, mcast->addr)) {
773 rcu_read_unlock();
774 return true;
775 }
776 }
777 rcu_read_unlock();
778
779 return false;
780}
781
782void xenvif_mcast_addr_list_free(struct xenvif *vif)
783{
784 /* No need for locking or RCU here. NAPI poll and TX queue
785 * are stopped.
786 */
787 while (!list_empty(&vif->fe_mcast_addr)) {
788 struct xenvif_mcast_addr *mcast;
789
790 mcast = list_first_entry(&vif->fe_mcast_addr,
791 struct xenvif_mcast_addr,
792 entry);
793 --vif->fe_mcast_count;
794 list_del(&mcast->entry);
795 kfree(mcast);
796 }
797}
798
799static void xenvif_tx_build_gops(struct xenvif_queue *queue,
800 int budget,
801 unsigned *copy_ops,
802 unsigned *map_ops)
803{
804 struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
805 struct sk_buff *skb, *nskb;
806 int ret;
807 unsigned int frag_overflow;
808
809 while (skb_queue_len(&queue->tx_queue) < budget) {
810 struct xen_netif_tx_request txreq;
811 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
812 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
813 unsigned int extra_count;
814 u16 pending_idx;
815 RING_IDX idx;
816 int work_to_do;
817 unsigned int data_len;
818 pending_ring_idx_t index;
819
820 if (queue->tx.sring->req_prod - queue->tx.req_cons >
821 XEN_NETIF_TX_RING_SIZE) {
822 netdev_err(queue->vif->dev,
823 "Impossible number of requests. "
824 "req_prod %d, req_cons %d, size %ld\n",
825 queue->tx.sring->req_prod, queue->tx.req_cons,
826 XEN_NETIF_TX_RING_SIZE);
827 xenvif_fatal_tx_err(queue->vif);
828 break;
829 }
830
831 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
832 if (!work_to_do)
833 break;
834
835 idx = queue->tx.req_cons;
836 rmb(); /* Ensure that we see the request before we copy it. */
837 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
838
839 /* Credit-based scheduling. */
840 if (txreq.size > queue->remaining_credit &&
841 tx_credit_exceeded(queue, txreq.size))
842 break;
843
844 queue->remaining_credit -= txreq.size;
845
846 work_to_do--;
847 queue->tx.req_cons = ++idx;
848
849 memset(extras, 0, sizeof(extras));
850 extra_count = 0;
851 if (txreq.flags & XEN_NETTXF_extra_info) {
852 work_to_do = xenvif_get_extras(queue, extras,
853 &extra_count,
854 work_to_do);
855 idx = queue->tx.req_cons;
856 if (unlikely(work_to_do < 0))
857 break;
858 }
859
860 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
861 struct xen_netif_extra_info *extra;
862
863 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
864 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
865
866 make_tx_response(queue, &txreq, extra_count,
867 (ret == 0) ?
868 XEN_NETIF_RSP_OKAY :
869 XEN_NETIF_RSP_ERROR);
870 push_tx_responses(queue);
871 continue;
872 }
873
874 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
875 struct xen_netif_extra_info *extra;
876
877 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
878 xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
879
880 make_tx_response(queue, &txreq, extra_count,
881 XEN_NETIF_RSP_OKAY);
882 push_tx_responses(queue);
883 continue;
884 }
885
886 ret = xenvif_count_requests(queue, &txreq, extra_count,
887 txfrags, work_to_do);
888 if (unlikely(ret < 0))
889 break;
890
891 idx += ret;
892
893 if (unlikely(txreq.size < ETH_HLEN)) {
894 netdev_dbg(queue->vif->dev,
895 "Bad packet size: %d\n", txreq.size);
896 xenvif_tx_err(queue, &txreq, extra_count, idx);
897 break;
898 }
899
 900 /* The request must not cross a page boundary, as the payload must not be fragmented. */
901 if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
902 netdev_err(queue->vif->dev,
903 "txreq.offset: %u, size: %u, end: %lu\n",
904 txreq.offset, txreq.size,
905 (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
906 xenvif_fatal_tx_err(queue->vif);
907 break;
908 }
909
910 index = pending_index(queue->pending_cons);
911 pending_idx = queue->pending_ring[index];
912
913 data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
914 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
915 XEN_NETBACK_TX_COPY_LEN : txreq.size;
916
917 skb = xenvif_alloc_skb(data_len);
918 if (unlikely(skb == NULL)) {
919 netdev_dbg(queue->vif->dev,
920 "Can't allocate a skb in start_xmit.\n");
921 xenvif_tx_err(queue, &txreq, extra_count, idx);
922 break;
923 }
924
925 skb_shinfo(skb)->nr_frags = ret;
926 if (data_len < txreq.size)
927 skb_shinfo(skb)->nr_frags++;
928 /* At this point shinfo->nr_frags is in fact the number of
929 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
930 */
931 frag_overflow = 0;
932 nskb = NULL;
933 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
934 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
935 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
936 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
937 nskb = xenvif_alloc_skb(0);
938 if (unlikely(nskb == NULL)) {
939 skb_shinfo(skb)->nr_frags = 0;
940 kfree_skb(skb);
941 xenvif_tx_err(queue, &txreq, extra_count, idx);
942 if (net_ratelimit())
943 netdev_err(queue->vif->dev,
944 "Can't allocate the frag_list skb.\n");
945 break;
946 }
947 }
948
949 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
950 struct xen_netif_extra_info *gso;
951 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
952
953 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
954 /* Failure in xenvif_set_skb_gso is fatal. */
955 skb_shinfo(skb)->nr_frags = 0;
956 kfree_skb(skb);
957 kfree_skb(nskb);
958 break;
959 }
960 }
961
962 if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
963 struct xen_netif_extra_info *extra;
964 enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
965
966 extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
967
968 switch (extra->u.hash.type) {
969 case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
970 case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
971 type = PKT_HASH_TYPE_L3;
972 break;
973
974 case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
975 case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
976 type = PKT_HASH_TYPE_L4;
977 break;
978
979 default:
980 break;
981 }
982
983 if (type != PKT_HASH_TYPE_NONE)
984 skb_set_hash(skb,
985 *(u32 *)extra->u.hash.value,
986 type);
987 }
988
989 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
990
991 __skb_put(skb, data_len);
992 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
993 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
994 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
995
996 queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
997 virt_to_gfn(skb->data);
998 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
999 queue->tx_copy_ops[*copy_ops].dest.offset =
1000 offset_in_page(skb->data) & ~XEN_PAGE_MASK;
1001
1002 queue->tx_copy_ops[*copy_ops].len = data_len;
1003 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1004
1005 (*copy_ops)++;
1006
1007 if (data_len < txreq.size) {
1008 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1009 pending_idx);
1010 xenvif_tx_create_map_op(queue, pending_idx, &txreq,
1011 extra_count, gop);
1012 gop++;
1013 } else {
1014 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1015 INVALID_PENDING_IDX);
1016 memcpy(&queue->pending_tx_info[pending_idx].req,
1017 &txreq, sizeof(txreq));
1018 queue->pending_tx_info[pending_idx].extra_count =
1019 extra_count;
1020 }
1021
1022 queue->pending_cons++;
1023
1024 gop = xenvif_get_requests(queue, skb, txfrags, gop,
1025 frag_overflow, nskb);
1026
1027 __skb_queue_tail(&queue->tx_queue, skb);
1028
1029 queue->tx.req_cons = idx;
1030
1031 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1032 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1033 break;
1034 }
1035
1036 (*map_ops) = gop - queue->tx_map_ops;
1037 return;
1038}
1039
1040/* Consolidate skb with a frag_list into a brand new one with local pages on
 1041 * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
1042 */
1043static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1044{
1045 unsigned int offset = skb_headlen(skb);
1046 skb_frag_t frags[MAX_SKB_FRAGS];
1047 int i, f;
1048 struct ubuf_info *uarg;
1049 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1050
1051 queue->stats.tx_zerocopy_sent += 2;
1052 queue->stats.tx_frag_overflow++;
1053
1054 xenvif_fill_frags(queue, nskb);
1055 /* Subtract frags size, we will correct it later */
1056 skb->truesize -= skb->data_len;
1057 skb->len += nskb->len;
1058 skb->data_len += nskb->len;
1059
1060 /* create a brand new frags array and coalesce there */
1061 for (i = 0; offset < skb->len; i++) {
1062 struct page *page;
1063 unsigned int len;
1064
1065 BUG_ON(i >= MAX_SKB_FRAGS);
1066 page = alloc_page(GFP_ATOMIC);
1067 if (!page) {
1068 int j;
1069 skb->truesize += skb->data_len;
1070 for (j = 0; j < i; j++)
1071 put_page(skb_frag_page(&frags[j]));
1072 return -ENOMEM;
1073 }
1074
1075 if (offset + PAGE_SIZE < skb->len)
1076 len = PAGE_SIZE;
1077 else
1078 len = skb->len - offset;
1079 if (skb_copy_bits(skb, offset, page_address(page), len))
1080 BUG();
1081
1082 offset += len;
1083 __skb_frag_set_page(&frags[i], page);
1084 skb_frag_off_set(&frags[i], 0);
1085 skb_frag_size_set(&frags[i], len);
1086 }
1087
1088 /* Release all the original (foreign) frags. */
1089 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1090 skb_frag_unref(skb, f);
1091 uarg = skb_shinfo(skb)->destructor_arg;
1092 /* increase inflight counter to offset decrement in callback */
1093 atomic_inc(&queue->inflight_packets);
1094 uarg->callback(NULL, uarg, true);
1095 skb_shinfo(skb)->destructor_arg = NULL;
1096
1097 /* Fill the skb with the new (local) frags. */
1098 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1099 skb_shinfo(skb)->nr_frags = i;
1100 skb->truesize += i * PAGE_SIZE;
1101
1102 return 0;
1103}
1104
1105static int xenvif_tx_submit(struct xenvif_queue *queue)
1106{
1107 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1108 struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1109 struct sk_buff *skb;
1110 int work_done = 0;
1111
1112 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1113 struct xen_netif_tx_request *txp;
1114 u16 pending_idx;
1115 unsigned data_len;
1116
1117 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1118 txp = &queue->pending_tx_info[pending_idx].req;
1119
1120 /* Check the remap error code. */
1121 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1122 /* If there was an error, xenvif_tx_check_gop is
1123 * expected to release all the frags which were mapped,
1124 * so kfree_skb shouldn't do it again
1125 */
1126 skb_shinfo(skb)->nr_frags = 0;
1127 if (skb_has_frag_list(skb)) {
1128 struct sk_buff *nskb =
1129 skb_shinfo(skb)->frag_list;
1130 skb_shinfo(nskb)->nr_frags = 0;
1131 }
1132 kfree_skb(skb);
1133 continue;
1134 }
1135
1136 data_len = skb->len;
1137 callback_param(queue, pending_idx).ctx = NULL;
1138 if (data_len < txp->size) {
1139 /* Append the packet payload as a fragment. */
1140 txp->offset += data_len;
1141 txp->size -= data_len;
1142 } else {
1143 /* Schedule a response immediately. */
1144 xenvif_idx_release(queue, pending_idx,
1145 XEN_NETIF_RSP_OKAY);
1146 }
1147
1148 if (txp->flags & XEN_NETTXF_csum_blank)
1149 skb->ip_summed = CHECKSUM_PARTIAL;
1150 else if (txp->flags & XEN_NETTXF_data_validated)
1151 skb->ip_summed = CHECKSUM_UNNECESSARY;
1152
1153 xenvif_fill_frags(queue, skb);
1154
1155 if (unlikely(skb_has_frag_list(skb))) {
1156 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1157 xenvif_skb_zerocopy_prepare(queue, nskb);
1158 if (xenvif_handle_frag_list(queue, skb)) {
1159 if (net_ratelimit())
1160 netdev_err(queue->vif->dev,
1161 "Not enough memory to consolidate frag_list!\n");
1162 xenvif_skb_zerocopy_prepare(queue, skb);
1163 kfree_skb(skb);
1164 continue;
1165 }
1166 /* Copied all the bits from the frag list -- free it. */
1167 skb_frag_list_init(skb);
1168 kfree_skb(nskb);
1169 }
1170
1171 skb->dev = queue->vif->dev;
1172 skb->protocol = eth_type_trans(skb, skb->dev);
1173 skb_reset_network_header(skb);
1174
1175 if (checksum_setup(queue, skb)) {
1176 netdev_dbg(queue->vif->dev,
1177 "Can't setup checksum in net_tx_action\n");
1178 /* We have to set this flag to trigger the callback */
1179 if (skb_shinfo(skb)->destructor_arg)
1180 xenvif_skb_zerocopy_prepare(queue, skb);
1181 kfree_skb(skb);
1182 continue;
1183 }
1184
1185 skb_probe_transport_header(skb);
1186
1187 /* If the packet is GSO then we will have just set up the
1188 * transport header offset in checksum_setup so it's now
1189 * straightforward to calculate gso_segs.
1190 */
1191 if (skb_is_gso(skb)) {
1192 int mss, hdrlen;
1193
1194 /* GSO implies having the L4 header. */
1195 WARN_ON_ONCE(!skb_transport_header_was_set(skb));
1196 if (unlikely(!skb_transport_header_was_set(skb))) {
1197 kfree_skb(skb);
1198 continue;
1199 }
1200
1201 mss = skb_shinfo(skb)->gso_size;
1202 hdrlen = skb_transport_header(skb) -
1203 skb_mac_header(skb) +
1204 tcp_hdrlen(skb);
1205
1206 skb_shinfo(skb)->gso_segs =
1207 DIV_ROUND_UP(skb->len - hdrlen, mss);
1208 }
1209
1210 queue->stats.rx_bytes += skb->len;
1211 queue->stats.rx_packets++;
1212
1213 work_done++;
1214
1215 /* Set this flag right before netif_receive_skb, otherwise
1216 * someone might think this packet already left netback, and
1217 * do a skb_copy_ubufs while we are still in control of the
 1218 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
1219 */
1220 if (skb_shinfo(skb)->destructor_arg) {
1221 xenvif_skb_zerocopy_prepare(queue, skb);
1222 queue->stats.tx_zerocopy_sent++;
1223 }
1224
1225 netif_receive_skb(skb);
1226 }
1227
1228 return work_done;
1229}
1230
1231void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
1232 bool zerocopy_success)
1233{
1234 unsigned long flags;
1235 pending_ring_idx_t index;
1236 struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1237
1238 /* This is the only place where we grab this lock, to protect callbacks
1239 * from each other.
1240 */
1241 spin_lock_irqsave(&queue->callback_lock, flags);
1242 do {
1243 u16 pending_idx = ubuf->desc;
1244 ubuf = (struct ubuf_info *) ubuf->ctx;
1245 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1246 MAX_PENDING_REQS);
1247 index = pending_index(queue->dealloc_prod);
1248 queue->dealloc_ring[index] = pending_idx;
1249 /* Sync with xenvif_tx_dealloc_action:
1250 * insert idx then incr producer.
1251 */
1252 smp_wmb();
1253 queue->dealloc_prod++;
1254 } while (ubuf);
1255 spin_unlock_irqrestore(&queue->callback_lock, flags);
1256
1257 if (likely(zerocopy_success))
1258 queue->stats.tx_zerocopy_success++;
1259 else
1260 queue->stats.tx_zerocopy_fail++;
1261 xenvif_skb_zerocopy_complete(queue);
1262}
1263
1264static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1265{
1266 struct gnttab_unmap_grant_ref *gop;
1267 pending_ring_idx_t dc, dp;
1268 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1269 unsigned int i = 0;
1270
1271 dc = queue->dealloc_cons;
1272 gop = queue->tx_unmap_ops;
1273
1274 /* Free up any grants we have finished using */
1275 do {
1276 dp = queue->dealloc_prod;
1277
1278 /* Ensure we see all indices enqueued by all
1279 * xenvif_zerocopy_callback().
1280 */
1281 smp_rmb();
1282
1283 while (dc != dp) {
1284 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1285 pending_idx =
1286 queue->dealloc_ring[pending_index(dc++)];
1287
1288 pending_idx_release[gop - queue->tx_unmap_ops] =
1289 pending_idx;
1290 queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1291 queue->mmap_pages[pending_idx];
1292 gnttab_set_unmap_op(gop,
1293 idx_to_kaddr(queue, pending_idx),
1294 GNTMAP_host_map,
1295 queue->grant_tx_handle[pending_idx]);
1296 xenvif_grant_handle_reset(queue, pending_idx);
1297 ++gop;
1298 }
1299
1300 } while (dp != queue->dealloc_prod);
1301
1302 queue->dealloc_cons = dc;
1303
1304 if (gop - queue->tx_unmap_ops > 0) {
1305 int ret;
1306 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1307 NULL,
1308 queue->pages_to_unmap,
1309 gop - queue->tx_unmap_ops);
1310 if (ret) {
1311 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1312 gop - queue->tx_unmap_ops, ret);
1313 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1314 if (gop[i].status != GNTST_okay)
1315 netdev_err(queue->vif->dev,
1316 " host_addr: 0x%llx handle: 0x%x status: %d\n",
1317 gop[i].host_addr,
1318 gop[i].handle,
1319 gop[i].status);
1320 }
1321 BUG();
1322 }
1323 }
1324
1325 for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1326 xenvif_idx_release(queue, pending_idx_release[i],
1327 XEN_NETIF_RSP_OKAY);
1328}
1329
1330
1331/* Called after netfront has transmitted */
1332int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1333{
1334 unsigned nr_mops, nr_cops = 0;
1335 int work_done, ret;
1336
1337 if (unlikely(!tx_work_todo(queue)))
1338 return 0;
1339
1340 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1341
1342 if (nr_cops == 0)
1343 return 0;
1344
1345 gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1346 if (nr_mops != 0) {
1347 ret = gnttab_map_refs(queue->tx_map_ops,
1348 NULL,
1349 queue->pages_to_map,
1350 nr_mops);
1351 if (ret) {
1352 unsigned int i;
1353
1354 netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
1355 nr_mops, ret);
1356 for (i = 0; i < nr_mops; ++i)
1357 WARN_ON_ONCE(queue->tx_map_ops[i].status ==
1358 GNTST_okay);
1359 }
1360 }
1361
1362 work_done = xenvif_tx_submit(queue);
1363
1364 return work_done;
1365}
1366
1367static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1368 u8 status)
1369{
1370 struct pending_tx_info *pending_tx_info;
1371 pending_ring_idx_t index;
1372 unsigned long flags;
1373
1374 pending_tx_info = &queue->pending_tx_info[pending_idx];
1375
1376 spin_lock_irqsave(&queue->response_lock, flags);
1377
1378 make_tx_response(queue, &pending_tx_info->req,
1379 pending_tx_info->extra_count, status);
1380
 1381 /* Release the pending index before pushing the Tx response so
 1382 * it's available before a new Tx request is pushed by the
1383 * frontend.
1384 */
1385 index = pending_index(queue->pending_prod++);
1386 queue->pending_ring[index] = pending_idx;
1387
1388 push_tx_responses(queue);
1389
1390 spin_unlock_irqrestore(&queue->response_lock, flags);
1391}
1392
1393
1394static void make_tx_response(struct xenvif_queue *queue,
1395 struct xen_netif_tx_request *txp,
1396 unsigned int extra_count,
1397 s8 st)
1398{
1399 RING_IDX i = queue->tx.rsp_prod_pvt;
1400 struct xen_netif_tx_response *resp;
1401
1402 resp = RING_GET_RESPONSE(&queue->tx, i);
1403 resp->id = txp->id;
1404 resp->status = st;
1405
1406 while (extra_count-- != 0)
1407 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1408
1409 queue->tx.rsp_prod_pvt = ++i;
1410}
1411
1412static void push_tx_responses(struct xenvif_queue *queue)
1413{
1414 int notify;
1415
1416 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1417 if (notify)
1418 notify_remote_via_irq(queue->tx_irq);
1419}
1420
1421void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1422{
1423 int ret;
1424 struct gnttab_unmap_grant_ref tx_unmap_op;
1425
1426 gnttab_set_unmap_op(&tx_unmap_op,
1427 idx_to_kaddr(queue, pending_idx),
1428 GNTMAP_host_map,
1429 queue->grant_tx_handle[pending_idx]);
1430 xenvif_grant_handle_reset(queue, pending_idx);
1431
1432 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1433 &queue->mmap_pages[pending_idx], 1);
1434 if (ret) {
1435 netdev_err(queue->vif->dev,
1436 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1437 ret,
1438 pending_idx,
1439 tx_unmap_op.host_addr,
1440 tx_unmap_op.handle,
1441 tx_unmap_op.status);
1442 BUG();
1443 }
1444}
1445
1446static inline int tx_work_todo(struct xenvif_queue *queue)
1447{
1448 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1449 return 1;
1450
1451 return 0;
1452}
1453
1454static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1455{
1456 return queue->dealloc_cons != queue->dealloc_prod;
1457}
1458
1459void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
1460{
1461 if (queue->tx.sring)
1462 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1463 queue->tx.sring);
1464 if (queue->rx.sring)
1465 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1466 queue->rx.sring);
1467}
1468
1469int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
1470 grant_ref_t tx_ring_ref,
1471 grant_ref_t rx_ring_ref)
1472{
1473 void *addr;
1474 struct xen_netif_tx_sring *txs;
1475 struct xen_netif_rx_sring *rxs;
1476 RING_IDX rsp_prod, req_prod;
1477 int err = -ENOMEM;
1478
1479 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1480 &tx_ring_ref, 1, &addr);
1481 if (err)
1482 goto err;
1483
1484 txs = (struct xen_netif_tx_sring *)addr;
1485 rsp_prod = READ_ONCE(txs->rsp_prod);
1486 req_prod = READ_ONCE(txs->req_prod);
1487
1488 BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
1489
1490 err = -EIO;
1491 if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
1492 goto err;
1493
1494 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1495 &rx_ring_ref, 1, &addr);
1496 if (err)
1497 goto err;
1498
1499 rxs = (struct xen_netif_rx_sring *)addr;
1500 rsp_prod = READ_ONCE(rxs->rsp_prod);
1501 req_prod = READ_ONCE(rxs->req_prod);
1502
1503 BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
1504
1505 err = -EIO;
1506 if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
1507 goto err;
1508
1509 return 0;
1510
1511err:
1512 xenvif_unmap_frontend_data_rings(queue);
1513 return err;
1514}
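/* Illustrative note: the req_prod - rsp_prod checks above reject a frontend
 * whose shared ring already claims more outstanding requests than the ring
 * can hold (e.g. a difference of 300 on a 256-entry ring); such a ring is
 * either corrupted or hostile, so the mapping is torn down via the err path.
 */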
1515
1516static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1517{
1518 /* Dealloc thread must remain running until all inflight
1519 * packets complete.
1520 */
1521 return kthread_should_stop() &&
1522 !atomic_read(&queue->inflight_packets);
1523}
1524
1525int xenvif_dealloc_kthread(void *data)
1526{
1527 struct xenvif_queue *queue = data;
1528
1529 for (;;) {
1530 wait_event_interruptible(queue->dealloc_wq,
1531 tx_dealloc_work_todo(queue) ||
1532 xenvif_dealloc_kthread_should_stop(queue));
1533 if (xenvif_dealloc_kthread_should_stop(queue))
1534 break;
1535
1536 xenvif_tx_dealloc_action(queue);
1537 cond_resched();
1538 }
1539
 1540 /* Unmap anything remaining */
1541 if (tx_dealloc_work_todo(queue))
1542 xenvif_tx_dealloc_action(queue);
1543
1544 return 0;
1545}
1546
1547static void make_ctrl_response(struct xenvif *vif,
1548 const struct xen_netif_ctrl_request *req,
1549 u32 status, u32 data)
1550{
1551 RING_IDX idx = vif->ctrl.rsp_prod_pvt;
1552 struct xen_netif_ctrl_response rsp = {
1553 .id = req->id,
1554 .type = req->type,
1555 .status = status,
1556 .data = data,
1557 };
1558
1559 *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
1560 vif->ctrl.rsp_prod_pvt = ++idx;
1561}
1562
1563static void push_ctrl_response(struct xenvif *vif)
1564{
1565 int notify;
1566
1567 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
1568 if (notify)
1569 notify_remote_via_irq(vif->ctrl_irq);
1570}
1571
1572static void process_ctrl_request(struct xenvif *vif,
1573 const struct xen_netif_ctrl_request *req)
1574{
1575 u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
1576 u32 data = 0;
1577
1578 switch (req->type) {
1579 case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
1580 status = xenvif_set_hash_alg(vif, req->data[0]);
1581 break;
1582
1583 case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
1584 status = xenvif_get_hash_flags(vif, &data);
1585 break;
1586
1587 case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
1588 status = xenvif_set_hash_flags(vif, req->data[0]);
1589 break;
1590
1591 case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
1592 status = xenvif_set_hash_key(vif, req->data[0],
1593 req->data[1]);
1594 break;
1595
1596 case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
1597 status = XEN_NETIF_CTRL_STATUS_SUCCESS;
1598 data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
1599 break;
1600
1601 case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
1602 status = xenvif_set_hash_mapping_size(vif,
1603 req->data[0]);
1604 break;
1605
1606 case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
1607 status = xenvif_set_hash_mapping(vif, req->data[0],
1608 req->data[1],
1609 req->data[2]);
1610 break;
1611
1612 default:
1613 break;
1614 }
1615
1616 make_ctrl_response(vif, req, status, data);
1617 push_ctrl_response(vif);
1618}
1619
1620static void xenvif_ctrl_action(struct xenvif *vif)
1621{
1622 for (;;) {
1623 RING_IDX req_prod, req_cons;
1624
1625 req_prod = vif->ctrl.sring->req_prod;
1626 req_cons = vif->ctrl.req_cons;
1627
1628 /* Make sure we can see requests before we process them. */
1629 rmb();
1630
1631 if (req_cons == req_prod)
1632 break;
1633
1634 while (req_cons != req_prod) {
1635 struct xen_netif_ctrl_request req;
1636
1637 RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
1638 req_cons++;
1639
1640 process_ctrl_request(vif, &req);
1641 }
1642
1643 vif->ctrl.req_cons = req_cons;
1644 vif->ctrl.sring->req_event = req_cons + 1;
1645 }
1646}
1647
1648static bool xenvif_ctrl_work_todo(struct xenvif *vif)
1649{
1650 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
1651 return true;
1652
1653 return false;
1654}
1655
1656irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
1657{
1658 struct xenvif *vif = data;
1659 unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
1660
1661 while (xenvif_ctrl_work_todo(vif)) {
1662 xenvif_ctrl_action(vif);
1663 eoi_flag = 0;
1664 }
1665
1666 xen_irq_lateeoi(irq, eoi_flag);
1667
1668 return IRQ_HANDLED;
1669}
1670
1671static int __init netback_init(void)
1672{
1673 int rc = 0;
1674
1675 if (!xen_domain())
1676 return -ENODEV;
1677
 1678 /* Allow as many queues as there are CPUs, but at most MAX_QUEUES_DEFAULT (8)
 1679 * if the user has not specified a value.
1680 */
1681 if (xenvif_max_queues == 0)
1682 xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
1683 num_online_cpus());
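	/* Example (illustrative): with 4 online CPUs and max_queues unset this
	 * yields 4 queues per vif; a 16-CPU host is capped at
	 * MAX_QUEUES_DEFAULT (8).
	 */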
1684
1685 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1686 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1687 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1688 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1689 }
1690
1691 rc = xenvif_xenbus_init();
1692 if (rc)
1693 goto failed_init;
1694
1695#ifdef CONFIG_DEBUG_FS
1696 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
1697#endif /* CONFIG_DEBUG_FS */
1698
1699 return 0;
1700
1701failed_init:
1702 return rc;
1703}
1704
1705module_init(netback_init);
1706
1707static void __exit netback_fini(void)
1708{
1709#ifdef CONFIG_DEBUG_FS
1710 debugfs_remove_recursive(xen_netback_dbg_root);
1711#endif /* CONFIG_DEBUG_FS */
1712 xenvif_xenbus_fini();
1713}
1714module_exit(netback_fini);
1715
1716MODULE_LICENSE("Dual BSD/GPL");
1717MODULE_ALIAS("xen-backend:vif");
104static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
105 u16 id,
106 s8 st,
107 u16 offset,
108 u16 size,
109 u16 flags);
110
111static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
112 u16 idx)
113{
114 return page_to_pfn(queue->mmap_pages[idx]);
115}
116
117static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
118 u16 idx)
119{
120 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
121}
122
123#define callback_param(vif, pending_idx) \
124 (vif->pending_tx_info[pending_idx].callback_struct)
125
126/* Find the containing VIF's structure from a pointer in pending_tx_info array
127 */
128static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
129{
130 u16 pending_idx = ubuf->desc;
131 struct pending_tx_info *temp =
132 container_of(ubuf, struct pending_tx_info, callback_struct);
133 return container_of(temp - pending_idx,
134 struct xenvif_queue,
135 pending_tx_info[0]);
136}
137
138static u16 frag_get_pending_idx(skb_frag_t *frag)
139{
140 return (u16)frag->page_offset;
141}
142
143static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
144{
145 frag->page_offset = pending_idx;
146}
147
148static inline pending_ring_idx_t pending_index(unsigned i)
149{
150 return i & (MAX_PENDING_REQS-1);
151}
152
153static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
154{
155 RING_IDX prod, cons;
156 struct sk_buff *skb;
157 int needed;
158
159 skb = skb_peek(&queue->rx_queue);
160 if (!skb)
161 return false;
162
163 needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
164 if (skb_is_gso(skb))
165 needed++;
166
167 do {
168 prod = queue->rx.sring->req_prod;
169 cons = queue->rx.req_cons;
170
171 if (prod - cons >= needed)
172 return true;
173
174 queue->rx.sring->req_event = prod + 1;
175
176 /* Make sure event is visible before we check prod
177 * again.
178 */
179 mb();
180 } while (queue->rx.sring->req_prod != prod);
181
182 return false;
183}
184
185void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
186{
187 unsigned long flags;
188
189 spin_lock_irqsave(&queue->rx_queue.lock, flags);
190
191 __skb_queue_tail(&queue->rx_queue, skb);
192
193 queue->rx_queue_len += skb->len;
194 if (queue->rx_queue_len > queue->rx_queue_max)
195 netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
196
197 spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
198}
199
200static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
201{
202 struct sk_buff *skb;
203
204 spin_lock_irq(&queue->rx_queue.lock);
205
206 skb = __skb_dequeue(&queue->rx_queue);
207 if (skb)
208 queue->rx_queue_len -= skb->len;
209
210 spin_unlock_irq(&queue->rx_queue.lock);
211
212 return skb;
213}
214
215static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
216{
217 spin_lock_irq(&queue->rx_queue.lock);
218
219 if (queue->rx_queue_len < queue->rx_queue_max)
220 netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
221
222 spin_unlock_irq(&queue->rx_queue.lock);
223}
224
225
226static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
227{
228 struct sk_buff *skb;
229 while ((skb = xenvif_rx_dequeue(queue)) != NULL)
230 kfree_skb(skb);
231}
232
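/* Drop skbs that have sat on the internal Rx queue past their expiry time
 * (cf. rx_drain_timeout_msecs).
 */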
233static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
234{
235 struct sk_buff *skb;
236
237 for(;;) {
238 skb = skb_peek(&queue->rx_queue);
239 if (!skb)
240 break;
241 if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
242 break;
243 xenvif_rx_dequeue(queue);
244 kfree_skb(skb);
245 }
246}
247
248struct netrx_pending_operations {
249 unsigned copy_prod, copy_cons;
250 unsigned meta_prod, meta_cons;
251 struct gnttab_copy *copy;
252 struct xenvif_rx_meta *meta;
253 int copy_off;
254 grant_ref_t copy_gref;
255};
256
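/* Consume the next Rx request from the ring and start a fresh meta slot for
 * it; copies into this buffer begin at offset zero.
 */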
257static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
258 struct netrx_pending_operations *npo)
259{
260 struct xenvif_rx_meta *meta;
261 struct xen_netif_rx_request req;
262
263 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
264
265 meta = npo->meta + npo->meta_prod++;
266 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
267 meta->gso_size = 0;
268 meta->size = 0;
269 meta->id = req.id;
270
271 npo->copy_off = 0;
272 npo->copy_gref = req.gref;
273
274 return meta;
275}
276
277struct gop_frag_copy {
278 struct xenvif_queue *queue;
279 struct netrx_pending_operations *npo;
280 struct xenvif_rx_meta *meta;
281 int head;
282 int gso_type;
283
284 struct page *page;
285};
286
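/* Emit one grant-copy operation for up to *len bytes into the current guest
 * Rx buffer, moving on to the next buffer when this one is full. *len is
 * trimmed to what actually fits, so callers loop until the data is consumed.
 */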
287static void xenvif_setup_copy_gop(unsigned long gfn,
288 unsigned int offset,
289 unsigned int *len,
290 struct gop_frag_copy *info)
291{
292 struct gnttab_copy *copy_gop;
293 struct xen_page_foreign *foreign;
294 /* Convenient aliases */
295 struct xenvif_queue *queue = info->queue;
296 struct netrx_pending_operations *npo = info->npo;
297 struct page *page = info->page;
298
299 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
300
301 if (npo->copy_off == MAX_BUFFER_OFFSET)
302 info->meta = get_next_rx_buffer(queue, npo);
303
304 if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
305 *len = MAX_BUFFER_OFFSET - npo->copy_off;
306
307 copy_gop = npo->copy + npo->copy_prod++;
308 copy_gop->flags = GNTCOPY_dest_gref;
309 copy_gop->len = *len;
310
311 foreign = xen_page_foreign(page);
312 if (foreign) {
313 copy_gop->source.domid = foreign->domid;
314 copy_gop->source.u.ref = foreign->gref;
315 copy_gop->flags |= GNTCOPY_source_gref;
316 } else {
317 copy_gop->source.domid = DOMID_SELF;
318 copy_gop->source.u.gmfn = gfn;
319 }
320 copy_gop->source.offset = offset;
321
322 copy_gop->dest.domid = queue->vif->domid;
323 copy_gop->dest.offset = npo->copy_off;
324 copy_gop->dest.u.ref = npo->copy_gref;
325
326 npo->copy_off += *len;
327 info->meta->size += *len;
328
329 /* Leave a gap for the GSO descriptor. */
330 if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
331 queue->rx.req_cons++;
332
333 info->head = 0; /* There must be something in this buffer now */
334}
335
336static void xenvif_gop_frag_copy_grant(unsigned long gfn,
337 unsigned offset,
338 unsigned int len,
339 void *data)
340{
341 unsigned int bytes;
342
343 while (len) {
344 bytes = len;
345 xenvif_setup_copy_gop(gfn, offset, &bytes, data);
346 offset += bytes;
347 len -= bytes;
348 }
349}
350
351/*
 352 * Set up the grant-copy operations needed to transfer this fragment
 353 * into the frontend's Rx buffers.
354 */
355static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
356 struct netrx_pending_operations *npo,
357 struct page *page, unsigned long size,
358 unsigned long offset, int *head)
359{
360 struct gop_frag_copy info = {
361 .queue = queue,
362 .npo = npo,
363 .head = *head,
364 .gso_type = XEN_NETIF_GSO_TYPE_NONE,
365 };
366 unsigned long bytes;
367
368 if (skb_is_gso(skb)) {
369 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
370 info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
371 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
372 info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
373 }
374
375 /* Data must not cross a page boundary. */
376 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
377
378 info.meta = npo->meta + npo->meta_prod - 1;
379
 380	/* Skip any whole pages at the start covered by the offset */
381 page += offset >> PAGE_SHIFT;
382 offset &= ~PAGE_MASK;
383
384 while (size > 0) {
385 BUG_ON(offset >= PAGE_SIZE);
386
387 bytes = PAGE_SIZE - offset;
388 if (bytes > size)
389 bytes = size;
390
391 info.page = page;
392 gnttab_foreach_grant_in_range(page, offset, bytes,
393 xenvif_gop_frag_copy_grant,
394 &info);
395 size -= bytes;
396 offset = 0;
397
398 /* Next page */
399 if (size) {
400 BUG_ON(!PageCompound(page));
401 page++;
402 }
403 }
404
405 *head = info.head;
406}
407
408/*
409 * Prepare an SKB to be transmitted to the frontend.
410 *
411 * This function is responsible for allocating grant operations, meta
412 * structures, etc.
413 *
414 * It returns the number of meta structures consumed. The number of
415 * ring slots used is always equal to the number of meta slots used
416 * plus the number of GSO descriptors used. Currently, we use either
417 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
418 * frontend-side LRO).
419 */
420static int xenvif_gop_skb(struct sk_buff *skb,
421 struct netrx_pending_operations *npo,
422 struct xenvif_queue *queue)
423{
424 struct xenvif *vif = netdev_priv(skb->dev);
425 int nr_frags = skb_shinfo(skb)->nr_frags;
426 int i;
427 struct xen_netif_rx_request req;
428 struct xenvif_rx_meta *meta;
429 unsigned char *data;
430 int head = 1;
431 int old_meta_prod;
432 int gso_type;
433
434 old_meta_prod = npo->meta_prod;
435
436 gso_type = XEN_NETIF_GSO_TYPE_NONE;
437 if (skb_is_gso(skb)) {
438 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
439 gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
440 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
441 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
442 }
443
444 /* Set up a GSO prefix descriptor, if necessary */
445 if ((1 << gso_type) & vif->gso_prefix_mask) {
446 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
447 meta = npo->meta + npo->meta_prod++;
448 meta->gso_type = gso_type;
449 meta->gso_size = skb_shinfo(skb)->gso_size;
450 meta->size = 0;
451 meta->id = req.id;
452 }
453
454 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
455 meta = npo->meta + npo->meta_prod++;
456
457 if ((1 << gso_type) & vif->gso_mask) {
458 meta->gso_type = gso_type;
459 meta->gso_size = skb_shinfo(skb)->gso_size;
460 } else {
461 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
462 meta->gso_size = 0;
463 }
464
465 meta->size = 0;
466 meta->id = req.id;
467 npo->copy_off = 0;
468 npo->copy_gref = req.gref;
469
470 data = skb->data;
471 while (data < skb_tail_pointer(skb)) {
472 unsigned int offset = offset_in_page(data);
473 unsigned int len = PAGE_SIZE - offset;
474
475 if (data + len > skb_tail_pointer(skb))
476 len = skb_tail_pointer(skb) - data;
477
478 xenvif_gop_frag_copy(queue, skb, npo,
479 virt_to_page(data), len, offset, &head);
480 data += len;
481 }
482
483 for (i = 0; i < nr_frags; i++) {
484 xenvif_gop_frag_copy(queue, skb, npo,
485 skb_frag_page(&skb_shinfo(skb)->frags[i]),
486 skb_frag_size(&skb_shinfo(skb)->frags[i]),
487 skb_shinfo(skb)->frags[i].page_offset,
488 &head);
489 }
490
491 return npo->meta_prod - old_meta_prod;
492}
493
494/*
495 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 496 * used to set up the operations at the top of
497 * netrx_pending_operations, which have since been done. Check that
498 * they didn't give any errors and advance over them.
499 */
500static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
501 struct netrx_pending_operations *npo)
502{
503 struct gnttab_copy *copy_op;
504 int status = XEN_NETIF_RSP_OKAY;
505 int i;
506
507 for (i = 0; i < nr_meta_slots; i++) {
508 copy_op = npo->copy + npo->copy_cons++;
509 if (copy_op->status != GNTST_okay) {
510 netdev_dbg(vif->dev,
511 "Bad status %d from copy to DOM%d.\n",
512 copy_op->status, vif->domid);
513 status = XEN_NETIF_RSP_ERROR;
514 }
515 }
516
517 return status;
518}
519
520static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
521 struct xenvif_rx_meta *meta,
522 int nr_meta_slots)
523{
524 int i;
525 unsigned long offset;
526
527 /* No fragments used */
528 if (nr_meta_slots <= 1)
529 return;
530
531 nr_meta_slots--;
532
533 for (i = 0; i < nr_meta_slots; i++) {
534 int flags;
535 if (i == nr_meta_slots - 1)
536 flags = 0;
537 else
538 flags = XEN_NETRXF_more_data;
539
540 offset = 0;
541 make_rx_response(queue, meta[i].id, status, offset,
542 meta[i].size, flags);
543 }
544}
545
546void xenvif_kick_thread(struct xenvif_queue *queue)
547{
548 wake_up(&queue->wq);
549}
550
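/* Main guest Rx work: build grant-copy operations for as many queued skbs as
 * the shared ring has slots for, perform the copies, then write the Rx
 * responses (including any GSO extras) and notify the frontend if necessary.
 */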
551static void xenvif_rx_action(struct xenvif_queue *queue)
552{
553 s8 status;
554 u16 flags;
555 struct xen_netif_rx_response *resp;
556 struct sk_buff_head rxq;
557 struct sk_buff *skb;
558 LIST_HEAD(notify);
559 int ret;
560 unsigned long offset;
561 bool need_to_notify = false;
562
563 struct netrx_pending_operations npo = {
564 .copy = queue->grant_copy_op,
565 .meta = queue->meta,
566 };
567
568 skb_queue_head_init(&rxq);
569
570 while (xenvif_rx_ring_slots_available(queue)
571 && (skb = xenvif_rx_dequeue(queue)) != NULL) {
572 queue->last_rx_time = jiffies;
573
574 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
575
576 __skb_queue_tail(&rxq, skb);
577 }
578
579 BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
580
581 if (!npo.copy_prod)
582 goto done;
583
584 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
585 gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
586
587 while ((skb = __skb_dequeue(&rxq)) != NULL) {
588
589 if ((1 << queue->meta[npo.meta_cons].gso_type) &
590 queue->vif->gso_prefix_mask) {
591 resp = RING_GET_RESPONSE(&queue->rx,
592 queue->rx.rsp_prod_pvt++);
593
594 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
595
596 resp->offset = queue->meta[npo.meta_cons].gso_size;
597 resp->id = queue->meta[npo.meta_cons].id;
598 resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
599
600 npo.meta_cons++;
601 XENVIF_RX_CB(skb)->meta_slots_used--;
602 }
603
604
605 queue->stats.tx_bytes += skb->len;
606 queue->stats.tx_packets++;
607
608 status = xenvif_check_gop(queue->vif,
609 XENVIF_RX_CB(skb)->meta_slots_used,
610 &npo);
611
612 if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
613 flags = 0;
614 else
615 flags = XEN_NETRXF_more_data;
616
617 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
618 flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
619 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
620 /* remote but checksummed. */
621 flags |= XEN_NETRXF_data_validated;
622
623 offset = 0;
624 resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
625 status, offset,
626 queue->meta[npo.meta_cons].size,
627 flags);
628
629 if ((1 << queue->meta[npo.meta_cons].gso_type) &
630 queue->vif->gso_mask) {
631 struct xen_netif_extra_info *gso =
632 (struct xen_netif_extra_info *)
633 RING_GET_RESPONSE(&queue->rx,
634 queue->rx.rsp_prod_pvt++);
635
636 resp->flags |= XEN_NETRXF_extra_info;
637
638 gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
639 gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
640 gso->u.gso.pad = 0;
641 gso->u.gso.features = 0;
642
643 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
644 gso->flags = 0;
645 }
646
647 xenvif_add_frag_responses(queue, status,
648 queue->meta + npo.meta_cons + 1,
649 XENVIF_RX_CB(skb)->meta_slots_used);
650
651 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
652
653 need_to_notify |= !!ret;
654
655 npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
656 dev_kfree_skb(skb);
657 }
658
659done:
660 if (need_to_notify)
661 notify_remote_via_irq(queue->rx_irq);
662}
663
664void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
665{
666 int more_to_do;
667
668 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
669
670 if (more_to_do)
671 napi_schedule(&queue->napi);
672}
673
674static void tx_add_credit(struct xenvif_queue *queue)
675{
676 unsigned long max_burst, max_credit;
677
678 /*
679 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
680 * Otherwise the interface can seize up due to insufficient credit.
681 */
682 max_burst = max(131072UL, queue->credit_bytes);
683
684 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
685 max_credit = queue->remaining_credit + queue->credit_bytes;
686 if (max_credit < queue->remaining_credit)
687 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
688
689 queue->remaining_credit = min(max_credit, max_burst);
690}
691
692void xenvif_tx_credit_callback(unsigned long data)
693{
694 struct xenvif_queue *queue = (struct xenvif_queue *)data;
695 tx_add_credit(queue);
696 xenvif_napi_schedule_or_enable_events(queue);
697}
698
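/* Complete a bad Tx request (and any further slots in its chain) with error
 * responses back to the frontend.
 */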
699static void xenvif_tx_err(struct xenvif_queue *queue,
700 struct xen_netif_tx_request *txp,
701 unsigned int extra_count, RING_IDX end)
702{
703 RING_IDX cons = queue->tx.req_cons;
704 unsigned long flags;
705
706 do {
707 spin_lock_irqsave(&queue->response_lock, flags);
708 make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
709 push_tx_responses(queue);
710 spin_unlock_irqrestore(&queue->response_lock, flags);
711 if (cons == end)
712 break;
713 RING_COPY_REQUEST(&queue->tx, cons++, txp);
714 extra_count = 0; /* only the first frag can have extras */
715 } while (1);
716 queue->tx.req_cons = cons;
717}
718
719static void xenvif_fatal_tx_err(struct xenvif *vif)
720{
721 netdev_err(vif->dev, "fatal error; disabling device\n");
722 vif->disabled = true;
723 /* Disable the vif from queue 0's kthread */
724 if (vif->queues)
725 xenvif_kick_thread(&vif->queues[0]);
726}
727
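/* Walk the chain of slots following 'first', copying them into 'txp'.
 * Returns the number of slots following 'first', or a negative errno;
 * fatal protocol violations also disable the vif.
 */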
728static int xenvif_count_requests(struct xenvif_queue *queue,
729 struct xen_netif_tx_request *first,
730 unsigned int extra_count,
731 struct xen_netif_tx_request *txp,
732 int work_to_do)
733{
734 RING_IDX cons = queue->tx.req_cons;
735 int slots = 0;
736 int drop_err = 0;
737 int more_data;
738
739 if (!(first->flags & XEN_NETTXF_more_data))
740 return 0;
741
742 do {
743 struct xen_netif_tx_request dropped_tx = { 0 };
744
745 if (slots >= work_to_do) {
746 netdev_err(queue->vif->dev,
747 "Asked for %d slots but exceeds this limit\n",
748 work_to_do);
749 xenvif_fatal_tx_err(queue->vif);
750 return -ENODATA;
751 }
752
 753		/* This guest is really using too many slots and is
 754		 * considered malicious.
755 */
756 if (unlikely(slots >= fatal_skb_slots)) {
757 netdev_err(queue->vif->dev,
758 "Malicious frontend using %d slots, threshold %u\n",
759 slots, fatal_skb_slots);
760 xenvif_fatal_tx_err(queue->vif);
761 return -E2BIG;
762 }
763
 764		/* The Xen network protocol had an implicit dependency on
 765		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
 766		 * the historical MAX_SKB_FRAGS value 18 to honor the
 767		 * same behavior as before. Any packet using more than
 768		 * 18 slots but fewer than fatal_skb_slots slots is
 769		 * dropped.
770 */
771 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
772 if (net_ratelimit())
773 netdev_dbg(queue->vif->dev,
774 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
775 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
776 drop_err = -E2BIG;
777 }
778
779 if (drop_err)
780 txp = &dropped_tx;
781
782 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
783
784 /* If the guest submitted a frame >= 64 KiB then
785 * first->size overflowed and following slots will
786 * appear to be larger than the frame.
787 *
 788		 * This cannot be a fatal error as there are buggy
 789		 * frontends that do this.
790 *
791 * Consume all slots and drop the packet.
792 */
793 if (!drop_err && txp->size > first->size) {
794 if (net_ratelimit())
795 netdev_dbg(queue->vif->dev,
796 "Invalid tx request, slot size %u > remaining size %u\n",
797 txp->size, first->size);
798 drop_err = -EIO;
799 }
800
801 first->size -= txp->size;
802 slots++;
803
804 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
805 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
806 txp->offset, txp->size);
807 xenvif_fatal_tx_err(queue->vif);
808 return -EINVAL;
809 }
810
811 more_data = txp->flags & XEN_NETTXF_more_data;
812
813 if (!drop_err)
814 txp++;
815
816 } while (more_data);
817
818 if (drop_err) {
819 xenvif_tx_err(queue, first, extra_count, cons + slots);
820 return drop_err;
821 }
822
823 return slots;
824}
825
826
827struct xenvif_tx_cb {
828 u16 pending_idx;
829};
830
831#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
832
833static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
834 u16 pending_idx,
835 struct xen_netif_tx_request *txp,
836 unsigned int extra_count,
837 struct gnttab_map_grant_ref *mop)
838{
839 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
840 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
841 GNTMAP_host_map | GNTMAP_readonly,
842 txp->gref, queue->vif->domid);
843
844 memcpy(&queue->pending_tx_info[pending_idx].req, txp,
845 sizeof(*txp));
846 queue->pending_tx_info[pending_idx].extra_count = extra_count;
847}
848
849static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
850{
851 struct sk_buff *skb =
852 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
853 GFP_ATOMIC | __GFP_NOWARN);
854 if (unlikely(skb == NULL))
855 return NULL;
856
 857	/* Packets passed up the network stack must have some headroom. */
858 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
859
860 /* Initialize it here to avoid later surprises */
861 skb_shinfo(skb)->destructor_arg = NULL;
862
863 return skb;
864}
865
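/* Allocate pending slots and grant-map operations for each frag slot of the
 * packet; overflow frags beyond MAX_SKB_FRAGS go onto 'nskb', which is
 * chained as a frag_list. Returns the next unused map op.
 */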
866static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
867 struct sk_buff *skb,
868 struct xen_netif_tx_request *txp,
869 struct gnttab_map_grant_ref *gop,
870 unsigned int frag_overflow,
871 struct sk_buff *nskb)
872{
873 struct skb_shared_info *shinfo = skb_shinfo(skb);
874 skb_frag_t *frags = shinfo->frags;
875 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
876 int start;
877 pending_ring_idx_t index;
878 unsigned int nr_slots;
879
880 nr_slots = shinfo->nr_frags;
881
 882	/* Skip the first skb fragment if it shares a page with the header fragment. */
883 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
884
885 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
886 shinfo->nr_frags++, txp++, gop++) {
887 index = pending_index(queue->pending_cons++);
888 pending_idx = queue->pending_ring[index];
889 xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
890 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
891 }
892
893 if (frag_overflow) {
894
895 shinfo = skb_shinfo(nskb);
896 frags = shinfo->frags;
897
898 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
899 shinfo->nr_frags++, txp++, gop++) {
900 index = pending_index(queue->pending_cons++);
901 pending_idx = queue->pending_ring[index];
902 xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
903 gop);
904 frag_set_pending_idx(&frags[shinfo->nr_frags],
905 pending_idx);
906 }
907
908 skb_shinfo(skb)->frag_list = nskb;
909 }
910
911 return gop;
912}
913
914static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
915 u16 pending_idx,
916 grant_handle_t handle)
917{
918 if (unlikely(queue->grant_tx_handle[pending_idx] !=
919 NETBACK_INVALID_HANDLE)) {
920 netdev_err(queue->vif->dev,
921 "Trying to overwrite active handle! pending_idx: 0x%x\n",
922 pending_idx);
923 BUG();
924 }
925 queue->grant_tx_handle[pending_idx] = handle;
926}
927
928static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
929 u16 pending_idx)
930{
931 if (unlikely(queue->grant_tx_handle[pending_idx] ==
932 NETBACK_INVALID_HANDLE)) {
933 netdev_err(queue->vif->dev,
934 "Trying to unmap invalid handle! pending_idx: 0x%x\n",
935 pending_idx);
936 BUG();
937 }
938 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
939}
940
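/* Check the results of the grant copy (header) and grant map (frag)
 * operations for one skb. On error the offending and already-mapped frags
 * are unmapped/released here. Returns 0 on success, otherwise the last
 * error status seen.
 */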
941static int xenvif_tx_check_gop(struct xenvif_queue *queue,
942 struct sk_buff *skb,
943 struct gnttab_map_grant_ref **gopp_map,
944 struct gnttab_copy **gopp_copy)
945{
946 struct gnttab_map_grant_ref *gop_map = *gopp_map;
947 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
948 /* This always points to the shinfo of the skb being checked, which
949 * could be either the first or the one on the frag_list
950 */
951 struct skb_shared_info *shinfo = skb_shinfo(skb);
952 /* If this is non-NULL, we are currently checking the frag_list skb, and
953 * this points to the shinfo of the first one
954 */
955 struct skb_shared_info *first_shinfo = NULL;
956 int nr_frags = shinfo->nr_frags;
957 const bool sharedslot = nr_frags &&
958 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
959 int i, err;
960
961 /* Check status of header. */
962 err = (*gopp_copy)->status;
963 if (unlikely(err)) {
964 if (net_ratelimit())
965 netdev_dbg(queue->vif->dev,
966 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
967 (*gopp_copy)->status,
968 pending_idx,
969 (*gopp_copy)->source.u.ref);
970 /* The first frag might still have this slot mapped */
971 if (!sharedslot)
972 xenvif_idx_release(queue, pending_idx,
973 XEN_NETIF_RSP_ERROR);
974 }
975 (*gopp_copy)++;
976
977check_frags:
978 for (i = 0; i < nr_frags; i++, gop_map++) {
979 int j, newerr;
980
981 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
982
983 /* Check error status: if okay then remember grant handle. */
984 newerr = gop_map->status;
985
986 if (likely(!newerr)) {
987 xenvif_grant_handle_set(queue,
988 pending_idx,
989 gop_map->handle);
990 /* Had a previous error? Invalidate this fragment. */
991 if (unlikely(err)) {
992 xenvif_idx_unmap(queue, pending_idx);
993 /* If the mapping of the first frag was OK, but
994 * the header's copy failed, and they are
995 * sharing a slot, send an error
996 */
997 if (i == 0 && sharedslot)
998 xenvif_idx_release(queue, pending_idx,
999 XEN_NETIF_RSP_ERROR);
1000 else
1001 xenvif_idx_release(queue, pending_idx,
1002 XEN_NETIF_RSP_OKAY);
1003 }
1004 continue;
1005 }
1006
1007 /* Error on this fragment: respond to client with an error. */
1008 if (net_ratelimit())
1009 netdev_dbg(queue->vif->dev,
1010 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
1011 i,
1012 gop_map->status,
1013 pending_idx,
1014 gop_map->ref);
1015
1016 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1017
1018 /* Not the first error? Preceding frags already invalidated. */
1019 if (err)
1020 continue;
1021
 1022		/* First error: if the header hasn't shared a slot with the
1023 * first frag, release it as well.
1024 */
1025 if (!sharedslot)
1026 xenvif_idx_release(queue,
1027 XENVIF_TX_CB(skb)->pending_idx,
1028 XEN_NETIF_RSP_OKAY);
1029
1030 /* Invalidate preceding fragments of this skb. */
1031 for (j = 0; j < i; j++) {
1032 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1033 xenvif_idx_unmap(queue, pending_idx);
1034 xenvif_idx_release(queue, pending_idx,
1035 XEN_NETIF_RSP_OKAY);
1036 }
1037
1038 /* And if we found the error while checking the frag_list, unmap
1039 * the first skb's frags
1040 */
1041 if (first_shinfo) {
1042 for (j = 0; j < first_shinfo->nr_frags; j++) {
1043 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1044 xenvif_idx_unmap(queue, pending_idx);
1045 xenvif_idx_release(queue, pending_idx,
1046 XEN_NETIF_RSP_OKAY);
1047 }
1048 }
1049
1050 /* Remember the error: invalidate all subsequent fragments. */
1051 err = newerr;
1052 }
1053
1054 if (skb_has_frag_list(skb) && !first_shinfo) {
1055 first_shinfo = skb_shinfo(skb);
1056 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1057 nr_frags = shinfo->nr_frags;
1058
1059 goto check_frags;
1060 }
1061
1062 *gopp_map = gop_map;
1063 return err;
1064}
1065
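/* Replace the pending_idx placeholders in the skb's frags with the real
 * mapped pages, chain the zerocopy callback contexts, and adjust the skb
 * length accounting.
 */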
1066static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1067{
1068 struct skb_shared_info *shinfo = skb_shinfo(skb);
1069 int nr_frags = shinfo->nr_frags;
1070 int i;
1071 u16 prev_pending_idx = INVALID_PENDING_IDX;
1072
1073 for (i = 0; i < nr_frags; i++) {
1074 skb_frag_t *frag = shinfo->frags + i;
1075 struct xen_netif_tx_request *txp;
1076 struct page *page;
1077 u16 pending_idx;
1078
1079 pending_idx = frag_get_pending_idx(frag);
1080
 1081		/* If this is not the first frag, chain it to the previous */
1082 if (prev_pending_idx == INVALID_PENDING_IDX)
1083 skb_shinfo(skb)->destructor_arg =
1084 &callback_param(queue, pending_idx);
1085 else
1086 callback_param(queue, prev_pending_idx).ctx =
1087 &callback_param(queue, pending_idx);
1088
1089 callback_param(queue, pending_idx).ctx = NULL;
1090 prev_pending_idx = pending_idx;
1091
1092 txp = &queue->pending_tx_info[pending_idx].req;
1093 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
1094 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1095 skb->len += txp->size;
1096 skb->data_len += txp->size;
1097 skb->truesize += txp->size;
1098
 1099		/* Take an extra reference to offset the network stack's put_page */
1100 get_page(queue->mmap_pages[pending_idx]);
1101 }
1102}
1103
1104static int xenvif_get_extras(struct xenvif_queue *queue,
1105 struct xen_netif_extra_info *extras,
1106 unsigned int *extra_count,
1107 int work_to_do)
1108{
1109 struct xen_netif_extra_info extra;
1110 RING_IDX cons = queue->tx.req_cons;
1111
1112 do {
1113 if (unlikely(work_to_do-- <= 0)) {
1114 netdev_err(queue->vif->dev, "Missing extra info\n");
1115 xenvif_fatal_tx_err(queue->vif);
1116 return -EBADR;
1117 }
1118
1119 RING_COPY_REQUEST(&queue->tx, cons, &extra);
1120
1121 queue->tx.req_cons = ++cons;
1122 (*extra_count)++;
1123
1124 if (unlikely(!extra.type ||
1125 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1126 netdev_err(queue->vif->dev,
1127 "Invalid extra type: %d\n", extra.type);
1128 xenvif_fatal_tx_err(queue->vif);
1129 return -EINVAL;
1130 }
1131
1132 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1133 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1134
1135 return work_to_do;
1136}
1137
1138static int xenvif_set_skb_gso(struct xenvif *vif,
1139 struct sk_buff *skb,
1140 struct xen_netif_extra_info *gso)
1141{
1142 if (!gso->u.gso.size) {
1143 netdev_err(vif->dev, "GSO size must not be zero.\n");
1144 xenvif_fatal_tx_err(vif);
1145 return -EINVAL;
1146 }
1147
1148 switch (gso->u.gso.type) {
1149 case XEN_NETIF_GSO_TYPE_TCPV4:
1150 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1151 break;
1152 case XEN_NETIF_GSO_TYPE_TCPV6:
1153 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1154 break;
1155 default:
1156 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1157 xenvif_fatal_tx_err(vif);
1158 return -EINVAL;
1159 }
1160
1161 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1162 /* gso_segs will be calculated later */
1163
1164 return 0;
1165}
1166
1167static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
1168{
1169 bool recalculate_partial_csum = false;
1170
1171 /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1172 * peers can fail to set NETRXF_csum_blank when sending a GSO
1173 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1174 * recalculate the partial checksum.
1175 */
1176 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1177 queue->stats.rx_gso_checksum_fixup++;
1178 skb->ip_summed = CHECKSUM_PARTIAL;
1179 recalculate_partial_csum = true;
1180 }
1181
1182 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1183 if (skb->ip_summed != CHECKSUM_PARTIAL)
1184 return 0;
1185
1186 return skb_checksum_setup(skb, recalculate_partial_csum);
1187}
1188
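/* Rate limiting: return true (and arm the replenish timer) if sending 'size'
 * bytes now would exceed the queue's remaining credit.
 */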
1189static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1190{
1191 u64 now = get_jiffies_64();
1192 u64 next_credit = queue->credit_window_start +
1193 msecs_to_jiffies(queue->credit_usec / 1000);
1194
1195 /* Timer could already be pending in rare cases. */
1196 if (timer_pending(&queue->credit_timeout))
1197 return true;
1198
1199 /* Passed the point where we can replenish credit? */
1200 if (time_after_eq64(now, next_credit)) {
1201 queue->credit_window_start = now;
1202 tx_add_credit(queue);
1203 }
1204
1205 /* Still too big to send right now? Set a callback. */
1206 if (size > queue->remaining_credit) {
1207 queue->credit_timeout.data =
1208 (unsigned long)queue;
1209 mod_timer(&queue->credit_timeout,
1210 next_credit);
1211 queue->credit_window_start = next_credit;
1212
1213 return true;
1214 }
1215
1216 return false;
1217}
1218
1219/* No locking is required in xenvif_mcast_add/del() as they are
1220 * only ever invoked from NAPI poll. An RCU list is used because
1221 * xenvif_mcast_match() is called asynchronously, during start_xmit.
1222 */
1223
1224static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
1225{
1226 struct xenvif_mcast_addr *mcast;
1227
1228 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
1229 if (net_ratelimit())
1230 netdev_err(vif->dev,
1231 "Too many multicast addresses\n");
1232 return -ENOSPC;
1233 }
1234
1235 mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
1236 if (!mcast)
1237 return -ENOMEM;
1238
1239 ether_addr_copy(mcast->addr, addr);
1240 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
1241 vif->fe_mcast_count++;
1242
1243 return 0;
1244}
1245
1246static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
1247{
1248 struct xenvif_mcast_addr *mcast;
1249
1250 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
1251 if (ether_addr_equal(addr, mcast->addr)) {
1252 --vif->fe_mcast_count;
1253 list_del_rcu(&mcast->entry);
1254 kfree_rcu(mcast, rcu);
1255 break;
1256 }
1257 }
1258}
1259
1260bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
1261{
1262 struct xenvif_mcast_addr *mcast;
1263
1264 rcu_read_lock();
1265 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
1266 if (ether_addr_equal(addr, mcast->addr)) {
1267 rcu_read_unlock();
1268 return true;
1269 }
1270 }
1271 rcu_read_unlock();
1272
1273 return false;
1274}
1275
1276void xenvif_mcast_addr_list_free(struct xenvif *vif)
1277{
1278 /* No need for locking or RCU here. NAPI poll and TX queue
1279 * are stopped.
1280 */
1281 while (!list_empty(&vif->fe_mcast_addr)) {
1282 struct xenvif_mcast_addr *mcast;
1283
1284 mcast = list_first_entry(&vif->fe_mcast_addr,
1285 struct xenvif_mcast_addr,
1286 entry);
1287 --vif->fe_mcast_count;
1288 list_del(&mcast->entry);
1289 kfree(mcast);
1290 }
1291}
1292
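/* Consume Tx requests from the ring, building a grant-copy operation for
 * each packet's header into the skb's linear area and grant-map operations
 * for the remaining slots, until the budget, the ring, or the op arrays are
 * exhausted.
 */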
1293static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1294 int budget,
1295 unsigned *copy_ops,
1296 unsigned *map_ops)
1297{
1298 struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
1299 struct sk_buff *skb, *nskb;
1300 int ret;
1301 unsigned int frag_overflow;
1302
1303 while (skb_queue_len(&queue->tx_queue) < budget) {
1304 struct xen_netif_tx_request txreq;
1305 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1306 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1307 unsigned int extra_count;
1308 u16 pending_idx;
1309 RING_IDX idx;
1310 int work_to_do;
1311 unsigned int data_len;
1312 pending_ring_idx_t index;
1313
1314 if (queue->tx.sring->req_prod - queue->tx.req_cons >
1315 XEN_NETIF_TX_RING_SIZE) {
1316 netdev_err(queue->vif->dev,
1317 "Impossible number of requests. "
1318 "req_prod %d, req_cons %d, size %ld\n",
1319 queue->tx.sring->req_prod, queue->tx.req_cons,
1320 XEN_NETIF_TX_RING_SIZE);
1321 xenvif_fatal_tx_err(queue->vif);
1322 break;
1323 }
1324
1325 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
1326 if (!work_to_do)
1327 break;
1328
1329 idx = queue->tx.req_cons;
1330 rmb(); /* Ensure that we see the request before we copy it. */
1331 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
1332
1333 /* Credit-based scheduling. */
1334 if (txreq.size > queue->remaining_credit &&
1335 tx_credit_exceeded(queue, txreq.size))
1336 break;
1337
1338 queue->remaining_credit -= txreq.size;
1339
1340 work_to_do--;
1341 queue->tx.req_cons = ++idx;
1342
1343 memset(extras, 0, sizeof(extras));
1344 extra_count = 0;
1345 if (txreq.flags & XEN_NETTXF_extra_info) {
1346 work_to_do = xenvif_get_extras(queue, extras,
1347 &extra_count,
1348 work_to_do);
1349 idx = queue->tx.req_cons;
1350 if (unlikely(work_to_do < 0))
1351 break;
1352 }
1353
1354 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
1355 struct xen_netif_extra_info *extra;
1356
1357 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
1358 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
1359
1360 make_tx_response(queue, &txreq, extra_count,
1361 (ret == 0) ?
1362 XEN_NETIF_RSP_OKAY :
1363 XEN_NETIF_RSP_ERROR);
1364 push_tx_responses(queue);
1365 continue;
1366 }
1367
1368 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
1369 struct xen_netif_extra_info *extra;
1370
1371 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
1372 xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
1373
1374 make_tx_response(queue, &txreq, extra_count,
1375 XEN_NETIF_RSP_OKAY);
1376 push_tx_responses(queue);
1377 continue;
1378 }
1379
1380 ret = xenvif_count_requests(queue, &txreq, extra_count,
1381 txfrags, work_to_do);
1382 if (unlikely(ret < 0))
1383 break;
1384
1385 idx += ret;
1386
1387 if (unlikely(txreq.size < ETH_HLEN)) {
1388 netdev_dbg(queue->vif->dev,
1389 "Bad packet size: %d\n", txreq.size);
1390 xenvif_tx_err(queue, &txreq, extra_count, idx);
1391 break;
1392 }
1393
 1394		/* The payload must not cross a page boundary, as it cannot be fragmented. */
1395 if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
1396 netdev_err(queue->vif->dev,
1397 "txreq.offset: %u, size: %u, end: %lu\n",
1398 txreq.offset, txreq.size,
1399 (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
1400 xenvif_fatal_tx_err(queue->vif);
1401 break;
1402 }
1403
1404 index = pending_index(queue->pending_cons);
1405 pending_idx = queue->pending_ring[index];
1406
1407 data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
1408 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1409 XEN_NETBACK_TX_COPY_LEN : txreq.size;
1410
1411 skb = xenvif_alloc_skb(data_len);
1412 if (unlikely(skb == NULL)) {
1413 netdev_dbg(queue->vif->dev,
1414 "Can't allocate a skb in start_xmit.\n");
1415 xenvif_tx_err(queue, &txreq, extra_count, idx);
1416 break;
1417 }
1418
1419 skb_shinfo(skb)->nr_frags = ret;
1420 if (data_len < txreq.size)
1421 skb_shinfo(skb)->nr_frags++;
1422 /* At this point shinfo->nr_frags is in fact the number of
1423 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1424 */
1425 frag_overflow = 0;
1426 nskb = NULL;
1427 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1428 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1429 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1430 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1431 nskb = xenvif_alloc_skb(0);
1432 if (unlikely(nskb == NULL)) {
1433 kfree_skb(skb);
1434 xenvif_tx_err(queue, &txreq, extra_count, idx);
1435 if (net_ratelimit())
1436 netdev_err(queue->vif->dev,
1437 "Can't allocate the frag_list skb.\n");
1438 break;
1439 }
1440 }
1441
1442 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1443 struct xen_netif_extra_info *gso;
1444 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1445
1446 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1447 /* Failure in xenvif_set_skb_gso is fatal. */
1448 kfree_skb(skb);
1449 kfree_skb(nskb);
1450 break;
1451 }
1452 }
1453
1454 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1455
1456 __skb_put(skb, data_len);
1457 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
1458 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
1459 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
1460
1461 queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
1462 virt_to_gfn(skb->data);
1463 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
1464 queue->tx_copy_ops[*copy_ops].dest.offset =
1465 offset_in_page(skb->data) & ~XEN_PAGE_MASK;
1466
1467 queue->tx_copy_ops[*copy_ops].len = data_len;
1468 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1469
1470 (*copy_ops)++;
1471
1472 if (data_len < txreq.size) {
1473 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1474 pending_idx);
1475 xenvif_tx_create_map_op(queue, pending_idx, &txreq,
1476 extra_count, gop);
1477 gop++;
1478 } else {
1479 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1480 INVALID_PENDING_IDX);
1481 memcpy(&queue->pending_tx_info[pending_idx].req,
1482 &txreq, sizeof(txreq));
1483 queue->pending_tx_info[pending_idx].extra_count =
1484 extra_count;
1485 }
1486
1487 queue->pending_cons++;
1488
1489 gop = xenvif_get_requests(queue, skb, txfrags, gop,
1490 frag_overflow, nskb);
1491
1492 __skb_queue_tail(&queue->tx_queue, skb);
1493
1494 queue->tx.req_cons = idx;
1495
1496 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1497 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1498 break;
1499 }
1500
1501 (*map_ops) = gop - queue->tx_map_ops;
1502 return;
1503}
1504
 1505/* Consolidate an skb with a frag_list into a brand new one with local pages on
 1506 * the frags. Returns 0, or -ENOMEM if new pages can't be allocated.
1507 */
1508static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1509{
1510 unsigned int offset = skb_headlen(skb);
1511 skb_frag_t frags[MAX_SKB_FRAGS];
1512 int i, f;
1513 struct ubuf_info *uarg;
1514 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1515
1516 queue->stats.tx_zerocopy_sent += 2;
1517 queue->stats.tx_frag_overflow++;
1518
1519 xenvif_fill_frags(queue, nskb);
 1520	/* Subtract the frags' size; we will correct it later */
1521 skb->truesize -= skb->data_len;
1522 skb->len += nskb->len;
1523 skb->data_len += nskb->len;
1524
1525 /* create a brand new frags array and coalesce there */
1526 for (i = 0; offset < skb->len; i++) {
1527 struct page *page;
1528 unsigned int len;
1529
1530 BUG_ON(i >= MAX_SKB_FRAGS);
1531 page = alloc_page(GFP_ATOMIC);
1532 if (!page) {
1533 int j;
1534 skb->truesize += skb->data_len;
1535 for (j = 0; j < i; j++)
1536 put_page(frags[j].page.p);
1537 return -ENOMEM;
1538 }
1539
1540 if (offset + PAGE_SIZE < skb->len)
1541 len = PAGE_SIZE;
1542 else
1543 len = skb->len - offset;
1544 if (skb_copy_bits(skb, offset, page_address(page), len))
1545 BUG();
1546
1547 offset += len;
1548 frags[i].page.p = page;
1549 frags[i].page_offset = 0;
1550 skb_frag_size_set(&frags[i], len);
1551 }
1552
1553 /* Copied all the bits from the frag list -- free it. */
1554 skb_frag_list_init(skb);
1555 xenvif_skb_zerocopy_prepare(queue, nskb);
1556 kfree_skb(nskb);
1557
1558 /* Release all the original (foreign) frags. */
1559 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1560 skb_frag_unref(skb, f);
1561 uarg = skb_shinfo(skb)->destructor_arg;
 1562	/* Increase the inflight counter to offset the decrement in the callback */
1563 atomic_inc(&queue->inflight_packets);
1564 uarg->callback(uarg, true);
1565 skb_shinfo(skb)->destructor_arg = NULL;
1566
1567 /* Fill the skb with the new (local) frags. */
1568 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1569 skb_shinfo(skb)->nr_frags = i;
1570 skb->truesize += i * PAGE_SIZE;
1571
1572 return 0;
1573}
1574
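/* Second half of Tx processing, run after the grant operations have
 * completed: check their status, fill in the frags, fix up checksum and GSO
 * metadata, and pass each skb to the network stack.
 */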
1575static int xenvif_tx_submit(struct xenvif_queue *queue)
1576{
1577 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1578 struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1579 struct sk_buff *skb;
1580 int work_done = 0;
1581
1582 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1583 struct xen_netif_tx_request *txp;
1584 u16 pending_idx;
1585 unsigned data_len;
1586
1587 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1588 txp = &queue->pending_tx_info[pending_idx].req;
1589
1590 /* Check the remap error code. */
1591 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1592 /* If there was an error, xenvif_tx_check_gop is
1593 * expected to release all the frags which were mapped,
1594 * so kfree_skb shouldn't do it again
1595 */
1596 skb_shinfo(skb)->nr_frags = 0;
1597 if (skb_has_frag_list(skb)) {
1598 struct sk_buff *nskb =
1599 skb_shinfo(skb)->frag_list;
1600 skb_shinfo(nskb)->nr_frags = 0;
1601 }
1602 kfree_skb(skb);
1603 continue;
1604 }
1605
1606 data_len = skb->len;
1607 callback_param(queue, pending_idx).ctx = NULL;
1608 if (data_len < txp->size) {
1609 /* Append the packet payload as a fragment. */
1610 txp->offset += data_len;
1611 txp->size -= data_len;
1612 } else {
1613 /* Schedule a response immediately. */
1614 xenvif_idx_release(queue, pending_idx,
1615 XEN_NETIF_RSP_OKAY);
1616 }
1617
1618 if (txp->flags & XEN_NETTXF_csum_blank)
1619 skb->ip_summed = CHECKSUM_PARTIAL;
1620 else if (txp->flags & XEN_NETTXF_data_validated)
1621 skb->ip_summed = CHECKSUM_UNNECESSARY;
1622
1623 xenvif_fill_frags(queue, skb);
1624
1625 if (unlikely(skb_has_frag_list(skb))) {
1626 if (xenvif_handle_frag_list(queue, skb)) {
1627 if (net_ratelimit())
1628 netdev_err(queue->vif->dev,
1629 "Not enough memory to consolidate frag_list!\n");
1630 xenvif_skb_zerocopy_prepare(queue, skb);
1631 kfree_skb(skb);
1632 continue;
1633 }
1634 }
1635
1636 skb->dev = queue->vif->dev;
1637 skb->protocol = eth_type_trans(skb, skb->dev);
1638 skb_reset_network_header(skb);
1639
1640 if (checksum_setup(queue, skb)) {
1641 netdev_dbg(queue->vif->dev,
1642 "Can't setup checksum in net_tx_action\n");
1643 /* We have to set this flag to trigger the callback */
1644 if (skb_shinfo(skb)->destructor_arg)
1645 xenvif_skb_zerocopy_prepare(queue, skb);
1646 kfree_skb(skb);
1647 continue;
1648 }
1649
1650 skb_probe_transport_header(skb, 0);
1651
1652 /* If the packet is GSO then we will have just set up the
1653 * transport header offset in checksum_setup so it's now
1654 * straightforward to calculate gso_segs.
1655 */
1656 if (skb_is_gso(skb)) {
1657 int mss = skb_shinfo(skb)->gso_size;
1658 int hdrlen = skb_transport_header(skb) -
1659 skb_mac_header(skb) +
1660 tcp_hdrlen(skb);
1661
1662 skb_shinfo(skb)->gso_segs =
1663 DIV_ROUND_UP(skb->len - hdrlen, mss);
1664 }
1665
1666 queue->stats.rx_bytes += skb->len;
1667 queue->stats.rx_packets++;
1668
1669 work_done++;
1670
1671 /* Set this flag right before netif_receive_skb, otherwise
1672 * someone might think this packet already left netback, and
1673 * do a skb_copy_ubufs while we are still in control of the
 1674		 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
1675 */
1676 if (skb_shinfo(skb)->destructor_arg) {
1677 xenvif_skb_zerocopy_prepare(queue, skb);
1678 queue->stats.tx_zerocopy_sent++;
1679 }
1680
1681 netif_receive_skb(skb);
1682 }
1683
1684 return work_done;
1685}
1686
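/* Callback invoked when the network stack has finished with an skb that
 * still referenced foreign (mapped) pages: queue the pending indices on the
 * dealloc ring for the dealloc kthread to unmap.
 */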
1687void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1688{
1689 unsigned long flags;
1690 pending_ring_idx_t index;
1691 struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1692
1693 /* This is the only place where we grab this lock, to protect callbacks
1694 * from each other.
1695 */
1696 spin_lock_irqsave(&queue->callback_lock, flags);
1697 do {
1698 u16 pending_idx = ubuf->desc;
1699 ubuf = (struct ubuf_info *) ubuf->ctx;
1700 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1701 MAX_PENDING_REQS);
1702 index = pending_index(queue->dealloc_prod);
1703 queue->dealloc_ring[index] = pending_idx;
1704 /* Sync with xenvif_tx_dealloc_action:
1705 * insert idx then incr producer.
1706 */
1707 smp_wmb();
1708 queue->dealloc_prod++;
1709 } while (ubuf);
1710 spin_unlock_irqrestore(&queue->callback_lock, flags);
1711
1712 if (likely(zerocopy_success))
1713 queue->stats.tx_zerocopy_success++;
1714 else
1715 queue->stats.tx_zerocopy_fail++;
1716 xenvif_skb_zerocopy_complete(queue);
1717}
1718
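/* Unmap the grants queued on the dealloc ring by the zerocopy callback and
 * send the corresponding Tx responses.
 */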
1719static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1720{
1721 struct gnttab_unmap_grant_ref *gop;
1722 pending_ring_idx_t dc, dp;
1723 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1724 unsigned int i = 0;
1725
1726 dc = queue->dealloc_cons;
1727 gop = queue->tx_unmap_ops;
1728
1729 /* Free up any grants we have finished using */
1730 do {
1731 dp = queue->dealloc_prod;
1732
1733 /* Ensure we see all indices enqueued by all
1734 * xenvif_zerocopy_callback().
1735 */
1736 smp_rmb();
1737
1738 while (dc != dp) {
1739 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1740 pending_idx =
1741 queue->dealloc_ring[pending_index(dc++)];
1742
1743 pending_idx_release[gop - queue->tx_unmap_ops] =
1744 pending_idx;
1745 queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1746 queue->mmap_pages[pending_idx];
1747 gnttab_set_unmap_op(gop,
1748 idx_to_kaddr(queue, pending_idx),
1749 GNTMAP_host_map,
1750 queue->grant_tx_handle[pending_idx]);
1751 xenvif_grant_handle_reset(queue, pending_idx);
1752 ++gop;
1753 }
1754
1755 } while (dp != queue->dealloc_prod);
1756
1757 queue->dealloc_cons = dc;
1758
1759 if (gop - queue->tx_unmap_ops > 0) {
1760 int ret;
1761 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1762 NULL,
1763 queue->pages_to_unmap,
1764 gop - queue->tx_unmap_ops);
1765 if (ret) {
1766 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1767 gop - queue->tx_unmap_ops, ret);
1768 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1769 if (gop[i].status != GNTST_okay)
1770 netdev_err(queue->vif->dev,
1771 " host_addr: 0x%llx handle: 0x%x status: %d\n",
1772 gop[i].host_addr,
1773 gop[i].handle,
1774 gop[i].status);
1775 }
1776 BUG();
1777 }
1778 }
1779
1780 for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1781 xenvif_idx_release(queue, pending_idx_release[i],
1782 XEN_NETIF_RSP_OKAY);
1783}
1784
1785
1786/* Called after netfront has transmitted */
1787int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1788{
1789 unsigned nr_mops, nr_cops = 0;
1790 int work_done, ret;
1791
1792 if (unlikely(!tx_work_todo(queue)))
1793 return 0;
1794
1795 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1796
1797 if (nr_cops == 0)
1798 return 0;
1799
1800 gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1801 if (nr_mops != 0) {
1802 ret = gnttab_map_refs(queue->tx_map_ops,
1803 NULL,
1804 queue->pages_to_map,
1805 nr_mops);
1806 BUG_ON(ret);
1807 }
1808
1809 work_done = xenvif_tx_submit(queue);
1810
1811 return work_done;
1812}
1813
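/* Send the Tx response for a completed pending slot and return the slot to
 * the pending ring.
 */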
1814static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1815 u8 status)
1816{
1817 struct pending_tx_info *pending_tx_info;
1818 pending_ring_idx_t index;
1819 unsigned long flags;
1820
1821 pending_tx_info = &queue->pending_tx_info[pending_idx];
1822
1823 spin_lock_irqsave(&queue->response_lock, flags);
1824
1825 make_tx_response(queue, &pending_tx_info->req,
1826 pending_tx_info->extra_count, status);
1827
 1828	/* Release the pending index before pushing the Tx response so
 1829	 * it's available before a new Tx request is pushed by the
1830 * frontend.
1831 */
1832 index = pending_index(queue->pending_prod++);
1833 queue->pending_ring[index] = pending_idx;
1834
1835 push_tx_responses(queue);
1836
1837 spin_unlock_irqrestore(&queue->response_lock, flags);
1838}
1839
1840
1841static void make_tx_response(struct xenvif_queue *queue,
1842 struct xen_netif_tx_request *txp,
1843 unsigned int extra_count,
1844 s8 st)
1845{
1846 RING_IDX i = queue->tx.rsp_prod_pvt;
1847 struct xen_netif_tx_response *resp;
1848
1849 resp = RING_GET_RESPONSE(&queue->tx, i);
1850 resp->id = txp->id;
1851 resp->status = st;
1852
1853 while (extra_count-- != 0)
1854 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1855
1856 queue->tx.rsp_prod_pvt = ++i;
1857}
1858
1859static void push_tx_responses(struct xenvif_queue *queue)
1860{
1861 int notify;
1862
1863 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1864 if (notify)
1865 notify_remote_via_irq(queue->tx_irq);
1866}
1867
1868static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1869 u16 id,
1870 s8 st,
1871 u16 offset,
1872 u16 size,
1873 u16 flags)
1874{
1875 RING_IDX i = queue->rx.rsp_prod_pvt;
1876 struct xen_netif_rx_response *resp;
1877
1878 resp = RING_GET_RESPONSE(&queue->rx, i);
1879 resp->offset = offset;
1880 resp->flags = flags;
1881 resp->id = id;
1882 resp->status = (s16)size;
1883 if (st < 0)
1884 resp->status = (s16)st;
1885
1886 queue->rx.rsp_prod_pvt = ++i;
1887
1888 return resp;
1889}
1890
1891void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1892{
1893 int ret;
1894 struct gnttab_unmap_grant_ref tx_unmap_op;
1895
1896 gnttab_set_unmap_op(&tx_unmap_op,
1897 idx_to_kaddr(queue, pending_idx),
1898 GNTMAP_host_map,
1899 queue->grant_tx_handle[pending_idx]);
1900 xenvif_grant_handle_reset(queue, pending_idx);
1901
1902 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1903 &queue->mmap_pages[pending_idx], 1);
1904 if (ret) {
1905 netdev_err(queue->vif->dev,
1906 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1907 ret,
1908 pending_idx,
1909 tx_unmap_op.host_addr,
1910 tx_unmap_op.handle,
1911 tx_unmap_op.status);
1912 BUG();
1913 }
1914}
1915
1916static inline int tx_work_todo(struct xenvif_queue *queue)
1917{
1918 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1919 return 1;
1920
1921 return 0;
1922}
1923
1924static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1925{
1926 return queue->dealloc_cons != queue->dealloc_prod;
1927}
1928
1929void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
1930{
1931 if (queue->tx.sring)
1932 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1933 queue->tx.sring);
1934 if (queue->rx.sring)
1935 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1936 queue->rx.sring);
1937}
1938
1939int xenvif_map_frontend_rings(struct xenvif_queue *queue,
1940 grant_ref_t tx_ring_ref,
1941 grant_ref_t rx_ring_ref)
1942{
1943 void *addr;
1944 struct xen_netif_tx_sring *txs;
1945 struct xen_netif_rx_sring *rxs;
1946
1947 int err = -ENOMEM;
1948
1949 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1950 &tx_ring_ref, 1, &addr);
1951 if (err)
1952 goto err;
1953
1954 txs = (struct xen_netif_tx_sring *)addr;
1955 BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1956
1957 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1958 &rx_ring_ref, 1, &addr);
1959 if (err)
1960 goto err;
1961
1962 rxs = (struct xen_netif_rx_sring *)addr;
1963 BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1964
1965 return 0;
1966
1967err:
1968 xenvif_unmap_frontend_rings(queue);
1969 return err;
1970}
1971
1972static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
1973{
1974 struct xenvif *vif = queue->vif;
1975
1976 queue->stalled = true;
1977
1978 /* At least one queue has stalled? Disable the carrier. */
1979 spin_lock(&vif->lock);
1980 if (vif->stalled_queues++ == 0) {
1981 netdev_info(vif->dev, "Guest Rx stalled");
1982 netif_carrier_off(vif->dev);
1983 }
1984 spin_unlock(&vif->lock);
1985}
1986
1987static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
1988{
1989 struct xenvif *vif = queue->vif;
1990
1991 queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
1992 queue->stalled = false;
1993
1994 /* All queues are ready? Enable the carrier. */
1995 spin_lock(&vif->lock);
1996 if (--vif->stalled_queues == 0) {
1997 netdev_info(vif->dev, "Guest Rx ready");
1998 netif_carrier_on(vif->dev);
1999 }
2000 spin_unlock(&vif->lock);
2001}
2002
2003static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
2004{
2005 RING_IDX prod, cons;
2006
2007 prod = queue->rx.sring->req_prod;
2008 cons = queue->rx.req_cons;
2009
2010 return !queue->stalled && prod - cons < 1
2011 && time_after(jiffies,
2012 queue->last_rx_time + queue->vif->stall_timeout);
2013}
2014
2015static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
2016{
2017 RING_IDX prod, cons;
2018
2019 prod = queue->rx.sring->req_prod;
2020 cons = queue->rx.req_cons;
2021
2022 return queue->stalled && prod - cons >= 1;
2023}
2024
2025static bool xenvif_have_rx_work(struct xenvif_queue *queue)
2026{
2027 return xenvif_rx_ring_slots_available(queue)
2028 || (queue->vif->stall_timeout &&
2029 (xenvif_rx_queue_stalled(queue)
2030 || xenvif_rx_queue_ready(queue)))
2031 || kthread_should_stop()
2032 || queue->vif->disabled;
2033}
2034
2035static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
2036{
2037 struct sk_buff *skb;
2038 long timeout;
2039
2040 skb = skb_peek(&queue->rx_queue);
2041 if (!skb)
2042 return MAX_SCHEDULE_TIMEOUT;
2043
2044 timeout = XENVIF_RX_CB(skb)->expires - jiffies;
2045 return timeout < 0 ? 0 : timeout;
2046}
2047
2048/* Wait until the guest Rx thread has work.
2049 *
2050 * The timeout needs to be adjusted based on the current head of the
2051 * queue (and not just the head at the beginning). In particular, if
2052 * the queue is initially empty an infinite timeout is used and this
2053 * needs to be reduced when a skb is queued.
2054 *
2055 * This cannot be done with wait_event_timeout() because it only
2056 * calculates the timeout once.
2057 */
2058static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
2059{
2060 DEFINE_WAIT(wait);
2061
2062 if (xenvif_have_rx_work(queue))
2063 return;
2064
2065 for (;;) {
2066 long ret;
2067
2068 prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
2069 if (xenvif_have_rx_work(queue))
2070 break;
2071 ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
2072 if (!ret)
2073 break;
2074 }
2075 finish_wait(&queue->wq, &wait);
2076}
2077
2078int xenvif_kthread_guest_rx(void *data)
2079{
2080 struct xenvif_queue *queue = data;
2081 struct xenvif *vif = queue->vif;
2082
2083 if (!vif->stall_timeout)
2084 xenvif_queue_carrier_on(queue);
2085
2086 for (;;) {
2087 xenvif_wait_for_rx_work(queue);
2088
2089 if (kthread_should_stop())
2090 break;
2091
 2092		/* This frontend has been found to be rogue; disable it in
 2093		 * kthread context. Currently this is only set when
 2094		 * netback finds that the frontend has sent a malformed packet,
 2095		 * but we cannot disable the interface in softirq
 2096		 * context, so we defer it here if this thread is
 2097		 * associated with queue 0.
2098 */
2099 if (unlikely(vif->disabled && queue->id == 0)) {
2100 xenvif_carrier_off(vif);
2101 break;
2102 }
2103
2104 if (!skb_queue_empty(&queue->rx_queue))
2105 xenvif_rx_action(queue);
2106
2107 /* If the guest hasn't provided any Rx slots for a
 2108		 * while, it's probably not responsive; drop the
2109 * carrier so packets are dropped earlier.
2110 */
2111 if (vif->stall_timeout) {
2112 if (xenvif_rx_queue_stalled(queue))
2113 xenvif_queue_carrier_off(queue);
2114 else if (xenvif_rx_queue_ready(queue))
2115 xenvif_queue_carrier_on(queue);
2116 }
2117
2118 /* Queued packets may have foreign pages from other
2119 * domains. These cannot be queued indefinitely as
2120 * this would starve guests of grant refs and transmit
2121 * slots.
2122 */
2123 xenvif_rx_queue_drop_expired(queue);
2124
2125 xenvif_rx_queue_maybe_wake(queue);
2126
2127 cond_resched();
2128 }
2129
2130 /* Bin any remaining skbs */
2131 xenvif_rx_queue_purge(queue);
2132
2133 return 0;
2134}
2135
2136static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
2137{
2138 /* Dealloc thread must remain running until all inflight
2139 * packets complete.
2140 */
2141 return kthread_should_stop() &&
2142 !atomic_read(&queue->inflight_packets);
2143}
2144
2145int xenvif_dealloc_kthread(void *data)
2146{
2147 struct xenvif_queue *queue = data;
2148
2149 for (;;) {
2150 wait_event_interruptible(queue->dealloc_wq,
2151 tx_dealloc_work_todo(queue) ||
2152 xenvif_dealloc_kthread_should_stop(queue));
2153 if (xenvif_dealloc_kthread_should_stop(queue))
2154 break;
2155
2156 xenvif_tx_dealloc_action(queue);
2157 cond_resched();
2158 }
2159
 2160	/* Unmap anything remaining */
2161 if (tx_dealloc_work_todo(queue))
2162 xenvif_tx_dealloc_action(queue);
2163
2164 return 0;
2165}
2166
2167static int __init netback_init(void)
2168{
2169 int rc = 0;
2170
2171 if (!xen_domain())
2172 return -ENODEV;
2173
 2174	/* Allow as many queues as there are CPUs if the user has not
2175 * specified a value.
2176 */
2177 if (xenvif_max_queues == 0)
2178 xenvif_max_queues = num_online_cpus();
2179
2180 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
2181 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
2182 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
2183 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
2184 }
2185
2186 rc = xenvif_xenbus_init();
2187 if (rc)
2188 goto failed_init;
2189
2190#ifdef CONFIG_DEBUG_FS
2191 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
2192 if (IS_ERR_OR_NULL(xen_netback_dbg_root))
2193 pr_warn("Init of debugfs returned %ld!\n",
2194 PTR_ERR(xen_netback_dbg_root));
2195#endif /* CONFIG_DEBUG_FS */
2196
2197 return 0;
2198
2199failed_init:
2200 return rc;
2201}
2202
2203module_init(netback_init);
2204
2205static void __exit netback_fini(void)
2206{
2207#ifdef CONFIG_DEBUG_FS
2208 if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
2209 debugfs_remove_recursive(xen_netback_dbg_root);
2210#endif /* CONFIG_DEBUG_FS */
2211 xenvif_xenbus_fini();
2212}
2213module_exit(netback_fini);
2214
2215MODULE_LICENSE("Dual BSD/GPL");
2216MODULE_ALIAS("xen-backend:vif");