1/*
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
4 * by any operating system that implements a compatible front end. A
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
7 *
8 * Copyright (c) 2002-2005, K A Fraser
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35#include "common.h"
36
37#include <linux/kthread.h>
38#include <linux/if_vlan.h>
39#include <linux/udp.h>
40#include <linux/highmem.h>
41
42#include <net/tcp.h>
43
44#include <xen/xen.h>
45#include <xen/events.h>
46#include <xen/interface/memory.h>
47#include <xen/page.h>
48
49#include <asm/xen/hypercall.h>
50
51/* Provide an option to disable split event channels at load time as
52 * event channels are a limited resource. Split event channels are
53 * enabled by default.
54 */
55bool separate_tx_rx_irq = true;
56module_param(separate_tx_rx_irq, bool, 0644);
57
58/* The time that packets can stay on the guest Rx internal queue
59 * before they are dropped.
60 */
61unsigned int rx_drain_timeout_msecs = 10000;
62module_param(rx_drain_timeout_msecs, uint, 0444);
63
64/* The length of time before the frontend is considered unresponsive
65 * because it isn't providing Rx slots.
66 */
67unsigned int rx_stall_timeout_msecs = 60000;
68module_param(rx_stall_timeout_msecs, uint, 0444);
69
70#define MAX_QUEUES_DEFAULT 8
71unsigned int xenvif_max_queues;
72module_param_named(max_queues, xenvif_max_queues, uint, 0644);
73MODULE_PARM_DESC(max_queues,
74 "Maximum number of queues per virtual interface");
75
76/*
77 * This is the maximum number of slots a skb can have. If a guest sends a skb
78 * which exceeds this limit, it is considered malicious.
79 */
80#define FATAL_SKB_SLOTS_DEFAULT 20
81static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
82module_param(fatal_skb_slots, uint, 0444);
83
84/* The amount to copy out of the first guest Tx slot into the skb's
85 * linear area. If the first slot has more data, it will be mapped
86 * and put into the first frag.
87 *
88 * This is sized to avoid pulling headers from the frags for most
89 * TCP/IP packets.
90 */
91#define XEN_NETBACK_TX_COPY_LEN 128
92
93/* This is the maximum number of flows in the hash cache. */
94#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
95unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
96module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
97MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
98
99/* This module parameter indicates that data destined for xen-netfront
100 * must be placed at the XDP_PACKET_HEADROOM offset required for XDP
101 * processing.
102 */
103bool provides_xdp_headroom = true;
104module_param(provides_xdp_headroom, bool, 0644);
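/* Illustrative example only (hypothetical values): when xen-netback is built
 * as a module, the parameters above can be set at load time, e.g.:
 *
 *   modprobe xen-netback separate_tx_rx_irq=0 max_queues=4
 *
 * When built in, the same parameter names can be supplied on the kernel
 * command line, prefixed with the module name.
 */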
105
106static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
107 u8 status);
108
109static void make_tx_response(struct xenvif_queue *queue,
110 struct xen_netif_tx_request *txp,
111 unsigned int extra_count,
112 s8 st);
113static void push_tx_responses(struct xenvif_queue *queue);
114
115static inline int tx_work_todo(struct xenvif_queue *queue);
116
117static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
118 u16 idx)
119{
120 return page_to_pfn(queue->mmap_pages[idx]);
121}
122
123static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
124 u16 idx)
125{
126 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
127}
128
129#define callback_param(vif, pending_idx) \
130 (vif->pending_tx_info[pending_idx].callback_struct)
131
132/* Find the containing queue structure from a pointer into its
133 * pending_tx_info array (entries are indexed by pending_idx). */
134static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
135{
136 u16 pending_idx = ubuf->desc;
137 struct pending_tx_info *temp =
138 container_of(ubuf, struct pending_tx_info, callback_struct);
139 return container_of(temp - pending_idx,
140 struct xenvif_queue,
141 pending_tx_info[0]);
142}
143
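/* While a guest Tx slot is still grant-mapped, the frag's offset field is
 * reused to stash the pending ring index of that slot; xenvif_fill_frags()
 * later overwrites it with the real offset taken from the Tx request.
 */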
144static u16 frag_get_pending_idx(skb_frag_t *frag)
145{
146 return (u16)skb_frag_off(frag);
147}
148
149static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
150{
151 skb_frag_off_set(frag, pending_idx);
152}
153
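/* The pending ring holds MAX_PENDING_REQS entries; the mask below only works
 * as a modulo because MAX_PENDING_REQS is a power of two.
 */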
154static inline pending_ring_idx_t pending_index(unsigned i)
155{
156 return i & (MAX_PENDING_REQS-1);
157}
158
159void xenvif_kick_thread(struct xenvif_queue *queue)
160{
161 wake_up(&queue->wq);
162}
163
164void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
165{
166 int more_to_do;
167
168 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
169
170 if (more_to_do)
171 napi_schedule(&queue->napi);
172}
173
174static void tx_add_credit(struct xenvif_queue *queue)
175{
176 unsigned long max_burst, max_credit;
177
178 /*
179 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
180 * Otherwise the interface can seize up due to insufficient credit.
181 */
182 max_burst = max(131072UL, queue->credit_bytes);
183
184 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
185 max_credit = queue->remaining_credit + queue->credit_bytes;
186 if (max_credit < queue->remaining_credit)
187 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
188
189 queue->remaining_credit = min(max_credit, max_burst);
190 queue->rate_limited = false;
191}
192
193void xenvif_tx_credit_callback(struct timer_list *t)
194{
195 struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
196 tx_add_credit(queue);
197 xenvif_napi_schedule_or_enable_events(queue);
198}
199
200static void xenvif_tx_err(struct xenvif_queue *queue,
201 struct xen_netif_tx_request *txp,
202 unsigned int extra_count, RING_IDX end)
203{
204 RING_IDX cons = queue->tx.req_cons;
205 unsigned long flags;
206
207 do {
208 spin_lock_irqsave(&queue->response_lock, flags);
209 make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
210 push_tx_responses(queue);
211 spin_unlock_irqrestore(&queue->response_lock, flags);
212 if (cons == end)
213 break;
214 RING_COPY_REQUEST(&queue->tx, cons++, txp);
215 extra_count = 0; /* only the first frag can have extras */
216 } while (1);
217 queue->tx.req_cons = cons;
218}
219
220static void xenvif_fatal_tx_err(struct xenvif *vif)
221{
222 netdev_err(vif->dev, "fatal error; disabling device\n");
223 vif->disabled = true;
224 /* Disable the vif from queue 0's kthread */
225 if (vif->num_queues)
226 xenvif_kick_thread(&vif->queues[0]);
227}
228
229static int xenvif_count_requests(struct xenvif_queue *queue,
230 struct xen_netif_tx_request *first,
231 unsigned int extra_count,
232 struct xen_netif_tx_request *txp,
233 int work_to_do)
234{
235 RING_IDX cons = queue->tx.req_cons;
236 int slots = 0;
237 int drop_err = 0;
238 int more_data;
239
240 if (!(first->flags & XEN_NETTXF_more_data))
241 return 0;
242
243 do {
244 struct xen_netif_tx_request dropped_tx = { 0 };
245
246 if (slots >= work_to_do) {
247 netdev_err(queue->vif->dev,
248 "Asked for %d slots but exceeds this limit\n",
249 work_to_do);
250 xenvif_fatal_tx_err(queue->vif);
251 return -ENODATA;
252 }
253
254 /* This guest is really using too many slots and
255 * is considered malicious.
256 */
257 if (unlikely(slots >= fatal_skb_slots)) {
258 netdev_err(queue->vif->dev,
259 "Malicious frontend using %d slots, threshold %u\n",
260 slots, fatal_skb_slots);
261 xenvif_fatal_tx_err(queue->vif);
262 return -E2BIG;
263 }
264
265 /* The Xen network protocol had an implicit dependency on
266 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
267 * the historical MAX_SKB_FRAGS value 18 to honor the
268 * same behavior as before. Any packet using more than
269 * 18 slots but fewer than fatal_skb_slots slots is
270 * dropped.
271 */
272 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
273 if (net_ratelimit())
274 netdev_dbg(queue->vif->dev,
275 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
276 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
277 drop_err = -E2BIG;
278 }
279
280 if (drop_err)
281 txp = &dropped_tx;
282
283 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
284
285 /* If the guest submitted a frame >= 64 KiB then
286 * first->size overflowed and following slots will
287 * appear to be larger than the frame.
288 *
289 * This cannot be a fatal error as there are buggy
290 * frontends that do this.
291 *
292 * Consume all slots and drop the packet.
293 */
294 if (!drop_err && txp->size > first->size) {
295 if (net_ratelimit())
296 netdev_dbg(queue->vif->dev,
297 "Invalid tx request, slot size %u > remaining size %u\n",
298 txp->size, first->size);
299 drop_err = -EIO;
300 }
301
302 first->size -= txp->size;
303 slots++;
304
305 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
306 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
307 txp->offset, txp->size);
308 xenvif_fatal_tx_err(queue->vif);
309 return -EINVAL;
310 }
311
312 more_data = txp->flags & XEN_NETTXF_more_data;
313
314 if (!drop_err)
315 txp++;
316
317 } while (more_data);
318
319 if (drop_err) {
320 xenvif_tx_err(queue, first, extra_count, cons + slots);
321 return drop_err;
322 }
323
324 return slots;
325}
326
327
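/* Per-skb private data kept in skb->cb while a packet is in flight through
 * netback: the pending ring index of the slot holding the packet header.
 */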
328struct xenvif_tx_cb {
329 u16 pending_idx;
330};
331
332#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
333
334static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
335 u16 pending_idx,
336 struct xen_netif_tx_request *txp,
337 unsigned int extra_count,
338 struct gnttab_map_grant_ref *mop)
339{
340 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
341 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
342 GNTMAP_host_map | GNTMAP_readonly,
343 txp->gref, queue->vif->domid);
344
345 memcpy(&queue->pending_tx_info[pending_idx].req, txp,
346 sizeof(*txp));
347 queue->pending_tx_info[pending_idx].extra_count = extra_count;
348}
349
350static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
351{
352 struct sk_buff *skb =
353 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
354 GFP_ATOMIC | __GFP_NOWARN);
355 if (unlikely(skb == NULL))
356 return NULL;
357
358 /* Packets passed to netif_rx() must have some headroom. */
359 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
360
361 /* Initialize it here to avoid later surprises */
362 skb_shinfo(skb)->destructor_arg = NULL;
363
364 return skb;
365}
366
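/* Create one grant-map operation per remaining Tx slot. Frags that do not fit
 * in the first skb (frag_overflow of them) are placed on nskb, which is then
 * attached to the first skb as its frag_list.
 */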
367static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
368 struct sk_buff *skb,
369 struct xen_netif_tx_request *txp,
370 struct gnttab_map_grant_ref *gop,
371 unsigned int frag_overflow,
372 struct sk_buff *nskb)
373{
374 struct skb_shared_info *shinfo = skb_shinfo(skb);
375 skb_frag_t *frags = shinfo->frags;
376 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
377 int start;
378 pending_ring_idx_t index;
379 unsigned int nr_slots;
380
381 nr_slots = shinfo->nr_frags;
382
383 /* Skip the first skb fragment if it is on the same page as the header fragment. */
384 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
385
386 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
387 shinfo->nr_frags++, txp++, gop++) {
388 index = pending_index(queue->pending_cons++);
389 pending_idx = queue->pending_ring[index];
390 xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
391 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
392 }
393
394 if (frag_overflow) {
395
396 shinfo = skb_shinfo(nskb);
397 frags = shinfo->frags;
398
399 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
400 shinfo->nr_frags++, txp++, gop++) {
401 index = pending_index(queue->pending_cons++);
402 pending_idx = queue->pending_ring[index];
403 xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
404 gop);
405 frag_set_pending_idx(&frags[shinfo->nr_frags],
406 pending_idx);
407 }
408
409 skb_shinfo(skb)->frag_list = nskb;
410 }
411
412 return gop;
413}
414
415static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
416 u16 pending_idx,
417 grant_handle_t handle)
418{
419 if (unlikely(queue->grant_tx_handle[pending_idx] !=
420 NETBACK_INVALID_HANDLE)) {
421 netdev_err(queue->vif->dev,
422 "Trying to overwrite active handle! pending_idx: 0x%x\n",
423 pending_idx);
424 BUG();
425 }
426 queue->grant_tx_handle[pending_idx] = handle;
427}
428
429static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
430 u16 pending_idx)
431{
432 if (unlikely(queue->grant_tx_handle[pending_idx] ==
433 NETBACK_INVALID_HANDLE)) {
434 netdev_err(queue->vif->dev,
435 "Trying to unmap invalid handle! pending_idx: 0x%x\n",
436 pending_idx);
437 BUG();
438 }
439 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
440}
441
442static int xenvif_tx_check_gop(struct xenvif_queue *queue,
443 struct sk_buff *skb,
444 struct gnttab_map_grant_ref **gopp_map,
445 struct gnttab_copy **gopp_copy)
446{
447 struct gnttab_map_grant_ref *gop_map = *gopp_map;
448 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
449 /* This always points to the shinfo of the skb being checked, which
450 * could be either the first or the one on the frag_list
451 */
452 struct skb_shared_info *shinfo = skb_shinfo(skb);
453 /* If this is non-NULL, we are currently checking the frag_list skb, and
454 * this points to the shinfo of the first one
455 */
456 struct skb_shared_info *first_shinfo = NULL;
457 int nr_frags = shinfo->nr_frags;
458 const bool sharedslot = nr_frags &&
459 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
460 int i, err;
461
462 /* Check status of header. */
463 err = (*gopp_copy)->status;
464 if (unlikely(err)) {
465 if (net_ratelimit())
466 netdev_dbg(queue->vif->dev,
467 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
468 (*gopp_copy)->status,
469 pending_idx,
470 (*gopp_copy)->source.u.ref);
471 /* The first frag might still have this slot mapped */
472 if (!sharedslot)
473 xenvif_idx_release(queue, pending_idx,
474 XEN_NETIF_RSP_ERROR);
475 }
476 (*gopp_copy)++;
477
478check_frags:
479 for (i = 0; i < nr_frags; i++, gop_map++) {
480 int j, newerr;
481
482 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
483
484 /* Check error status: if okay then remember grant handle. */
485 newerr = gop_map->status;
486
487 if (likely(!newerr)) {
488 xenvif_grant_handle_set(queue,
489 pending_idx,
490 gop_map->handle);
491 /* Had a previous error? Invalidate this fragment. */
492 if (unlikely(err)) {
493 xenvif_idx_unmap(queue, pending_idx);
494 /* If the mapping of the first frag was OK, but
495 * the header's copy failed, and they are
496 * sharing a slot, send an error
497 */
498 if (i == 0 && sharedslot)
499 xenvif_idx_release(queue, pending_idx,
500 XEN_NETIF_RSP_ERROR);
501 else
502 xenvif_idx_release(queue, pending_idx,
503 XEN_NETIF_RSP_OKAY);
504 }
505 continue;
506 }
507
508 /* Error on this fragment: respond to client with an error. */
509 if (net_ratelimit())
510 netdev_dbg(queue->vif->dev,
511 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
512 i,
513 gop_map->status,
514 pending_idx,
515 gop_map->ref);
516
517 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
518
519 /* Not the first error? Preceding frags already invalidated. */
520 if (err)
521 continue;
522
523 /* First error: if the header hasn't shared a slot with the
524 * first frag, release it as well.
525 */
526 if (!sharedslot)
527 xenvif_idx_release(queue,
528 XENVIF_TX_CB(skb)->pending_idx,
529 XEN_NETIF_RSP_OKAY);
530
531 /* Invalidate preceding fragments of this skb. */
532 for (j = 0; j < i; j++) {
533 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
534 xenvif_idx_unmap(queue, pending_idx);
535 xenvif_idx_release(queue, pending_idx,
536 XEN_NETIF_RSP_OKAY);
537 }
538
539 /* And if we found the error while checking the frag_list, unmap
540 * the first skb's frags
541 */
542 if (first_shinfo) {
543 for (j = 0; j < first_shinfo->nr_frags; j++) {
544 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
545 xenvif_idx_unmap(queue, pending_idx);
546 xenvif_idx_release(queue, pending_idx,
547 XEN_NETIF_RSP_OKAY);
548 }
549 }
550
551 /* Remember the error: invalidate all subsequent fragments. */
552 err = newerr;
553 }
554
555 if (skb_has_frag_list(skb) && !first_shinfo) {
556 first_shinfo = skb_shinfo(skb);
557 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
558 nr_frags = shinfo->nr_frags;
559
560 goto check_frags;
561 }
562
563 *gopp_map = gop_map;
564 return err;
565}
566
567static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
568{
569 struct skb_shared_info *shinfo = skb_shinfo(skb);
570 int nr_frags = shinfo->nr_frags;
571 int i;
572 u16 prev_pending_idx = INVALID_PENDING_IDX;
573
574 for (i = 0; i < nr_frags; i++) {
575 skb_frag_t *frag = shinfo->frags + i;
576 struct xen_netif_tx_request *txp;
577 struct page *page;
578 u16 pending_idx;
579
580 pending_idx = frag_get_pending_idx(frag);
581
582 /* If this is not the first frag, chain it to the previous one. */
583 if (prev_pending_idx == INVALID_PENDING_IDX)
584 skb_shinfo(skb)->destructor_arg =
585 &callback_param(queue, pending_idx);
586 else
587 callback_param(queue, prev_pending_idx).ctx =
588 &callback_param(queue, pending_idx);
589
590 callback_param(queue, pending_idx).ctx = NULL;
591 prev_pending_idx = pending_idx;
592
593 txp = &queue->pending_tx_info[pending_idx].req;
594 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
595 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
596 skb->len += txp->size;
597 skb->data_len += txp->size;
598 skb->truesize += txp->size;
599
600 /* Take an extra reference to offset the network stack's put_page */
601 get_page(queue->mmap_pages[pending_idx]);
602 }
603}
604
605static int xenvif_get_extras(struct xenvif_queue *queue,
606 struct xen_netif_extra_info *extras,
607 unsigned int *extra_count,
608 int work_to_do)
609{
610 struct xen_netif_extra_info extra;
611 RING_IDX cons = queue->tx.req_cons;
612
613 do {
614 if (unlikely(work_to_do-- <= 0)) {
615 netdev_err(queue->vif->dev, "Missing extra info\n");
616 xenvif_fatal_tx_err(queue->vif);
617 return -EBADR;
618 }
619
620 RING_COPY_REQUEST(&queue->tx, cons, &extra);
621
622 queue->tx.req_cons = ++cons;
623 (*extra_count)++;
624
625 if (unlikely(!extra.type ||
626 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
627 netdev_err(queue->vif->dev,
628 "Invalid extra type: %d\n", extra.type);
629 xenvif_fatal_tx_err(queue->vif);
630 return -EINVAL;
631 }
632
633 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
634 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
635
636 return work_to_do;
637}
638
639static int xenvif_set_skb_gso(struct xenvif *vif,
640 struct sk_buff *skb,
641 struct xen_netif_extra_info *gso)
642{
643 if (!gso->u.gso.size) {
644 netdev_err(vif->dev, "GSO size must not be zero.\n");
645 xenvif_fatal_tx_err(vif);
646 return -EINVAL;
647 }
648
649 switch (gso->u.gso.type) {
650 case XEN_NETIF_GSO_TYPE_TCPV4:
651 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
652 break;
653 case XEN_NETIF_GSO_TYPE_TCPV6:
654 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
655 break;
656 default:
657 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
658 xenvif_fatal_tx_err(vif);
659 return -EINVAL;
660 }
661
662 skb_shinfo(skb)->gso_size = gso->u.gso.size;
663 /* gso_segs will be calculated later */
664
665 return 0;
666}
667
668static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
669{
670 bool recalculate_partial_csum = false;
671
672 /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
673 * peers can fail to set NETRXF_csum_blank when sending a GSO
674 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
675 * recalculate the partial checksum.
676 */
677 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
678 queue->stats.rx_gso_checksum_fixup++;
679 skb->ip_summed = CHECKSUM_PARTIAL;
680 recalculate_partial_csum = true;
681 }
682
683 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
684 if (skb->ip_summed != CHECKSUM_PARTIAL)
685 return 0;
686
687 return skb_checksum_setup(skb, recalculate_partial_csum);
688}
689
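/* Credit-based Tx scheduling: a queue may send up to credit_bytes bytes per
 * credit_usec microseconds, with tx_add_credit() topping the allowance back
 * up once the window has passed. Purely as an illustration, credit_bytes =
 * 1000000 with credit_usec = 100000 caps the queue at roughly 10 MB/s.
 */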
690static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
691{
692 u64 now = get_jiffies_64();
693 u64 next_credit = queue->credit_window_start +
694 msecs_to_jiffies(queue->credit_usec / 1000);
695
696 /* Timer could already be pending in rare cases. */
697 if (timer_pending(&queue->credit_timeout)) {
698 queue->rate_limited = true;
699 return true;
700 }
701
702 /* Passed the point where we can replenish credit? */
703 if (time_after_eq64(now, next_credit)) {
704 queue->credit_window_start = now;
705 tx_add_credit(queue);
706 }
707
708 /* Still too big to send right now? Set a callback. */
709 if (size > queue->remaining_credit) {
710 mod_timer(&queue->credit_timeout,
711 next_credit);
712 queue->credit_window_start = next_credit;
713 queue->rate_limited = true;
714
715 return true;
716 }
717
718 return false;
719}
720
721/* No locking is required in xenvif_mcast_add/del() as they are
722 * only ever invoked from NAPI poll. An RCU list is used because
723 * xenvif_mcast_match() is called asynchronously, during start_xmit.
724 */
725
726static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
727{
728 struct xenvif_mcast_addr *mcast;
729
730 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
731 if (net_ratelimit())
732 netdev_err(vif->dev,
733 "Too many multicast addresses\n");
734 return -ENOSPC;
735 }
736
737 mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
738 if (!mcast)
739 return -ENOMEM;
740
741 ether_addr_copy(mcast->addr, addr);
742 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
743 vif->fe_mcast_count++;
744
745 return 0;
746}
747
748static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
749{
750 struct xenvif_mcast_addr *mcast;
751
752 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
753 if (ether_addr_equal(addr, mcast->addr)) {
754 --vif->fe_mcast_count;
755 list_del_rcu(&mcast->entry);
756 kfree_rcu(mcast, rcu);
757 break;
758 }
759 }
760}
761
762bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
763{
764 struct xenvif_mcast_addr *mcast;
765
766 rcu_read_lock();
767 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
768 if (ether_addr_equal(addr, mcast->addr)) {
769 rcu_read_unlock();
770 return true;
771 }
772 }
773 rcu_read_unlock();
774
775 return false;
776}
777
778void xenvif_mcast_addr_list_free(struct xenvif *vif)
779{
780 /* No need for locking or RCU here. NAPI poll and TX queue
781 * are stopped.
782 */
783 while (!list_empty(&vif->fe_mcast_addr)) {
784 struct xenvif_mcast_addr *mcast;
785
786 mcast = list_first_entry(&vif->fe_mcast_addr,
787 struct xenvif_mcast_addr,
788 entry);
789 --vif->fe_mcast_count;
790 list_del(&mcast->entry);
791 kfree(mcast);
792 }
793}
794
795static void xenvif_tx_build_gops(struct xenvif_queue *queue,
796 int budget,
797 unsigned *copy_ops,
798 unsigned *map_ops)
799{
800 struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
801 struct sk_buff *skb, *nskb;
802 int ret;
803 unsigned int frag_overflow;
804
805 while (skb_queue_len(&queue->tx_queue) < budget) {
806 struct xen_netif_tx_request txreq;
807 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
808 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
809 unsigned int extra_count;
810 u16 pending_idx;
811 RING_IDX idx;
812 int work_to_do;
813 unsigned int data_len;
814 pending_ring_idx_t index;
815
816 if (queue->tx.sring->req_prod - queue->tx.req_cons >
817 XEN_NETIF_TX_RING_SIZE) {
818 netdev_err(queue->vif->dev,
819 "Impossible number of requests. "
820 "req_prod %d, req_cons %d, size %ld\n",
821 queue->tx.sring->req_prod, queue->tx.req_cons,
822 XEN_NETIF_TX_RING_SIZE);
823 xenvif_fatal_tx_err(queue->vif);
824 break;
825 }
826
827 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
828 if (!work_to_do)
829 break;
830
831 idx = queue->tx.req_cons;
832 rmb(); /* Ensure that we see the request before we copy it. */
833 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
834
835 /* Credit-based scheduling. */
836 if (txreq.size > queue->remaining_credit &&
837 tx_credit_exceeded(queue, txreq.size))
838 break;
839
840 queue->remaining_credit -= txreq.size;
841
842 work_to_do--;
843 queue->tx.req_cons = ++idx;
844
845 memset(extras, 0, sizeof(extras));
846 extra_count = 0;
847 if (txreq.flags & XEN_NETTXF_extra_info) {
848 work_to_do = xenvif_get_extras(queue, extras,
849 &extra_count,
850 work_to_do);
851 idx = queue->tx.req_cons;
852 if (unlikely(work_to_do < 0))
853 break;
854 }
855
856 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
857 struct xen_netif_extra_info *extra;
858
859 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
860 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
861
862 make_tx_response(queue, &txreq, extra_count,
863 (ret == 0) ?
864 XEN_NETIF_RSP_OKAY :
865 XEN_NETIF_RSP_ERROR);
866 push_tx_responses(queue);
867 continue;
868 }
869
870 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
871 struct xen_netif_extra_info *extra;
872
873 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
874 xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
875
876 make_tx_response(queue, &txreq, extra_count,
877 XEN_NETIF_RSP_OKAY);
878 push_tx_responses(queue);
879 continue;
880 }
881
882 ret = xenvif_count_requests(queue, &txreq, extra_count,
883 txfrags, work_to_do);
884 if (unlikely(ret < 0))
885 break;
886
887 idx += ret;
888
889 if (unlikely(txreq.size < ETH_HLEN)) {
890 netdev_dbg(queue->vif->dev,
891 "Bad packet size: %d\n", txreq.size);
892 xenvif_tx_err(queue, &txreq, extra_count, idx);
893 break;
894 }
895
896 /* The payload must not cross a page boundary, as it mustn't be fragmented. */
897 if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
898 netdev_err(queue->vif->dev,
899 "txreq.offset: %u, size: %u, end: %lu\n",
900 txreq.offset, txreq.size,
901 (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
902 xenvif_fatal_tx_err(queue->vif);
903 break;
904 }
905
906 index = pending_index(queue->pending_cons);
907 pending_idx = queue->pending_ring[index];
908
909 data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
910 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
911 XEN_NETBACK_TX_COPY_LEN : txreq.size;
912
913 skb = xenvif_alloc_skb(data_len);
914 if (unlikely(skb == NULL)) {
915 netdev_dbg(queue->vif->dev,
916 "Can't allocate a skb in start_xmit.\n");
917 xenvif_tx_err(queue, &txreq, extra_count, idx);
918 break;
919 }
920
921 skb_shinfo(skb)->nr_frags = ret;
922 if (data_len < txreq.size)
923 skb_shinfo(skb)->nr_frags++;
924 /* At this point shinfo->nr_frags is in fact the number of
925 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
926 */
927 frag_overflow = 0;
928 nskb = NULL;
929 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
930 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
931 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
932 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
933 nskb = xenvif_alloc_skb(0);
934 if (unlikely(nskb == NULL)) {
935 skb_shinfo(skb)->nr_frags = 0;
936 kfree_skb(skb);
937 xenvif_tx_err(queue, &txreq, extra_count, idx);
938 if (net_ratelimit())
939 netdev_err(queue->vif->dev,
940 "Can't allocate the frag_list skb.\n");
941 break;
942 }
943 }
944
945 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
946 struct xen_netif_extra_info *gso;
947 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
948
949 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
950 /* Failure in xenvif_set_skb_gso is fatal. */
951 skb_shinfo(skb)->nr_frags = 0;
952 kfree_skb(skb);
953 kfree_skb(nskb);
954 break;
955 }
956 }
957
958 if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
959 struct xen_netif_extra_info *extra;
960 enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
961
962 extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
963
964 switch (extra->u.hash.type) {
965 case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
966 case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
967 type = PKT_HASH_TYPE_L3;
968 break;
969
970 case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
971 case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
972 type = PKT_HASH_TYPE_L4;
973 break;
974
975 default:
976 break;
977 }
978
979 if (type != PKT_HASH_TYPE_NONE)
980 skb_set_hash(skb,
981 *(u32 *)extra->u.hash.value,
982 type);
983 }
984
985 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
986
987 __skb_put(skb, data_len);
988 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
989 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
990 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
991
992 queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
993 virt_to_gfn(skb->data);
994 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
995 queue->tx_copy_ops[*copy_ops].dest.offset =
996 offset_in_page(skb->data) & ~XEN_PAGE_MASK;
997
998 queue->tx_copy_ops[*copy_ops].len = data_len;
999 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1000
1001 (*copy_ops)++;
1002
1003 if (data_len < txreq.size) {
1004 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1005 pending_idx);
1006 xenvif_tx_create_map_op(queue, pending_idx, &txreq,
1007 extra_count, gop);
1008 gop++;
1009 } else {
1010 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1011 INVALID_PENDING_IDX);
1012 memcpy(&queue->pending_tx_info[pending_idx].req,
1013 &txreq, sizeof(txreq));
1014 queue->pending_tx_info[pending_idx].extra_count =
1015 extra_count;
1016 }
1017
1018 queue->pending_cons++;
1019
1020 gop = xenvif_get_requests(queue, skb, txfrags, gop,
1021 frag_overflow, nskb);
1022
1023 __skb_queue_tail(&queue->tx_queue, skb);
1024
1025 queue->tx.req_cons = idx;
1026
1027 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1028 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1029 break;
1030 }
1031
1032 (*map_ops) = gop - queue->tx_map_ops;
1033 return;
1034}
1035
1036/* Consolidate skb with a frag_list into a brand new one with local pages on
1037 * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
1038 */
1039static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1040{
1041 unsigned int offset = skb_headlen(skb);
1042 skb_frag_t frags[MAX_SKB_FRAGS];
1043 int i, f;
1044 struct ubuf_info *uarg;
1045 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1046
1047 queue->stats.tx_zerocopy_sent += 2;
1048 queue->stats.tx_frag_overflow++;
1049
1050 xenvif_fill_frags(queue, nskb);
1051 /* Subtract the frags' size; we will correct it later */
1052 skb->truesize -= skb->data_len;
1053 skb->len += nskb->len;
1054 skb->data_len += nskb->len;
1055
1056 /* create a brand new frags array and coalesce there */
1057 for (i = 0; offset < skb->len; i++) {
1058 struct page *page;
1059 unsigned int len;
1060
1061 BUG_ON(i >= MAX_SKB_FRAGS);
1062 page = alloc_page(GFP_ATOMIC);
1063 if (!page) {
1064 int j;
1065 skb->truesize += skb->data_len;
1066 for (j = 0; j < i; j++)
1067 put_page(skb_frag_page(&frags[j]));
1068 return -ENOMEM;
1069 }
1070
1071 if (offset + PAGE_SIZE < skb->len)
1072 len = PAGE_SIZE;
1073 else
1074 len = skb->len - offset;
1075 if (skb_copy_bits(skb, offset, page_address(page), len))
1076 BUG();
1077
1078 offset += len;
1079 __skb_frag_set_page(&frags[i], page);
1080 skb_frag_off_set(&frags[i], 0);
1081 skb_frag_size_set(&frags[i], len);
1082 }
1083
1084 /* Release all the original (foreign) frags. */
1085 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1086 skb_frag_unref(skb, f);
1087 uarg = skb_shinfo(skb)->destructor_arg;
1088 /* increase inflight counter to offset decrement in callback */
1089 atomic_inc(&queue->inflight_packets);
1090 uarg->callback(uarg, true);
1091 skb_shinfo(skb)->destructor_arg = NULL;
1092
1093 /* Fill the skb with the new (local) frags. */
1094 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1095 skb_shinfo(skb)->nr_frags = i;
1096 skb->truesize += i * PAGE_SIZE;
1097
1098 return 0;
1099}
1100
1101static int xenvif_tx_submit(struct xenvif_queue *queue)
1102{
1103 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1104 struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1105 struct sk_buff *skb;
1106 int work_done = 0;
1107
1108 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1109 struct xen_netif_tx_request *txp;
1110 u16 pending_idx;
1111 unsigned data_len;
1112
1113 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1114 txp = &queue->pending_tx_info[pending_idx].req;
1115
1116 /* Check the remap error code. */
1117 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1118 /* If there was an error, xenvif_tx_check_gop is
1119 * expected to release all the frags which were mapped,
1120 * so kfree_skb shouldn't do it again
1121 */
1122 skb_shinfo(skb)->nr_frags = 0;
1123 if (skb_has_frag_list(skb)) {
1124 struct sk_buff *nskb =
1125 skb_shinfo(skb)->frag_list;
1126 skb_shinfo(nskb)->nr_frags = 0;
1127 }
1128 kfree_skb(skb);
1129 continue;
1130 }
1131
1132 data_len = skb->len;
1133 callback_param(queue, pending_idx).ctx = NULL;
1134 if (data_len < txp->size) {
1135 /* Append the packet payload as a fragment. */
1136 txp->offset += data_len;
1137 txp->size -= data_len;
1138 } else {
1139 /* Schedule a response immediately. */
1140 xenvif_idx_release(queue, pending_idx,
1141 XEN_NETIF_RSP_OKAY);
1142 }
1143
1144 if (txp->flags & XEN_NETTXF_csum_blank)
1145 skb->ip_summed = CHECKSUM_PARTIAL;
1146 else if (txp->flags & XEN_NETTXF_data_validated)
1147 skb->ip_summed = CHECKSUM_UNNECESSARY;
1148
1149 xenvif_fill_frags(queue, skb);
1150
1151 if (unlikely(skb_has_frag_list(skb))) {
1152 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1153 xenvif_skb_zerocopy_prepare(queue, nskb);
1154 if (xenvif_handle_frag_list(queue, skb)) {
1155 if (net_ratelimit())
1156 netdev_err(queue->vif->dev,
1157 "Not enough memory to consolidate frag_list!\n");
1158 xenvif_skb_zerocopy_prepare(queue, skb);
1159 kfree_skb(skb);
1160 continue;
1161 }
1162 /* Copied all the bits from the frag list -- free it. */
1163 skb_frag_list_init(skb);
1164 kfree_skb(nskb);
1165 }
1166
1167 skb->dev = queue->vif->dev;
1168 skb->protocol = eth_type_trans(skb, skb->dev);
1169 skb_reset_network_header(skb);
1170
1171 if (checksum_setup(queue, skb)) {
1172 netdev_dbg(queue->vif->dev,
1173 "Can't setup checksum in net_tx_action\n");
1174 /* We have to set this flag to trigger the callback */
1175 if (skb_shinfo(skb)->destructor_arg)
1176 xenvif_skb_zerocopy_prepare(queue, skb);
1177 kfree_skb(skb);
1178 continue;
1179 }
1180
1181 skb_probe_transport_header(skb);
1182
1183 /* If the packet is GSO then we will have just set up the
1184 * transport header offset in checksum_setup so it's now
1185 * straightforward to calculate gso_segs.
1186 */
1187 if (skb_is_gso(skb)) {
1188 int mss, hdrlen;
1189
1190 /* GSO implies having the L4 header. */
1191 WARN_ON_ONCE(!skb_transport_header_was_set(skb));
1192 if (unlikely(!skb_transport_header_was_set(skb))) {
1193 kfree_skb(skb);
1194 continue;
1195 }
1196
1197 mss = skb_shinfo(skb)->gso_size;
1198 hdrlen = skb_transport_header(skb) -
1199 skb_mac_header(skb) +
1200 tcp_hdrlen(skb);
1201
1202 skb_shinfo(skb)->gso_segs =
1203 DIV_ROUND_UP(skb->len - hdrlen, mss);
1204 }
1205
1206 queue->stats.rx_bytes += skb->len;
1207 queue->stats.rx_packets++;
1208
1209 work_done++;
1210
1211 /* Set this flag right before netif_receive_skb, otherwise
1212 * someone might think this packet already left netback, and
1213 * do a skb_copy_ubufs while we are still in control of the
1214 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
1215 */
1216 if (skb_shinfo(skb)->destructor_arg) {
1217 xenvif_skb_zerocopy_prepare(queue, skb);
1218 queue->stats.tx_zerocopy_sent++;
1219 }
1220
1221 netif_receive_skb(skb);
1222 }
1223
1224 return work_done;
1225}
1226
1227void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1228{
1229 unsigned long flags;
1230 pending_ring_idx_t index;
1231 struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1232
1233 /* This is the only place where we grab this lock, to protect callbacks
1234 * from each other.
1235 */
1236 spin_lock_irqsave(&queue->callback_lock, flags);
1237 do {
1238 u16 pending_idx = ubuf->desc;
1239 ubuf = (struct ubuf_info *) ubuf->ctx;
1240 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1241 MAX_PENDING_REQS);
1242 index = pending_index(queue->dealloc_prod);
1243 queue->dealloc_ring[index] = pending_idx;
1244 /* Sync with xenvif_tx_dealloc_action:
1245 * insert idx then incr producer.
1246 */
1247 smp_wmb();
1248 queue->dealloc_prod++;
1249 } while (ubuf);
1250 spin_unlock_irqrestore(&queue->callback_lock, flags);
1251
1252 if (likely(zerocopy_success))
1253 queue->stats.tx_zerocopy_success++;
1254 else
1255 queue->stats.tx_zerocopy_fail++;
1256 xenvif_skb_zerocopy_complete(queue);
1257}
1258
1259static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1260{
1261 struct gnttab_unmap_grant_ref *gop;
1262 pending_ring_idx_t dc, dp;
1263 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1264 unsigned int i = 0;
1265
1266 dc = queue->dealloc_cons;
1267 gop = queue->tx_unmap_ops;
1268
1269 /* Free up any grants we have finished using */
1270 do {
1271 dp = queue->dealloc_prod;
1272
1273 /* Ensure we see all indices enqueued by all
1274 * xenvif_zerocopy_callback().
1275 */
1276 smp_rmb();
1277
1278 while (dc != dp) {
1279 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1280 pending_idx =
1281 queue->dealloc_ring[pending_index(dc++)];
1282
1283 pending_idx_release[gop - queue->tx_unmap_ops] =
1284 pending_idx;
1285 queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1286 queue->mmap_pages[pending_idx];
1287 gnttab_set_unmap_op(gop,
1288 idx_to_kaddr(queue, pending_idx),
1289 GNTMAP_host_map,
1290 queue->grant_tx_handle[pending_idx]);
1291 xenvif_grant_handle_reset(queue, pending_idx);
1292 ++gop;
1293 }
1294
1295 } while (dp != queue->dealloc_prod);
1296
1297 queue->dealloc_cons = dc;
1298
1299 if (gop - queue->tx_unmap_ops > 0) {
1300 int ret;
1301 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1302 NULL,
1303 queue->pages_to_unmap,
1304 gop - queue->tx_unmap_ops);
1305 if (ret) {
1306 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1307 gop - queue->tx_unmap_ops, ret);
1308 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1309 if (gop[i].status != GNTST_okay)
1310 netdev_err(queue->vif->dev,
1311 " host_addr: 0x%llx handle: 0x%x status: %d\n",
1312 gop[i].host_addr,
1313 gop[i].handle,
1314 gop[i].status);
1315 }
1316 BUG();
1317 }
1318 }
1319
1320 for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1321 xenvif_idx_release(queue, pending_idx_release[i],
1322 XEN_NETIF_RSP_OKAY);
1323}
1324
1325
1326/* Called after netfront has transmitted */
1327int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1328{
1329 unsigned nr_mops, nr_cops = 0;
1330 int work_done, ret;
1331
1332 if (unlikely(!tx_work_todo(queue)))
1333 return 0;
1334
1335 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1336
1337 if (nr_cops == 0)
1338 return 0;
1339
1340 gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1341 if (nr_mops != 0) {
1342 ret = gnttab_map_refs(queue->tx_map_ops,
1343 NULL,
1344 queue->pages_to_map,
1345 nr_mops);
1346 BUG_ON(ret);
1347 }
1348
1349 work_done = xenvif_tx_submit(queue);
1350
1351 return work_done;
1352}
1353
1354static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1355 u8 status)
1356{
1357 struct pending_tx_info *pending_tx_info;
1358 pending_ring_idx_t index;
1359 unsigned long flags;
1360
1361 pending_tx_info = &queue->pending_tx_info[pending_idx];
1362
1363 spin_lock_irqsave(&queue->response_lock, flags);
1364
1365 make_tx_response(queue, &pending_tx_info->req,
1366 pending_tx_info->extra_count, status);
1367
1368 /* Release the pending index before pushing the Tx response so
1369 * it's available before a new Tx request is pushed by the
1370 * frontend.
1371 */
1372 index = pending_index(queue->pending_prod++);
1373 queue->pending_ring[index] = pending_idx;
1374
1375 push_tx_responses(queue);
1376
1377 spin_unlock_irqrestore(&queue->response_lock, flags);
1378}
1379
1380
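/* Write a Tx response for the given request at rsp_prod_pvt, followed by one
 * NULL response for each extra-info slot consumed along with it. Callers
 * follow up with push_tx_responses() to publish the responses and notify the
 * frontend if needed.
 */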
1381static void make_tx_response(struct xenvif_queue *queue,
1382 struct xen_netif_tx_request *txp,
1383 unsigned int extra_count,
1384 s8 st)
1385{
1386 RING_IDX i = queue->tx.rsp_prod_pvt;
1387 struct xen_netif_tx_response *resp;
1388
1389 resp = RING_GET_RESPONSE(&queue->tx, i);
1390 resp->id = txp->id;
1391 resp->status = st;
1392
1393 while (extra_count-- != 0)
1394 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1395
1396 queue->tx.rsp_prod_pvt = ++i;
1397}
1398
1399static void push_tx_responses(struct xenvif_queue *queue)
1400{
1401 int notify;
1402
1403 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1404 if (notify)
1405 notify_remote_via_irq(queue->tx_irq);
1406}
1407
1408void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1409{
1410 int ret;
1411 struct gnttab_unmap_grant_ref tx_unmap_op;
1412
1413 gnttab_set_unmap_op(&tx_unmap_op,
1414 idx_to_kaddr(queue, pending_idx),
1415 GNTMAP_host_map,
1416 queue->grant_tx_handle[pending_idx]);
1417 xenvif_grant_handle_reset(queue, pending_idx);
1418
1419 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1420 &queue->mmap_pages[pending_idx], 1);
1421 if (ret) {
1422 netdev_err(queue->vif->dev,
1423 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1424 ret,
1425 pending_idx,
1426 tx_unmap_op.host_addr,
1427 tx_unmap_op.handle,
1428 tx_unmap_op.status);
1429 BUG();
1430 }
1431}
1432
1433static inline int tx_work_todo(struct xenvif_queue *queue)
1434{
1435 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1436 return 1;
1437
1438 return 0;
1439}
1440
1441static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1442{
1443 return queue->dealloc_cons != queue->dealloc_prod;
1444}
1445
1446void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
1447{
1448 if (queue->tx.sring)
1449 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1450 queue->tx.sring);
1451 if (queue->rx.sring)
1452 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1453 queue->rx.sring);
1454}
1455
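/* Map the shared Tx and Rx rings granted by the frontend. After attaching to
 * each ring, sanity-check that the frontend has not advertised more
 * outstanding requests than the ring can hold, which would indicate a buggy
 * or malicious frontend.
 */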
1456int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
1457 grant_ref_t tx_ring_ref,
1458 grant_ref_t rx_ring_ref)
1459{
1460 void *addr;
1461 struct xen_netif_tx_sring *txs;
1462 struct xen_netif_rx_sring *rxs;
1463 RING_IDX rsp_prod, req_prod;
1464 int err = -ENOMEM;
1465
1466 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1467 &tx_ring_ref, 1, &addr);
1468 if (err)
1469 goto err;
1470
1471 txs = (struct xen_netif_tx_sring *)addr;
1472 rsp_prod = READ_ONCE(txs->rsp_prod);
1473 req_prod = READ_ONCE(txs->req_prod);
1474
1475 BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
1476
1477 err = -EIO;
1478 if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
1479 goto err;
1480
1481 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1482 &rx_ring_ref, 1, &addr);
1483 if (err)
1484 goto err;
1485
1486 rxs = (struct xen_netif_rx_sring *)addr;
1487 rsp_prod = READ_ONCE(rxs->rsp_prod);
1488 req_prod = READ_ONCE(rxs->req_prod);
1489
1490 BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
1491
1492 err = -EIO;
1493 if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
1494 goto err;
1495
1496 return 0;
1497
1498err:
1499 xenvif_unmap_frontend_data_rings(queue);
1500 return err;
1501}
1502
1503static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1504{
1505 /* Dealloc thread must remain running until all inflight
1506 * packets complete.
1507 */
1508 return kthread_should_stop() &&
1509 !atomic_read(&queue->inflight_packets);
1510}
1511
1512int xenvif_dealloc_kthread(void *data)
1513{
1514 struct xenvif_queue *queue = data;
1515
1516 for (;;) {
1517 wait_event_interruptible(queue->dealloc_wq,
1518 tx_dealloc_work_todo(queue) ||
1519 xenvif_dealloc_kthread_should_stop(queue));
1520 if (xenvif_dealloc_kthread_should_stop(queue))
1521 break;
1522
1523 xenvif_tx_dealloc_action(queue);
1524 cond_resched();
1525 }
1526
1527 /* Unmap anything remaining */
1528 if (tx_dealloc_work_todo(queue))
1529 xenvif_tx_dealloc_action(queue);
1530
1531 return 0;
1532}
1533
1534static void make_ctrl_response(struct xenvif *vif,
1535 const struct xen_netif_ctrl_request *req,
1536 u32 status, u32 data)
1537{
1538 RING_IDX idx = vif->ctrl.rsp_prod_pvt;
1539 struct xen_netif_ctrl_response rsp = {
1540 .id = req->id,
1541 .type = req->type,
1542 .status = status,
1543 .data = data,
1544 };
1545
1546 *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
1547 vif->ctrl.rsp_prod_pvt = ++idx;
1548}
1549
1550static void push_ctrl_response(struct xenvif *vif)
1551{
1552 int notify;
1553
1554 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
1555 if (notify)
1556 notify_remote_via_irq(vif->ctrl_irq);
1557}
1558
1559static void process_ctrl_request(struct xenvif *vif,
1560 const struct xen_netif_ctrl_request *req)
1561{
1562 u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
1563 u32 data = 0;
1564
1565 switch (req->type) {
1566 case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
1567 status = xenvif_set_hash_alg(vif, req->data[0]);
1568 break;
1569
1570 case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
1571 status = xenvif_get_hash_flags(vif, &data);
1572 break;
1573
1574 case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
1575 status = xenvif_set_hash_flags(vif, req->data[0]);
1576 break;
1577
1578 case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
1579 status = xenvif_set_hash_key(vif, req->data[0],
1580 req->data[1]);
1581 break;
1582
1583 case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
1584 status = XEN_NETIF_CTRL_STATUS_SUCCESS;
1585 data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
1586 break;
1587
1588 case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
1589 status = xenvif_set_hash_mapping_size(vif,
1590 req->data[0]);
1591 break;
1592
1593 case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
1594 status = xenvif_set_hash_mapping(vif, req->data[0],
1595 req->data[1],
1596 req->data[2]);
1597 break;
1598
1599 default:
1600 break;
1601 }
1602
1603 make_ctrl_response(vif, req, status, data);
1604 push_ctrl_response(vif);
1605}
1606
1607static void xenvif_ctrl_action(struct xenvif *vif)
1608{
1609 for (;;) {
1610 RING_IDX req_prod, req_cons;
1611
1612 req_prod = vif->ctrl.sring->req_prod;
1613 req_cons = vif->ctrl.req_cons;
1614
1615 /* Make sure we can see requests before we process them. */
1616 rmb();
1617
1618 if (req_cons == req_prod)
1619 break;
1620
1621 while (req_cons != req_prod) {
1622 struct xen_netif_ctrl_request req;
1623
1624 RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
1625 req_cons++;
1626
1627 process_ctrl_request(vif, &req);
1628 }
1629
1630 vif->ctrl.req_cons = req_cons;
1631 vif->ctrl.sring->req_event = req_cons + 1;
1632 }
1633}
1634
1635static bool xenvif_ctrl_work_todo(struct xenvif *vif)
1636{
1637 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
1638 return true;
1639
1640 return false;
1641}
1642
1643irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
1644{
1645 struct xenvif *vif = data;
1646
1647 while (xenvif_ctrl_work_todo(vif))
1648 xenvif_ctrl_action(vif);
1649
1650 return IRQ_HANDLED;
1651}
1652
1653static int __init netback_init(void)
1654{
1655 int rc = 0;
1656
1657 if (!xen_domain())
1658 return -ENODEV;
1659
1660 /* Allow as many queues as there are CPUs, but at most 8, if the user
1661 * has not specified a value.
1662 */
1663 if (xenvif_max_queues == 0)
1664 xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
1665 num_online_cpus());
1666
1667 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1668 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1669 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1670 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1671 }
1672
1673 rc = xenvif_xenbus_init();
1674 if (rc)
1675 goto failed_init;
1676
1677#ifdef CONFIG_DEBUG_FS
1678 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
1679#endif /* CONFIG_DEBUG_FS */
1680
1681 return 0;
1682
1683failed_init:
1684 return rc;
1685}
1686
1687module_init(netback_init);
1688
1689static void __exit netback_fini(void)
1690{
1691#ifdef CONFIG_DEBUG_FS
1692 debugfs_remove_recursive(xen_netback_dbg_root);
1693#endif /* CONFIG_DEBUG_FS */
1694 xenvif_xenbus_fini();
1695}
1696module_exit(netback_fini);
1697
1698MODULE_LICENSE("Dual BSD/GPL");
1699MODULE_ALIAS("xen-backend:vif");
153static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
154{
155 RING_IDX prod, cons;
156 struct sk_buff *skb;
157 int needed;
158
159 skb = skb_peek(&queue->rx_queue);
160 if (!skb)
161 return false;
162
163 needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
164 if (skb_is_gso(skb))
165 needed++;
166
167 do {
168 prod = queue->rx.sring->req_prod;
169 cons = queue->rx.req_cons;
170
171 if (prod - cons >= needed)
172 return true;
173
174 queue->rx.sring->req_event = prod + 1;
175
176 /* Make sure event is visible before we check prod
177 * again.
178 */
179 mb();
180 } while (queue->rx.sring->req_prod != prod);
181
182 return false;
183}
184
185void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
186{
187 unsigned long flags;
188
189 spin_lock_irqsave(&queue->rx_queue.lock, flags);
190
191 __skb_queue_tail(&queue->rx_queue, skb);
192
193 queue->rx_queue_len += skb->len;
194 if (queue->rx_queue_len > queue->rx_queue_max)
195 netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
196
197 spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
198}
199
200static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
201{
202 struct sk_buff *skb;
203
204 spin_lock_irq(&queue->rx_queue.lock);
205
206 skb = __skb_dequeue(&queue->rx_queue);
207 if (skb)
208 queue->rx_queue_len -= skb->len;
209
210 spin_unlock_irq(&queue->rx_queue.lock);
211
212 return skb;
213}
214
215static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
216{
217 spin_lock_irq(&queue->rx_queue.lock);
218
219 if (queue->rx_queue_len < queue->rx_queue_max)
220 netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
221
222 spin_unlock_irq(&queue->rx_queue.lock);
223}
224
225
226static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
227{
228 struct sk_buff *skb;
229 while ((skb = xenvif_rx_dequeue(queue)) != NULL)
230 kfree_skb(skb);
231}
232
233static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
234{
235 struct sk_buff *skb;
236
237 for (;;) {
238 skb = skb_peek(&queue->rx_queue);
239 if (!skb)
240 break;
241 if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
242 break;
243 xenvif_rx_dequeue(queue);
244 kfree_skb(skb);
245 }
246}
247
248struct netrx_pending_operations {
249 unsigned copy_prod, copy_cons;
250 unsigned meta_prod, meta_cons;
251 struct gnttab_copy *copy;
252 struct xenvif_rx_meta *meta;
253 int copy_off;
254 grant_ref_t copy_gref;
255};
256
257static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
258 struct netrx_pending_operations *npo)
259{
260 struct xenvif_rx_meta *meta;
261 struct xen_netif_rx_request req;
262
263 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
264
265 meta = npo->meta + npo->meta_prod++;
266 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
267 meta->gso_size = 0;
268 meta->size = 0;
269 meta->id = req.id;
270
271 npo->copy_off = 0;
272 npo->copy_gref = req.gref;
273
274 return meta;
275}
276
277struct gop_frag_copy {
278 struct xenvif_queue *queue;
279 struct netrx_pending_operations *npo;
280 struct xenvif_rx_meta *meta;
281 int head;
282 int gso_type;
283
284 struct page *page;
285};
286
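/* Emit one grant-copy operation that copies up to *len bytes from the given
 * page into the current frontend Rx buffer. *len is trimmed to what fits in
 * the buffer; a new buffer is started when the current one is full.
 */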
287static void xenvif_setup_copy_gop(unsigned long gfn,
288 unsigned int offset,
289 unsigned int *len,
290 struct gop_frag_copy *info)
291{
292 struct gnttab_copy *copy_gop;
293 struct xen_page_foreign *foreign;
294 /* Convenient aliases */
295 struct xenvif_queue *queue = info->queue;
296 struct netrx_pending_operations *npo = info->npo;
297 struct page *page = info->page;
298
299 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
300
301 if (npo->copy_off == MAX_BUFFER_OFFSET)
302 info->meta = get_next_rx_buffer(queue, npo);
303
304 if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
305 *len = MAX_BUFFER_OFFSET - npo->copy_off;
306
307 copy_gop = npo->copy + npo->copy_prod++;
308 copy_gop->flags = GNTCOPY_dest_gref;
309 copy_gop->len = *len;
310
311 foreign = xen_page_foreign(page);
312 if (foreign) {
313 copy_gop->source.domid = foreign->domid;
314 copy_gop->source.u.ref = foreign->gref;
315 copy_gop->flags |= GNTCOPY_source_gref;
316 } else {
317 copy_gop->source.domid = DOMID_SELF;
318 copy_gop->source.u.gmfn = gfn;
319 }
320 copy_gop->source.offset = offset;
321
322 copy_gop->dest.domid = queue->vif->domid;
323 copy_gop->dest.offset = npo->copy_off;
324 copy_gop->dest.u.ref = npo->copy_gref;
325
326 npo->copy_off += *len;
327 info->meta->size += *len;
328
329 /* Leave a gap for the GSO descriptor. */
330 if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
331 queue->rx.req_cons++;
332
333 info->head = 0; /* There must be something in this buffer now */
334}
335
336static void xenvif_gop_frag_copy_grant(unsigned long gfn,
337 unsigned offset,
338 unsigned int len,
339 void *data)
340{
341 unsigned int bytes;
342
343 while (len) {
344 bytes = len;
345 xenvif_setup_copy_gop(gfn, offset, &bytes, data);
346 offset += bytes;
347 len -= bytes;
348 }
349}
350
351/*
352 * Set up the grant operations for this fragment. If it's a flipping
353 * interface, we also set up the unmap request from here.
354 */
355static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
356 struct netrx_pending_operations *npo,
357 struct page *page, unsigned long size,
358 unsigned long offset, int *head)
359{
360 struct gop_frag_copy info = {
361 .queue = queue,
362 .npo = npo,
363 .head = *head,
364 .gso_type = XEN_NETIF_GSO_TYPE_NONE,
365 };
366 unsigned long bytes;
367
368 if (skb_is_gso(skb)) {
369 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
370 info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
371 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
372 info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
373 }
374
375 /* Data must not cross a page boundary. */
376 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
377
378 info.meta = npo->meta + npo->meta_prod - 1;
379
380 /* Skip unused frames from start of page */
381 page += offset >> PAGE_SHIFT;
382 offset &= ~PAGE_MASK;
383
384 while (size > 0) {
385 BUG_ON(offset >= PAGE_SIZE);
386
387 bytes = PAGE_SIZE - offset;
388 if (bytes > size)
389 bytes = size;
390
391 info.page = page;
392 gnttab_foreach_grant_in_range(page, offset, bytes,
393 xenvif_gop_frag_copy_grant,
394 &info);
395 size -= bytes;
396 offset = 0;
397
398 /* Next page */
399 if (size) {
400 BUG_ON(!PageCompound(page));
401 page++;
402 }
403 }
404
405 *head = info.head;
406}
407
408/*
409 * Prepare an SKB to be transmitted to the frontend.
410 *
411 * This function is responsible for allocating grant operations, meta
412 * structures, etc.
413 *
414 * It returns the number of meta structures consumed. The number of
415 * ring slots used is always equal to the number of meta slots used
416 * plus the number of GSO descriptors used. Currently, we use either
417 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
418 * frontend-side LRO).
419 */
420static int xenvif_gop_skb(struct sk_buff *skb,
421 struct netrx_pending_operations *npo,
422 struct xenvif_queue *queue)
423{
424 struct xenvif *vif = netdev_priv(skb->dev);
425 int nr_frags = skb_shinfo(skb)->nr_frags;
426 int i;
427 struct xen_netif_rx_request req;
428 struct xenvif_rx_meta *meta;
429 unsigned char *data;
430 int head = 1;
431 int old_meta_prod;
432 int gso_type;
433
434 old_meta_prod = npo->meta_prod;
435
436 gso_type = XEN_NETIF_GSO_TYPE_NONE;
437 if (skb_is_gso(skb)) {
438 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
439 gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
440 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
441 gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
442 }
443
444 /* Set up a GSO prefix descriptor, if necessary */
445 if ((1 << gso_type) & vif->gso_prefix_mask) {
446 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
447 meta = npo->meta + npo->meta_prod++;
448 meta->gso_type = gso_type;
449 meta->gso_size = skb_shinfo(skb)->gso_size;
450 meta->size = 0;
451 meta->id = req.id;
452 }
453
454 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
455 meta = npo->meta + npo->meta_prod++;
456
457 if ((1 << gso_type) & vif->gso_mask) {
458 meta->gso_type = gso_type;
459 meta->gso_size = skb_shinfo(skb)->gso_size;
460 } else {
461 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
462 meta->gso_size = 0;
463 }
464
465 meta->size = 0;
466 meta->id = req.id;
467 npo->copy_off = 0;
468 npo->copy_gref = req.gref;
469
470 data = skb->data;
471 while (data < skb_tail_pointer(skb)) {
472 unsigned int offset = offset_in_page(data);
473 unsigned int len = PAGE_SIZE - offset;
474
475 if (data + len > skb_tail_pointer(skb))
476 len = skb_tail_pointer(skb) - data;
477
478 xenvif_gop_frag_copy(queue, skb, npo,
479 virt_to_page(data), len, offset, &head);
480 data += len;
481 }
482
483 for (i = 0; i < nr_frags; i++) {
484 xenvif_gop_frag_copy(queue, skb, npo,
485 skb_frag_page(&skb_shinfo(skb)->frags[i]),
486 skb_frag_size(&skb_shinfo(skb)->frags[i]),
487 skb_shinfo(skb)->frags[i].page_offset,
488 &head);
489 }
490
491 return npo->meta_prod - old_meta_prod;
492}
493
494/*
495 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
496 * used to set up the operations on the top of
497 * netrx_pending_operations, which have since been done. Check that
498 * they didn't give any errors and advance over them.
499 */
500static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
501 struct netrx_pending_operations *npo)
502{
503 struct gnttab_copy *copy_op;
504 int status = XEN_NETIF_RSP_OKAY;
505 int i;
506
507 for (i = 0; i < nr_meta_slots; i++) {
508 copy_op = npo->copy + npo->copy_cons++;
509 if (copy_op->status != GNTST_okay) {
510 netdev_dbg(vif->dev,
511 "Bad status %d from copy to DOM%d.\n",
512 copy_op->status, vif->domid);
513 status = XEN_NETIF_RSP_ERROR;
514 }
515 }
516
517 return status;
518}
519
520static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
521 struct xenvif_rx_meta *meta,
522 int nr_meta_slots)
523{
524 int i;
525 unsigned long offset;
526
527 /* No fragments used */
528 if (nr_meta_slots <= 1)
529 return;
530
531 nr_meta_slots--;
532
533 for (i = 0; i < nr_meta_slots; i++) {
534 int flags;
535 if (i == nr_meta_slots - 1)
536 flags = 0;
537 else
538 flags = XEN_NETRXF_more_data;
539
540 offset = 0;
541 make_rx_response(queue, meta[i].id, status, offset,
542 meta[i].size, flags);
543 }
544}
545
546void xenvif_kick_thread(struct xenvif_queue *queue)
547{
548 wake_up(&queue->wq);
549}
550
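/* Transmit queued skbs to the frontend: consume Rx requests and build the
 * grant-copy batch with xenvif_gop_skb(), perform the copies, then write the
 * Rx responses (plus any GSO extras) and notify the frontend if needed.
 */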
551static void xenvif_rx_action(struct xenvif_queue *queue)
552{
553 s8 status;
554 u16 flags;
555 struct xen_netif_rx_response *resp;
556 struct sk_buff_head rxq;
557 struct sk_buff *skb;
558 LIST_HEAD(notify);
559 int ret;
560 unsigned long offset;
561 bool need_to_notify = false;
562
563 struct netrx_pending_operations npo = {
564 .copy = queue->grant_copy_op,
565 .meta = queue->meta,
566 };
567
568 skb_queue_head_init(&rxq);
569
570 while (xenvif_rx_ring_slots_available(queue)
571 && (skb = xenvif_rx_dequeue(queue)) != NULL) {
572 queue->last_rx_time = jiffies;
573
574 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
575
576 __skb_queue_tail(&rxq, skb);
577 }
578
579 BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
580
581 if (!npo.copy_prod)
582 goto done;
583
584 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
585 gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
586
587 while ((skb = __skb_dequeue(&rxq)) != NULL) {
588
589 if ((1 << queue->meta[npo.meta_cons].gso_type) &
590 queue->vif->gso_prefix_mask) {
591 resp = RING_GET_RESPONSE(&queue->rx,
592 queue->rx.rsp_prod_pvt++);
593
594 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
595
596 resp->offset = queue->meta[npo.meta_cons].gso_size;
597 resp->id = queue->meta[npo.meta_cons].id;
598 resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
599
600 npo.meta_cons++;
601 XENVIF_RX_CB(skb)->meta_slots_used--;
602 }
603
604
605 queue->stats.tx_bytes += skb->len;
606 queue->stats.tx_packets++;
607
608 status = xenvif_check_gop(queue->vif,
609 XENVIF_RX_CB(skb)->meta_slots_used,
610 &npo);
611
612 if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
613 flags = 0;
614 else
615 flags = XEN_NETRXF_more_data;
616
617 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
618 flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
619 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
620 /* remote but checksummed. */
621 flags |= XEN_NETRXF_data_validated;
622
623 offset = 0;
624 resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
625 status, offset,
626 queue->meta[npo.meta_cons].size,
627 flags);
628
629 if ((1 << queue->meta[npo.meta_cons].gso_type) &
630 queue->vif->gso_mask) {
631 struct xen_netif_extra_info *gso =
632 (struct xen_netif_extra_info *)
633 RING_GET_RESPONSE(&queue->rx,
634 queue->rx.rsp_prod_pvt++);
635
636 resp->flags |= XEN_NETRXF_extra_info;
637
638 gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
639 gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
640 gso->u.gso.pad = 0;
641 gso->u.gso.features = 0;
642
643 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
644 gso->flags = 0;
645 }
646
647 xenvif_add_frag_responses(queue, status,
648 queue->meta + npo.meta_cons + 1,
649 XENVIF_RX_CB(skb)->meta_slots_used);
650
651 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
652
653 need_to_notify |= !!ret;
654
655 npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
656 dev_kfree_skb(skb);
657 }
658
659done:
660 if (need_to_notify)
661 notify_remote_via_irq(queue->rx_irq);
662}
663
664void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
665{
666 int more_to_do;
667
668 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
669
670 if (more_to_do)
671 napi_schedule(&queue->napi);
672}
673
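/* Replenish the queue's transmit credit, capped so that at most one
 * maximum-sized burst can accumulate.
 */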
674static void tx_add_credit(struct xenvif_queue *queue)
675{
676 unsigned long max_burst, max_credit;
677
678 /*
679 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
680 * Otherwise the interface can seize up due to insufficient credit.
681 */
682 max_burst = max(131072UL, queue->credit_bytes);
683
684 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
685 max_credit = queue->remaining_credit + queue->credit_bytes;
686 if (max_credit < queue->remaining_credit)
687 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
688
689 queue->remaining_credit = min(max_credit, max_burst);
690}
691
692void xenvif_tx_credit_callback(unsigned long data)
693{
694 struct xenvif_queue *queue = (struct xenvif_queue *)data;
695 tx_add_credit(queue);
696 xenvif_napi_schedule_or_enable_events(queue);
697}
698
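/* Respond with an error for the given Tx request and for each remaining
 * slot of the packet up to @end, consuming those ring requests.
 */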
699static void xenvif_tx_err(struct xenvif_queue *queue,
700 struct xen_netif_tx_request *txp,
701 unsigned int extra_count, RING_IDX end)
702{
703 RING_IDX cons = queue->tx.req_cons;
704 unsigned long flags;
705
706 do {
707 spin_lock_irqsave(&queue->response_lock, flags);
708 make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
709 push_tx_responses(queue);
710 spin_unlock_irqrestore(&queue->response_lock, flags);
711 if (cons == end)
712 break;
713 RING_COPY_REQUEST(&queue->tx, cons++, txp);
714 extra_count = 0; /* only the first frag can have extras */
715 } while (1);
716 queue->tx.req_cons = cons;
717}
718
719static void xenvif_fatal_tx_err(struct xenvif *vif)
720{
721 netdev_err(vif->dev, "fatal error; disabling device\n");
722 vif->disabled = true;
723 /* Disable the vif from queue 0's kthread */
724 if (vif->queues)
725 xenvif_kick_thread(&vif->queues[0]);
726}
727
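/* Validate the chain of "more_data" slots that make up one packet, starting
 * after @first. The extra requests are copied into @txp and the number of
 * slots used is returned, or a negative errno. Protocol violations are fatal
 * and disable the vif; oversized but non-fatal packets are consumed and
 * dropped.
 */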
728static int xenvif_count_requests(struct xenvif_queue *queue,
729 struct xen_netif_tx_request *first,
730 unsigned int extra_count,
731 struct xen_netif_tx_request *txp,
732 int work_to_do)
733{
734 RING_IDX cons = queue->tx.req_cons;
735 int slots = 0;
736 int drop_err = 0;
737 int more_data;
738
739 if (!(first->flags & XEN_NETTXF_more_data))
740 return 0;
741
742 do {
743 struct xen_netif_tx_request dropped_tx = { 0 };
744
745 if (slots >= work_to_do) {
746 netdev_err(queue->vif->dev,
747 "Asked for %d slots but exceeds this limit\n",
748 work_to_do);
749 xenvif_fatal_tx_err(queue->vif);
750 return -ENODATA;
751 }
752
753 /* This guest is really using too many slots and
754 * is considered malicious.
755 */
756 if (unlikely(slots >= fatal_skb_slots)) {
757 netdev_err(queue->vif->dev,
758 "Malicious frontend using %d slots, threshold %u\n",
759 slots, fatal_skb_slots);
760 xenvif_fatal_tx_err(queue->vif);
761 return -E2BIG;
762 }
763
764 /* The Xen network protocol had an implicit dependency on
765 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
766 * the historical MAX_SKB_FRAGS value 18 to honor the
767 * same behavior as before. Any packet using more than
768 * 18 slots but fewer than fatal_skb_slots slots is
769 * dropped.
770 */
771 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
772 if (net_ratelimit())
773 netdev_dbg(queue->vif->dev,
774 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
775 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
776 drop_err = -E2BIG;
777 }
778
779 if (drop_err)
780 txp = &dropped_tx;
781
782 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
783
784 /* If the guest submitted a frame >= 64 KiB then
785 * first->size overflowed and following slots will
786 * appear to be larger than the frame.
787 *
788 * This cannot be a fatal error as there are buggy
789 * frontends that do this.
790 *
791 * Consume all slots and drop the packet.
792 */
793 if (!drop_err && txp->size > first->size) {
794 if (net_ratelimit())
795 netdev_dbg(queue->vif->dev,
796 "Invalid tx request, slot size %u > remaining size %u\n",
797 txp->size, first->size);
798 drop_err = -EIO;
799 }
800
801 first->size -= txp->size;
802 slots++;
803
804 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
805 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
806 txp->offset, txp->size);
807 xenvif_fatal_tx_err(queue->vif);
808 return -EINVAL;
809 }
810
811 more_data = txp->flags & XEN_NETTXF_more_data;
812
813 if (!drop_err)
814 txp++;
815
816 } while (more_data);
817
818 if (drop_err) {
819 xenvif_tx_err(queue, first, extra_count, cons + slots);
820 return drop_err;
821 }
822
823 return slots;
824}
825
826
827struct xenvif_tx_cb {
828 u16 pending_idx;
829};
830
831#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
832
833static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
834 u16 pending_idx,
835 struct xen_netif_tx_request *txp,
836 unsigned int extra_count,
837 struct gnttab_map_grant_ref *mop)
838{
839 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
840 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
841 GNTMAP_host_map | GNTMAP_readonly,
842 txp->gref, queue->vif->domid);
843
844 memcpy(&queue->pending_tx_info[pending_idx].req, txp,
845 sizeof(*txp));
846 queue->pending_tx_info[pending_idx].extra_count = extra_count;
847}
848
849static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
850{
851 struct sk_buff *skb =
852 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
853 GFP_ATOMIC | __GFP_NOWARN);
854 if (unlikely(skb == NULL))
855 return NULL;
856
857 /* Packets passed to netif_rx() must have some headroom. */
858 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
859
860 /* Initialize it here to avoid later surprises */
861 skb_shinfo(skb)->destructor_arg = NULL;
862
863 return skb;
864}
865
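/* Create grant-map operations for the remaining slots of a packet, storing
 * each slot's pending index in an skb frag. Slots beyond MAX_SKB_FRAGS spill
 * into the frags of @nskb, which is attached as a frag_list.
 */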
866static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
867 struct sk_buff *skb,
868 struct xen_netif_tx_request *txp,
869 struct gnttab_map_grant_ref *gop,
870 unsigned int frag_overflow,
871 struct sk_buff *nskb)
872{
873 struct skb_shared_info *shinfo = skb_shinfo(skb);
874 skb_frag_t *frags = shinfo->frags;
875 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
876 int start;
877 pending_ring_idx_t index;
878 unsigned int nr_slots;
879
880 nr_slots = shinfo->nr_frags;
881
882 /* Skip the first skb fragment if it shares a page with the header fragment. */
883 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
884
885 for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
886 shinfo->nr_frags++, txp++, gop++) {
887 index = pending_index(queue->pending_cons++);
888 pending_idx = queue->pending_ring[index];
889 xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
890 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
891 }
892
893 if (frag_overflow) {
894
895 shinfo = skb_shinfo(nskb);
896 frags = shinfo->frags;
897
898 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
899 shinfo->nr_frags++, txp++, gop++) {
900 index = pending_index(queue->pending_cons++);
901 pending_idx = queue->pending_ring[index];
902 xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
903 gop);
904 frag_set_pending_idx(&frags[shinfo->nr_frags],
905 pending_idx);
906 }
907
908 skb_shinfo(skb)->frag_list = nskb;
909 }
910
911 return gop;
912}
913
914static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
915 u16 pending_idx,
916 grant_handle_t handle)
917{
918 if (unlikely(queue->grant_tx_handle[pending_idx] !=
919 NETBACK_INVALID_HANDLE)) {
920 netdev_err(queue->vif->dev,
921 "Trying to overwrite active handle! pending_idx: 0x%x\n",
922 pending_idx);
923 BUG();
924 }
925 queue->grant_tx_handle[pending_idx] = handle;
926}
927
928static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
929 u16 pending_idx)
930{
931 if (unlikely(queue->grant_tx_handle[pending_idx] ==
932 NETBACK_INVALID_HANDLE)) {
933 netdev_err(queue->vif->dev,
934 "Trying to unmap invalid handle! pending_idx: 0x%x\n",
935 pending_idx);
936 BUG();
937 }
938 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
939}
940
941static int xenvif_tx_check_gop(struct xenvif_queue *queue,
942 struct sk_buff *skb,
943 struct gnttab_map_grant_ref **gopp_map,
944 struct gnttab_copy **gopp_copy)
945{
946 struct gnttab_map_grant_ref *gop_map = *gopp_map;
947 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
948 /* This always points to the shinfo of the skb being checked, which
949 * could be either the first or the one on the frag_list
950 */
951 struct skb_shared_info *shinfo = skb_shinfo(skb);
952 /* If this is non-NULL, we are currently checking the frag_list skb, and
953 * this points to the shinfo of the first one
954 */
955 struct skb_shared_info *first_shinfo = NULL;
956 int nr_frags = shinfo->nr_frags;
957 const bool sharedslot = nr_frags &&
958 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
959 int i, err;
960
961 /* Check status of header. */
962 err = (*gopp_copy)->status;
963 if (unlikely(err)) {
964 if (net_ratelimit())
965 netdev_dbg(queue->vif->dev,
966 "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
967 (*gopp_copy)->status,
968 pending_idx,
969 (*gopp_copy)->source.u.ref);
970 /* The first frag might still have this slot mapped */
971 if (!sharedslot)
972 xenvif_idx_release(queue, pending_idx,
973 XEN_NETIF_RSP_ERROR);
974 }
975 (*gopp_copy)++;
976
977check_frags:
978 for (i = 0; i < nr_frags; i++, gop_map++) {
979 int j, newerr;
980
981 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
982
983 /* Check error status: if okay then remember grant handle. */
984 newerr = gop_map->status;
985
986 if (likely(!newerr)) {
987 xenvif_grant_handle_set(queue,
988 pending_idx,
989 gop_map->handle);
990 /* Had a previous error? Invalidate this fragment. */
991 if (unlikely(err)) {
992 xenvif_idx_unmap(queue, pending_idx);
993 /* If the mapping of the first frag was OK, but
994 * the header's copy failed, and they are
995 * sharing a slot, send an error
996 */
997 if (i == 0 && sharedslot)
998 xenvif_idx_release(queue, pending_idx,
999 XEN_NETIF_RSP_ERROR);
1000 else
1001 xenvif_idx_release(queue, pending_idx,
1002 XEN_NETIF_RSP_OKAY);
1003 }
1004 continue;
1005 }
1006
1007 /* Error on this fragment: respond to client with an error. */
1008 if (net_ratelimit())
1009 netdev_dbg(queue->vif->dev,
1010 "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
1011 i,
1012 gop_map->status,
1013 pending_idx,
1014 gop_map->ref);
1015
1016 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1017
1018 /* Not the first error? Preceding frags already invalidated. */
1019 if (err)
1020 continue;
1021
1022 /* First error: if the header hasn't shared a slot with the
1023 * first frag, release it as well.
1024 */
1025 if (!sharedslot)
1026 xenvif_idx_release(queue,
1027 XENVIF_TX_CB(skb)->pending_idx,
1028 XEN_NETIF_RSP_OKAY);
1029
1030 /* Invalidate preceding fragments of this skb. */
1031 for (j = 0; j < i; j++) {
1032 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1033 xenvif_idx_unmap(queue, pending_idx);
1034 xenvif_idx_release(queue, pending_idx,
1035 XEN_NETIF_RSP_OKAY);
1036 }
1037
1038 /* And if we found the error while checking the frag_list, unmap
1039 * the first skb's frags
1040 */
1041 if (first_shinfo) {
1042 for (j = 0; j < first_shinfo->nr_frags; j++) {
1043 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1044 xenvif_idx_unmap(queue, pending_idx);
1045 xenvif_idx_release(queue, pending_idx,
1046 XEN_NETIF_RSP_OKAY);
1047 }
1048 }
1049
1050 /* Remember the error: invalidate all subsequent fragments. */
1051 err = newerr;
1052 }
1053
1054 if (skb_has_frag_list(skb) && !first_shinfo) {
1055 first_shinfo = skb_shinfo(skb);
1056 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1057 nr_frags = shinfo->nr_frags;
1058
1059 goto check_frags;
1060 }
1061
1062 *gopp_map = gop_map;
1063 return err;
1064}
1065
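/* Replace the pending indices stored in the skb's frags with the real mapped
 * pages, chain the zerocopy callback contexts together and take an extra
 * reference on each page for the network stack.
 */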
1066static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1067{
1068 struct skb_shared_info *shinfo = skb_shinfo(skb);
1069 int nr_frags = shinfo->nr_frags;
1070 int i;
1071 u16 prev_pending_idx = INVALID_PENDING_IDX;
1072
1073 for (i = 0; i < nr_frags; i++) {
1074 skb_frag_t *frag = shinfo->frags + i;
1075 struct xen_netif_tx_request *txp;
1076 struct page *page;
1077 u16 pending_idx;
1078
1079 pending_idx = frag_get_pending_idx(frag);
1080
1081 /* If this is not the first frag, chain it to the previous one. */
1082 if (prev_pending_idx == INVALID_PENDING_IDX)
1083 skb_shinfo(skb)->destructor_arg =
1084 &callback_param(queue, pending_idx);
1085 else
1086 callback_param(queue, prev_pending_idx).ctx =
1087 &callback_param(queue, pending_idx);
1088
1089 callback_param(queue, pending_idx).ctx = NULL;
1090 prev_pending_idx = pending_idx;
1091
1092 txp = &queue->pending_tx_info[pending_idx].req;
1093 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
1094 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1095 skb->len += txp->size;
1096 skb->data_len += txp->size;
1097 skb->truesize += txp->size;
1098
1099 /* Take an extra reference to offset the network stack's put_page() */
1100 get_page(queue->mmap_pages[pending_idx]);
1101 }
1102}
1103
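/* Consume the extra-info slots that follow the first Tx request, copying
 * them into @extras indexed by type. Returns the remaining work_to_do, or a
 * negative errno on a protocol violation (which also disables the vif).
 */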
1104static int xenvif_get_extras(struct xenvif_queue *queue,
1105 struct xen_netif_extra_info *extras,
1106 unsigned int *extra_count,
1107 int work_to_do)
1108{
1109 struct xen_netif_extra_info extra;
1110 RING_IDX cons = queue->tx.req_cons;
1111
1112 do {
1113 if (unlikely(work_to_do-- <= 0)) {
1114 netdev_err(queue->vif->dev, "Missing extra info\n");
1115 xenvif_fatal_tx_err(queue->vif);
1116 return -EBADR;
1117 }
1118
1119 RING_COPY_REQUEST(&queue->tx, cons, &extra);
1120
1121 queue->tx.req_cons = ++cons;
1122 (*extra_count)++;
1123
1124 if (unlikely(!extra.type ||
1125 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1126 netdev_err(queue->vif->dev,
1127 "Invalid extra type: %d\n", extra.type);
1128 xenvif_fatal_tx_err(queue->vif);
1129 return -EINVAL;
1130 }
1131
1132 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1133 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1134
1135 return work_to_do;
1136}
1137
1138static int xenvif_set_skb_gso(struct xenvif *vif,
1139 struct sk_buff *skb,
1140 struct xen_netif_extra_info *gso)
1141{
1142 if (!gso->u.gso.size) {
1143 netdev_err(vif->dev, "GSO size must not be zero.\n");
1144 xenvif_fatal_tx_err(vif);
1145 return -EINVAL;
1146 }
1147
1148 switch (gso->u.gso.type) {
1149 case XEN_NETIF_GSO_TYPE_TCPV4:
1150 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1151 break;
1152 case XEN_NETIF_GSO_TYPE_TCPV6:
1153 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1154 break;
1155 default:
1156 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1157 xenvif_fatal_tx_err(vif);
1158 return -EINVAL;
1159 }
1160
1161 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1162 /* gso_segs will be calculated later */
1163
1164 return 0;
1165}
1166
1167static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
1168{
1169 bool recalculate_partial_csum = false;
1170
1171 /* A GSO SKB must be CHECKSUM_PARTIAL. However, some buggy
1172 * peers can fail to set NETRXF_csum_blank when sending a GSO
1173 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1174 * recalculate the partial checksum.
1175 */
1176 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1177 queue->stats.rx_gso_checksum_fixup++;
1178 skb->ip_summed = CHECKSUM_PARTIAL;
1179 recalculate_partial_csum = true;
1180 }
1181
1182 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1183 if (skb->ip_summed != CHECKSUM_PARTIAL)
1184 return 0;
1185
1186 return skb_checksum_setup(skb, recalculate_partial_csum);
1187}
1188
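/* Rate limiting: return true if sending @size bytes now would exceed the
 * queue's remaining credit. Credit is replenished once the credit window has
 * passed; otherwise the credit timer is armed so the queue is retried later.
 */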
1189static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1190{
1191 u64 now = get_jiffies_64();
1192 u64 next_credit = queue->credit_window_start +
1193 msecs_to_jiffies(queue->credit_usec / 1000);
1194
1195 /* Timer could already be pending in rare cases. */
1196 if (timer_pending(&queue->credit_timeout))
1197 return true;
1198
1199 /* Passed the point where we can replenish credit? */
1200 if (time_after_eq64(now, next_credit)) {
1201 queue->credit_window_start = now;
1202 tx_add_credit(queue);
1203 }
1204
1205 /* Still too big to send right now? Set a callback. */
1206 if (size > queue->remaining_credit) {
1207 queue->credit_timeout.data =
1208 (unsigned long)queue;
1209 mod_timer(&queue->credit_timeout,
1210 next_credit);
1211 queue->credit_window_start = next_credit;
1212
1213 return true;
1214 }
1215
1216 return false;
1217}
1218
1219/* No locking is required in xenvif_mcast_add/del() as they are
1220 * only ever invoked from NAPI poll. An RCU list is used because
1221 * xenvif_mcast_match() is called asynchronously, during start_xmit.
1222 */
1223
1224static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
1225{
1226 struct xenvif_mcast_addr *mcast;
1227
1228 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
1229 if (net_ratelimit())
1230 netdev_err(vif->dev,
1231 "Too many multicast addresses\n");
1232 return -ENOSPC;
1233 }
1234
1235 mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
1236 if (!mcast)
1237 return -ENOMEM;
1238
1239 ether_addr_copy(mcast->addr, addr);
1240 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
1241 vif->fe_mcast_count++;
1242
1243 return 0;
1244}
1245
1246static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
1247{
1248 struct xenvif_mcast_addr *mcast;
1249
1250 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
1251 if (ether_addr_equal(addr, mcast->addr)) {
1252 --vif->fe_mcast_count;
1253 list_del_rcu(&mcast->entry);
1254 kfree_rcu(mcast, rcu);
1255 break;
1256 }
1257 }
1258}
1259
1260bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
1261{
1262 struct xenvif_mcast_addr *mcast;
1263
1264 rcu_read_lock();
1265 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
1266 if (ether_addr_equal(addr, mcast->addr)) {
1267 rcu_read_unlock();
1268 return true;
1269 }
1270 }
1271 rcu_read_unlock();
1272
1273 return false;
1274}
1275
1276void xenvif_mcast_addr_list_free(struct xenvif *vif)
1277{
1278 /* No need for locking or RCU here. NAPI poll and TX queue
1279 * are stopped.
1280 */
1281 while (!list_empty(&vif->fe_mcast_addr)) {
1282 struct xenvif_mcast_addr *mcast;
1283
1284 mcast = list_first_entry(&vif->fe_mcast_addr,
1285 struct xenvif_mcast_addr,
1286 entry);
1287 --vif->fe_mcast_count;
1288 list_del(&mcast->entry);
1289 kfree(mcast);
1290 }
1291}
1292
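/* First half of the Tx path: pull requests off the shared ring (subject to
 * credit scheduling and extra-info handling), allocate skbs, and build the
 * grant-copy operations for the linear header plus grant-map operations for
 * the remaining slots. Prepared skbs are queued on tx_queue for
 * xenvif_tx_submit().
 */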
1293static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1294 int budget,
1295 unsigned *copy_ops,
1296 unsigned *map_ops)
1297{
1298 struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
1299 struct sk_buff *skb, *nskb;
1300 int ret;
1301 unsigned int frag_overflow;
1302
1303 while (skb_queue_len(&queue->tx_queue) < budget) {
1304 struct xen_netif_tx_request txreq;
1305 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1306 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1307 unsigned int extra_count;
1308 u16 pending_idx;
1309 RING_IDX idx;
1310 int work_to_do;
1311 unsigned int data_len;
1312 pending_ring_idx_t index;
1313
1314 if (queue->tx.sring->req_prod - queue->tx.req_cons >
1315 XEN_NETIF_TX_RING_SIZE) {
1316 netdev_err(queue->vif->dev,
1317 "Impossible number of requests. "
1318 "req_prod %d, req_cons %d, size %ld\n",
1319 queue->tx.sring->req_prod, queue->tx.req_cons,
1320 XEN_NETIF_TX_RING_SIZE);
1321 xenvif_fatal_tx_err(queue->vif);
1322 break;
1323 }
1324
1325 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
1326 if (!work_to_do)
1327 break;
1328
1329 idx = queue->tx.req_cons;
1330 rmb(); /* Ensure that we see the request before we copy it. */
1331 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
1332
1333 /* Credit-based scheduling. */
1334 if (txreq.size > queue->remaining_credit &&
1335 tx_credit_exceeded(queue, txreq.size))
1336 break;
1337
1338 queue->remaining_credit -= txreq.size;
1339
1340 work_to_do--;
1341 queue->tx.req_cons = ++idx;
1342
1343 memset(extras, 0, sizeof(extras));
1344 extra_count = 0;
1345 if (txreq.flags & XEN_NETTXF_extra_info) {
1346 work_to_do = xenvif_get_extras(queue, extras,
1347 &extra_count,
1348 work_to_do);
1349 idx = queue->tx.req_cons;
1350 if (unlikely(work_to_do < 0))
1351 break;
1352 }
1353
1354 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
1355 struct xen_netif_extra_info *extra;
1356
1357 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
1358 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
1359
1360 make_tx_response(queue, &txreq, extra_count,
1361 (ret == 0) ?
1362 XEN_NETIF_RSP_OKAY :
1363 XEN_NETIF_RSP_ERROR);
1364 push_tx_responses(queue);
1365 continue;
1366 }
1367
1368 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
1369 struct xen_netif_extra_info *extra;
1370
1371 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
1372 xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
1373
1374 make_tx_response(queue, &txreq, extra_count,
1375 XEN_NETIF_RSP_OKAY);
1376 push_tx_responses(queue);
1377 continue;
1378 }
1379
1380 ret = xenvif_count_requests(queue, &txreq, extra_count,
1381 txfrags, work_to_do);
1382 if (unlikely(ret < 0))
1383 break;
1384
1385 idx += ret;
1386
1387 if (unlikely(txreq.size < ETH_HLEN)) {
1388 netdev_dbg(queue->vif->dev,
1389 "Bad packet size: %d\n", txreq.size);
1390 xenvif_tx_err(queue, &txreq, extra_count, idx);
1391 break;
1392 }
1393
1394 /* The data must not cross a page boundary, as the payload mustn't fragment. */
1395 if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
1396 netdev_err(queue->vif->dev,
1397 "txreq.offset: %u, size: %u, end: %lu\n",
1398 txreq.offset, txreq.size,
1399 (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
1400 xenvif_fatal_tx_err(queue->vif);
1401 break;
1402 }
1403
1404 index = pending_index(queue->pending_cons);
1405 pending_idx = queue->pending_ring[index];
1406
1407 data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
1408 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1409 XEN_NETBACK_TX_COPY_LEN : txreq.size;
1410
1411 skb = xenvif_alloc_skb(data_len);
1412 if (unlikely(skb == NULL)) {
1413 netdev_dbg(queue->vif->dev,
1414 "Can't allocate a skb in start_xmit.\n");
1415 xenvif_tx_err(queue, &txreq, extra_count, idx);
1416 break;
1417 }
1418
1419 skb_shinfo(skb)->nr_frags = ret;
1420 if (data_len < txreq.size)
1421 skb_shinfo(skb)->nr_frags++;
1422 /* At this point shinfo->nr_frags is in fact the number of
1423 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1424 */
1425 frag_overflow = 0;
1426 nskb = NULL;
1427 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1428 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1429 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1430 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1431 nskb = xenvif_alloc_skb(0);
1432 if (unlikely(nskb == NULL)) {
1433 kfree_skb(skb);
1434 xenvif_tx_err(queue, &txreq, extra_count, idx);
1435 if (net_ratelimit())
1436 netdev_err(queue->vif->dev,
1437 "Can't allocate the frag_list skb.\n");
1438 break;
1439 }
1440 }
1441
1442 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1443 struct xen_netif_extra_info *gso;
1444 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1445
1446 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1447 /* Failure in xenvif_set_skb_gso is fatal. */
1448 kfree_skb(skb);
1449 kfree_skb(nskb);
1450 break;
1451 }
1452 }
1453
1454 XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1455
1456 __skb_put(skb, data_len);
1457 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
1458 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
1459 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
1460
1461 queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
1462 virt_to_gfn(skb->data);
1463 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
1464 queue->tx_copy_ops[*copy_ops].dest.offset =
1465 offset_in_page(skb->data) & ~XEN_PAGE_MASK;
1466
1467 queue->tx_copy_ops[*copy_ops].len = data_len;
1468 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1469
1470 (*copy_ops)++;
1471
1472 if (data_len < txreq.size) {
1473 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1474 pending_idx);
1475 xenvif_tx_create_map_op(queue, pending_idx, &txreq,
1476 extra_count, gop);
1477 gop++;
1478 } else {
1479 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1480 INVALID_PENDING_IDX);
1481 memcpy(&queue->pending_tx_info[pending_idx].req,
1482 &txreq, sizeof(txreq));
1483 queue->pending_tx_info[pending_idx].extra_count =
1484 extra_count;
1485 }
1486
1487 queue->pending_cons++;
1488
1489 gop = xenvif_get_requests(queue, skb, txfrags, gop,
1490 frag_overflow, nskb);
1491
1492 __skb_queue_tail(&queue->tx_queue, skb);
1493
1494 queue->tx.req_cons = idx;
1495
1496 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1497 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1498 break;
1499 }
1500
1501 (*map_ops) = gop - queue->tx_map_ops;
1502 return;
1503}
1504
1505/* Consolidate an skb with a frag_list into a brand new one whose frags use
1506 * local pages. Returns 0, or -ENOMEM if new pages can't be allocated.
1507 */
1508static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1509{
1510 unsigned int offset = skb_headlen(skb);
1511 skb_frag_t frags[MAX_SKB_FRAGS];
1512 int i, f;
1513 struct ubuf_info *uarg;
1514 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1515
1516 queue->stats.tx_zerocopy_sent += 2;
1517 queue->stats.tx_frag_overflow++;
1518
1519 xenvif_fill_frags(queue, nskb);
1520 /* Subtract the frags size; we will correct it later */
1521 skb->truesize -= skb->data_len;
1522 skb->len += nskb->len;
1523 skb->data_len += nskb->len;
1524
1525 /* create a brand new frags array and coalesce there */
1526 for (i = 0; offset < skb->len; i++) {
1527 struct page *page;
1528 unsigned int len;
1529
1530 BUG_ON(i >= MAX_SKB_FRAGS);
1531 page = alloc_page(GFP_ATOMIC);
1532 if (!page) {
1533 int j;
1534 skb->truesize += skb->data_len;
1535 for (j = 0; j < i; j++)
1536 put_page(frags[j].page.p);
1537 return -ENOMEM;
1538 }
1539
1540 if (offset + PAGE_SIZE < skb->len)
1541 len = PAGE_SIZE;
1542 else
1543 len = skb->len - offset;
1544 if (skb_copy_bits(skb, offset, page_address(page), len))
1545 BUG();
1546
1547 offset += len;
1548 frags[i].page.p = page;
1549 frags[i].page_offset = 0;
1550 skb_frag_size_set(&frags[i], len);
1551 }
1552
1553 /* Copied all the bits from the frag list -- free it. */
1554 skb_frag_list_init(skb);
1555 xenvif_skb_zerocopy_prepare(queue, nskb);
1556 kfree_skb(nskb);
1557
1558 /* Release all the original (foreign) frags. */
1559 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1560 skb_frag_unref(skb, f);
1561 uarg = skb_shinfo(skb)->destructor_arg;
1562 /* increase inflight counter to offset decrement in callback */
1563 atomic_inc(&queue->inflight_packets);
1564 uarg->callback(uarg, true);
1565 skb_shinfo(skb)->destructor_arg = NULL;
1566
1567 /* Fill the skb with the new (local) frags. */
1568 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1569 skb_shinfo(skb)->nr_frags = i;
1570 skb->truesize += i * PAGE_SIZE;
1571
1572 return 0;
1573}
1574
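/* Second half of the Tx path: check the grant operation results for each
 * prepared skb, fill in its frags, consolidate any frag_list, set up
 * checksum and GSO metadata, and hand the skb to the network stack.
 */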
1575static int xenvif_tx_submit(struct xenvif_queue *queue)
1576{
1577 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1578 struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1579 struct sk_buff *skb;
1580 int work_done = 0;
1581
1582 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1583 struct xen_netif_tx_request *txp;
1584 u16 pending_idx;
1585 unsigned data_len;
1586
1587 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1588 txp = &queue->pending_tx_info[pending_idx].req;
1589
1590 /* Check the remap error code. */
1591 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1592 /* If there was an error, xenvif_tx_check_gop is
1593 * expected to release all the frags which were mapped,
1594 * so kfree_skb shouldn't do it again
1595 */
1596 skb_shinfo(skb)->nr_frags = 0;
1597 if (skb_has_frag_list(skb)) {
1598 struct sk_buff *nskb =
1599 skb_shinfo(skb)->frag_list;
1600 skb_shinfo(nskb)->nr_frags = 0;
1601 }
1602 kfree_skb(skb);
1603 continue;
1604 }
1605
1606 data_len = skb->len;
1607 callback_param(queue, pending_idx).ctx = NULL;
1608 if (data_len < txp->size) {
1609 /* Append the packet payload as a fragment. */
1610 txp->offset += data_len;
1611 txp->size -= data_len;
1612 } else {
1613 /* Schedule a response immediately. */
1614 xenvif_idx_release(queue, pending_idx,
1615 XEN_NETIF_RSP_OKAY);
1616 }
1617
1618 if (txp->flags & XEN_NETTXF_csum_blank)
1619 skb->ip_summed = CHECKSUM_PARTIAL;
1620 else if (txp->flags & XEN_NETTXF_data_validated)
1621 skb->ip_summed = CHECKSUM_UNNECESSARY;
1622
1623 xenvif_fill_frags(queue, skb);
1624
1625 if (unlikely(skb_has_frag_list(skb))) {
1626 if (xenvif_handle_frag_list(queue, skb)) {
1627 if (net_ratelimit())
1628 netdev_err(queue->vif->dev,
1629 "Not enough memory to consolidate frag_list!\n");
1630 xenvif_skb_zerocopy_prepare(queue, skb);
1631 kfree_skb(skb);
1632 continue;
1633 }
1634 }
1635
1636 skb->dev = queue->vif->dev;
1637 skb->protocol = eth_type_trans(skb, skb->dev);
1638 skb_reset_network_header(skb);
1639
1640 if (checksum_setup(queue, skb)) {
1641 netdev_dbg(queue->vif->dev,
1642 "Can't setup checksum in net_tx_action\n");
1643 /* We have to set this flag to trigger the callback */
1644 if (skb_shinfo(skb)->destructor_arg)
1645 xenvif_skb_zerocopy_prepare(queue, skb);
1646 kfree_skb(skb);
1647 continue;
1648 }
1649
1650 skb_probe_transport_header(skb, 0);
1651
1652 /* If the packet is GSO then we will have just set up the
1653 * transport header offset in checksum_setup so it's now
1654 * straightforward to calculate gso_segs.
1655 */
1656 if (skb_is_gso(skb)) {
1657 int mss = skb_shinfo(skb)->gso_size;
1658 int hdrlen = skb_transport_header(skb) -
1659 skb_mac_header(skb) +
1660 tcp_hdrlen(skb);
1661
1662 skb_shinfo(skb)->gso_segs =
1663 DIV_ROUND_UP(skb->len - hdrlen, mss);
1664 }
1665
1666 queue->stats.rx_bytes += skb->len;
1667 queue->stats.rx_packets++;
1668
1669 work_done++;
1670
1671 /* Set this flag right before netif_receive_skb, otherwise
1672 * someone might think this packet already left netback, and
1673 * do an skb_copy_ubufs while we are still in control of the
1674 * skb. E.g. the earlier __pskb_pull_tail can do such a thing.
1675 */
1676 if (skb_shinfo(skb)->destructor_arg) {
1677 xenvif_skb_zerocopy_prepare(queue, skb);
1678 queue->stats.tx_zerocopy_sent++;
1679 }
1680
1681 netif_receive_skb(skb);
1682 }
1683
1684 return work_done;
1685}
1686
1687void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1688{
1689 unsigned long flags;
1690 pending_ring_idx_t index;
1691 struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1692
1693 /* This is the only place where we grab this lock, to protect callbacks
1694 * from each other.
1695 */
1696 spin_lock_irqsave(&queue->callback_lock, flags);
1697 do {
1698 u16 pending_idx = ubuf->desc;
1699 ubuf = (struct ubuf_info *) ubuf->ctx;
1700 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1701 MAX_PENDING_REQS);
1702 index = pending_index(queue->dealloc_prod);
1703 queue->dealloc_ring[index] = pending_idx;
1704 /* Sync with xenvif_tx_dealloc_action:
1705 * insert idx then incr producer.
1706 */
1707 smp_wmb();
1708 queue->dealloc_prod++;
1709 } while (ubuf);
1710 spin_unlock_irqrestore(&queue->callback_lock, flags);
1711
1712 if (likely(zerocopy_success))
1713 queue->stats.tx_zerocopy_success++;
1714 else
1715 queue->stats.tx_zerocopy_fail++;
1716 xenvif_skb_zerocopy_complete(queue);
1717}
1718
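/* Unmap the grants of slots whose skbs the network stack has released (as
 * recorded on the dealloc ring) and return their pending indices.
 */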
1719static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1720{
1721 struct gnttab_unmap_grant_ref *gop;
1722 pending_ring_idx_t dc, dp;
1723 u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1724 unsigned int i = 0;
1725
1726 dc = queue->dealloc_cons;
1727 gop = queue->tx_unmap_ops;
1728
1729 /* Free up any grants we have finished using */
1730 do {
1731 dp = queue->dealloc_prod;
1732
1733 /* Ensure we see all indices enqueued by all
1734 * xenvif_zerocopy_callback() invocations.
1735 */
1736 smp_rmb();
1737
1738 while (dc != dp) {
1739 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1740 pending_idx =
1741 queue->dealloc_ring[pending_index(dc++)];
1742
1743 pending_idx_release[gop - queue->tx_unmap_ops] =
1744 pending_idx;
1745 queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1746 queue->mmap_pages[pending_idx];
1747 gnttab_set_unmap_op(gop,
1748 idx_to_kaddr(queue, pending_idx),
1749 GNTMAP_host_map,
1750 queue->grant_tx_handle[pending_idx]);
1751 xenvif_grant_handle_reset(queue, pending_idx);
1752 ++gop;
1753 }
1754
1755 } while (dp != queue->dealloc_prod);
1756
1757 queue->dealloc_cons = dc;
1758
1759 if (gop - queue->tx_unmap_ops > 0) {
1760 int ret;
1761 ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1762 NULL,
1763 queue->pages_to_unmap,
1764 gop - queue->tx_unmap_ops);
1765 if (ret) {
1766 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1767 gop - queue->tx_unmap_ops, ret);
1768 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1769 if (gop[i].status != GNTST_okay)
1770 netdev_err(queue->vif->dev,
1771 " host_addr: 0x%llx handle: 0x%x status: %d\n",
1772 gop[i].host_addr,
1773 gop[i].handle,
1774 gop[i].status);
1775 }
1776 BUG();
1777 }
1778 }
1779
1780 for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1781 xenvif_idx_release(queue, pending_idx_release[i],
1782 XEN_NETIF_RSP_OKAY);
1783}
1784
1785
1786/* Called after netfront has transmitted */
1787int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1788{
1789 unsigned nr_mops, nr_cops = 0;
1790 int work_done, ret;
1791
1792 if (unlikely(!tx_work_todo(queue)))
1793 return 0;
1794
1795 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1796
1797 if (nr_cops == 0)
1798 return 0;
1799
1800 gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1801 if (nr_mops != 0) {
1802 ret = gnttab_map_refs(queue->tx_map_ops,
1803 NULL,
1804 queue->pages_to_map,
1805 nr_mops);
1806 BUG_ON(ret);
1807 }
1808
1809 work_done = xenvif_tx_submit(queue);
1810
1811 return work_done;
1812}
1813
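/* Send the Tx response for a pending slot and return its index to the
 * pending ring so it can be reused.
 */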
1814static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1815 u8 status)
1816{
1817 struct pending_tx_info *pending_tx_info;
1818 pending_ring_idx_t index;
1819 unsigned long flags;
1820
1821 pending_tx_info = &queue->pending_tx_info[pending_idx];
1822
1823 spin_lock_irqsave(&queue->response_lock, flags);
1824
1825 make_tx_response(queue, &pending_tx_info->req,
1826 pending_tx_info->extra_count, status);
1827
1828 /* Release the pending index before pushing the Tx response so
1829 * it's available before a new Tx request is pushed by the
1830 * frontend.
1831 */
1832 index = pending_index(queue->pending_prod++);
1833 queue->pending_ring[index] = pending_idx;
1834
1835 push_tx_responses(queue);
1836
1837 spin_unlock_irqrestore(&queue->response_lock, flags);
1838}
1839
1840
1841static void make_tx_response(struct xenvif_queue *queue,
1842 struct xen_netif_tx_request *txp,
1843 unsigned int extra_count,
1844 s8 st)
1845{
1846 RING_IDX i = queue->tx.rsp_prod_pvt;
1847 struct xen_netif_tx_response *resp;
1848
1849 resp = RING_GET_RESPONSE(&queue->tx, i);
1850 resp->id = txp->id;
1851 resp->status = st;
1852
1853 while (extra_count-- != 0)
1854 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1855
1856 queue->tx.rsp_prod_pvt = ++i;
1857}
1858
1859static void push_tx_responses(struct xenvif_queue *queue)
1860{
1861 int notify;
1862
1863 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1864 if (notify)
1865 notify_remote_via_irq(queue->tx_irq);
1866}
1867
1868static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1869 u16 id,
1870 s8 st,
1871 u16 offset,
1872 u16 size,
1873 u16 flags)
1874{
1875 RING_IDX i = queue->rx.rsp_prod_pvt;
1876 struct xen_netif_rx_response *resp;
1877
1878 resp = RING_GET_RESPONSE(&queue->rx, i);
1879 resp->offset = offset;
1880 resp->flags = flags;
1881 resp->id = id;
1882 resp->status = (s16)size;
1883 if (st < 0)
1884 resp->status = (s16)st;
1885
1886 queue->rx.rsp_prod_pvt = ++i;
1887
1888 return resp;
1889}
1890
1891void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1892{
1893 int ret;
1894 struct gnttab_unmap_grant_ref tx_unmap_op;
1895
1896 gnttab_set_unmap_op(&tx_unmap_op,
1897 idx_to_kaddr(queue, pending_idx),
1898 GNTMAP_host_map,
1899 queue->grant_tx_handle[pending_idx]);
1900 xenvif_grant_handle_reset(queue, pending_idx);
1901
1902 ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1903 &queue->mmap_pages[pending_idx], 1);
1904 if (ret) {
1905 netdev_err(queue->vif->dev,
1906 "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1907 ret,
1908 pending_idx,
1909 tx_unmap_op.host_addr,
1910 tx_unmap_op.handle,
1911 tx_unmap_op.status);
1912 BUG();
1913 }
1914}
1915
1916static inline int tx_work_todo(struct xenvif_queue *queue)
1917{
1918 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1919 return 1;
1920
1921 return 0;
1922}
1923
1924static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1925{
1926 return queue->dealloc_cons != queue->dealloc_prod;
1927}
1928
1929void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
1930{
1931 if (queue->tx.sring)
1932 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1933 queue->tx.sring);
1934 if (queue->rx.sring)
1935 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1936 queue->rx.sring);
1937}
1938
1939int xenvif_map_frontend_rings(struct xenvif_queue *queue,
1940 grant_ref_t tx_ring_ref,
1941 grant_ref_t rx_ring_ref)
1942{
1943 void *addr;
1944 struct xen_netif_tx_sring *txs;
1945 struct xen_netif_rx_sring *rxs;
1946
1947 int err = -ENOMEM;
1948
1949 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1950 &tx_ring_ref, 1, &addr);
1951 if (err)
1952 goto err;
1953
1954 txs = (struct xen_netif_tx_sring *)addr;
1955 BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1956
1957 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1958 &rx_ring_ref, 1, &addr);
1959 if (err)
1960 goto err;
1961
1962 rxs = (struct xen_netif_rx_sring *)addr;
1963 BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1964
1965 return 0;
1966
1967err:
1968 xenvif_unmap_frontend_rings(queue);
1969 return err;
1970}
1971
1972static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
1973{
1974 struct xenvif *vif = queue->vif;
1975
1976 queue->stalled = true;
1977
1978 /* At least one queue has stalled? Disable the carrier. */
1979 spin_lock(&vif->lock);
1980 if (vif->stalled_queues++ == 0) {
1981 netdev_info(vif->dev, "Guest Rx stalled");
1982 netif_carrier_off(vif->dev);
1983 }
1984 spin_unlock(&vif->lock);
1985}
1986
1987static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
1988{
1989 struct xenvif *vif = queue->vif;
1990
1991 queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
1992 queue->stalled = false;
1993
1994 /* All queues are ready? Enable the carrier. */
1995 spin_lock(&vif->lock);
1996 if (--vif->stalled_queues == 0) {
1997 netdev_info(vif->dev, "Guest Rx ready");
1998 netif_carrier_on(vif->dev);
1999 }
2000 spin_unlock(&vif->lock);
2001}
2002
2003static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
2004{
2005 RING_IDX prod, cons;
2006
2007 prod = queue->rx.sring->req_prod;
2008 cons = queue->rx.req_cons;
2009
2010 return !queue->stalled && prod - cons < 1
2011 && time_after(jiffies,
2012 queue->last_rx_time + queue->vif->stall_timeout);
2013}
2014
2015static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
2016{
2017 RING_IDX prod, cons;
2018
2019 prod = queue->rx.sring->req_prod;
2020 cons = queue->rx.req_cons;
2021
2022 return queue->stalled && prod - cons >= 1;
2023}
2024
2025static bool xenvif_have_rx_work(struct xenvif_queue *queue)
2026{
2027 return xenvif_rx_ring_slots_available(queue)
2028 || (queue->vif->stall_timeout &&
2029 (xenvif_rx_queue_stalled(queue)
2030 || xenvif_rx_queue_ready(queue)))
2031 || kthread_should_stop()
2032 || queue->vif->disabled;
2033}
2034
2035static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
2036{
2037 struct sk_buff *skb;
2038 long timeout;
2039
2040 skb = skb_peek(&queue->rx_queue);
2041 if (!skb)
2042 return MAX_SCHEDULE_TIMEOUT;
2043
2044 timeout = XENVIF_RX_CB(skb)->expires - jiffies;
2045 return timeout < 0 ? 0 : timeout;
2046}
2047
2048/* Wait until the guest Rx thread has work.
2049 *
2050 * The timeout needs to be adjusted based on the current head of the
2051 * queue (and not just the head at the beginning). In particular, if
2052 * the queue is initially empty an infinite timeout is used and this
2053 * needs to be reduced when a skb is queued.
2054 *
2055 * This cannot be done with wait_event_timeout() because it only
2056 * calculates the timeout once.
2057 */
2058static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
2059{
2060 DEFINE_WAIT(wait);
2061
2062 if (xenvif_have_rx_work(queue))
2063 return;
2064
2065 for (;;) {
2066 long ret;
2067
2068 prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
2069 if (xenvif_have_rx_work(queue))
2070 break;
2071 ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
2072 if (!ret)
2073 break;
2074 }
2075 finish_wait(&queue->wq, &wait);
2076}
2077
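/* Per-queue kernel thread for the guest Rx path: waits for work, performs
 * xenvif_rx_action(), handles Rx stall detection and drops packets that
 * have been queued for too long.
 */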
2078int xenvif_kthread_guest_rx(void *data)
2079{
2080 struct xenvif_queue *queue = data;
2081 struct xenvif *vif = queue->vif;
2082
2083 if (!vif->stall_timeout)
2084 xenvif_queue_carrier_on(queue);
2085
2086 for (;;) {
2087 xenvif_wait_for_rx_work(queue);
2088
2089 if (kthread_should_stop())
2090 break;
2091
2092 /* This frontend is found to be rogue; disable it in
2093 * kthread context. Currently this is only set when
2094 * netback finds that the frontend sends a malformed packet,
2095 * but we cannot disable the interface in softirq
2096 * context, so we defer it here, provided this thread is
2097 * associated with queue 0.
2098 */
2099 if (unlikely(vif->disabled && queue->id == 0)) {
2100 xenvif_carrier_off(vif);
2101 break;
2102 }
2103
2104 if (!skb_queue_empty(&queue->rx_queue))
2105 xenvif_rx_action(queue);
2106
2107 /* If the guest hasn't provided any Rx slots for a
2108 * while it's probably not responsive, drop the
2109 * carrier so packets are dropped earlier.
2110 */
2111 if (vif->stall_timeout) {
2112 if (xenvif_rx_queue_stalled(queue))
2113 xenvif_queue_carrier_off(queue);
2114 else if (xenvif_rx_queue_ready(queue))
2115 xenvif_queue_carrier_on(queue);
2116 }
2117
2118 /* Queued packets may have foreign pages from other
2119 * domains. These cannot be queued indefinitely as
2120 * this would starve guests of grant refs and transmit
2121 * slots.
2122 */
2123 xenvif_rx_queue_drop_expired(queue);
2124
2125 xenvif_rx_queue_maybe_wake(queue);
2126
2127 cond_resched();
2128 }
2129
2130 /* Bin any remaining skbs */
2131 xenvif_rx_queue_purge(queue);
2132
2133 return 0;
2134}
2135
2136static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
2137{
2138 /* Dealloc thread must remain running until all inflight
2139 * packets complete.
2140 */
2141 return kthread_should_stop() &&
2142 !atomic_read(&queue->inflight_packets);
2143}
2144
2145int xenvif_dealloc_kthread(void *data)
2146{
2147 struct xenvif_queue *queue = data;
2148
2149 for (;;) {
2150 wait_event_interruptible(queue->dealloc_wq,
2151 tx_dealloc_work_todo(queue) ||
2152 xenvif_dealloc_kthread_should_stop(queue));
2153 if (xenvif_dealloc_kthread_should_stop(queue))
2154 break;
2155
2156 xenvif_tx_dealloc_action(queue);
2157 cond_resched();
2158 }
2159
2160 /* Unmap anything remaining */
2161 if (tx_dealloc_work_todo(queue))
2162 xenvif_tx_dealloc_action(queue);
2163
2164 return 0;
2165}
2166
2167static int __init netback_init(void)
2168{
2169 int rc = 0;
2170
2171 if (!xen_domain())
2172 return -ENODEV;
2173
2174 /* Allow as many queues as there are CPUs if the user has not
2175 * specified a value.
2176 */
2177 if (xenvif_max_queues == 0)
2178 xenvif_max_queues = num_online_cpus();
2179
2180 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
2181 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
2182 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
2183 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
2184 }
2185
2186 rc = xenvif_xenbus_init();
2187 if (rc)
2188 goto failed_init;
2189
2190#ifdef CONFIG_DEBUG_FS
2191 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
2192 if (IS_ERR_OR_NULL(xen_netback_dbg_root))
2193 pr_warn("Init of debugfs returned %ld!\n",
2194 PTR_ERR(xen_netback_dbg_root));
2195#endif /* CONFIG_DEBUG_FS */
2196
2197 return 0;
2198
2199failed_init:
2200 return rc;
2201}
2202
2203module_init(netback_init);
2204
2205static void __exit netback_fini(void)
2206{
2207#ifdef CONFIG_DEBUG_FS
2208 if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
2209 debugfs_remove_recursive(xen_netback_dbg_root);
2210#endif /* CONFIG_DEBUG_FS */
2211 xenvif_xenbus_fini();
2212}
2213module_exit(netback_fini);
2214
2215MODULE_LICENSE("Dual BSD/GPL");
2216MODULE_ALIAS("xen-backend:vif");