   1/*
   2 * Back-end of the driver for virtual network devices. This portion of the
   3 * driver exports a 'unified' network-device interface that can be accessed
   4 * by any operating system that implements a compatible front end. A
   5 * reference front-end implementation can be found in:
   6 *  drivers/net/xen-netfront.c
   7 *
   8 * Copyright (c) 2002-2005, K A Fraser
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public License version 2
  12 * as published by the Free Software Foundation; or, when distributed
  13 * separately from the Linux kernel or incorporated into other
  14 * software packages, subject to the following license:
  15 *
  16 * Permission is hereby granted, free of charge, to any person obtaining a copy
  17 * of this source file (the "Software"), to deal in the Software without
  18 * restriction, including without limitation the rights to use, copy, modify,
  19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  20 * and to permit persons to whom the Software is furnished to do so, subject to
  21 * the following conditions:
  22 *
  23 * The above copyright notice and this permission notice shall be included in
  24 * all copies or substantial portions of the Software.
  25 *
  26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  32 * IN THE SOFTWARE.
  33 */
  34
  35#include "common.h"
  36
  37#include <linux/kthread.h>
  38#include <linux/if_vlan.h>
  39#include <linux/udp.h>
  40#include <linux/highmem.h>
  41#include <linux/skbuff_ref.h>
  42
  43#include <net/tcp.h>
  44
  45#include <xen/xen.h>
  46#include <xen/events.h>
  47#include <xen/interface/memory.h>
  48#include <xen/page.h>
  49
  50#include <asm/xen/hypercall.h>
  51
   52/* Provide an option to disable split event channels at load time, as
   53 * event channels are a limited resource. Split event channels are
  54 * enabled by default.
  55 */
  56bool separate_tx_rx_irq = true;
  57module_param(separate_tx_rx_irq, bool, 0644);
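     /* Illustrative usage, not part of the source: since this is a writable
      * module parameter it can be set at load time, e.g.
      *
      *	modprobe xen-netback separate_tx_rx_irq=0
      *
      * to make each queue share one event channel for Tx and Rx.
      */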
  58
  59/* The time that packets can stay on the guest Rx internal queue
  60 * before they are dropped.
  61 */
  62unsigned int rx_drain_timeout_msecs = 10000;
  63module_param(rx_drain_timeout_msecs, uint, 0444);
  64
  65/* The length of time before the frontend is considered unresponsive
  66 * because it isn't providing Rx slots.
  67 */
  68unsigned int rx_stall_timeout_msecs = 60000;
  69module_param(rx_stall_timeout_msecs, uint, 0444);
  70
  71#define MAX_QUEUES_DEFAULT 8
  72unsigned int xenvif_max_queues;
  73module_param_named(max_queues, xenvif_max_queues, uint, 0644);
  74MODULE_PARM_DESC(max_queues,
  75		 "Maximum number of queues per virtual interface");
  76
  77/*
   78 * This is the maximum number of slots an skb can have. If a guest
   79 * sends an skb which exceeds this limit, it is considered malicious.
  80 */
  81#define FATAL_SKB_SLOTS_DEFAULT 20
  82static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
  83module_param(fatal_skb_slots, uint, 0444);
  84
  85/* The amount to copy out of the first guest Tx slot into the skb's
  86 * linear area.  If the first slot has more data, it will be mapped
  87 * and put into the first frag.
  88 *
  89 * This is sized to avoid pulling headers from the frags for most
  90 * TCP/IP packets.
  91 */
  92#define XEN_NETBACK_TX_COPY_LEN 128
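     /* Worked example of the policy above (illustrative): for a 1514-byte
      * TCP/IPv4 frame, the first 128 bytes (enough for the Ethernet, IP and
      * TCP headers) are copied into the skb's linear area, and the remainder
      * of the first slot is mapped into frag 0 rather than copied.
      */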
  93
  94/* This is the maximum number of flows in the hash cache. */
  95#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
  96unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
  97module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
  98MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
  99
  100/* This module parameter tells whether we have to put data
  101 * for xen-netfront at the XDP_PACKET_HEADROOM offset
  102 * needed for XDP processing.
 103 */
 104bool provides_xdp_headroom = true;
 105module_param(provides_xdp_headroom, bool, 0644);
 106
 107static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 108			       s8 status);
 109
 110static void make_tx_response(struct xenvif_queue *queue,
 111			     const struct xen_netif_tx_request *txp,
 112			     unsigned int extra_count,
 113			     s8 status);
 114
 115static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
 116
 117static inline int tx_work_todo(struct xenvif_queue *queue);
 118
 119static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
 120				       u16 idx)
 121{
 122	return page_to_pfn(queue->mmap_pages[idx]);
 123}
 124
 125static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
 126					 u16 idx)
 127{
 128	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
 129}
 130
 131#define callback_param(vif, pending_idx) \
 132	(vif->pending_tx_info[pending_idx].callback_struct)
 133
  134/* Find the containing queue's structure from a pointer in the pending_tx_info array
 135 */
 136static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
 137{
 138	u16 pending_idx = ubuf->desc;
 139	struct pending_tx_info *temp =
 140		container_of(ubuf, struct pending_tx_info, callback_struct);
 141	return container_of(temp - pending_idx,
 142			    struct xenvif_queue,
 143			    pending_tx_info[0]);
 144}
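     /* Sketch of the arithmetic above: callback_struct is embedded in
      * pending_tx_info[pending_idx], so stepping back pending_idx elements
      * reaches pending_tx_info[0], from which the enclosing queue follows:
      *
      *	struct pending_tx_info *base = temp - pending_idx; // == &pending_tx_info[0]
      *	return container_of(base, struct xenvif_queue, pending_tx_info[0]);
      */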
 145
 146static u16 frag_get_pending_idx(skb_frag_t *frag)
 147{
 148	return (u16)skb_frag_off(frag);
 149}
 150
 151static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
 152{
 153	skb_frag_off_set(frag, pending_idx);
 154}
 155
 156static inline pending_ring_idx_t pending_index(unsigned i)
 157{
 158	return i & (MAX_PENDING_REQS-1);
 159}
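     /* The mask above relies on MAX_PENDING_REQS being a power of two so the
      * free-running producer/consumer counters can simply wrap. Illustrative
      * check and example, assuming the usual ring size of 256:
      *
      *	BUILD_BUG_ON(MAX_PENDING_REQS & (MAX_PENDING_REQS - 1));
      *	// pending_index(255) == 255, pending_index(256) == 0
      */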
 160
 161void xenvif_kick_thread(struct xenvif_queue *queue)
 162{
 163	wake_up(&queue->wq);
 164}
 165
 166void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
 167{
 168	int more_to_do;
 169
 170	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
 171
 172	if (more_to_do)
 173		napi_schedule(&queue->napi);
 174	else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
 175				     &queue->eoi_pending) &
 176		 (NETBK_TX_EOI | NETBK_COMMON_EOI))
 177		xen_irq_lateeoi(queue->tx_irq, 0);
 178}
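     /* Note on the atomic above: atomic_fetch_andnot() clears the pending
      * EOI bits and returns the old value in a single step, so when no more
      * Tx work is found the lateeoi interrupt is acked exactly once even if
      * the interrupt handler and NAPI poll race with each other.
      */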
 179
 180static void tx_add_credit(struct xenvif_queue *queue)
 181{
 182	unsigned long max_burst, max_credit;
 183
 184	/*
 185	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
 186	 * Otherwise the interface can seize up due to insufficient credit.
 187	 */
 188	max_burst = max(131072UL, queue->credit_bytes);
 189
 190	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
 191	max_credit = queue->remaining_credit + queue->credit_bytes;
 192	if (max_credit < queue->remaining_credit)
 193		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
 194
 195	queue->remaining_credit = min(max_credit, max_burst);
 196	queue->rate_limited = false;
 197}
 198
 199void xenvif_tx_credit_callback(struct timer_list *t)
 200{
 201	struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
 202	tx_add_credit(queue);
 203	xenvif_napi_schedule_or_enable_events(queue);
 204}
 205
 206static void xenvif_tx_err(struct xenvif_queue *queue,
 207			  struct xen_netif_tx_request *txp,
 208			  unsigned int extra_count, RING_IDX end)
 209{
 210	RING_IDX cons = queue->tx.req_cons;
 211
 212	do {
 213		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
 214		if (cons == end)
 215			break;
 216		RING_COPY_REQUEST(&queue->tx, cons++, txp);
 217		extra_count = 0; /* only the first frag can have extras */
 218	} while (1);
 219	queue->tx.req_cons = cons;
 220}
 221
 222static void xenvif_fatal_tx_err(struct xenvif *vif)
 223{
 224	netdev_err(vif->dev, "fatal error; disabling device\n");
 225	vif->disabled = true;
 226	/* Disable the vif from queue 0's kthread */
 227	if (vif->num_queues)
 228		xenvif_kick_thread(&vif->queues[0]);
 229}
 230
 231static int xenvif_count_requests(struct xenvif_queue *queue,
 232				 struct xen_netif_tx_request *first,
 233				 unsigned int extra_count,
 234				 struct xen_netif_tx_request *txp,
 235				 int work_to_do)
 236{
 237	RING_IDX cons = queue->tx.req_cons;
 238	int slots = 0;
 239	int drop_err = 0;
 240	int more_data;
 241
 242	if (!(first->flags & XEN_NETTXF_more_data))
 243		return 0;
 244
 245	do {
 246		struct xen_netif_tx_request dropped_tx = { 0 };
 247
 248		if (slots >= work_to_do) {
 249			netdev_err(queue->vif->dev,
 250				   "Asked for %d slots but exceeds this limit\n",
 251				   work_to_do);
 252			xenvif_fatal_tx_err(queue->vif);
 253			return -ENODATA;
 254		}
 255
  256		/* This guest is really using too many slots and is
  257		 * considered malicious.
 258		 */
 259		if (unlikely(slots >= fatal_skb_slots)) {
 260			netdev_err(queue->vif->dev,
 261				   "Malicious frontend using %d slots, threshold %u\n",
 262				   slots, fatal_skb_slots);
 263			xenvif_fatal_tx_err(queue->vif);
 264			return -E2BIG;
 265		}
 266
  267		/* The Xen network protocol had an implicit dependency on
  268		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
  269		 * the historical MAX_SKB_FRAGS value 18 to honor the
  270		 * same behavior as before. Any packet using more than
  271		 * 18 slots but fewer than fatal_skb_slots slots is
  272		 * dropped.
 273		 */
 274		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
 275			if (net_ratelimit())
 276				netdev_dbg(queue->vif->dev,
 277					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
 278					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
 279			drop_err = -E2BIG;
 280		}
 281
 282		if (drop_err)
 283			txp = &dropped_tx;
 284
 285		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
 286
 287		/* If the guest submitted a frame >= 64 KiB then
 288		 * first->size overflowed and following slots will
 289		 * appear to be larger than the frame.
 290		 *
  291	 * This cannot be a fatal error, as there are buggy
 292		 * frontends that do this.
 293		 *
 294		 * Consume all slots and drop the packet.
 295		 */
 296		if (!drop_err && txp->size > first->size) {
 297			if (net_ratelimit())
 298				netdev_dbg(queue->vif->dev,
 299					   "Invalid tx request, slot size %u > remaining size %u\n",
 300					   txp->size, first->size);
 301			drop_err = -EIO;
 302		}
 303
 304		first->size -= txp->size;
 305		slots++;
 306
 307		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
 308			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
 309				 txp->offset, txp->size);
 310			xenvif_fatal_tx_err(queue->vif);
 311			return -EINVAL;
 312		}
 313
 314		more_data = txp->flags & XEN_NETTXF_more_data;
 315
 316		if (!drop_err)
 317			txp++;
 318
 319	} while (more_data);
 320
 321	if (drop_err) {
 322		xenvif_tx_err(queue, first, extra_count, cons + slots);
 323		return drop_err;
 324	}
 325
 326	return slots;
 327}
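     /* Slot-count policy of the function above, using the defaults in this
      * file: a packet needing more than XEN_NETBK_LEGACY_SLOTS_MAX (18) but
      * fewer than fatal_skb_slots (20) slots has its slots consumed and is
      * dropped with an error response; at fatal_skb_slots and beyond the
      * frontend is treated as malicious and the vif is disabled.
      */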
 328
 329
 330struct xenvif_tx_cb {
 331	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
 332	u8 copy_count;
 333	u32 split_mask;
 334};
 335
 336#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
 337#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
 338#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
 339
 340static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 341					   u16 pending_idx,
 342					   struct xen_netif_tx_request *txp,
 343					   unsigned int extra_count,
 344					   struct gnttab_map_grant_ref *mop)
 345{
 346	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
 347	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
 348			  GNTMAP_host_map | GNTMAP_readonly,
 349			  txp->gref, queue->vif->domid);
 350
 351	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
 352	       sizeof(*txp));
 353	queue->pending_tx_info[pending_idx].extra_count = extra_count;
 354}
 355
 356static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 357{
 358	struct sk_buff *skb =
 359		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 360			  GFP_ATOMIC | __GFP_NOWARN);
 361
 362	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
 363	if (unlikely(skb == NULL))
 364		return NULL;
 365
 366	/* Packets passed to netif_rx() must have some headroom. */
 367	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 368
 369	/* Initialize it here to avoid later surprises */
 370	skb_shinfo(skb)->destructor_arg = NULL;
 371
 372	return skb;
 373}
 374
 375static void xenvif_get_requests(struct xenvif_queue *queue,
 376				struct sk_buff *skb,
 377				struct xen_netif_tx_request *first,
 378				struct xen_netif_tx_request *txfrags,
 379			        unsigned *copy_ops,
 380			        unsigned *map_ops,
 381				unsigned int frag_overflow,
 382				struct sk_buff *nskb,
 383				unsigned int extra_count,
 384				unsigned int data_len)
 385{
 386	struct skb_shared_info *shinfo = skb_shinfo(skb);
 387	skb_frag_t *frags = shinfo->frags;
 388	u16 pending_idx;
 389	pending_ring_idx_t index;
 390	unsigned int nr_slots;
 391	struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
 392	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
 393	struct xen_netif_tx_request *txp = first;
 394
 395	nr_slots = shinfo->nr_frags + frag_overflow + 1;
 396
 397	copy_count(skb) = 0;
 398	XENVIF_TX_CB(skb)->split_mask = 0;
 399
 400	/* Create copy ops for exactly data_len bytes into the skb head. */
 401	__skb_put(skb, data_len);
 402	while (data_len > 0) {
 403		int amount = data_len > txp->size ? txp->size : data_len;
 404		bool split = false;
 405
 406		cop->source.u.ref = txp->gref;
 407		cop->source.domid = queue->vif->domid;
 408		cop->source.offset = txp->offset;
 409
 410		cop->dest.domid = DOMID_SELF;
 411		cop->dest.offset = (offset_in_page(skb->data +
 412						   skb_headlen(skb) -
 413						   data_len)) & ~XEN_PAGE_MASK;
 414		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
 415				               - data_len);
 416
 417		/* Don't cross local page boundary! */
 418		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
 419			amount = XEN_PAGE_SIZE - cop->dest.offset;
 420			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
 421			split = true;
 422		}
 423
 424		cop->len = amount;
 425		cop->flags = GNTCOPY_source_gref;
 426
 427		index = pending_index(queue->pending_cons);
 428		pending_idx = queue->pending_ring[index];
 429		callback_param(queue, pending_idx).ctx = NULL;
 430		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
 431		if (!split)
 432			copy_count(skb)++;
 433
 434		cop++;
 435		data_len -= amount;
 436
 437		if (amount == txp->size) {
 438			/* The copy op covered the full tx_request */
 439
 440			memcpy(&queue->pending_tx_info[pending_idx].req,
 441			       txp, sizeof(*txp));
 442			queue->pending_tx_info[pending_idx].extra_count =
 443				(txp == first) ? extra_count : 0;
 444
 445			if (txp == first)
 446				txp = txfrags;
 447			else
 448				txp++;
 449			queue->pending_cons++;
 450			nr_slots--;
 451		} else {
 452			/* The copy op partially covered the tx_request.
 453			 * The remainder will be mapped or copied in the next
 454			 * iteration.
 455			 */
 456			txp->offset += amount;
 457			txp->size -= amount;
 458		}
 459	}
 460
 461	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
 462	     nr_slots--) {
 463		if (unlikely(!txp->size)) {
 464			make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
 465			++txp;
 466			continue;
 467		}
 468
 469		index = pending_index(queue->pending_cons++);
 470		pending_idx = queue->pending_ring[index];
 471		xenvif_tx_create_map_op(queue, pending_idx, txp,
 472				        txp == first ? extra_count : 0, gop);
 473		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
 474		++shinfo->nr_frags;
 475		++gop;
 476
 477		if (txp == first)
 478			txp = txfrags;
 479		else
 480			txp++;
 481	}
 482
 483	if (nr_slots > 0) {
 484
 485		shinfo = skb_shinfo(nskb);
 486		frags = shinfo->frags;
 487
 488		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
 489			if (unlikely(!txp->size)) {
 490				make_tx_response(queue, txp, 0,
 491						 XEN_NETIF_RSP_OKAY);
 492				continue;
 493			}
 494
 495			index = pending_index(queue->pending_cons++);
 496			pending_idx = queue->pending_ring[index];
 497			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
 498						gop);
 499			frag_set_pending_idx(&frags[shinfo->nr_frags],
 500					     pending_idx);
 501			++shinfo->nr_frags;
 502			++gop;
 503		}
 504
 505		if (shinfo->nr_frags) {
 506			skb_shinfo(skb)->frag_list = nskb;
 507			nskb = NULL;
 508		}
 509	}
 510
 511	if (nskb) {
 512		/* A frag_list skb was allocated but it is no longer needed
 513		 * because enough slots were converted to copy ops above or some
 514		 * were empty.
 515		 */
 516		kfree_skb(nskb);
 517	}
 518
 519	(*copy_ops) = cop - queue->tx_copy_ops;
 520	(*map_ops) = gop - queue->tx_map_ops;
 521}
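     /* Summary of xenvif_get_requests(): the first data_len bytes become
      * grant-copy ops targeting the skb head (split in two wherever the
      * destination crosses a local page boundary), every remaining slot
      * becomes a grant-map op backing an skb frag, and slots that do not fit
      * into MAX_SKB_FRAGS spill over into the preallocated frag_list skb
      * nskb.
      */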
 522
 523static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
 524					   u16 pending_idx,
 525					   grant_handle_t handle)
 526{
 527	if (unlikely(queue->grant_tx_handle[pending_idx] !=
 528		     NETBACK_INVALID_HANDLE)) {
 529		netdev_err(queue->vif->dev,
 530			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
 531			   pending_idx);
 532		BUG();
 533	}
 534	queue->grant_tx_handle[pending_idx] = handle;
 535}
 536
 537static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
 538					     u16 pending_idx)
 539{
 540	if (unlikely(queue->grant_tx_handle[pending_idx] ==
 541		     NETBACK_INVALID_HANDLE)) {
 542		netdev_err(queue->vif->dev,
 543			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
 544			   pending_idx);
 545		BUG();
 546	}
 547	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
 548}
 549
 550static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 551			       struct sk_buff *skb,
 552			       struct gnttab_map_grant_ref **gopp_map,
 553			       struct gnttab_copy **gopp_copy)
 554{
 555	struct gnttab_map_grant_ref *gop_map = *gopp_map;
 556	u16 pending_idx;
 557	/* This always points to the shinfo of the skb being checked, which
 558	 * could be either the first or the one on the frag_list
 559	 */
 560	struct skb_shared_info *shinfo = skb_shinfo(skb);
 561	/* If this is non-NULL, we are currently checking the frag_list skb, and
 562	 * this points to the shinfo of the first one
 563	 */
 564	struct skb_shared_info *first_shinfo = NULL;
 565	int nr_frags = shinfo->nr_frags;
 566	const bool sharedslot = nr_frags &&
 567				frag_get_pending_idx(&shinfo->frags[0]) ==
 568				    copy_pending_idx(skb, copy_count(skb) - 1);
 569	int i, err = 0;
 570
 571	for (i = 0; i < copy_count(skb); i++) {
 572		int newerr;
 573
 574		/* Check status of header. */
 575		pending_idx = copy_pending_idx(skb, i);
 576
 577		newerr = (*gopp_copy)->status;
 578
 579		/* Split copies need to be handled together. */
 580		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
 581			(*gopp_copy)++;
 582			if (!newerr)
 583				newerr = (*gopp_copy)->status;
 584		}
 585		if (likely(!newerr)) {
 586			/* The first frag might still have this slot mapped */
 587			if (i < copy_count(skb) - 1 || !sharedslot)
 588				xenvif_idx_release(queue, pending_idx,
 589						   XEN_NETIF_RSP_OKAY);
 590		} else {
 591			err = newerr;
 592			if (net_ratelimit())
 593				netdev_dbg(queue->vif->dev,
 594					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
 595					   (*gopp_copy)->status,
 596					   pending_idx,
 597					   (*gopp_copy)->source.u.ref);
 598			/* The first frag might still have this slot mapped */
 599			if (i < copy_count(skb) - 1 || !sharedslot)
 600				xenvif_idx_release(queue, pending_idx,
 601						   XEN_NETIF_RSP_ERROR);
 602		}
 603		(*gopp_copy)++;
 604	}
 605
 606check_frags:
 607	for (i = 0; i < nr_frags; i++, gop_map++) {
 608		int j, newerr;
 609
 610		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 611
 612		/* Check error status: if okay then remember grant handle. */
 613		newerr = gop_map->status;
 614
 615		if (likely(!newerr)) {
 616			xenvif_grant_handle_set(queue,
 617						pending_idx,
 618						gop_map->handle);
 619			/* Had a previous error? Invalidate this fragment. */
 620			if (unlikely(err)) {
 621				xenvif_idx_unmap(queue, pending_idx);
 622				/* If the mapping of the first frag was OK, but
 623				 * the header's copy failed, and they are
 624				 * sharing a slot, send an error
 625				 */
 626				if (i == 0 && !first_shinfo && sharedslot)
 627					xenvif_idx_release(queue, pending_idx,
 628							   XEN_NETIF_RSP_ERROR);
 629				else
 630					xenvif_idx_release(queue, pending_idx,
 631							   XEN_NETIF_RSP_OKAY);
 632			}
 633			continue;
 634		}
 635
 636		/* Error on this fragment: respond to client with an error. */
 637		if (net_ratelimit())
 638			netdev_dbg(queue->vif->dev,
 639				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
 640				   i,
 641				   gop_map->status,
 642				   pending_idx,
 643				   gop_map->ref);
 644
 645		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
 646
 647		/* Not the first error? Preceding frags already invalidated. */
 648		if (err)
 649			continue;
 650
 651		/* Invalidate preceding fragments of this skb. */
 652		for (j = 0; j < i; j++) {
 653			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
 654			xenvif_idx_unmap(queue, pending_idx);
 655			xenvif_idx_release(queue, pending_idx,
 656					   XEN_NETIF_RSP_OKAY);
 657		}
 658
 659		/* And if we found the error while checking the frag_list, unmap
 660		 * the first skb's frags
 661		 */
 662		if (first_shinfo) {
 663			for (j = 0; j < first_shinfo->nr_frags; j++) {
 664				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
 665				xenvif_idx_unmap(queue, pending_idx);
 666				xenvif_idx_release(queue, pending_idx,
 667						   XEN_NETIF_RSP_OKAY);
 668			}
 669		}
 670
 671		/* Remember the error: invalidate all subsequent fragments. */
 672		err = newerr;
 673	}
 674
 675	if (skb_has_frag_list(skb) && !first_shinfo) {
 676		first_shinfo = shinfo;
 677		shinfo = skb_shinfo(shinfo->frag_list);
 678		nr_frags = shinfo->nr_frags;
 679
 680		goto check_frags;
 681	}
 682
 683	*gopp_map = gop_map;
 684	return err;
 685}
 686
 687static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
 688{
 689	struct skb_shared_info *shinfo = skb_shinfo(skb);
 690	int nr_frags = shinfo->nr_frags;
 691	int i;
 692	u16 prev_pending_idx = INVALID_PENDING_IDX;
 693
 694	for (i = 0; i < nr_frags; i++) {
 695		skb_frag_t *frag = shinfo->frags + i;
 696		struct xen_netif_tx_request *txp;
 697		struct page *page;
 698		u16 pending_idx;
 699
 700		pending_idx = frag_get_pending_idx(frag);
 701
  702		/* If this is not the first frag, chain it to the previous. */
 703		if (prev_pending_idx == INVALID_PENDING_IDX)
 704			skb_shinfo(skb)->destructor_arg =
 705				&callback_param(queue, pending_idx);
 706		else
 707			callback_param(queue, prev_pending_idx).ctx =
 708				&callback_param(queue, pending_idx);
 709
 710		callback_param(queue, pending_idx).ctx = NULL;
 711		prev_pending_idx = pending_idx;
 712
 713		txp = &queue->pending_tx_info[pending_idx].req;
 714		page = virt_to_page((void *)idx_to_kaddr(queue, pending_idx));
 715		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
 716		skb->len += txp->size;
 717		skb->data_len += txp->size;
 718		skb->truesize += txp->size;
 719
 720		/* Take an extra reference to offset network stack's put_page */
 721		get_page(queue->mmap_pages[pending_idx]);
 722	}
 723}
 724
 725static int xenvif_get_extras(struct xenvif_queue *queue,
 726			     struct xen_netif_extra_info *extras,
 727			     unsigned int *extra_count,
 728			     int work_to_do)
 729{
 730	struct xen_netif_extra_info extra;
 731	RING_IDX cons = queue->tx.req_cons;
 732
 733	do {
 734		if (unlikely(work_to_do-- <= 0)) {
 735			netdev_err(queue->vif->dev, "Missing extra info\n");
 736			xenvif_fatal_tx_err(queue->vif);
 737			return -EBADR;
 738		}
 739
 740		RING_COPY_REQUEST(&queue->tx, cons, &extra);
 741
 742		queue->tx.req_cons = ++cons;
 743		(*extra_count)++;
 744
 745		if (unlikely(!extra.type ||
 746			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 747			netdev_err(queue->vif->dev,
 748				   "Invalid extra type: %d\n", extra.type);
 749			xenvif_fatal_tx_err(queue->vif);
 750			return -EINVAL;
 751		}
 752
 753		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
 754	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 755
 756	return work_to_do;
 757}
 758
 759static int xenvif_set_skb_gso(struct xenvif *vif,
 760			      struct sk_buff *skb,
 761			      struct xen_netif_extra_info *gso)
 762{
 763	if (!gso->u.gso.size) {
 764		netdev_err(vif->dev, "GSO size must not be zero.\n");
 765		xenvif_fatal_tx_err(vif);
 766		return -EINVAL;
 767	}
 768
 769	switch (gso->u.gso.type) {
 770	case XEN_NETIF_GSO_TYPE_TCPV4:
 771		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 772		break;
 773	case XEN_NETIF_GSO_TYPE_TCPV6:
 774		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 775		break;
 776	default:
 777		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
 778		xenvif_fatal_tx_err(vif);
 779		return -EINVAL;
 780	}
 781
 782	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 783	/* gso_segs will be calculated later */
 784
 785	return 0;
 786}
 787
 788static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
 789{
 790	bool recalculate_partial_csum = false;
 791
 792	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 793	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 794	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 795	 * recalculate the partial checksum.
 796	 */
 797	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 798		queue->stats.rx_gso_checksum_fixup++;
 799		skb->ip_summed = CHECKSUM_PARTIAL;
 800		recalculate_partial_csum = true;
 801	}
 802
 803	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 804	if (skb->ip_summed != CHECKSUM_PARTIAL)
 805		return 0;
 806
 807	return skb_checksum_setup(skb, recalculate_partial_csum);
 808}
 809
 810static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 811{
 812	u64 now = get_jiffies_64();
 813	u64 next_credit = queue->credit_window_start +
 814		msecs_to_jiffies(queue->credit_usec / 1000);
 815
 816	/* Timer could already be pending in rare cases. */
 817	if (timer_pending(&queue->credit_timeout)) {
 818		queue->rate_limited = true;
 819		return true;
 820	}
 821
 822	/* Passed the point where we can replenish credit? */
 823	if (time_after_eq64(now, next_credit)) {
 824		queue->credit_window_start = now;
 825		tx_add_credit(queue);
 826	}
 827
 828	/* Still too big to send right now? Set a callback. */
 829	if (size > queue->remaining_credit) {
 830		mod_timer(&queue->credit_timeout,
 831			  next_credit);
 832		queue->credit_window_start = next_credit;
 833		queue->rate_limited = true;
 834
 835		return true;
 836	}
 837
 838	return false;
 839}
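     /* The scheduler above behaves like a token bucket. Worked example with
      * illustrative values: credit_bytes = 1000000 and credit_usec = 1000000
      * allow roughly 1 MB of Tx data per one-second window, while
      * tx_add_credit() caps any single burst at max(128 kB, credit_bytes).
      */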
 840
 841/* No locking is required in xenvif_mcast_add/del() as they are
 842 * only ever invoked from NAPI poll. An RCU list is used because
 843 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 844 */
 845
 846static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
 847{
 848	struct xenvif_mcast_addr *mcast;
 849
 850	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
 851		if (net_ratelimit())
 852			netdev_err(vif->dev,
 853				   "Too many multicast addresses\n");
 854		return -ENOSPC;
 855	}
 856
 857	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
 858	if (!mcast)
 859		return -ENOMEM;
 860
 861	ether_addr_copy(mcast->addr, addr);
 862	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
 863	vif->fe_mcast_count++;
 864
 865	return 0;
 866}
 867
 868static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
 869{
 870	struct xenvif_mcast_addr *mcast;
 871
 872	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
 873		if (ether_addr_equal(addr, mcast->addr)) {
 874			--vif->fe_mcast_count;
 875			list_del_rcu(&mcast->entry);
 876			kfree_rcu(mcast, rcu);
 877			break;
 878		}
 879	}
 880}
 881
 882bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
 883{
 884	struct xenvif_mcast_addr *mcast;
 885
 886	rcu_read_lock();
 887	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
 888		if (ether_addr_equal(addr, mcast->addr)) {
 889			rcu_read_unlock();
 890			return true;
 891		}
 892	}
 893	rcu_read_unlock();
 894
 895	return false;
 896}
 897
 898void xenvif_mcast_addr_list_free(struct xenvif *vif)
 899{
 900	/* No need for locking or RCU here. NAPI poll and TX queue
 901	 * are stopped.
 902	 */
 903	while (!list_empty(&vif->fe_mcast_addr)) {
 904		struct xenvif_mcast_addr *mcast;
 905
 906		mcast = list_first_entry(&vif->fe_mcast_addr,
 907					 struct xenvif_mcast_addr,
 908					 entry);
 909		--vif->fe_mcast_count;
 910		list_del(&mcast->entry);
 911		kfree(mcast);
 912	}
 913}
 914
 915static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 916				     int budget,
 917				     unsigned *copy_ops,
 918				     unsigned *map_ops)
 919{
 920	struct sk_buff *skb, *nskb;
 921	int ret;
 922	unsigned int frag_overflow;
 923
 924	while (skb_queue_len(&queue->tx_queue) < budget) {
 925		struct xen_netif_tx_request txreq;
 926		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 927		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
 928		unsigned int extra_count;
 929		RING_IDX idx;
 930		int work_to_do;
 931		unsigned int data_len;
 932
 933		if (queue->tx.sring->req_prod - queue->tx.req_cons >
 934		    XEN_NETIF_TX_RING_SIZE) {
 935			netdev_err(queue->vif->dev,
 936				   "Impossible number of requests. "
 937				   "req_prod %d, req_cons %d, size %ld\n",
 938				   queue->tx.sring->req_prod, queue->tx.req_cons,
 939				   XEN_NETIF_TX_RING_SIZE);
 940			xenvif_fatal_tx_err(queue->vif);
 941			break;
 942		}
 943
 944		work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
 945		if (!work_to_do)
 946			break;
 947
 948		idx = queue->tx.req_cons;
 949		rmb(); /* Ensure that we see the request before we copy it. */
 950		RING_COPY_REQUEST(&queue->tx, idx, &txreq);
 951
 952		/* Credit-based scheduling. */
 953		if (txreq.size > queue->remaining_credit &&
 954		    tx_credit_exceeded(queue, txreq.size))
 955			break;
 956
 957		queue->remaining_credit -= txreq.size;
 958
 959		work_to_do--;
 960		queue->tx.req_cons = ++idx;
 961
 962		memset(extras, 0, sizeof(extras));
 963		extra_count = 0;
 964		if (txreq.flags & XEN_NETTXF_extra_info) {
 965			work_to_do = xenvif_get_extras(queue, extras,
 966						       &extra_count,
 967						       work_to_do);
 968			idx = queue->tx.req_cons;
 969			if (unlikely(work_to_do < 0))
 970				break;
 971		}
 972
 973		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
 974			struct xen_netif_extra_info *extra;
 975
 976			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
 977			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
 978
 979			make_tx_response(queue, &txreq, extra_count,
 980					 (ret == 0) ?
 981					 XEN_NETIF_RSP_OKAY :
 982					 XEN_NETIF_RSP_ERROR);
 983			continue;
 984		}
 985
 986		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
 987			struct xen_netif_extra_info *extra;
 988
 989			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
 990			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
 991
 992			make_tx_response(queue, &txreq, extra_count,
 993					 XEN_NETIF_RSP_OKAY);
 994			continue;
 995		}
 996
 997		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
 998			XEN_NETBACK_TX_COPY_LEN : txreq.size;
 999
1000		ret = xenvif_count_requests(queue, &txreq, extra_count,
1001					    txfrags, work_to_do);
1002
1003		if (unlikely(ret < 0))
1004			break;
1005
1006		idx += ret;
1007
1008		if (unlikely(txreq.size < ETH_HLEN)) {
1009			netdev_dbg(queue->vif->dev,
1010				   "Bad packet size: %d\n", txreq.size);
1011			xenvif_tx_err(queue, &txreq, extra_count, idx);
1012			break;
1013		}
1014
 1015		/* No crossing a page boundary, as the payload mustn't fragment. */
1016		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
1017			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
1018				   txreq.offset, txreq.size);
1019			xenvif_fatal_tx_err(queue->vif);
1020			break;
1021		}
1022
1023		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
1024			data_len = txreq.size;
1025
1026		skb = xenvif_alloc_skb(data_len);
1027		if (unlikely(skb == NULL)) {
1028			netdev_dbg(queue->vif->dev,
1029				   "Can't allocate a skb in start_xmit.\n");
1030			xenvif_tx_err(queue, &txreq, extra_count, idx);
1031			break;
1032		}
1033
1034		skb_shinfo(skb)->nr_frags = ret;
1035		/* At this point shinfo->nr_frags is in fact the number of
1036		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1037		 */
1038		frag_overflow = 0;
1039		nskb = NULL;
1040		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1041			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1042			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1043			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1044			nskb = xenvif_alloc_skb(0);
1045			if (unlikely(nskb == NULL)) {
1046				skb_shinfo(skb)->nr_frags = 0;
1047				kfree_skb(skb);
1048				xenvif_tx_err(queue, &txreq, extra_count, idx);
1049				if (net_ratelimit())
1050					netdev_err(queue->vif->dev,
1051						   "Can't allocate the frag_list skb.\n");
1052				break;
1053			}
1054		}
1055
1056		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1057			struct xen_netif_extra_info *gso;
1058			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1059
1060			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1061				/* Failure in xenvif_set_skb_gso is fatal. */
1062				skb_shinfo(skb)->nr_frags = 0;
1063				kfree_skb(skb);
1064				kfree_skb(nskb);
1065				break;
1066			}
1067		}
1068
1069		if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
1070			struct xen_netif_extra_info *extra;
1071			enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
1072
1073			extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
1074
1075			switch (extra->u.hash.type) {
1076			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
1077			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
1078				type = PKT_HASH_TYPE_L3;
1079				break;
1080
1081			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
1082			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
1083				type = PKT_HASH_TYPE_L4;
1084				break;
1085
1086			default:
1087				break;
1088			}
1089
1090			if (type != PKT_HASH_TYPE_NONE)
1091				skb_set_hash(skb,
1092					     *(u32 *)extra->u.hash.value,
1093					     type);
1094		}
1095
1096		xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
1097				    map_ops, frag_overflow, nskb, extra_count,
1098				    data_len);
1099
1100		__skb_queue_tail(&queue->tx_queue, skb);
1101
1102		queue->tx.req_cons = idx;
1103	}
1104
1105	return;
1106}
1107
1108/* Consolidate skb with a frag_list into a brand new one with local pages on
 1109 * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
1110 */
1111static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1112{
1113	unsigned int offset = skb_headlen(skb);
1114	skb_frag_t frags[MAX_SKB_FRAGS];
1115	int i, f;
1116	struct ubuf_info *uarg;
1117	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1118
1119	queue->stats.tx_zerocopy_sent += 2;
1120	queue->stats.tx_frag_overflow++;
1121
1122	xenvif_fill_frags(queue, nskb);
1123	/* Subtract frags size, we will correct it later */
1124	skb->truesize -= skb->data_len;
1125	skb->len += nskb->len;
1126	skb->data_len += nskb->len;
1127
1128	/* create a brand new frags array and coalesce there */
1129	for (i = 0; offset < skb->len; i++) {
1130		struct page *page;
1131		unsigned int len;
1132
1133		BUG_ON(i >= MAX_SKB_FRAGS);
1134		page = alloc_page(GFP_ATOMIC);
1135		if (!page) {
1136			int j;
1137			skb->truesize += skb->data_len;
1138			for (j = 0; j < i; j++)
1139				put_page(skb_frag_page(&frags[j]));
1140			return -ENOMEM;
1141		}
1142
1143		if (offset + PAGE_SIZE < skb->len)
1144			len = PAGE_SIZE;
1145		else
1146			len = skb->len - offset;
1147		if (skb_copy_bits(skb, offset, page_address(page), len))
1148			BUG();
1149
1150		offset += len;
1151		skb_frag_fill_page_desc(&frags[i], page, 0, len);
1152	}
1153
1154	/* Release all the original (foreign) frags. */
1155	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1156		skb_frag_unref(skb, f);
1157	uarg = skb_shinfo(skb)->destructor_arg;
1158	/* increase inflight counter to offset decrement in callback */
1159	atomic_inc(&queue->inflight_packets);
1160	uarg->ops->complete(NULL, uarg, true);
1161	skb_shinfo(skb)->destructor_arg = NULL;
1162
1163	/* Fill the skb with the new (local) frags. */
1164	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1165	skb_shinfo(skb)->nr_frags = i;
1166	skb->truesize += i * PAGE_SIZE;
1167
1168	return 0;
1169}
1170
1171static int xenvif_tx_submit(struct xenvif_queue *queue)
1172{
1173	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1174	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1175	struct sk_buff *skb;
1176	int work_done = 0;
1177
1178	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1179		struct xen_netif_tx_request *txp;
1180		u16 pending_idx;
1181
1182		pending_idx = copy_pending_idx(skb, 0);
1183		txp = &queue->pending_tx_info[pending_idx].req;
1184
1185		/* Check the remap error code. */
1186		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1187			/* If there was an error, xenvif_tx_check_gop is
1188			 * expected to release all the frags which were mapped,
1189			 * so kfree_skb shouldn't do it again
1190			 */
1191			skb_shinfo(skb)->nr_frags = 0;
1192			if (skb_has_frag_list(skb)) {
1193				struct sk_buff *nskb =
1194						skb_shinfo(skb)->frag_list;
1195				skb_shinfo(nskb)->nr_frags = 0;
1196			}
1197			kfree_skb(skb);
1198			continue;
1199		}
1200
1201		if (txp->flags & XEN_NETTXF_csum_blank)
1202			skb->ip_summed = CHECKSUM_PARTIAL;
1203		else if (txp->flags & XEN_NETTXF_data_validated)
1204			skb->ip_summed = CHECKSUM_UNNECESSARY;
1205
1206		xenvif_fill_frags(queue, skb);
1207
1208		if (unlikely(skb_has_frag_list(skb))) {
1209			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1210			xenvif_skb_zerocopy_prepare(queue, nskb);
1211			if (xenvif_handle_frag_list(queue, skb)) {
1212				if (net_ratelimit())
1213					netdev_err(queue->vif->dev,
1214						   "Not enough memory to consolidate frag_list!\n");
1215				xenvif_skb_zerocopy_prepare(queue, skb);
1216				kfree_skb(skb);
1217				continue;
1218			}
1219			/* Copied all the bits from the frag list -- free it. */
1220			skb_frag_list_init(skb);
1221			kfree_skb(nskb);
1222		}
1223
1224		skb->dev      = queue->vif->dev;
1225		skb->protocol = eth_type_trans(skb, skb->dev);
1226		skb_reset_network_header(skb);
1227
1228		if (checksum_setup(queue, skb)) {
1229			netdev_dbg(queue->vif->dev,
1230				   "Can't setup checksum in net_tx_action\n");
1231			/* We have to set this flag to trigger the callback */
1232			if (skb_shinfo(skb)->destructor_arg)
1233				xenvif_skb_zerocopy_prepare(queue, skb);
1234			kfree_skb(skb);
1235			continue;
1236		}
1237
1238		skb_probe_transport_header(skb);
1239
1240		/* If the packet is GSO then we will have just set up the
1241		 * transport header offset in checksum_setup so it's now
1242		 * straightforward to calculate gso_segs.
1243		 */
1244		if (skb_is_gso(skb)) {
1245			int mss, hdrlen;
1246
1247			/* GSO implies having the L4 header. */
1248			WARN_ON_ONCE(!skb_transport_header_was_set(skb));
1249			if (unlikely(!skb_transport_header_was_set(skb))) {
1250				kfree_skb(skb);
1251				continue;
1252			}
1253
1254			mss = skb_shinfo(skb)->gso_size;
1255			hdrlen = skb_tcp_all_headers(skb);
1256
1257			skb_shinfo(skb)->gso_segs =
1258				DIV_ROUND_UP(skb->len - hdrlen, mss);
1259		}
1260
1261		queue->stats.rx_bytes += skb->len;
1262		queue->stats.rx_packets++;
1263
1264		work_done++;
1265
1266		/* Set this flag right before netif_receive_skb, otherwise
1267		 * someone might think this packet already left netback, and
1268		 * do a skb_copy_ubufs while we are still in control of the
1269		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
1270		 */
1271		if (skb_shinfo(skb)->destructor_arg) {
1272			xenvif_skb_zerocopy_prepare(queue, skb);
1273			queue->stats.tx_zerocopy_sent++;
1274		}
1275
1276		netif_receive_skb(skb);
1277	}
1278
1279	return work_done;
1280}
1281
1282static void xenvif_zerocopy_callback(struct sk_buff *skb,
1283				     struct ubuf_info *ubuf_base,
1284				     bool zerocopy_success)
1285{
1286	unsigned long flags;
1287	pending_ring_idx_t index;
1288	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
1289	struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1290
1291	/* This is the only place where we grab this lock, to protect callbacks
1292	 * from each other.
1293	 */
1294	spin_lock_irqsave(&queue->callback_lock, flags);
1295	do {
1296		u16 pending_idx = ubuf->desc;
1297		ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
1298		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1299			MAX_PENDING_REQS);
1300		index = pending_index(queue->dealloc_prod);
1301		queue->dealloc_ring[index] = pending_idx;
1302		/* Sync with xenvif_tx_dealloc_action:
1303		 * insert idx then incr producer.
1304		 */
1305		smp_wmb();
1306		queue->dealloc_prod++;
1307	} while (ubuf);
1308	spin_unlock_irqrestore(&queue->callback_lock, flags);
1309
1310	if (likely(zerocopy_success))
1311		queue->stats.tx_zerocopy_success++;
1312	else
1313		queue->stats.tx_zerocopy_fail++;
1314	xenvif_skb_zerocopy_complete(queue);
1315}
1316
1317const struct ubuf_info_ops xenvif_ubuf_ops = {
1318	.complete = xenvif_zerocopy_callback,
1319};
1320
1321static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1322{
1323	struct gnttab_unmap_grant_ref *gop;
1324	pending_ring_idx_t dc, dp;
1325	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1326	unsigned int i = 0;
1327
1328	dc = queue->dealloc_cons;
1329	gop = queue->tx_unmap_ops;
1330
1331	/* Free up any grants we have finished using */
1332	do {
1333		dp = queue->dealloc_prod;
1334
1335		/* Ensure we see all indices enqueued by all
1336		 * xenvif_zerocopy_callback().
1337		 */
1338		smp_rmb();
1339
1340		while (dc != dp) {
1341			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1342			pending_idx =
1343				queue->dealloc_ring[pending_index(dc++)];
1344
1345			pending_idx_release[gop - queue->tx_unmap_ops] =
1346				pending_idx;
1347			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1348				queue->mmap_pages[pending_idx];
1349			gnttab_set_unmap_op(gop,
1350					    idx_to_kaddr(queue, pending_idx),
1351					    GNTMAP_host_map,
1352					    queue->grant_tx_handle[pending_idx]);
1353			xenvif_grant_handle_reset(queue, pending_idx);
1354			++gop;
1355		}
1356
1357	} while (dp != queue->dealloc_prod);
1358
1359	queue->dealloc_cons = dc;
1360
1361	if (gop - queue->tx_unmap_ops > 0) {
1362		int ret;
1363		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1364					NULL,
1365					queue->pages_to_unmap,
1366					gop - queue->tx_unmap_ops);
1367		if (ret) {
1368			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1369				   gop - queue->tx_unmap_ops, ret);
1370			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1371				if (gop[i].status != GNTST_okay)
1372					netdev_err(queue->vif->dev,
1373						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
1374						   gop[i].host_addr,
1375						   gop[i].handle,
1376						   gop[i].status);
1377			}
1378			BUG();
1379		}
1380	}
1381
1382	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1383		xenvif_idx_release(queue, pending_idx_release[i],
1384				   XEN_NETIF_RSP_OKAY);
1385}
1386
1387
1388/* Called after netfront has transmitted */
1389int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1390{
1391	unsigned nr_mops = 0, nr_cops = 0;
1392	int work_done, ret;
1393
1394	if (unlikely(!tx_work_todo(queue)))
1395		return 0;
1396
1397	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1398
1399	if (nr_cops == 0)
1400		return 0;
1401
1402	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1403	if (nr_mops != 0) {
1404		ret = gnttab_map_refs(queue->tx_map_ops,
1405				      NULL,
1406				      queue->pages_to_map,
1407				      nr_mops);
1408		if (ret) {
1409			unsigned int i;
1410
1411			netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
1412				   nr_mops, ret);
1413			for (i = 0; i < nr_mops; ++i)
1414				WARN_ON_ONCE(queue->tx_map_ops[i].status ==
1415				             GNTST_okay);
1416		}
1417	}
1418
1419	work_done = xenvif_tx_submit(queue);
1420
1421	return work_done;
1422}
1423
1424static void _make_tx_response(struct xenvif_queue *queue,
1425			     const struct xen_netif_tx_request *txp,
1426			     unsigned int extra_count,
1427			     s8 status)
1428{
1429	RING_IDX i = queue->tx.rsp_prod_pvt;
1430	struct xen_netif_tx_response *resp;
1431
1432	resp = RING_GET_RESPONSE(&queue->tx, i);
1433	resp->id     = txp->id;
1434	resp->status = status;
1435
1436	while (extra_count-- != 0)
1437		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1438
1439	queue->tx.rsp_prod_pvt = ++i;
1440}
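     /* Example of the extra-slot handling above: a request that carried one
      * XEN_NETIF_EXTRA_TYPE_GSO slot gets the real status for the request
      * itself plus one XEN_NETIF_RSP_NULL filler, keeping the request and
      * response rings in lockstep.
      */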
1441
1442static void push_tx_responses(struct xenvif_queue *queue)
1443{
1444	int notify;
1445
1446	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1447	if (notify)
1448		notify_remote_via_irq(queue->tx_irq);
1449}
1450
1451static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1452			       s8 status)
1453{
1454	struct pending_tx_info *pending_tx_info;
1455	pending_ring_idx_t index;
1456	unsigned long flags;
1457
1458	pending_tx_info = &queue->pending_tx_info[pending_idx];
1459
1460	spin_lock_irqsave(&queue->response_lock, flags);
1461
1462	_make_tx_response(queue, &pending_tx_info->req,
1463			  pending_tx_info->extra_count, status);
1464
 1465	/* Release the pending index before pushing the Tx response so
 1466	 * it's available before a new Tx request is pushed by the
1467	 * frontend.
1468	 */
1469	index = pending_index(queue->pending_prod++);
1470	queue->pending_ring[index] = pending_idx;
1471
1472	push_tx_responses(queue);
1473
1474	spin_unlock_irqrestore(&queue->response_lock, flags);
1475}
1476
1477static void make_tx_response(struct xenvif_queue *queue,
1478			     const struct xen_netif_tx_request *txp,
1479			     unsigned int extra_count,
1480			     s8 status)
1481{
1482	unsigned long flags;
1483
1484	spin_lock_irqsave(&queue->response_lock, flags);
1485
1486	_make_tx_response(queue, txp, extra_count, status);
1487	push_tx_responses(queue);
1488
1489	spin_unlock_irqrestore(&queue->response_lock, flags);
1490}
1491
1492static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1493{
1494	int ret;
1495	struct gnttab_unmap_grant_ref tx_unmap_op;
1496
1497	gnttab_set_unmap_op(&tx_unmap_op,
1498			    idx_to_kaddr(queue, pending_idx),
1499			    GNTMAP_host_map,
1500			    queue->grant_tx_handle[pending_idx]);
1501	xenvif_grant_handle_reset(queue, pending_idx);
1502
1503	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1504				&queue->mmap_pages[pending_idx], 1);
1505	if (ret) {
1506		netdev_err(queue->vif->dev,
1507			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1508			   ret,
1509			   pending_idx,
1510			   tx_unmap_op.host_addr,
1511			   tx_unmap_op.handle,
1512			   tx_unmap_op.status);
1513		BUG();
1514	}
1515}
1516
1517static inline int tx_work_todo(struct xenvif_queue *queue)
1518{
1519	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1520		return 1;
1521
1522	return 0;
1523}
1524
1525static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1526{
1527	return queue->dealloc_cons != queue->dealloc_prod;
1528}
1529
1530void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
1531{
1532	if (queue->tx.sring)
1533		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1534					queue->tx.sring);
1535	if (queue->rx.sring)
1536		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1537					queue->rx.sring);
1538}
1539
1540int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
1541				   grant_ref_t tx_ring_ref,
1542				   grant_ref_t rx_ring_ref)
1543{
1544	void *addr;
1545	struct xen_netif_tx_sring *txs;
1546	struct xen_netif_rx_sring *rxs;
1547	RING_IDX rsp_prod, req_prod;
1548	int err;
1549
1550	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1551				     &tx_ring_ref, 1, &addr);
1552	if (err)
1553		goto err;
1554
1555	txs = (struct xen_netif_tx_sring *)addr;
1556	rsp_prod = READ_ONCE(txs->rsp_prod);
1557	req_prod = READ_ONCE(txs->req_prod);
1558
1559	BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
1560
1561	err = -EIO;
1562	if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
1563		goto err;
1564
1565	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1566				     &rx_ring_ref, 1, &addr);
1567	if (err)
1568		goto err;
1569
1570	rxs = (struct xen_netif_rx_sring *)addr;
1571	rsp_prod = READ_ONCE(rxs->rsp_prod);
1572	req_prod = READ_ONCE(rxs->req_prod);
1573
1574	BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
1575
1576	err = -EIO;
1577	if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
1578		goto err;
1579
1580	return 0;
1581
1582err:
1583	xenvif_unmap_frontend_data_rings(queue);
1584	return err;
1585}
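     /* Rationale for the two "req_prod - rsp_prod > RING_SIZE" checks above:
      * a freshly created ring has req_prod == rsp_prod, so more than a full
      * ring's worth of outstanding requests can only come from a corrupted
      * or hostile shared page, and attaching is refused with -EIO.
      */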
1586
1587static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1588{
1589	/* Dealloc thread must remain running until all inflight
1590	 * packets complete.
1591	 */
1592	return kthread_should_stop() &&
1593		!atomic_read(&queue->inflight_packets);
1594}
1595
1596int xenvif_dealloc_kthread(void *data)
1597{
1598	struct xenvif_queue *queue = data;
1599
1600	for (;;) {
1601		wait_event_interruptible(queue->dealloc_wq,
1602					 tx_dealloc_work_todo(queue) ||
1603					 xenvif_dealloc_kthread_should_stop(queue));
1604		if (xenvif_dealloc_kthread_should_stop(queue))
1605			break;
1606
1607		xenvif_tx_dealloc_action(queue);
1608		cond_resched();
1609	}
1610
 1611	/* Unmap anything remaining */
1612	if (tx_dealloc_work_todo(queue))
1613		xenvif_tx_dealloc_action(queue);
1614
1615	return 0;
1616}
1617
1618static void make_ctrl_response(struct xenvif *vif,
1619			       const struct xen_netif_ctrl_request *req,
1620			       u32 status, u32 data)
1621{
1622	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
1623	struct xen_netif_ctrl_response rsp = {
1624		.id = req->id,
1625		.type = req->type,
1626		.status = status,
1627		.data = data,
1628	};
1629
1630	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
1631	vif->ctrl.rsp_prod_pvt = ++idx;
1632}
1633
1634static void push_ctrl_response(struct xenvif *vif)
1635{
1636	int notify;
1637
1638	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
1639	if (notify)
1640		notify_remote_via_irq(vif->ctrl_irq);
1641}
1642
1643static void process_ctrl_request(struct xenvif *vif,
1644				 const struct xen_netif_ctrl_request *req)
1645{
1646	u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
1647	u32 data = 0;
1648
1649	switch (req->type) {
1650	case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
1651		status = xenvif_set_hash_alg(vif, req->data[0]);
1652		break;
1653
1654	case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
1655		status = xenvif_get_hash_flags(vif, &data);
1656		break;
1657
1658	case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
1659		status = xenvif_set_hash_flags(vif, req->data[0]);
1660		break;
1661
1662	case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
1663		status = xenvif_set_hash_key(vif, req->data[0],
1664					     req->data[1]);
1665		break;
1666
1667	case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
1668		status = XEN_NETIF_CTRL_STATUS_SUCCESS;
1669		data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
1670		break;
1671
1672	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
1673		status = xenvif_set_hash_mapping_size(vif,
1674						      req->data[0]);
1675		break;
1676
1677	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
1678		status = xenvif_set_hash_mapping(vif, req->data[0],
1679						 req->data[1],
1680						 req->data[2]);
1681		break;
1682
1683	default:
1684		break;
1685	}
1686
1687	make_ctrl_response(vif, req, status, data);
1688	push_ctrl_response(vif);
1689}
1690
1691static void xenvif_ctrl_action(struct xenvif *vif)
1692{
1693	for (;;) {
1694		RING_IDX req_prod, req_cons;
1695
1696		req_prod = vif->ctrl.sring->req_prod;
1697		req_cons = vif->ctrl.req_cons;
1698
1699		/* Make sure we can see requests before we process them. */
1700		rmb();
1701
1702		if (req_cons == req_prod)
1703			break;
1704
1705		while (req_cons != req_prod) {
1706			struct xen_netif_ctrl_request req;
1707
1708			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
1709			req_cons++;
1710
1711			process_ctrl_request(vif, &req);
1712		}
1713
1714		vif->ctrl.req_cons = req_cons;
1715		vif->ctrl.sring->req_event = req_cons + 1;
1716	}
1717}
1718
1719static bool xenvif_ctrl_work_todo(struct xenvif *vif)
1720{
1721	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
1722		return true;
1723
1724	return false;
1725}
1726
1727irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
1728{
1729	struct xenvif *vif = data;
1730	unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
1731
1732	while (xenvif_ctrl_work_todo(vif)) {
1733		xenvif_ctrl_action(vif);
1734		eoi_flag = 0;
1735	}
1736
1737	xen_irq_lateeoi(irq, eoi_flag);
1738
1739	return IRQ_HANDLED;
1740}
1741
1742static int __init netback_init(void)
1743{
1744	int rc = 0;
1745
1746	if (!xen_domain())
1747		return -ENODEV;
1748
 1749	/* Allow as many queues as there are CPUs, but at most 8 if the user
 1750	 * has not specified a value.
1751	 */
1752	if (xenvif_max_queues == 0)
1753		xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
1754					  num_online_cpus());
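	/* Illustrative example: with 4 online CPUs and no max_queues=
	 * parameter, xenvif_max_queues becomes 4; with 16 CPUs it is
	 * capped at MAX_QUEUES_DEFAULT (8).
	 */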
1755
1756	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1757		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1758			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1759		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1760	}
1761
1762	rc = xenvif_xenbus_init();
1763	if (rc)
1764		goto failed_init;
1765
1766#ifdef CONFIG_DEBUG_FS
1767	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
1768#endif /* CONFIG_DEBUG_FS */
1769
1770	return 0;
1771
1772failed_init:
1773	return rc;
1774}
1775
1776module_init(netback_init);
1777
1778static void __exit netback_fini(void)
1779{
1780#ifdef CONFIG_DEBUG_FS
1781	debugfs_remove_recursive(xen_netback_dbg_root);
1782#endif /* CONFIG_DEBUG_FS */
1783	xenvif_xenbus_fini();
1784}
1785module_exit(netback_fini);
1786
1787MODULE_DESCRIPTION("Xen backend network device module");
1788MODULE_LICENSE("Dual BSD/GPL");
1789MODULE_ALIAS("xen-backend:vif");
v4.6
  34
  35#include "common.h"
  36
  37#include <linux/kthread.h>
  38#include <linux/if_vlan.h>
  39#include <linux/udp.h>
  40#include <linux/highmem.h>
  41
  42#include <net/tcp.h>
  43
  44#include <xen/xen.h>
  45#include <xen/events.h>
  46#include <xen/interface/memory.h>
  47#include <xen/page.h>
  48
  49#include <asm/xen/hypercall.h>
  50
  51/* Provide an option to disable split event channels at load time as
  52 * event channels are limited resource. Split event channels are
  53 * enabled by default.
  54 */
  55bool separate_tx_rx_irq = true;
  56module_param(separate_tx_rx_irq, bool, 0644);
  57
  58/* The time that packets can stay on the guest Rx internal queue
  59 * before they are dropped.
  60 */
  61unsigned int rx_drain_timeout_msecs = 10000;
  62module_param(rx_drain_timeout_msecs, uint, 0444);
  63
  64/* The length of time before the frontend is considered unresponsive
  65 * because it isn't providing Rx slots.
  66 */
  67unsigned int rx_stall_timeout_msecs = 60000;
  68module_param(rx_stall_timeout_msecs, uint, 0444);
  69
  70unsigned int xenvif_max_queues;
  71module_param_named(max_queues, xenvif_max_queues, uint, 0644);
  72MODULE_PARM_DESC(max_queues,
  73		 "Maximum number of queues per virtual interface");
  74
  75/*
  76 * This is the maximum slots a skb can have. If a guest sends a skb
  77 * which exceeds this limit it is considered malicious.
  78 */
  79#define FATAL_SKB_SLOTS_DEFAULT 20
  80static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
  81module_param(fatal_skb_slots, uint, 0444);
  82
  83/* The amount to copy out of the first guest Tx slot into the skb's
  84 * linear area.  If the first slot has more data, it will be mapped
  85 * and put into the first frag.
  86 *
  87 * This is sized to avoid pulling headers from the frags for most
  88 * TCP/IP packets.
  89 */
  90#define XEN_NETBACK_TX_COPY_LEN 128
  91
  92
  93static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
  94			       u8 status);
  95
  96static void make_tx_response(struct xenvif_queue *queue,
  97			     struct xen_netif_tx_request *txp,
  98			     unsigned int extra_count,
  99			     s8       st);
 100static void push_tx_responses(struct xenvif_queue *queue);
 101
 102static inline int tx_work_todo(struct xenvif_queue *queue);
 103
 104static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
 105					     u16      id,
 106					     s8       st,
 107					     u16      offset,
 108					     u16      size,
 109					     u16      flags);
 110
 111static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
 112				       u16 idx)
 113{
 114	return page_to_pfn(queue->mmap_pages[idx]);
 115}
 116
 117static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
 118					 u16 idx)
 119{
 120	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
 121}
 122
 123#define callback_param(vif, pending_idx) \
 124	(vif->pending_tx_info[pending_idx].callback_struct)
 125
 126/* Find the containing VIF's structure from a pointer in pending_tx_info array
 127 */
 128static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
 129{
 130	u16 pending_idx = ubuf->desc;
 131	struct pending_tx_info *temp =
 132		container_of(ubuf, struct pending_tx_info, callback_struct);
 133	return container_of(temp - pending_idx,
 134			    struct xenvif_queue,
 135			    pending_tx_info[0]);
 136}
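/*
 * Illustrative sketch of the pointer arithmetic above: callback_struct
 * is embedded in struct pending_tx_info, and the pending_tx_info[]
 * array is embedded in struct xenvif_queue, so two container_of() steps
 * recover the queue:
 *
 *	ubuf        == &queue->pending_tx_info[idx].callback_struct
 *	temp        == &queue->pending_tx_info[idx]
 *	temp - idx  == &queue->pending_tx_info[0]
 *	container_of(temp - idx, struct xenvif_queue, pending_tx_info[0])
 *	            == queue
 */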
 137
 138static u16 frag_get_pending_idx(skb_frag_t *frag)
 139{
 140	return (u16)frag->page_offset;
 141}
 142
 143static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
 144{
 145	frag->page_offset = pending_idx;
 146}
 147
 148static inline pending_ring_idx_t pending_index(unsigned i)
 149{
 150	return i & (MAX_PENDING_REQS-1);
 151}
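/*
 * Worked example (illustrative): the AND above is a cheap modulo, which
 * assumes MAX_PENDING_REQS is a power of two. With MAX_PENDING_REQS
 * == 256:
 *
 *	pending_index(255) == 255
 *	pending_index(256) == 0		(the ring wraps)
 *	pending_index(300) == 44	(300 % 256)
 *
 * A hypothetical compile-time guard for the power-of-two assumption:
 *
 *	BUILD_BUG_ON(MAX_PENDING_REQS & (MAX_PENDING_REQS - 1));
 */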
 152
 153static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 154{
 155	RING_IDX prod, cons;
 156	struct sk_buff *skb;
 157	int needed;
 158
 159	skb = skb_peek(&queue->rx_queue);
 160	if (!skb)
 161		return false;
 162
 163	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
 164	if (skb_is_gso(skb))
 165		needed++;
 166
 167	do {
 168		prod = queue->rx.sring->req_prod;
 169		cons = queue->rx.req_cons;
 170
 171		if (prod - cons >= needed)
 172			return true;
 173
 174		queue->rx.sring->req_event = prod + 1;
 175
 176		/* Make sure event is visible before we check prod
 177		 * again.
 178		 */
 179		mb();
 180	} while (queue->rx.sring->req_prod != prod);
 181
 182	return false;
 183}
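/*
 * Worked example (illustrative, assuming XEN_PAGE_SIZE == 4096): a
 * 9000 byte GSO skb at the head of the queue needs
 * DIV_ROUND_UP(9000, 4096) == 3 slots for its data plus one slot for
 * the GSO descriptor, so the function above only reports true once the
 * frontend has posted at least 4 unconsumed rx requests; otherwise it
 * arms req_event and rechecks to close the race with a concurrent
 * producer.
 */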
 184
 185void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 186{
 187	unsigned long flags;
 188
 189	spin_lock_irqsave(&queue->rx_queue.lock, flags);
 190
 191	__skb_queue_tail(&queue->rx_queue, skb);
 192
 193	queue->rx_queue_len += skb->len;
 194	if (queue->rx_queue_len > queue->rx_queue_max)
 195		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
 196
 197	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
 198}
 199
 200static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
 201{
 202	struct sk_buff *skb;
 203
 204	spin_lock_irq(&queue->rx_queue.lock);
 205
 206	skb = __skb_dequeue(&queue->rx_queue);
 207	if (skb)
 208		queue->rx_queue_len -= skb->len;
 209
 210	spin_unlock_irq(&queue->rx_queue.lock);
 211
 212	return skb;
 213}
 214
 215static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
 216{
 217	spin_lock_irq(&queue->rx_queue.lock);
 218
 219	if (queue->rx_queue_len < queue->rx_queue_max)
 220		netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
 221
 222	spin_unlock_irq(&queue->rx_queue.lock);
 223}
 224
 225
 226static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
 227{
 228	struct sk_buff *skb;
 229	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
 230		kfree_skb(skb);
 231}
 232
 233static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
 234{
 235	struct sk_buff *skb;
 236
 237	for (;;) {
 238		skb = skb_peek(&queue->rx_queue);
 239		if (!skb)
 240			break;
 241		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
 242			break;
 243		xenvif_rx_dequeue(queue);
 244		kfree_skb(skb);
 245	}
 246}
 247
 248struct netrx_pending_operations {
 249	unsigned copy_prod, copy_cons;
 250	unsigned meta_prod, meta_cons;
 251	struct gnttab_copy *copy;
 252	struct xenvif_rx_meta *meta;
 253	int copy_off;
 254	grant_ref_t copy_gref;
 255};
 256
 257static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 258						 struct netrx_pending_operations *npo)
 259{
 260	struct xenvif_rx_meta *meta;
 261	struct xen_netif_rx_request req;
 262
 263	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
 264
 265	meta = npo->meta + npo->meta_prod++;
 266	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 267	meta->gso_size = 0;
 268	meta->size = 0;
 269	meta->id = req.id;
 270
 271	npo->copy_off = 0;
 272	npo->copy_gref = req.gref;
 273
 274	return meta;
 275}
 276
 277struct gop_frag_copy {
 278	struct xenvif_queue *queue;
 279	struct netrx_pending_operations *npo;
 280	struct xenvif_rx_meta *meta;
 281	int head;
 282	int gso_type;
 283
 284	struct page *page;
 285};
 286
 287static void xenvif_setup_copy_gop(unsigned long gfn,
 288				  unsigned int offset,
 289				  unsigned int *len,
 290				  struct gop_frag_copy *info)
 291{
 292	struct gnttab_copy *copy_gop;
 293	struct xen_page_foreign *foreign;
 294	/* Convenient aliases */
 295	struct xenvif_queue *queue = info->queue;
 296	struct netrx_pending_operations *npo = info->npo;
 297	struct page *page = info->page;
 298
 299	BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
 300
 301	if (npo->copy_off == MAX_BUFFER_OFFSET)
 302		info->meta = get_next_rx_buffer(queue, npo);
 303
 304	if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
 305		*len = MAX_BUFFER_OFFSET - npo->copy_off;
 306
 307	copy_gop = npo->copy + npo->copy_prod++;
 308	copy_gop->flags = GNTCOPY_dest_gref;
 309	copy_gop->len = *len;
 310
 311	foreign = xen_page_foreign(page);
 312	if (foreign) {
 313		copy_gop->source.domid = foreign->domid;
 314		copy_gop->source.u.ref = foreign->gref;
 315		copy_gop->flags |= GNTCOPY_source_gref;
 316	} else {
 317		copy_gop->source.domid = DOMID_SELF;
 318		copy_gop->source.u.gmfn = gfn;
 319	}
 320	copy_gop->source.offset = offset;
 321
 322	copy_gop->dest.domid = queue->vif->domid;
 323	copy_gop->dest.offset = npo->copy_off;
 324	copy_gop->dest.u.ref = npo->copy_gref;
 325
 326	npo->copy_off += *len;
 327	info->meta->size += *len;
 328
 329	/* Leave a gap for the GSO descriptor. */
 330	if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
 331		queue->rx.req_cons++;
 332
 333	info->head = 0; /* There must be something in this buffer now */
 334}
 335
 336static void xenvif_gop_frag_copy_grant(unsigned long gfn,
 337				       unsigned offset,
 338				       unsigned int len,
 339				       void *data)
 340{
 341	unsigned int bytes;
 342
 343	while (len) {
 344		bytes = len;
 345		xenvif_setup_copy_gop(gfn, offset, &bytes, data);
 346		offset += bytes;
 347		len -= bytes;
 348	}
 349}
 350
 351/*
 352 * Set up the grant copy operations for this fragment. (The old
 353 * page-flipping receive mode is gone; netback now always copies.)
 354 */
 355static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
 356				 struct netrx_pending_operations *npo,
 357				 struct page *page, unsigned long size,
 358				 unsigned long offset, int *head)
 359{
 360	struct gop_frag_copy info = {
 361		.queue = queue,
 362		.npo = npo,
 363		.head = *head,
 364		.gso_type = XEN_NETIF_GSO_TYPE_NONE,
 365	};
 366	unsigned long bytes;
 367
 368	if (skb_is_gso(skb)) {
 369		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
 370			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
 371		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
 372			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
 373	}
 374
 375	/* Data must not cross a page boundary. */
 376	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
 377
 378	info.meta = npo->meta + npo->meta_prod - 1;
 379
 380	/* Skip unused pages at the start of a compound page */
 381	page += offset >> PAGE_SHIFT;
 382	offset &= ~PAGE_MASK;
 383
 384	while (size > 0) {
 385		BUG_ON(offset >= PAGE_SIZE);
 386
 387		bytes = PAGE_SIZE - offset;
 388		if (bytes > size)
 389			bytes = size;
 390
 391		info.page = page;
 392		gnttab_foreach_grant_in_range(page, offset, bytes,
 393					      xenvif_gop_frag_copy_grant,
 394					      &info);
 395		size -= bytes;
 396		offset = 0;
 397
 398		/* Next page */
 399		if (size) {
 400			BUG_ON(!PageCompound(page));
 401			page++;
 402		}
 403	}
 404
 405	*head = info.head;
 406}
 407
 408/*
 409 * Prepare an SKB to be transmitted to the frontend.
 410 *
 411 * This function is responsible for allocating grant operations, meta
 412 * structures, etc.
 413 *
 414 * It returns the number of meta structures consumed. The number of
 415 * ring slots used is always equal to the number of meta slots used
 416 * plus the number of GSO descriptors used. Currently, we use either
 417 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 418 * frontend-side LRO).
 419 */
 420static int xenvif_gop_skb(struct sk_buff *skb,
 421			  struct netrx_pending_operations *npo,
 422			  struct xenvif_queue *queue)
 423{
 424	struct xenvif *vif = netdev_priv(skb->dev);
 425	int nr_frags = skb_shinfo(skb)->nr_frags;
 426	int i;
 427	struct xen_netif_rx_request req;
 428	struct xenvif_rx_meta *meta;
 429	unsigned char *data;
 430	int head = 1;
 431	int old_meta_prod;
 432	int gso_type;
 433
 434	old_meta_prod = npo->meta_prod;
 435
 436	gso_type = XEN_NETIF_GSO_TYPE_NONE;
 437	if (skb_is_gso(skb)) {
 438		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
 439			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
 440		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
 441			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
 442	}
 443
 444	/* Set up a GSO prefix descriptor, if necessary */
 445	if ((1 << gso_type) & vif->gso_prefix_mask) {
 446		RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
 447		meta = npo->meta + npo->meta_prod++;
 448		meta->gso_type = gso_type;
 449		meta->gso_size = skb_shinfo(skb)->gso_size;
 450		meta->size = 0;
 451		meta->id = req.id;
 452	}
 453
 454	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
 455	meta = npo->meta + npo->meta_prod++;
 456
 457	if ((1 << gso_type) & vif->gso_mask) {
 458		meta->gso_type = gso_type;
 459		meta->gso_size = skb_shinfo(skb)->gso_size;
 460	} else {
 461		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
 462		meta->gso_size = 0;
 463	}
 464
 465	meta->size = 0;
 466	meta->id = req.id;
 467	npo->copy_off = 0;
 468	npo->copy_gref = req.gref;
 469
 470	data = skb->data;
 471	while (data < skb_tail_pointer(skb)) {
 472		unsigned int offset = offset_in_page(data);
 473		unsigned int len = PAGE_SIZE - offset;
 474
 475		if (data + len > skb_tail_pointer(skb))
 476			len = skb_tail_pointer(skb) - data;
 477
 478		xenvif_gop_frag_copy(queue, skb, npo,
 479				     virt_to_page(data), len, offset, &head);
 480		data += len;
 481	}
 482
 483	for (i = 0; i < nr_frags; i++) {
 484		xenvif_gop_frag_copy(queue, skb, npo,
 485				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
 486				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
 487				     skb_shinfo(skb)->frags[i].page_offset,
 488				     &head);
 489	}
 490
 491	return npo->meta_prod - old_meta_prod;
 492}
 493
 494/*
 495 * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
 496 * used to set up the operations on the top of
 497 * netrx_pending_operations, which have since been done.  Check that
 498 * they didn't give any errors and advance over them.
 499 */
 500static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
 501			    struct netrx_pending_operations *npo)
 502{
 503	struct gnttab_copy     *copy_op;
 504	int status = XEN_NETIF_RSP_OKAY;
 505	int i;
 506
 507	for (i = 0; i < nr_meta_slots; i++) {
 508		copy_op = npo->copy + npo->copy_cons++;
 509		if (copy_op->status != GNTST_okay) {
 510			netdev_dbg(vif->dev,
 511				   "Bad status %d from copy to DOM%d.\n",
 512				   copy_op->status, vif->domid);
 513			status = XEN_NETIF_RSP_ERROR;
 514		}
 515	}
 516
 517	return status;
 518}
 519
 520static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
 521				      struct xenvif_rx_meta *meta,
 522				      int nr_meta_slots)
 523{
 524	int i;
 525	unsigned long offset;
 526
 527	/* No fragments used */
 528	if (nr_meta_slots <= 1)
 529		return;
 530
 531	nr_meta_slots--;
 532
 533	for (i = 0; i < nr_meta_slots; i++) {
 534		int flags;
 535		if (i == nr_meta_slots - 1)
 536			flags = 0;
 537		else
 538			flags = XEN_NETRXF_more_data;
 539
 540		offset = 0;
 541		make_rx_response(queue, meta[i].id, status, offset,
 542				 meta[i].size, flags);
 543	}
 544}
 545
 546void xenvif_kick_thread(struct xenvif_queue *queue)
 547{
 548	wake_up(&queue->wq);
 549}
 550
 551static void xenvif_rx_action(struct xenvif_queue *queue)
 552{
 553	s8 status;
 554	u16 flags;
 555	struct xen_netif_rx_response *resp;
 556	struct sk_buff_head rxq;
 557	struct sk_buff *skb;
 558	LIST_HEAD(notify);
 559	int ret;
 560	unsigned long offset;
 561	bool need_to_notify = false;
 562
 563	struct netrx_pending_operations npo = {
 564		.copy  = queue->grant_copy_op,
 565		.meta  = queue->meta,
 566	};
 567
 568	skb_queue_head_init(&rxq);
 569
 570	while (xenvif_rx_ring_slots_available(queue)
 571	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
 572		queue->last_rx_time = jiffies;
 573
 574		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
 575
 576		__skb_queue_tail(&rxq, skb);
 577	}
 578
 579	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
 580
 581	if (!npo.copy_prod)
 582		goto done;
 583
 584	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
 585	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
 586
 587	while ((skb = __skb_dequeue(&rxq)) != NULL) {
 588
 589		if ((1 << queue->meta[npo.meta_cons].gso_type) &
 590		    queue->vif->gso_prefix_mask) {
 591			resp = RING_GET_RESPONSE(&queue->rx,
 592						 queue->rx.rsp_prod_pvt++);
 593
 594			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
 595
 596			resp->offset = queue->meta[npo.meta_cons].gso_size;
 597			resp->id = queue->meta[npo.meta_cons].id;
 598			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
 599
 600			npo.meta_cons++;
 601			XENVIF_RX_CB(skb)->meta_slots_used--;
 602		}
 603
 604
 605		queue->stats.tx_bytes += skb->len;
 606		queue->stats.tx_packets++;
 607
 608		status = xenvif_check_gop(queue->vif,
 609					  XENVIF_RX_CB(skb)->meta_slots_used,
 610					  &npo);
 611
 612		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
 613			flags = 0;
 614		else
 615			flags = XEN_NETRXF_more_data;
 616
 617		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
 618			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
 619		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 620			/* remote but checksummed. */
 621			flags |= XEN_NETRXF_data_validated;
 622
 623		offset = 0;
 624		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
 625					status, offset,
 626					queue->meta[npo.meta_cons].size,
 627					flags);
 628
 629		if ((1 << queue->meta[npo.meta_cons].gso_type) &
 630		    queue->vif->gso_mask) {
 631			struct xen_netif_extra_info *gso =
 632				(struct xen_netif_extra_info *)
 633				RING_GET_RESPONSE(&queue->rx,
 634						  queue->rx.rsp_prod_pvt++);
 635
 636			resp->flags |= XEN_NETRXF_extra_info;
 637
 638			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
 639			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
 640			gso->u.gso.pad = 0;
 641			gso->u.gso.features = 0;
 642
 643			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 644			gso->flags = 0;
 645		}
 646
 647		xenvif_add_frag_responses(queue, status,
 648					  queue->meta + npo.meta_cons + 1,
 649					  XENVIF_RX_CB(skb)->meta_slots_used);
 650
 651		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
 652
 653		need_to_notify |= !!ret;
 654
 655		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
 656		dev_kfree_skb(skb);
 657	}
 658
 659done:
 660	if (need_to_notify)
 661		notify_remote_via_irq(queue->rx_irq);
 662}
 663
 664void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
 665{
 666	int more_to_do;
 667
 668	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
 669
 670	if (more_to_do)
 671		napi_schedule(&queue->napi);
 672}
 673
 674static void tx_add_credit(struct xenvif_queue *queue)
 675{
 676	unsigned long max_burst, max_credit;
 677
 678	/*
 679	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
 680	 * Otherwise the interface can seize up due to insufficient credit.
 681	 */
 682	max_burst = max(131072UL, queue->credit_bytes);
 683
 684	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
 685	max_credit = queue->remaining_credit + queue->credit_bytes;
 686	if (max_credit < queue->remaining_credit)
 687		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
 688
 689	queue->remaining_credit = min(max_credit, max_burst);
 690}
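/*
 * Worked example (illustrative numbers): with credit_bytes == 100000
 * and remaining_credit == 50000, max_burst = max(131072, 100000) ==
 * 131072 and max_credit = 150000, so remaining_credit becomes
 * min(150000, 131072) == 131072. The 128 KiB floor keeps one maximal
 * jumbo frame sendable even under a small configured rate.
 */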
 691
 692void xenvif_tx_credit_callback(unsigned long data)
 693{
 694	struct xenvif_queue *queue = (struct xenvif_queue *)data;
 695	tx_add_credit(queue);
 696	xenvif_napi_schedule_or_enable_events(queue);
 697}
 698
 699static void xenvif_tx_err(struct xenvif_queue *queue,
 700			  struct xen_netif_tx_request *txp,
 701			  unsigned int extra_count, RING_IDX end)
 702{
 703	RING_IDX cons = queue->tx.req_cons;
 704	unsigned long flags;
 705
 706	do {
 707		spin_lock_irqsave(&queue->response_lock, flags);
 708		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
 709		push_tx_responses(queue);
 710		spin_unlock_irqrestore(&queue->response_lock, flags);
 711		if (cons == end)
 712			break;
 713		RING_COPY_REQUEST(&queue->tx, cons++, txp);
 714		extra_count = 0; /* only the first frag can have extras */
 715	} while (1);
 716	queue->tx.req_cons = cons;
 717}
 718
 719static void xenvif_fatal_tx_err(struct xenvif *vif)
 720{
 721	netdev_err(vif->dev, "fatal error; disabling device\n");
 722	vif->disabled = true;
 723	/* Disable the vif from queue 0's kthread */
 724	if (vif->queues)
 725		xenvif_kick_thread(&vif->queues[0]);
 726}
 727
 728static int xenvif_count_requests(struct xenvif_queue *queue,
 729				 struct xen_netif_tx_request *first,
 730				 unsigned int extra_count,
 731				 struct xen_netif_tx_request *txp,
 732				 int work_to_do)
 733{
 734	RING_IDX cons = queue->tx.req_cons;
 735	int slots = 0;
 736	int drop_err = 0;
 737	int more_data;
 738
 739	if (!(first->flags & XEN_NETTXF_more_data))
 740		return 0;
 741
 742	do {
 743		struct xen_netif_tx_request dropped_tx = { 0 };
 744
 745		if (slots >= work_to_do) {
 746			netdev_err(queue->vif->dev,
 747				   "Asked for %d slots but exceeds this limit\n",
 748				   work_to_do);
 749			xenvif_fatal_tx_err(queue->vif);
 750			return -ENODATA;
 751		}
 752
 753		/* This guest is really using too many slots and is
 754		 * considered malicious.
 755		 */
 756		if (unlikely(slots >= fatal_skb_slots)) {
 757			netdev_err(queue->vif->dev,
 758				   "Malicious frontend using %d slots, threshold %u\n",
 759				   slots, fatal_skb_slots);
 760			xenvif_fatal_tx_err(queue->vif);
 761			return -E2BIG;
 762		}
 763
 764		/* The Xen network protocol had an implicit dependency on
 765		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
 766		 * the historical MAX_SKB_FRAGS value of 18 to honor the
 767		 * same behavior as before. Any packet using more than
 768		 * 18 slots but fewer than fatal_skb_slots slots is
 769		 * dropped.
 770		 */
 771		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
 772			if (net_ratelimit())
 773				netdev_dbg(queue->vif->dev,
 774					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
 775					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
 776			drop_err = -E2BIG;
 777		}
 778
 779		if (drop_err)
 780			txp = &dropped_tx;
 781
 782		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
 783
 784		/* If the guest submitted a frame >= 64 KiB then
 785		 * first->size overflowed and following slots will
 786		 * appear to be larger than the frame.
 787		 *
 788		 * This cannot be fatal error as there are buggy
 789		 * frontends that do this.
 790		 *
 791		 * Consume all slots and drop the packet.
 792		 */
 793		if (!drop_err && txp->size > first->size) {
 794			if (net_ratelimit())
 795				netdev_dbg(queue->vif->dev,
 796					   "Invalid tx request, slot size %u > remaining size %u\n",
 797					   txp->size, first->size);
 798			drop_err = -EIO;
 799		}
 800
 801		first->size -= txp->size;
 802		slots++;
 803
 804		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
 805			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
 806				 txp->offset, txp->size);
 807			xenvif_fatal_tx_err(queue->vif);
 808			return -EINVAL;
 809		}
 810
 811		more_data = txp->flags & XEN_NETTXF_more_data;
 812
 813		if (!drop_err)
 814			txp++;
 815
 816	} while (more_data);
 817
 818	if (drop_err) {
 819		xenvif_tx_err(queue, first, extra_count, cons + slots);
 820		return drop_err;
 821	}
 822
 823	return slots;
 824}
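/*
 * Worked example (illustrative): a frame arrives as three slots, the
 * first advertising the total size (say 4000 bytes) and two more
 * carrying 1400 bytes each. The loop copies the two follow-on requests
 * into txp[], leaves first->size == 4000 - 1400 - 1400 == 1200 (the
 * first slot's own payload) and returns 2, the number of extra slots
 * consumed.
 */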
 825
 826
 827struct xenvif_tx_cb {
 828	u16 pending_idx;
 829};
 830
 831#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
 832
 833static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 834					   u16 pending_idx,
 835					   struct xen_netif_tx_request *txp,
 836					   unsigned int extra_count,
 837					   struct gnttab_map_grant_ref *mop)
 838{
 839	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
 840	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
 841			  GNTMAP_host_map | GNTMAP_readonly,
 842			  txp->gref, queue->vif->domid);
 843
 844	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
 845	       sizeof(*txp));
 846	queue->pending_tx_info[pending_idx].extra_count = extra_count;
 847}
 848
 849static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 850{
 851	struct sk_buff *skb =
 852		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 853			  GFP_ATOMIC | __GFP_NOWARN);
 854	if (unlikely(skb == NULL))
 855		return NULL;
 856
 857	/* Packets passed to netif_rx() must have some headroom. */
 858	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 859
 860	/* Initialize it here to avoid later surprises */
 861	skb_shinfo(skb)->destructor_arg = NULL;
 862
 863	return skb;
 864}
 865
 866static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
 867							struct sk_buff *skb,
 868							struct xen_netif_tx_request *txp,
 869							struct gnttab_map_grant_ref *gop,
 870							unsigned int frag_overflow,
 871							struct sk_buff *nskb)
 872{
 873	struct skb_shared_info *shinfo = skb_shinfo(skb);
 874	skb_frag_t *frags = shinfo->frags;
 875	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 876	int start;
 877	pending_ring_idx_t index;
 878	unsigned int nr_slots;
 879
 880	nr_slots = shinfo->nr_frags;
 881
 882	/* Skip the first skb fragment if it is on the same page as the header fragment. */
 883	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 884
 885	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
 886	     shinfo->nr_frags++, txp++, gop++) {
 887		index = pending_index(queue->pending_cons++);
 888		pending_idx = queue->pending_ring[index];
 889		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
 890		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
 891	}
 892
 893	if (frag_overflow) {
 894
 895		shinfo = skb_shinfo(nskb);
 896		frags = shinfo->frags;
 897
 898		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
 899		     shinfo->nr_frags++, txp++, gop++) {
 900			index = pending_index(queue->pending_cons++);
 901			pending_idx = queue->pending_ring[index];
 902			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
 903						gop);
 904			frag_set_pending_idx(&frags[shinfo->nr_frags],
 905					     pending_idx);
 906		}
 907
 908		skb_shinfo(skb)->frag_list = nskb;
 909	}
 910
 911	return gop;
 912}
 913
 914static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
 915					   u16 pending_idx,
 916					   grant_handle_t handle)
 917{
 918	if (unlikely(queue->grant_tx_handle[pending_idx] !=
 919		     NETBACK_INVALID_HANDLE)) {
 920		netdev_err(queue->vif->dev,
 921			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
 922			   pending_idx);
 923		BUG();
 924	}
 925	queue->grant_tx_handle[pending_idx] = handle;
 926}
 927
 928static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
 929					     u16 pending_idx)
 930{
 931	if (unlikely(queue->grant_tx_handle[pending_idx] ==
 932		     NETBACK_INVALID_HANDLE)) {
 933		netdev_err(queue->vif->dev,
 934			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
 935			   pending_idx);
 936		BUG();
 937	}
 938	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
 939}
 940
 941static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 942			       struct sk_buff *skb,
 943			       struct gnttab_map_grant_ref **gopp_map,
 944			       struct gnttab_copy **gopp_copy)
 945{
 946	struct gnttab_map_grant_ref *gop_map = *gopp_map;
 947	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 948	/* This always points to the shinfo of the skb being checked, which
 949	 * could be either the first or the one on the frag_list
 950	 */
 951	struct skb_shared_info *shinfo = skb_shinfo(skb);
 952	/* If this is non-NULL, we are currently checking the frag_list skb, and
 953	 * this points to the shinfo of the first one
 954	 */
 955	struct skb_shared_info *first_shinfo = NULL;
 956	int nr_frags = shinfo->nr_frags;
 957	const bool sharedslot = nr_frags &&
 958				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
 959	int i, err;
 960
 961	/* Check status of header. */
 962	err = (*gopp_copy)->status;
 963	if (unlikely(err)) {
 964		if (net_ratelimit())
 965			netdev_dbg(queue->vif->dev,
 966				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
 967				   (*gopp_copy)->status,
 968				   pending_idx,
 969				   (*gopp_copy)->source.u.ref);
 970		/* The first frag might still have this slot mapped */
 971		if (!sharedslot)
 972			xenvif_idx_release(queue, pending_idx,
 973					   XEN_NETIF_RSP_ERROR);
 974	}
 975	(*gopp_copy)++;
 976
 977check_frags:
 978	for (i = 0; i < nr_frags; i++, gop_map++) {
 979		int j, newerr;
 980
 981		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 982
 983		/* Check error status: if okay then remember grant handle. */
 984		newerr = gop_map->status;
 985
 986		if (likely(!newerr)) {
 987			xenvif_grant_handle_set(queue,
 988						pending_idx,
 989						gop_map->handle);
 990			/* Had a previous error? Invalidate this fragment. */
 991			if (unlikely(err)) {
 992				xenvif_idx_unmap(queue, pending_idx);
 993				/* If the mapping of the first frag was OK, but
 994				 * the header's copy failed, and they are
 995				 * sharing a slot, send an error
 996				 */
 997				if (i == 0 && sharedslot)
 998					xenvif_idx_release(queue, pending_idx,
 999							   XEN_NETIF_RSP_ERROR);
1000				else
1001					xenvif_idx_release(queue, pending_idx,
1002							   XEN_NETIF_RSP_OKAY);
1003			}
1004			continue;
1005		}
1006
1007		/* Error on this fragment: respond to client with an error. */
1008		if (net_ratelimit())
1009			netdev_dbg(queue->vif->dev,
1010				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
1011				   i,
1012				   gop_map->status,
1013				   pending_idx,
1014				   gop_map->ref);
1015
1016		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1017
1018		/* Not the first error? Preceding frags already invalidated. */
1019		if (err)
1020			continue;
1021
1022		/* First error: if the header hasn't shared a slot with the
1023		 * first frag, release it as well.
1024		 */
1025		if (!sharedslot)
1026			xenvif_idx_release(queue,
1027					   XENVIF_TX_CB(skb)->pending_idx,
1028					   XEN_NETIF_RSP_OKAY);
1029
1030		/* Invalidate preceding fragments of this skb. */
1031		for (j = 0; j < i; j++) {
1032			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1033			xenvif_idx_unmap(queue, pending_idx);
1034			xenvif_idx_release(queue, pending_idx,
1035					   XEN_NETIF_RSP_OKAY);
1036		}
1037
1038		/* And if we found the error while checking the frag_list, unmap
1039		 * the first skb's frags
1040		 */
1041		if (first_shinfo) {
1042			for (j = 0; j < first_shinfo->nr_frags; j++) {
1043				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1044				xenvif_idx_unmap(queue, pending_idx);
1045				xenvif_idx_release(queue, pending_idx,
1046						   XEN_NETIF_RSP_OKAY);
1047			}
1048		}
1049
1050		/* Remember the error: invalidate all subsequent fragments. */
1051		err = newerr;
1052	}
1053
1054	if (skb_has_frag_list(skb) && !first_shinfo) {
1055		first_shinfo = skb_shinfo(skb);
1056		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1057		nr_frags = shinfo->nr_frags;
1058
1059		goto check_frags;
1060	}
1061
1062	*gopp_map = gop_map;
1063	return err;
1064}
1065
1066static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1067{
1068	struct skb_shared_info *shinfo = skb_shinfo(skb);
1069	int nr_frags = shinfo->nr_frags;
1070	int i;
1071	u16 prev_pending_idx = INVALID_PENDING_IDX;
1072
1073	for (i = 0; i < nr_frags; i++) {
1074		skb_frag_t *frag = shinfo->frags + i;
1075		struct xen_netif_tx_request *txp;
1076		struct page *page;
1077		u16 pending_idx;
1078
1079		pending_idx = frag_get_pending_idx(frag);
1080
1081		/* If this is not the first frag, chain it to the previous. */
1082		if (prev_pending_idx == INVALID_PENDING_IDX)
1083			skb_shinfo(skb)->destructor_arg =
1084				&callback_param(queue, pending_idx);
1085		else
1086			callback_param(queue, prev_pending_idx).ctx =
1087				&callback_param(queue, pending_idx);
1088
1089		callback_param(queue, pending_idx).ctx = NULL;
1090		prev_pending_idx = pending_idx;
1091
1092		txp = &queue->pending_tx_info[pending_idx].req;
1093		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
1094		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1095		skb->len += txp->size;
1096		skb->data_len += txp->size;
1097		skb->truesize += txp->size;
1098
1099		/* Take an extra reference to offset the network stack's put_page */
1100		get_page(queue->mmap_pages[pending_idx]);
1101	}
1102}
1103
1104static int xenvif_get_extras(struct xenvif_queue *queue,
1105			     struct xen_netif_extra_info *extras,
1106			     unsigned int *extra_count,
1107			     int work_to_do)
1108{
1109	struct xen_netif_extra_info extra;
1110	RING_IDX cons = queue->tx.req_cons;
1111
1112	do {
1113		if (unlikely(work_to_do-- <= 0)) {
1114			netdev_err(queue->vif->dev, "Missing extra info\n");
1115			xenvif_fatal_tx_err(queue->vif);
1116			return -EBADR;
1117		}
1118
1119		RING_COPY_REQUEST(&queue->tx, cons, &extra);
1120
1121		queue->tx.req_cons = ++cons;
1122		(*extra_count)++;
1123
1124		if (unlikely(!extra.type ||
1125			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1126			netdev_err(queue->vif->dev,
1127				   "Invalid extra type: %d\n", extra.type);
1128			xenvif_fatal_tx_err(queue->vif);
1129			return -EINVAL;
1130		}
1131
1132		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1133	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1134
1135	return work_to_do;
1136}
1137
1138static int xenvif_set_skb_gso(struct xenvif *vif,
1139			      struct sk_buff *skb,
1140			      struct xen_netif_extra_info *gso)
1141{
1142	if (!gso->u.gso.size) {
1143		netdev_err(vif->dev, "GSO size must not be zero.\n");
1144		xenvif_fatal_tx_err(vif);
1145		return -EINVAL;
1146	}
1147
1148	switch (gso->u.gso.type) {
1149	case XEN_NETIF_GSO_TYPE_TCPV4:
1150		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1151		break;
1152	case XEN_NETIF_GSO_TYPE_TCPV6:
1153		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1154		break;
1155	default:
1156		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1157		xenvif_fatal_tx_err(vif);
1158		return -EINVAL;
1159	}
1160
1161	skb_shinfo(skb)->gso_size = gso->u.gso.size;
1162	/* gso_segs will be calculated later */
1163
1164	return 0;
1165}
1166
1167static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
1168{
1169	bool recalculate_partial_csum = false;
1170
1171	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1172	 * peers can fail to set NETRXF_csum_blank when sending a GSO
1173	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1174	 * recalculate the partial checksum.
1175	 */
1176	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1177		queue->stats.rx_gso_checksum_fixup++;
1178		skb->ip_summed = CHECKSUM_PARTIAL;
1179		recalculate_partial_csum = true;
1180	}
1181
1182	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1183	if (skb->ip_summed != CHECKSUM_PARTIAL)
1184		return 0;
1185
1186	return skb_checksum_setup(skb, recalculate_partial_csum);
1187}
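/*
 * Illustrative example: a guest sends a TCPv4 GSO frame but neglects
 * NETTXF_csum_blank, so the skb arrives here without CHECKSUM_PARTIAL.
 * The fixup above forces CHECKSUM_PARTIAL (counted in
 * rx_gso_checksum_fixup), and skb_checksum_setup() then recomputes the
 * pseudo-header checksum and points csum_start/csum_offset at the TCP
 * checksum field, so later GSO segmentation emits valid segments.
 */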
1188
1189static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1190{
1191	u64 now = get_jiffies_64();
1192	u64 next_credit = queue->credit_window_start +
1193		msecs_to_jiffies(queue->credit_usec / 1000);
1194
1195	/* Timer could already be pending in rare cases. */
1196	if (timer_pending(&queue->credit_timeout))
1197		return true;
1198
1199	/* Passed the point where we can replenish credit? */
1200	if (time_after_eq64(now, next_credit)) {
1201		queue->credit_window_start = now;
1202		tx_add_credit(queue);
1203	}
1204
1205	/* Still too big to send right now? Set a callback. */
1206	if (size > queue->remaining_credit) {
1207		queue->credit_timeout.data     =
1208			(unsigned long)queue;
1209		mod_timer(&queue->credit_timeout,
1210			  next_credit);
1211		queue->credit_window_start = next_credit;
1212
1213		return true;
1214	}
1215
1216	return false;
1217}
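/*
 * Illustrative example (made-up numbers): with credit_bytes == 10000
 * and credit_usec == 1000000, a 25000 byte request finds too little
 * credit, so the timer is armed for the end of the current window and
 * remaining_credit grows by 10000 per window (capped at the 131072 byte
 * burst) until the packet fits; each timer tick runs
 * xenvif_tx_credit_callback(), which re-schedules NAPI to retry.
 */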
1218
1219/* No locking is required in xenvif_mcast_add/del() as they are
1220 * only ever invoked from NAPI poll. An RCU list is used because
1221 * xenvif_mcast_match() is called asynchronously, during start_xmit.
1222 */
1223
1224static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
1225{
1226	struct xenvif_mcast_addr *mcast;
1227
1228	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
1229		if (net_ratelimit())
1230			netdev_err(vif->dev,
1231				   "Too many multicast addresses\n");
1232		return -ENOSPC;
1233	}
1234
1235	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
1236	if (!mcast)
1237		return -ENOMEM;
1238
1239	ether_addr_copy(mcast->addr, addr);
1240	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
1241	vif->fe_mcast_count++;
1242
1243	return 0;
1244}
1245
1246static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
1247{
1248	struct xenvif_mcast_addr *mcast;
1249
1250	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
1251		if (ether_addr_equal(addr, mcast->addr)) {
1252			--vif->fe_mcast_count;
1253			list_del_rcu(&mcast->entry);
1254			kfree_rcu(mcast, rcu);
1255			break;
1256		}
1257	}
1258}
1259
1260bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
1261{
1262	struct xenvif_mcast_addr *mcast;
1263
1264	rcu_read_lock();
1265	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
1266		if (ether_addr_equal(addr, mcast->addr)) {
1267			rcu_read_unlock();
1268			return true;
1269		}
1270	}
1271	rcu_read_unlock();
1272
1273	return false;
1274}
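/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of
 * how a transmit path can use xenvif_mcast_match() without taking any
 * lock, relying on the RCU rules documented above. It assumes the
 * Ethernet header sits at skb->data, as it does in start_xmit.
 */
static bool __maybe_unused example_mcast_filter(struct xenvif *vif,
						const struct sk_buff *skb)
{
	const struct ethhdr *eth = (const struct ethhdr *)skb->data;

	/* Unicast frames are never filtered by the frontend's list. */
	if (!is_multicast_ether_addr(eth->h_dest))
		return true;

	/* The RCU read lock is taken inside xenvif_mcast_match(). */
	return xenvif_mcast_match(vif, eth->h_dest);
}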
1275
1276void xenvif_mcast_addr_list_free(struct xenvif *vif)
1277{
1278	/* No need for locking or RCU here. NAPI poll and TX queue
1279	 * are stopped.
1280	 */
1281	while (!list_empty(&vif->fe_mcast_addr)) {
1282		struct xenvif_mcast_addr *mcast;
1283
1284		mcast = list_first_entry(&vif->fe_mcast_addr,
1285					 struct xenvif_mcast_addr,
1286					 entry);
1287		--vif->fe_mcast_count;
1288		list_del(&mcast->entry);
1289		kfree(mcast);
1290	}
1291}
1292
1293static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1294				     int budget,
1295				     unsigned *copy_ops,
1296				     unsigned *map_ops)
1297{
1298	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
1299	struct sk_buff *skb, *nskb;
1300	int ret;
1301	unsigned int frag_overflow;
1302
1303	while (skb_queue_len(&queue->tx_queue) < budget) {
1304		struct xen_netif_tx_request txreq;
1305		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1306		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1307		unsigned int extra_count;
1308		u16 pending_idx;
1309		RING_IDX idx;
1310		int work_to_do;
1311		unsigned int data_len;
1312		pending_ring_idx_t index;
1313
1314		if (queue->tx.sring->req_prod - queue->tx.req_cons >
1315		    XEN_NETIF_TX_RING_SIZE) {
1316			netdev_err(queue->vif->dev,
1317				   "Impossible number of requests. "
1318				   "req_prod %d, req_cons %d, size %ld\n",
1319				   queue->tx.sring->req_prod, queue->tx.req_cons,
1320				   XEN_NETIF_TX_RING_SIZE);
1321			xenvif_fatal_tx_err(queue->vif);
1322			break;
1323		}
1324
1325		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
1326		if (!work_to_do)
1327			break;
1328
1329		idx = queue->tx.req_cons;
1330		rmb(); /* Ensure that we see the request before we copy it. */
1331		RING_COPY_REQUEST(&queue->tx, idx, &txreq);
1332
1333		/* Credit-based scheduling. */
1334		if (txreq.size > queue->remaining_credit &&
1335		    tx_credit_exceeded(queue, txreq.size))
1336			break;
1337
1338		queue->remaining_credit -= txreq.size;
1339
1340		work_to_do--;
1341		queue->tx.req_cons = ++idx;
1342
1343		memset(extras, 0, sizeof(extras));
1344		extra_count = 0;
1345		if (txreq.flags & XEN_NETTXF_extra_info) {
1346			work_to_do = xenvif_get_extras(queue, extras,
1347						       &extra_count,
1348						       work_to_do);
1349			idx = queue->tx.req_cons;
1350			if (unlikely(work_to_do < 0))
1351				break;
1352		}
1353
1354		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
1355			struct xen_netif_extra_info *extra;
1356
1357			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
1358			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
1359
1360			make_tx_response(queue, &txreq, extra_count,
1361					 (ret == 0) ?
1362					 XEN_NETIF_RSP_OKAY :
1363					 XEN_NETIF_RSP_ERROR);
1364			push_tx_responses(queue);
1365			continue;
1366		}
1367
1368		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
1369			struct xen_netif_extra_info *extra;
1370
1371			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
1372			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
1373
1374			make_tx_response(queue, &txreq, extra_count,
1375					 XEN_NETIF_RSP_OKAY);
1376			push_tx_responses(queue);
1377			continue;
1378		}
1379
1380		ret = xenvif_count_requests(queue, &txreq, extra_count,
1381					    txfrags, work_to_do);
1382		if (unlikely(ret < 0))
1383			break;
1384
1385		idx += ret;
1386
1387		if (unlikely(txreq.size < ETH_HLEN)) {
1388			netdev_dbg(queue->vif->dev,
1389				   "Bad packet size: %d\n", txreq.size);
1390			xenvif_tx_err(queue, &txreq, extra_count, idx);
1391			break;
1392		}
1393
1394		/* The payload must not cross a page boundary, as it must not fragment. */
1395		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
1396			netdev_err(queue->vif->dev,
1397				   "txreq.offset: %u, size: %u, end: %lu\n",
1398				   txreq.offset, txreq.size,
1399				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
1400			xenvif_fatal_tx_err(queue->vif);
1401			break;
1402		}
1403
1404		index = pending_index(queue->pending_cons);
1405		pending_idx = queue->pending_ring[index];
1406
1407		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
1408			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1409			XEN_NETBACK_TX_COPY_LEN : txreq.size;
1410
1411		skb = xenvif_alloc_skb(data_len);
1412		if (unlikely(skb == NULL)) {
1413			netdev_dbg(queue->vif->dev,
1414				   "Can't allocate a skb in start_xmit.\n");
1415			xenvif_tx_err(queue, &txreq, extra_count, idx);
1416			break;
1417		}
1418
1419		skb_shinfo(skb)->nr_frags = ret;
1420		if (data_len < txreq.size)
1421			skb_shinfo(skb)->nr_frags++;
1422		/* At this point shinfo->nr_frags is in fact the number of
1423		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1424		 */
1425		frag_overflow = 0;
1426		nskb = NULL;
1427		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1428			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1429			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1430			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1431			nskb = xenvif_alloc_skb(0);
1432			if (unlikely(nskb == NULL)) {
1433				kfree_skb(skb);
1434				xenvif_tx_err(queue, &txreq, extra_count, idx);
1435				if (net_ratelimit())
1436					netdev_err(queue->vif->dev,
1437						   "Can't allocate the frag_list skb.\n");
1438				break;
1439			}
1440		}
1441
1442		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1443			struct xen_netif_extra_info *gso;
1444			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1445
1446			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1447				/* Failure in xenvif_set_skb_gso is fatal. */
1448				kfree_skb(skb);
1449				kfree_skb(nskb);
1450				break;
1451			}
1452		}
1453
1454		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1455
1456		__skb_put(skb, data_len);
1457		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
1458		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
1459		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
1460
1461		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
1462			virt_to_gfn(skb->data);
1463		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
1464		queue->tx_copy_ops[*copy_ops].dest.offset =
1465			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
1466
1467		queue->tx_copy_ops[*copy_ops].len = data_len;
1468		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1469
1470		(*copy_ops)++;
1471
1472		if (data_len < txreq.size) {
1473			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1474					     pending_idx);
1475			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
1476						extra_count, gop);
1477			gop++;
1478		} else {
1479			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1480					     INVALID_PENDING_IDX);
1481			memcpy(&queue->pending_tx_info[pending_idx].req,
1482			       &txreq, sizeof(txreq));
1483			queue->pending_tx_info[pending_idx].extra_count =
1484				extra_count;
1485		}
1486
1487		queue->pending_cons++;
1488
1489		gop = xenvif_get_requests(queue, skb, txfrags, gop,
1490				          frag_overflow, nskb);
1491
1492		__skb_queue_tail(&queue->tx_queue, skb);
1493
1494		queue->tx.req_cons = idx;
1495
1496		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1497		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1498			break;
1499	}
1500
1501	(*map_ops) = gop - queue->tx_map_ops;
1502	return;
1503}
1504
1505/* Consolidate an skb with a frag_list into a brand new one with local pages
1506 * on its frags. Returns 0, or -ENOMEM if new pages can't be allocated.
1507 */
1508static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1509{
1510	unsigned int offset = skb_headlen(skb);
1511	skb_frag_t frags[MAX_SKB_FRAGS];
1512	int i, f;
1513	struct ubuf_info *uarg;
1514	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1515
1516	queue->stats.tx_zerocopy_sent += 2;
1517	queue->stats.tx_frag_overflow++;
1518
1519	xenvif_fill_frags(queue, nskb);
1520	/* Subtract the frags' size; we will correct it later. */
1521	skb->truesize -= skb->data_len;
1522	skb->len += nskb->len;
1523	skb->data_len += nskb->len;
1524
1525	/* create a brand new frags array and coalesce there */
1526	for (i = 0; offset < skb->len; i++) {
1527		struct page *page;
1528		unsigned int len;
1529
1530		BUG_ON(i >= MAX_SKB_FRAGS);
1531		page = alloc_page(GFP_ATOMIC);
1532		if (!page) {
1533			int j;
1534			skb->truesize += skb->data_len;
1535			for (j = 0; j < i; j++)
1536				put_page(frags[j].page.p);
1537			return -ENOMEM;
1538		}
1539
1540		if (offset + PAGE_SIZE < skb->len)
1541			len = PAGE_SIZE;
1542		else
1543			len = skb->len - offset;
1544		if (skb_copy_bits(skb, offset, page_address(page), len))
1545			BUG();
1546
1547		offset += len;
1548		frags[i].page.p = page;
1549		frags[i].page_offset = 0;
1550		skb_frag_size_set(&frags[i], len);
1551	}
1552
1553	/* Copied all the bits from the frag list -- free it. */
1554	skb_frag_list_init(skb);
1555	xenvif_skb_zerocopy_prepare(queue, nskb);
1556	kfree_skb(nskb);
1557
1558	/* Release all the original (foreign) frags. */
1559	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1560		skb_frag_unref(skb, f);
1561	uarg = skb_shinfo(skb)->destructor_arg;
1562	/* increase inflight counter to offset decrement in callback */
1563	atomic_inc(&queue->inflight_packets);
1564	uarg->callback(uarg, true);
1565	skb_shinfo(skb)->destructor_arg = NULL;
1566
1567	/* Fill the skb with the new (local) frags. */
1568	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1569	skb_shinfo(skb)->nr_frags = i;
1570	skb->truesize += i * PAGE_SIZE;
1571
1572	return 0;
1573}
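/*
 * Illustrative note: after the coalesce above the skb owns only local
 * pages, so the foreign grants backing the original frags can be
 * unmapped right away via uarg->callback() instead of staying pinned
 * until the network stack eventually frees the skb somewhere netback
 * cannot control.
 */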
1574
1575static int xenvif_tx_submit(struct xenvif_queue *queue)
1576{
1577	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1578	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1579	struct sk_buff *skb;
1580	int work_done = 0;
1581
1582	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1583		struct xen_netif_tx_request *txp;
1584		u16 pending_idx;
1585		unsigned data_len;
1586
1587		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1588		txp = &queue->pending_tx_info[pending_idx].req;
1589
1590		/* Check the remap error code. */
1591		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1592			/* If there was an error, xenvif_tx_check_gop is
1593			 * expected to release all the frags which were mapped,
1594			 * so kfree_skb shouldn't do it again
1595			 */
1596			skb_shinfo(skb)->nr_frags = 0;
1597			if (skb_has_frag_list(skb)) {
1598				struct sk_buff *nskb =
1599						skb_shinfo(skb)->frag_list;
1600				skb_shinfo(nskb)->nr_frags = 0;
1601			}
1602			kfree_skb(skb);
1603			continue;
1604		}
1605
1606		data_len = skb->len;
1607		callback_param(queue, pending_idx).ctx = NULL;
1608		if (data_len < txp->size) {
1609			/* Append the packet payload as a fragment. */
1610			txp->offset += data_len;
1611			txp->size -= data_len;
1612		} else {
1613			/* Schedule a response immediately. */
1614			xenvif_idx_release(queue, pending_idx,
1615					   XEN_NETIF_RSP_OKAY);
1616		}
1617
1618		if (txp->flags & XEN_NETTXF_csum_blank)
1619			skb->ip_summed = CHECKSUM_PARTIAL;
1620		else if (txp->flags & XEN_NETTXF_data_validated)
1621			skb->ip_summed = CHECKSUM_UNNECESSARY;
1622
1623		xenvif_fill_frags(queue, skb);
1624
1625		if (unlikely(skb_has_frag_list(skb))) {
1626			if (xenvif_handle_frag_list(queue, skb)) {
1627				if (net_ratelimit())
1628					netdev_err(queue->vif->dev,
1629						   "Not enough memory to consolidate frag_list!\n");
1630				xenvif_skb_zerocopy_prepare(queue, skb);
1631				kfree_skb(skb);
1632				continue;
1633			}
1634		}
1635
1636		skb->dev      = queue->vif->dev;
1637		skb->protocol = eth_type_trans(skb, skb->dev);
1638		skb_reset_network_header(skb);
1639
1640		if (checksum_setup(queue, skb)) {
1641			netdev_dbg(queue->vif->dev,
1642				   "Can't setup checksum in net_tx_action\n");
1643			/* We have to set this flag to trigger the callback */
1644			if (skb_shinfo(skb)->destructor_arg)
1645				xenvif_skb_zerocopy_prepare(queue, skb);
1646			kfree_skb(skb);
1647			continue;
1648		}
1649
1650		skb_probe_transport_header(skb, 0);
1651
1652		/* If the packet is GSO then we will have just set up the
1653		 * transport header offset in checksum_setup so it's now
1654		 * straightforward to calculate gso_segs.
1655		 */
1656		if (skb_is_gso(skb)) {
1657			int mss = skb_shinfo(skb)->gso_size;
1658			int hdrlen = skb_transport_header(skb) -
1659				skb_mac_header(skb) +
1660				tcp_hdrlen(skb);
1661
1662			skb_shinfo(skb)->gso_segs =
1663				DIV_ROUND_UP(skb->len - hdrlen, mss);
1664		}
1665
1666		queue->stats.rx_bytes += skb->len;
1667		queue->stats.rx_packets++;
1668
1669		work_done++;
1670
1671		/* Set this flag right before netif_receive_skb, otherwise
1672		 * someone might think this packet already left netback, and
1673		 * do a skb_copy_ubufs while we are still in control of the
1674		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
1675		 */
1676		if (skb_shinfo(skb)->destructor_arg) {
1677			xenvif_skb_zerocopy_prepare(queue, skb);
1678			queue->stats.tx_zerocopy_sent++;
1679		}
1680
1681		netif_receive_skb(skb);
1682	}
1683
1684	return work_done;
1685}
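/*
 * Worked example for the gso_segs computation above (illustrative
 * numbers): for a TCPv4 frame with 14 byte Ethernet + 20 byte IP +
 * 32 byte TCP headers, hdrlen == 66; with gso_size (MSS) == 1448 and
 * skb->len == 66 + 14480, gso_segs == DIV_ROUND_UP(14480, 1448) == 10.
 */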
1686
1687void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1688{
1689	unsigned long flags;
1690	pending_ring_idx_t index;
1691	struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1692
1693	/* This is the only place where we grab this lock, to protect callbacks
1694	 * from each other.
1695	 */
1696	spin_lock_irqsave(&queue->callback_lock, flags);
1697	do {
1698		u16 pending_idx = ubuf->desc;
1699		ubuf = (struct ubuf_info *) ubuf->ctx;
1700		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1701			MAX_PENDING_REQS);
1702		index = pending_index(queue->dealloc_prod);
1703		queue->dealloc_ring[index] = pending_idx;
1704		/* Sync with xenvif_tx_dealloc_action:
1705		 * insert idx then incr producer.
1706		 */
1707		smp_wmb();
1708		queue->dealloc_prod++;
1709	} while (ubuf);
1710	spin_unlock_irqrestore(&queue->callback_lock, flags);
1711
1712	if (likely(zerocopy_success))
1713		queue->stats.tx_zerocopy_success++;
1714	else
1715		queue->stats.tx_zerocopy_fail++;
1716	xenvif_skb_zerocopy_complete(queue);
1717}
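/*
 * Illustrative note: the ubuf->ctx chain walked above was linked up by
 * xenvif_fill_frags(), so one callback visits every pending_idx of the
 * skb. The smp_wmb() pairs with the smp_rmb() in
 * xenvif_tx_dealloc_action(): the dealloc thread never observes the
 * advanced dealloc_prod without also seeing the index stored in the
 * ring slot.
 */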
1718
1719static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1720{
1721	struct gnttab_unmap_grant_ref *gop;
1722	pending_ring_idx_t dc, dp;
1723	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1724	unsigned int i = 0;
1725
1726	dc = queue->dealloc_cons;
1727	gop = queue->tx_unmap_ops;
1728
1729	/* Free up any grants we have finished using */
1730	do {
1731		dp = queue->dealloc_prod;
1732
1733		/* Ensure we see all indices enqueued by all
1734		 * xenvif_zerocopy_callback().
1735		 */
1736		smp_rmb();
1737
1738		while (dc != dp) {
1739			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1740			pending_idx =
1741				queue->dealloc_ring[pending_index(dc++)];
1742
1743			pending_idx_release[gop - queue->tx_unmap_ops] =
1744				pending_idx;
1745			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1746				queue->mmap_pages[pending_idx];
1747			gnttab_set_unmap_op(gop,
1748					    idx_to_kaddr(queue, pending_idx),
1749					    GNTMAP_host_map,
1750					    queue->grant_tx_handle[pending_idx]);
1751			xenvif_grant_handle_reset(queue, pending_idx);
1752			++gop;
1753		}
1754
1755	} while (dp != queue->dealloc_prod);
1756
1757	queue->dealloc_cons = dc;
1758
1759	if (gop - queue->tx_unmap_ops > 0) {
1760		int ret;
1761		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1762					NULL,
1763					queue->pages_to_unmap,
1764					gop - queue->tx_unmap_ops);
1765		if (ret) {
1766			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1767				   gop - queue->tx_unmap_ops, ret);
1768			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1769				if (gop[i].status != GNTST_okay)
1770					netdev_err(queue->vif->dev,
1771						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
1772						   gop[i].host_addr,
1773						   gop[i].handle,
1774						   gop[i].status);
1775			}
1776			BUG();
1777		}
1778	}
1779
1780	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1781		xenvif_idx_release(queue, pending_idx_release[i],
1782				   XEN_NETIF_RSP_OKAY);
1783}
1784
1785
1786/* Called after netfront has transmitted */
1787int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1788{
1789	unsigned nr_mops, nr_cops = 0;
1790	int work_done, ret;
1791
1792	if (unlikely(!tx_work_todo(queue)))
1793		return 0;
1794
1795	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1796
1797	if (nr_cops == 0)
1798		return 0;
1799
1800	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1801	if (nr_mops != 0) {
1802		ret = gnttab_map_refs(queue->tx_map_ops,
1803				      NULL,
1804				      queue->pages_to_map,
1805				      nr_mops);
1806		BUG_ON(ret);
1807	}
1808
1809	work_done = xenvif_tx_submit(queue);
1810
1811	return work_done;
1812}
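/*
 * Illustrative note: the copy and map operations built by
 * xenvif_tx_build_gops() are issued as two batched hypercalls before
 * any skb is inspected, so xenvif_tx_submit() only has to walk the
 * status fields; a grant failure is handled per skb in
 * xenvif_tx_check_gop() rather than by failing the whole batch.
 */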
1813
1814static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1815			       u8 status)
1816{
1817	struct pending_tx_info *pending_tx_info;
1818	pending_ring_idx_t index;
1819	unsigned long flags;
1820
1821	pending_tx_info = &queue->pending_tx_info[pending_idx];
1822
1823	spin_lock_irqsave(&queue->response_lock, flags);
1824
1825	make_tx_response(queue, &pending_tx_info->req,
1826			 pending_tx_info->extra_count, status);
1827
1828	/* Release the pending index before pushing the Tx response so
1829	 * it's available before a new Tx request is pushed by the
1830	 * frontend.
1831	 */
1832	index = pending_index(queue->pending_prod++);
1833	queue->pending_ring[index] = pending_idx;
1834
1835	push_tx_responses(queue);
1836
1837	spin_unlock_irqrestore(&queue->response_lock, flags);
1838}
1839
1840
1841static void make_tx_response(struct xenvif_queue *queue,
1842			     struct xen_netif_tx_request *txp,
1843			     unsigned int extra_count,
1844			     s8       st)
1845{
1846	RING_IDX i = queue->tx.rsp_prod_pvt;
1847	struct xen_netif_tx_response *resp;
1848
1849	resp = RING_GET_RESPONSE(&queue->tx, i);
1850	resp->id     = txp->id;
1851	resp->status = st;
1852
1853	while (extra_count-- != 0)
1854		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1855
1856	queue->tx.rsp_prod_pvt = ++i;
1857}
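/*
 * Illustrative aside -- a hypothetical sketch, not from this file: a tx
 * request that carried extra-info segments consumed 1 + extra_count ring
 * slots, so the response side must burn the same number of slots to keep
 * the ring in step; the fillers get XEN_NETIF_RSP_NULL.  The same logic in
 * miniature, over a flat array standing in for the shared ring (the
 * sketch_* names are invented here):
 */
struct sketch_tx_rsp {
	unsigned short id;
	short status;
};

static unsigned int sketch_make_tx_response(struct sketch_tx_rsp *ring,
					    unsigned int ring_mask,
					    unsigned int rsp_prod_pvt,
					    unsigned short id, short st,
					    unsigned int extra_count)
{
	ring[rsp_prod_pvt & ring_mask].id = id;
	ring[rsp_prod_pvt & ring_mask].status = st;

	/* One "null" response per extra-info slot the request used
	 * (XEN_NETIF_RSP_NULL is 1 in xen/interface/io/netif.h). */
	while (extra_count--)
		ring[++rsp_prod_pvt & ring_mask].status = 1;

	return rsp_prod_pvt + 1;	/* caller's new rsp_prod_pvt */
}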
1858
1859static void push_tx_responses(struct xenvif_queue *queue)
1860{
1861	int notify;
1862
1863	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1864	if (notify)
1865		notify_remote_via_irq(queue->tx_irq);
1866}
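/*
 * Illustrative aside -- a hypothetical sketch of what
 * RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() does: Xen's event suppression.
 * Roughly, publish the new rsp_prod, then notify only if the frontend's
 * rsp_event marker lies within the batch of responses just made visible,
 * i.e. in (old_prod, new_prod].  Condensed (invented names):
 */
static int sketch_push_rsps_and_check_notify(unsigned int *shared_rsp_prod,
					     unsigned int shared_rsp_event,
					     unsigned int rsp_prod_pvt)
{
	unsigned int old = *shared_rsp_prod;

	/* The real macro issues a write barrier here so the response
	 * bodies are visible before the producer index moves. */
	*shared_rsp_prod = rsp_prod_pvt;

	/* Unsigned wrap-safe test for rsp_event in (old, rsp_prod_pvt]. */
	return (unsigned int)(rsp_prod_pvt - shared_rsp_event) <
	       (unsigned int)(rsp_prod_pvt - old);
}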
1867
1868static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1869						       u16      id,
1870						       s8       st,
1871						       u16      offset,
1872						       u16      size,
1873						       u16      flags)
1874{
1875	RING_IDX i = queue->rx.rsp_prod_pvt;
1876	struct xen_netif_rx_response *resp;
1877
1878	resp = RING_GET_RESPONSE(&queue->rx, i);
1879	resp->offset     = offset;
1880	resp->flags      = flags;
1881	resp->id         = id;
1882	resp->status     = (s16)size;
1883	if (st < 0)
1884		resp->status = (s16)st;
1885
1886	queue->rx.rsp_prod_pvt = ++i;
1887
1888	return resp;
1889}
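/*
 * Illustrative aside -- a hypothetical sketch: the rx response's status
 * field multiplexes two meanings, a byte count when the operation
 * succeeded (st >= 0) or a negative error/drop code, which is why size is
 * ignored when st is negative:
 */
static short sketch_rx_status(short st, unsigned short size)
{
	return st < 0 ? st : (short)size;
}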
1890
1891void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1892{
1893	int ret;
1894	struct gnttab_unmap_grant_ref tx_unmap_op;
1895
1896	gnttab_set_unmap_op(&tx_unmap_op,
1897			    idx_to_kaddr(queue, pending_idx),
1898			    GNTMAP_host_map,
1899			    queue->grant_tx_handle[pending_idx]);
1900	xenvif_grant_handle_reset(queue, pending_idx);
1901
1902	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1903				&queue->mmap_pages[pending_idx], 1);
1904	if (ret) {
1905		netdev_err(queue->vif->dev,
1906			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1907			   ret,
1908			   pending_idx,
1909			   tx_unmap_op.host_addr,
1910			   tx_unmap_op.handle,
1911			   tx_unmap_op.status);
1912		BUG();
1913	}
1914}
1915
1916static inline int tx_work_todo(struct xenvif_queue *queue)
1917{
1918	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1919		return 1;
1920
1921	return 0;
1922}
1923
1924static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1925{
1926	return queue->dealloc_cons != queue->dealloc_prod;
1927}
1928
1929void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
1930{
1931	if (queue->tx.sring)
1932		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1933					queue->tx.sring);
1934	if (queue->rx.sring)
1935		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1936					queue->rx.sring);
1937}
1938
1939int xenvif_map_frontend_rings(struct xenvif_queue *queue,
1940			      grant_ref_t tx_ring_ref,
1941			      grant_ref_t rx_ring_ref)
1942{
1943	void *addr;
1944	struct xen_netif_tx_sring *txs;
1945	struct xen_netif_rx_sring *rxs;
1946	int err;
1948
1949	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1950				     &tx_ring_ref, 1, &addr);
1951	if (err)
1952		goto err;
1953
1954	txs = (struct xen_netif_tx_sring *)addr;
1955	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1956
1957	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1958				     &rx_ring_ref, 1, &addr);
1959	if (err)
1960		goto err;
1961
1962	rxs = (struct xen_netif_rx_sring *)addr;
1963	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1964
1965	return 0;
1966
1967err:
1968	xenvif_unmap_frontend_rings(queue);
1969	return err;
1970}
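/*
 * Illustrative aside -- a hypothetical sketch, not the real macro:
 * BACK_RING_INIT() above attaches the backend's private ring bookkeeping
 * to the freshly mapped shared page.  In spirit it is little more than an
 * initialisation of the private indices (invented sketch_* names):
 */
struct sketch_shared_ring {
	unsigned int req_prod, req_event, rsp_prod, rsp_event;
};

struct sketch_back_ring {
	unsigned int rsp_prod_pvt;	/* next response we will build */
	unsigned int req_cons;		/* next request we will consume */
	unsigned int nr_ents;		/* entries that fit in the page */
	struct sketch_shared_ring *sring;
};

static void sketch_back_ring_init(struct sketch_back_ring *br,
				  struct sketch_shared_ring *sring,
				  unsigned int nr_ents)
{
	br->rsp_prod_pvt = 0;
	br->req_cons = 0;
	br->nr_ents = nr_ents;
	br->sring = sring;
}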
1971
1972static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
1973{
1974	struct xenvif *vif = queue->vif;
1975
1976	queue->stalled = true;
1977
1978	/* At least one queue has stalled? Disable the carrier. */
1979	spin_lock(&vif->lock);
1980	if (vif->stalled_queues++ == 0) {
1981		netdev_info(vif->dev, "Guest Rx stalled");
1982		netif_carrier_off(vif->dev);
1983	}
1984	spin_unlock(&vif->lock);
1985}
1986
1987static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
1988{
1989	struct xenvif *vif = queue->vif;
1990
1991	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
1992	queue->stalled = false;
1993
1994	/* All queues are ready? Enable the carrier. */
1995	spin_lock(&vif->lock);
1996	if (--vif->stalled_queues == 0) {
1997		netdev_info(vif->dev, "Guest Rx ready");
1998		netif_carrier_on(vif->dev);
1999	}
2000	spin_unlock(&vif->lock);
2001}
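/*
 * Illustrative aside -- a hypothetical sketch: vif->stalled_queues is an
 * edge-triggered counter.  The carrier is toggled only on the 0 -> 1
 * transition (first queue stalls) and the 1 -> 0 transition (last queue
 * recovers).  The same pattern in a self-contained userspace form
 * (invented sketch_* names):
 */
#include <pthread.h>
#include <stdbool.h>

struct sketch_vif {
	pthread_mutex_t lock;
	unsigned int stalled_queues;
	bool carrier_on;
};

static void sketch_queue_stalled(struct sketch_vif *vif)
{
	pthread_mutex_lock(&vif->lock);
	if (vif->stalled_queues++ == 0)		/* first stalled queue */
		vif->carrier_on = false;
	pthread_mutex_unlock(&vif->lock);
}

static void sketch_queue_recovered(struct sketch_vif *vif)
{
	pthread_mutex_lock(&vif->lock);
	if (--vif->stalled_queues == 0)		/* last queue recovered */
		vif->carrier_on = true;
	pthread_mutex_unlock(&vif->lock);
}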
2002
2003static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
2004{
2005	RING_IDX prod, cons;
2006
2007	prod = queue->rx.sring->req_prod;
2008	cons = queue->rx.req_cons;
2009
2010	return !queue->stalled && prod - cons < 1 &&
2011		time_after(jiffies,
2012			   queue->last_rx_time + queue->vif->stall_timeout);
2013}
2014
2015static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
2016{
2017	RING_IDX prod, cons;
2018
2019	prod = queue->rx.sring->req_prod;
2020	cons = queue->rx.req_cons;
2021
2022	return queue->stalled && prod - cons >= 1;
2023}
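/*
 * Illustrative aside -- a hypothetical sketch: the two predicates above
 * form a hysteresis pair keyed off the "stalled" flag, so each state
 * reports only the transition out of itself -- a running queue with no
 * posted Rx requests for stall_timeout jiffies is newly stalled, and a
 * stalled queue with any posted request is newly ready.  Condensed, with
 * invented names and an explicit "now" in place of jiffies:
 */
#include <stdbool.h>

struct sketch_rx_state {
	bool stalled;
	unsigned int prod, cons;	/* frontend request indices */
	unsigned long last_rx_time, stall_timeout;
};

static bool sketch_newly_stalled(const struct sketch_rx_state *q,
				 unsigned long now)
{
	return !q->stalled && q->prod == q->cons &&
	       (long)(now - (q->last_rx_time + q->stall_timeout)) > 0;
}

static bool sketch_newly_ready(const struct sketch_rx_state *q)
{
	return q->stalled && q->prod != q->cons;
}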
2024
2025static bool xenvif_have_rx_work(struct xenvif_queue *queue)
2026{
2027	return xenvif_rx_ring_slots_available(queue) ||
2028		(queue->vif->stall_timeout &&
2029		 (xenvif_rx_queue_stalled(queue) ||
2030		  xenvif_rx_queue_ready(queue))) ||
2031		kthread_should_stop() ||
2032		queue->vif->disabled;
2033}
2034
2035static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
2036{
2037	struct sk_buff *skb;
2038	long timeout;
2039
2040	skb = skb_peek(&queue->rx_queue);
2041	if (!skb)
2042		return MAX_SCHEDULE_TIMEOUT;
2043
2044	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
2045	return timeout < 0 ? 0 : timeout;
2046}
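/*
 * Illustrative aside -- a hypothetical sketch: XENVIF_RX_CB(skb)->expires
 * is stamped when the skb is queued (conceptually jiffies-at-enqueue plus
 * the drain timeout), and the subtraction above is the usual wrap-safe
 * jiffies idiom -- take a signed difference and clamp negatives to
 * "already expired":
 */
static long sketch_timeout_remaining(unsigned long expires, unsigned long now)
{
	long remaining = (long)(expires - now);	/* wrap-safe */

	return remaining < 0 ? 0 : remaining;
}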
2047
2048/* Wait until the guest Rx thread has work.
2049 *
2050 * The timeout needs to be adjusted based on the current head of the
2051 * queue (and not just the head at the beginning).  In particular, if
2052 * the queue is initially empty an infinite timeout is used and this
2053 * needs to be reduced when a skb is queued.
2054 * needs to be reduced when an skb is queued.
2055 * This cannot be done with wait_event_timeout() because it only
2056 * calculates the timeout once.
2057 */
2058static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
2059{
2060	DEFINE_WAIT(wait);
2061
2062	if (xenvif_have_rx_work(queue))
2063		return;
2064
2065	for (;;) {
2066		long ret;
2067
2068		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
2069		if (xenvif_have_rx_work(queue))
2070			break;
2071		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
2072		if (!ret)
2073			break;
2074	}
2075	finish_wait(&queue->wq, &wait);
2076}
2077
2078int xenvif_kthread_guest_rx(void *data)
2079{
2080	struct xenvif_queue *queue = data;
2081	struct xenvif *vif = queue->vif;
2082
2083	if (!vif->stall_timeout)
2084		xenvif_queue_carrier_on(queue);
2085
2086	for (;;) {
2087		xenvif_wait_for_rx_work(queue);
2088
2089		if (kthread_should_stop())
2090			break;
2091
2092		/* The frontend has been found to be rogue; disable it
2093		 * in kthread context. Currently this is only set when
2094		 * netback finds that the frontend has sent a malformed
2095		 * packet, but the interface cannot be disabled in
2096		 * softirq context, so the work is deferred here to the
2097		 * thread associated with queue 0.
2098		 */
2099		if (unlikely(vif->disabled && queue->id == 0)) {
2100			xenvif_carrier_off(vif);
2101			break;
2102		}
2103
2104		if (!skb_queue_empty(&queue->rx_queue))
2105			xenvif_rx_action(queue);
2106
2107		/* If the guest hasn't provided any Rx slots for a
2108		 * while, it's probably unresponsive; drop the
2109		 * carrier so packets are dropped earlier.
2110		 */
2111		if (vif->stall_timeout) {
2112			if (xenvif_rx_queue_stalled(queue))
2113				xenvif_queue_carrier_off(queue);
2114			else if (xenvif_rx_queue_ready(queue))
2115				xenvif_queue_carrier_on(queue);
2116		}
2117
2118		/* Queued packets may have foreign pages from other
2119		 * domains.  These cannot be queued indefinitely as
2120		 * this would starve guests of grant refs and transmit
2121		 * slots.
2122		 */
2123		xenvif_rx_queue_drop_expired(queue);
2124
2125		xenvif_rx_queue_maybe_wake(queue);
2126
2127		cond_resched();
2128	}
2129
2130	/* Bin any remaining skbs */
2131	xenvif_rx_queue_purge(queue);
2132
2133	return 0;
2134}
2135
2136static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
2137{
2138	/* Dealloc thread must remain running until all inflight
2139	 * packets complete.
2140	 */
2141	return kthread_should_stop() &&
2142		!atomic_read(&queue->inflight_packets);
2143}
2144
2145int xenvif_dealloc_kthread(void *data)
2146{
2147	struct xenvif_queue *queue = data;
2148
2149	for (;;) {
2150		wait_event_interruptible(queue->dealloc_wq,
2151					 tx_dealloc_work_todo(queue) ||
2152					 xenvif_dealloc_kthread_should_stop(queue));
2153		if (xenvif_dealloc_kthread_should_stop(queue))
2154			break;
2155
2156		xenvif_tx_dealloc_action(queue);
2157		cond_resched();
2158	}
2159
2160	/* Unmap anything remaining */
2161	if (tx_dealloc_work_todo(queue))
2162		xenvif_tx_dealloc_action(queue);
2163
2164	return 0;
2165}
2166
2167static int __init netback_init(void)
2168{
2169	int rc = 0;
2170
2171	if (!xen_domain())
2172		return -ENODEV;
2173
2174	/* Allow as many queues as there are CPUs if the user has not
2175	 * specified a value.
2176	 */
2177	if (xenvif_max_queues == 0)
2178		xenvif_max_queues = num_online_cpus();
2179
2180	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
2181		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
2182			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
2183		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
2184	}
2185
2186	rc = xenvif_xenbus_init();
2187	if (rc)
2188		goto failed_init;
2189
2190#ifdef CONFIG_DEBUG_FS
2191	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
2192	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
2193		pr_warn("Init of debugfs returned %ld!\n",
2194			PTR_ERR(xen_netback_dbg_root));
2195#endif /* CONFIG_DEBUG_FS */
2196
2197	return 0;
2198
2199failed_init:
2200	return rc;
2201}
2202
2203module_init(netback_init);
2204
2205static void __exit netback_fini(void)
2206{
2207#ifdef CONFIG_DEBUG_FS
2208	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
2209		debugfs_remove_recursive(xen_netback_dbg_root);
2210#endif /* CONFIG_DEBUG_FS */
2211	xenvif_xenbus_fini();
2212}
2213module_exit(netback_fini);
2214
2215MODULE_LICENSE("Dual BSD/GPL");
2216MODULE_ALIAS("xen-backend:vif");