v6.8
   1/*
   2 * Back-end of the driver for virtual network devices. This portion of the
   3 * driver exports a 'unified' network-device interface that can be accessed
   4 * by any operating system that implements a compatible front end. A
   5 * reference front-end implementation can be found in:
   6 *  drivers/net/xen-netfront.c
   7 *
   8 * Copyright (c) 2002-2005, K A Fraser
   9 *
  10 * This program is free software; you can redistribute it and/or
  11 * modify it under the terms of the GNU General Public License version 2
  12 * as published by the Free Software Foundation; or, when distributed
  13 * separately from the Linux kernel or incorporated into other
  14 * software packages, subject to the following license:
  15 *
  16 * Permission is hereby granted, free of charge, to any person obtaining a copy
  17 * of this source file (the "Software"), to deal in the Software without
  18 * restriction, including without limitation the rights to use, copy, modify,
  19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  20 * and to permit persons to whom the Software is furnished to do so, subject to
  21 * the following conditions:
  22 *
  23 * The above copyright notice and this permission notice shall be included in
  24 * all copies or substantial portions of the Software.
  25 *
  26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  32 * IN THE SOFTWARE.
  33 */
  34
  35#include "common.h"
  36
  37#include <linux/kthread.h>
  38#include <linux/if_vlan.h>
  39#include <linux/udp.h>
  40#include <linux/highmem.h>
  41
  42#include <net/tcp.h>
  43
  44#include <xen/xen.h>
  45#include <xen/events.h>
  46#include <xen/interface/memory.h>
  47#include <xen/page.h>
  48
  49#include <asm/xen/hypercall.h>
  50
   51/* Provide an option to disable split event channels at load time, as
   52 * event channels are a limited resource. Split event channels are
  53 * enabled by default.
  54 */
  55bool separate_tx_rx_irq = true;
  56module_param(separate_tx_rx_irq, bool, 0644);
  57
  58/* The time that packets can stay on the guest Rx internal queue
  59 * before they are dropped.
  60 */
  61unsigned int rx_drain_timeout_msecs = 10000;
  62module_param(rx_drain_timeout_msecs, uint, 0444);
  63
  64/* The length of time before the frontend is considered unresponsive
  65 * because it isn't providing Rx slots.
  66 */
  67unsigned int rx_stall_timeout_msecs = 60000;
  68module_param(rx_stall_timeout_msecs, uint, 0444);
  69
  70#define MAX_QUEUES_DEFAULT 8
  71unsigned int xenvif_max_queues;
  72module_param_named(max_queues, xenvif_max_queues, uint, 0644);
  73MODULE_PARM_DESC(max_queues,
  74		 "Maximum number of queues per virtual interface");
  75
  76/*
   77 * This is the maximum number of slots an skb can have. If a guest sends
   78 * an skb which exceeds this limit it is considered malicious.
  79 */
  80#define FATAL_SKB_SLOTS_DEFAULT 20
  81static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
  82module_param(fatal_skb_slots, uint, 0444);
  83
  84/* The amount to copy out of the first guest Tx slot into the skb's
  85 * linear area.  If the first slot has more data, it will be mapped
  86 * and put into the first frag.
  87 *
  88 * This is sized to avoid pulling headers from the frags for most
  89 * TCP/IP packets.
  90 */
  91#define XEN_NETBACK_TX_COPY_LEN 128
  92
  93/* This is the maximum number of flows in the hash cache. */
  94#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
  95unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
  96module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
  97MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
  98
   99/* This module parameter tells whether data for xen-netfront has to be
  100 * placed at the XDP_PACKET_HEADROOM offset
  101 * needed for XDP processing.
 102 */
 103bool provides_xdp_headroom = true;
 104module_param(provides_xdp_headroom, bool, 0644);
 105
 106static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 107			       s8 status);
 108
 109static void make_tx_response(struct xenvif_queue *queue,
 110			     const struct xen_netif_tx_request *txp,
 111			     unsigned int extra_count,
 112			     s8 status);
 113
 114static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
 115
 116static inline int tx_work_todo(struct xenvif_queue *queue);
 117
 118static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
 119				       u16 idx)
 120{
 121	return page_to_pfn(queue->mmap_pages[idx]);
 122}
 123
 124static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
 125					 u16 idx)
 126{
 127	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
 128}
 129
 130#define callback_param(vif, pending_idx) \
 131	(vif->pending_tx_info[pending_idx].callback_struct)
 132
  133/* Find the containing queue structure from a pointer into its pending_tx_info array
  134 */
 135static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
 136{
 137	u16 pending_idx = ubuf->desc;
 138	struct pending_tx_info *temp =
 139		container_of(ubuf, struct pending_tx_info, callback_struct);
 140	return container_of(temp - pending_idx,
 141			    struct xenvif_queue,
 142			    pending_tx_info[0]);
 143}
 144
 145static u16 frag_get_pending_idx(skb_frag_t *frag)
 146{
 147	return (u16)skb_frag_off(frag);
 148}
 149
 150static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
 151{
 152	skb_frag_off_set(frag, pending_idx);
 153}
 154
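     /* Map a free-running index onto a slot in the pending ring.  The mask
      * works because MAX_PENDING_REQS is a power of two.
      */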
 155static inline pending_ring_idx_t pending_index(unsigned i)
 156{
 157	return i & (MAX_PENDING_REQS-1);
 158}
 159
 160void xenvif_kick_thread(struct xenvif_queue *queue)
 161{
 162	wake_up(&queue->wq);
 163}
 164
 165void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
 166{
 167	int more_to_do;
 168
 169	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
 170
 171	if (more_to_do)
 172		napi_schedule(&queue->napi);
 173	else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
 174				     &queue->eoi_pending) &
 175		 (NETBK_TX_EOI | NETBK_COMMON_EOI))
 176		xen_irq_lateeoi(queue->tx_irq, 0);
 177}
 178
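     /* Top up the queue's transmit credit by one credit_bytes quantum,
      * clamped to the permitted burst size, and clear the rate-limited flag.
      */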
 179static void tx_add_credit(struct xenvif_queue *queue)
 180{
 181	unsigned long max_burst, max_credit;
 182
 183	/*
 184	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
 185	 * Otherwise the interface can seize up due to insufficient credit.
 186	 */
 187	max_burst = max(131072UL, queue->credit_bytes);
 188
 189	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
 190	max_credit = queue->remaining_credit + queue->credit_bytes;
 191	if (max_credit < queue->remaining_credit)
 192		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
 193
 194	queue->remaining_credit = min(max_credit, max_burst);
 195	queue->rate_limited = false;
 196}
 197
 198void xenvif_tx_credit_callback(struct timer_list *t)
 199{
 200	struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
 201	tx_add_credit(queue);
 202	xenvif_napi_schedule_or_enable_events(queue);
 203}
 204
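     /* Send an error response for @txp and for every further request on the
      * ring up to index @end, consuming them from the ring.
      */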
 205static void xenvif_tx_err(struct xenvif_queue *queue,
 206			  struct xen_netif_tx_request *txp,
 207			  unsigned int extra_count, RING_IDX end)
 208{
 209	RING_IDX cons = queue->tx.req_cons;
 210
 211	do {
 212		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
 213		if (cons == end)
 214			break;
 215		RING_COPY_REQUEST(&queue->tx, cons++, txp);
 216		extra_count = 0; /* only the first frag can have extras */
 217	} while (1);
 218	queue->tx.req_cons = cons;
 219}
 220
 221static void xenvif_fatal_tx_err(struct xenvif *vif)
 222{
 223	netdev_err(vif->dev, "fatal error; disabling device\n");
 224	vif->disabled = true;
 225	/* Disable the vif from queue 0's kthread */
 226	if (vif->num_queues)
 227		xenvif_kick_thread(&vif->queues[0]);
 228}
 229
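     /* Count the slots chained after @first via XEN_NETTXF_more_data and copy
      * them into @txp.  Returns the number of follow-on slots or a negative
      * errno; oversized but non-fatal packets are errored and dropped here.
      */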
 230static int xenvif_count_requests(struct xenvif_queue *queue,
 231				 struct xen_netif_tx_request *first,
 232				 unsigned int extra_count,
 233				 struct xen_netif_tx_request *txp,
 234				 int work_to_do)
 235{
 236	RING_IDX cons = queue->tx.req_cons;
 237	int slots = 0;
 238	int drop_err = 0;
 239	int more_data;
 240
 241	if (!(first->flags & XEN_NETTXF_more_data))
 242		return 0;
 243
 244	do {
 245		struct xen_netif_tx_request dropped_tx = { 0 };
 246
 247		if (slots >= work_to_do) {
 248			netdev_err(queue->vif->dev,
 249				   "Asked for %d slots but exceeds this limit\n",
 250				   work_to_do);
 251			xenvif_fatal_tx_err(queue->vif);
 252			return -ENODATA;
 253		}
 254
  255		/* This guest is really using too many slots and is
  256		 * considered malicious.
 257		 */
 258		if (unlikely(slots >= fatal_skb_slots)) {
 259			netdev_err(queue->vif->dev,
 260				   "Malicious frontend using %d slots, threshold %u\n",
 261				   slots, fatal_skb_slots);
 262			xenvif_fatal_tx_err(queue->vif);
 263			return -E2BIG;
 264		}
 265
  266		/* The Xen network protocol had an implicit dependency on
  267		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
  268		 * the historical MAX_SKB_FRAGS value 18 to honor the
  269		 * same behavior as before. Any packet using more than
  270		 * 18 slots but fewer than fatal_skb_slots slots is
  271		 * dropped.
 272		 */
 273		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
 274			if (net_ratelimit())
 275				netdev_dbg(queue->vif->dev,
 276					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
 277					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
 278			drop_err = -E2BIG;
 279		}
 280
 281		if (drop_err)
 282			txp = &dropped_tx;
 283
 284		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
 285
 286		/* If the guest submitted a frame >= 64 KiB then
 287		 * first->size overflowed and following slots will
 288		 * appear to be larger than the frame.
 289		 *
 290		 * This cannot be fatal error as there are buggy
 291		 * frontends that do this.
 292		 *
 293		 * Consume all slots and drop the packet.
 294		 */
 295		if (!drop_err && txp->size > first->size) {
 296			if (net_ratelimit())
 297				netdev_dbg(queue->vif->dev,
 298					   "Invalid tx request, slot size %u > remaining size %u\n",
 299					   txp->size, first->size);
 300			drop_err = -EIO;
 301		}
 302
 303		first->size -= txp->size;
 304		slots++;
 305
 306		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
 307			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
 308				 txp->offset, txp->size);
 309			xenvif_fatal_tx_err(queue->vif);
 310			return -EINVAL;
 311		}
 312
 313		more_data = txp->flags & XEN_NETTXF_more_data;
 314
 315		if (!drop_err)
 316			txp++;
 317
 318	} while (more_data);
 319
 320	if (drop_err) {
 321		xenvif_tx_err(queue, first, extra_count, cons + slots);
 322		return drop_err;
 323	}
 324
 325	return slots;
 326}
 327
 328
 329struct xenvif_tx_cb {
 330	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
 331	u8 copy_count;
 332	u32 split_mask;
 333};
 334
 335#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
 336#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
 337#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
 338
 339static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 340					   u16 pending_idx,
 341					   struct xen_netif_tx_request *txp,
 342					   unsigned int extra_count,
 343					   struct gnttab_map_grant_ref *mop)
 344{
 345	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
 346	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
 347			  GNTMAP_host_map | GNTMAP_readonly,
 348			  txp->gref, queue->vif->domid);
 349
 350	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
 351	       sizeof(*txp));
 352	queue->pending_tx_info[pending_idx].extra_count = extra_count;
 353}
 354
 355static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 356{
 357	struct sk_buff *skb =
 358		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 359			  GFP_ATOMIC | __GFP_NOWARN);
 360
 361	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
 362	if (unlikely(skb == NULL))
 363		return NULL;
 364
 365	/* Packets passed to netif_rx() must have some headroom. */
 366	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 367
 368	/* Initialize it here to avoid later surprises */
 369	skb_shinfo(skb)->destructor_arg = NULL;
 370
 371	return skb;
 372}
 373
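     /* Turn one packet's worth of tx requests into grant operations: copy ops
      * for the first data_len bytes going into the skb's linear area, and map
      * ops for the remaining slots, which become frags (spilling into @nskb's
      * frags when there are more slots than MAX_SKB_FRAGS).
      */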
 374static void xenvif_get_requests(struct xenvif_queue *queue,
 375				struct sk_buff *skb,
 376				struct xen_netif_tx_request *first,
 377				struct xen_netif_tx_request *txfrags,
 378			        unsigned *copy_ops,
 379			        unsigned *map_ops,
 380				unsigned int frag_overflow,
 381				struct sk_buff *nskb,
 382				unsigned int extra_count,
 383				unsigned int data_len)
 384{
 385	struct skb_shared_info *shinfo = skb_shinfo(skb);
 386	skb_frag_t *frags = shinfo->frags;
 387	u16 pending_idx;
 388	pending_ring_idx_t index;
 389	unsigned int nr_slots;
 390	struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
 391	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
 392	struct xen_netif_tx_request *txp = first;
 393
 394	nr_slots = shinfo->nr_frags + frag_overflow + 1;
 395
 396	copy_count(skb) = 0;
 397	XENVIF_TX_CB(skb)->split_mask = 0;
 398
 399	/* Create copy ops for exactly data_len bytes into the skb head. */
 400	__skb_put(skb, data_len);
 401	while (data_len > 0) {
 402		int amount = data_len > txp->size ? txp->size : data_len;
 403		bool split = false;
 404
 405		cop->source.u.ref = txp->gref;
 406		cop->source.domid = queue->vif->domid;
 407		cop->source.offset = txp->offset;
 408
 409		cop->dest.domid = DOMID_SELF;
 410		cop->dest.offset = (offset_in_page(skb->data +
 411						   skb_headlen(skb) -
 412						   data_len)) & ~XEN_PAGE_MASK;
 413		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
 414				               - data_len);
 415
 416		/* Don't cross local page boundary! */
 417		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
 418			amount = XEN_PAGE_SIZE - cop->dest.offset;
 419			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
 420			split = true;
 421		}
 422
 423		cop->len = amount;
 424		cop->flags = GNTCOPY_source_gref;
 425
 426		index = pending_index(queue->pending_cons);
 427		pending_idx = queue->pending_ring[index];
 428		callback_param(queue, pending_idx).ctx = NULL;
 429		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
 430		if (!split)
 431			copy_count(skb)++;
 432
 433		cop++;
 434		data_len -= amount;
 435
 436		if (amount == txp->size) {
 437			/* The copy op covered the full tx_request */
 438
 439			memcpy(&queue->pending_tx_info[pending_idx].req,
 440			       txp, sizeof(*txp));
 441			queue->pending_tx_info[pending_idx].extra_count =
 442				(txp == first) ? extra_count : 0;
 443
 444			if (txp == first)
 445				txp = txfrags;
 446			else
 447				txp++;
 448			queue->pending_cons++;
 449			nr_slots--;
 450		} else {
 451			/* The copy op partially covered the tx_request.
 452			 * The remainder will be mapped or copied in the next
 453			 * iteration.
 454			 */
 455			txp->offset += amount;
 456			txp->size -= amount;
 457		}
 458	}
 459
 460	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
 461	     nr_slots--) {
 462		if (unlikely(!txp->size)) {
 463			make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
 464			++txp;
 465			continue;
 466		}
 467
 468		index = pending_index(queue->pending_cons++);
 469		pending_idx = queue->pending_ring[index];
 470		xenvif_tx_create_map_op(queue, pending_idx, txp,
 471				        txp == first ? extra_count : 0, gop);
 472		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
 473		++shinfo->nr_frags;
 474		++gop;
 475
 476		if (txp == first)
 477			txp = txfrags;
 478		else
 479			txp++;
 480	}
 481
 482	if (nr_slots > 0) {
 483
 484		shinfo = skb_shinfo(nskb);
 485		frags = shinfo->frags;
 486
 487		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
 488			if (unlikely(!txp->size)) {
 489				make_tx_response(queue, txp, 0,
 490						 XEN_NETIF_RSP_OKAY);
 491				continue;
 492			}
 493
 494			index = pending_index(queue->pending_cons++);
 495			pending_idx = queue->pending_ring[index];
 496			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
 497						gop);
 498			frag_set_pending_idx(&frags[shinfo->nr_frags],
 499					     pending_idx);
 500			++shinfo->nr_frags;
 501			++gop;
 502		}
 503
 504		if (shinfo->nr_frags) {
 505			skb_shinfo(skb)->frag_list = nskb;
 506			nskb = NULL;
 507		}
 508	}
 509
 510	if (nskb) {
 511		/* A frag_list skb was allocated but it is no longer needed
 512		 * because enough slots were converted to copy ops above or some
 513		 * were empty.
 514		 */
 515		kfree_skb(nskb);
 516	}
 517
 518	(*copy_ops) = cop - queue->tx_copy_ops;
 519	(*map_ops) = gop - queue->tx_map_ops;
 520}
 521
 522static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
 523					   u16 pending_idx,
 524					   grant_handle_t handle)
 525{
 526	if (unlikely(queue->grant_tx_handle[pending_idx] !=
 527		     NETBACK_INVALID_HANDLE)) {
 528		netdev_err(queue->vif->dev,
 529			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
 530			   pending_idx);
 531		BUG();
 532	}
 533	queue->grant_tx_handle[pending_idx] = handle;
 534}
 535
 536static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
 537					     u16 pending_idx)
 538{
 539	if (unlikely(queue->grant_tx_handle[pending_idx] ==
 540		     NETBACK_INVALID_HANDLE)) {
 541		netdev_err(queue->vif->dev,
 542			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
 543			   pending_idx);
 544		BUG();
 545	}
 546	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
 547}
 548
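     /* Check the results of the grant copy ops covering the skb head and the
      * grant map ops covering each frag (including any frag_list skb).  Slots
      * hit by an error are released/unmapped; returns 0 if all ops succeeded.
      */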
 549static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 550			       struct sk_buff *skb,
 551			       struct gnttab_map_grant_ref **gopp_map,
 552			       struct gnttab_copy **gopp_copy)
 553{
 554	struct gnttab_map_grant_ref *gop_map = *gopp_map;
 555	u16 pending_idx;
 556	/* This always points to the shinfo of the skb being checked, which
 557	 * could be either the first or the one on the frag_list
 558	 */
 559	struct skb_shared_info *shinfo = skb_shinfo(skb);
 560	/* If this is non-NULL, we are currently checking the frag_list skb, and
 561	 * this points to the shinfo of the first one
 562	 */
 563	struct skb_shared_info *first_shinfo = NULL;
 564	int nr_frags = shinfo->nr_frags;
 565	const bool sharedslot = nr_frags &&
 566				frag_get_pending_idx(&shinfo->frags[0]) ==
 567				    copy_pending_idx(skb, copy_count(skb) - 1);
 568	int i, err = 0;
 569
 570	for (i = 0; i < copy_count(skb); i++) {
 571		int newerr;
 572
 573		/* Check status of header. */
 574		pending_idx = copy_pending_idx(skb, i);
 575
 576		newerr = (*gopp_copy)->status;
 577
 578		/* Split copies need to be handled together. */
 579		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
 580			(*gopp_copy)++;
 581			if (!newerr)
 582				newerr = (*gopp_copy)->status;
 583		}
 584		if (likely(!newerr)) {
 585			/* The first frag might still have this slot mapped */
 586			if (i < copy_count(skb) - 1 || !sharedslot)
 587				xenvif_idx_release(queue, pending_idx,
 588						   XEN_NETIF_RSP_OKAY);
 589		} else {
 590			err = newerr;
 591			if (net_ratelimit())
 592				netdev_dbg(queue->vif->dev,
 593					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
 594					   (*gopp_copy)->status,
 595					   pending_idx,
 596					   (*gopp_copy)->source.u.ref);
 597			/* The first frag might still have this slot mapped */
 598			if (i < copy_count(skb) - 1 || !sharedslot)
 599				xenvif_idx_release(queue, pending_idx,
 600						   XEN_NETIF_RSP_ERROR);
 601		}
 602		(*gopp_copy)++;
 603	}
 604
 605check_frags:
 606	for (i = 0; i < nr_frags; i++, gop_map++) {
 607		int j, newerr;
 608
 609		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 610
 611		/* Check error status: if okay then remember grant handle. */
 612		newerr = gop_map->status;
 613
 614		if (likely(!newerr)) {
 615			xenvif_grant_handle_set(queue,
 616						pending_idx,
 617						gop_map->handle);
 618			/* Had a previous error? Invalidate this fragment. */
 619			if (unlikely(err)) {
 620				xenvif_idx_unmap(queue, pending_idx);
 621				/* If the mapping of the first frag was OK, but
 622				 * the header's copy failed, and they are
 623				 * sharing a slot, send an error
 624				 */
 625				if (i == 0 && !first_shinfo && sharedslot)
 626					xenvif_idx_release(queue, pending_idx,
 627							   XEN_NETIF_RSP_ERROR);
 628				else
 629					xenvif_idx_release(queue, pending_idx,
 630							   XEN_NETIF_RSP_OKAY);
 631			}
 632			continue;
 633		}
 634
 635		/* Error on this fragment: respond to client with an error. */
 636		if (net_ratelimit())
 637			netdev_dbg(queue->vif->dev,
 638				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
 639				   i,
 640				   gop_map->status,
 641				   pending_idx,
 642				   gop_map->ref);
 643
 644		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
 645
 646		/* Not the first error? Preceding frags already invalidated. */
 647		if (err)
 648			continue;
 649
 650		/* Invalidate preceding fragments of this skb. */
 651		for (j = 0; j < i; j++) {
 652			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
 653			xenvif_idx_unmap(queue, pending_idx);
 654			xenvif_idx_release(queue, pending_idx,
 655					   XEN_NETIF_RSP_OKAY);
 656		}
 657
 658		/* And if we found the error while checking the frag_list, unmap
 659		 * the first skb's frags
 660		 */
 661		if (first_shinfo) {
 662			for (j = 0; j < first_shinfo->nr_frags; j++) {
 663				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
 664				xenvif_idx_unmap(queue, pending_idx);
 665				xenvif_idx_release(queue, pending_idx,
 666						   XEN_NETIF_RSP_OKAY);
 667			}
 668		}
 669
 670		/* Remember the error: invalidate all subsequent fragments. */
 671		err = newerr;
 672	}
 673
 674	if (skb_has_frag_list(skb) && !first_shinfo) {
 675		first_shinfo = shinfo;
 676		shinfo = skb_shinfo(shinfo->frag_list);
 677		nr_frags = shinfo->nr_frags;
 678
 679		goto check_frags;
 680	}
 681
 682	*gopp_map = gop_map;
 683	return err;
 684}
 685
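     /* Point the skb's frags at the grant-mapped guest pages recorded in
      * pending_tx_info, chain the zerocopy callback contexts together and take
      * an extra page reference to offset the network stack's put_page().
      */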
 686static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
 687{
 688	struct skb_shared_info *shinfo = skb_shinfo(skb);
 689	int nr_frags = shinfo->nr_frags;
 690	int i;
 691	u16 prev_pending_idx = INVALID_PENDING_IDX;
 692
 693	for (i = 0; i < nr_frags; i++) {
 694		skb_frag_t *frag = shinfo->frags + i;
 695		struct xen_netif_tx_request *txp;
 696		struct page *page;
 697		u16 pending_idx;
 698
 699		pending_idx = frag_get_pending_idx(frag);
 700
  701		/* If this is not the first frag, chain it to the previous one */
 702		if (prev_pending_idx == INVALID_PENDING_IDX)
 703			skb_shinfo(skb)->destructor_arg =
 704				&callback_param(queue, pending_idx);
 705		else
 706			callback_param(queue, prev_pending_idx).ctx =
 707				&callback_param(queue, pending_idx);
 708
 709		callback_param(queue, pending_idx).ctx = NULL;
 710		prev_pending_idx = pending_idx;
 711
 712		txp = &queue->pending_tx_info[pending_idx].req;
 713		page = virt_to_page((void *)idx_to_kaddr(queue, pending_idx));
 714		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
 715		skb->len += txp->size;
 716		skb->data_len += txp->size;
 717		skb->truesize += txp->size;
 718
 719		/* Take an extra reference to offset network stack's put_page */
 720		get_page(queue->mmap_pages[pending_idx]);
 721	}
 722}
 723
 724static int xenvif_get_extras(struct xenvif_queue *queue,
 725			     struct xen_netif_extra_info *extras,
 726			     unsigned int *extra_count,
 727			     int work_to_do)
 728{
 729	struct xen_netif_extra_info extra;
 730	RING_IDX cons = queue->tx.req_cons;
 731
 732	do {
 733		if (unlikely(work_to_do-- <= 0)) {
 734			netdev_err(queue->vif->dev, "Missing extra info\n");
 735			xenvif_fatal_tx_err(queue->vif);
 736			return -EBADR;
 737		}
 738
 739		RING_COPY_REQUEST(&queue->tx, cons, &extra);
 740
 741		queue->tx.req_cons = ++cons;
 742		(*extra_count)++;
 743
 744		if (unlikely(!extra.type ||
 745			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 746			netdev_err(queue->vif->dev,
 747				   "Invalid extra type: %d\n", extra.type);
 748			xenvif_fatal_tx_err(queue->vif);
 749			return -EINVAL;
 750		}
 751
 752		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
 753	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 754
 755	return work_to_do;
 756}
 757
 758static int xenvif_set_skb_gso(struct xenvif *vif,
 759			      struct sk_buff *skb,
 760			      struct xen_netif_extra_info *gso)
 761{
 762	if (!gso->u.gso.size) {
 763		netdev_err(vif->dev, "GSO size must not be zero.\n");
 764		xenvif_fatal_tx_err(vif);
 765		return -EINVAL;
 766	}
 767
 768	switch (gso->u.gso.type) {
 769	case XEN_NETIF_GSO_TYPE_TCPV4:
 770		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 771		break;
 772	case XEN_NETIF_GSO_TYPE_TCPV6:
 773		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 774		break;
 775	default:
 776		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
 777		xenvif_fatal_tx_err(vif);
 778		return -EINVAL;
 779	}
 780
 781	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 782	/* gso_segs will be calculated later */
 783
 784	return 0;
 785}
 786
 787static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
 788{
 789	bool recalculate_partial_csum = false;
 790
 791	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 792	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 793	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 794	 * recalculate the partial checksum.
 795	 */
 796	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 797		queue->stats.rx_gso_checksum_fixup++;
 798		skb->ip_summed = CHECKSUM_PARTIAL;
 799		recalculate_partial_csum = true;
 800	}
 801
 802	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 803	if (skb->ip_summed != CHECKSUM_PARTIAL)
 804		return 0;
 805
 806	return skb_checksum_setup(skb, recalculate_partial_csum);
 807}
 808
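     /* Credit-based rate limiting: return true if @size bytes may not be sent
      * yet.  Credit is replenished once the window has passed; if the packet
      * still exceeds the remaining credit, the credit timer is armed.
      */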
 809static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 810{
 811	u64 now = get_jiffies_64();
 812	u64 next_credit = queue->credit_window_start +
 813		msecs_to_jiffies(queue->credit_usec / 1000);
 814
 815	/* Timer could already be pending in rare cases. */
 816	if (timer_pending(&queue->credit_timeout)) {
 817		queue->rate_limited = true;
 818		return true;
 819	}
 820
 821	/* Passed the point where we can replenish credit? */
 822	if (time_after_eq64(now, next_credit)) {
 823		queue->credit_window_start = now;
 824		tx_add_credit(queue);
 825	}
 826
 827	/* Still too big to send right now? Set a callback. */
 828	if (size > queue->remaining_credit) {
 829		mod_timer(&queue->credit_timeout,
 830			  next_credit);
 831		queue->credit_window_start = next_credit;
 832		queue->rate_limited = true;
 833
 834		return true;
 835	}
 836
 837	return false;
 838}
 839
 840/* No locking is required in xenvif_mcast_add/del() as they are
 841 * only ever invoked from NAPI poll. An RCU list is used because
 842 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 843 */
 844
 845static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
 846{
 847	struct xenvif_mcast_addr *mcast;
 848
 849	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
 850		if (net_ratelimit())
 851			netdev_err(vif->dev,
 852				   "Too many multicast addresses\n");
 853		return -ENOSPC;
 854	}
 855
 856	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
 857	if (!mcast)
 858		return -ENOMEM;
 859
 860	ether_addr_copy(mcast->addr, addr);
 861	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
 862	vif->fe_mcast_count++;
 863
 864	return 0;
 865}
 866
 867static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
 868{
 869	struct xenvif_mcast_addr *mcast;
 870
 871	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
 872		if (ether_addr_equal(addr, mcast->addr)) {
 873			--vif->fe_mcast_count;
 874			list_del_rcu(&mcast->entry);
 875			kfree_rcu(mcast, rcu);
 876			break;
 877		}
 878	}
 879}
 880
 881bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
 882{
 883	struct xenvif_mcast_addr *mcast;
 884
 885	rcu_read_lock();
 886	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
 887		if (ether_addr_equal(addr, mcast->addr)) {
 888			rcu_read_unlock();
 889			return true;
 890		}
 891	}
 892	rcu_read_unlock();
 893
 894	return false;
 895}
 896
 897void xenvif_mcast_addr_list_free(struct xenvif *vif)
 898{
 899	/* No need for locking or RCU here. NAPI poll and TX queue
 900	 * are stopped.
 901	 */
 902	while (!list_empty(&vif->fe_mcast_addr)) {
 903		struct xenvif_mcast_addr *mcast;
 904
 905		mcast = list_first_entry(&vif->fe_mcast_addr,
 906					 struct xenvif_mcast_addr,
 907					 entry);
 908		--vif->fe_mcast_count;
 909		list_del(&mcast->entry);
 910		kfree(mcast);
 911	}
 912}
 913
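     /* Consume tx requests from the shared ring, apply credit scheduling and
      * handle extra-info slots (multicast, GSO, hash), then build the grant
      * copy/map operations for each packet and queue its skb on tx_queue.
      */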
 914static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 915				     int budget,
 916				     unsigned *copy_ops,
 917				     unsigned *map_ops)
 918{
 919	struct sk_buff *skb, *nskb;
 920	int ret;
 921	unsigned int frag_overflow;
 922
 923	while (skb_queue_len(&queue->tx_queue) < budget) {
 924		struct xen_netif_tx_request txreq;
 925		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 926		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
 927		unsigned int extra_count;
 928		RING_IDX idx;
 929		int work_to_do;
 930		unsigned int data_len;
 931
 932		if (queue->tx.sring->req_prod - queue->tx.req_cons >
 933		    XEN_NETIF_TX_RING_SIZE) {
 934			netdev_err(queue->vif->dev,
 935				   "Impossible number of requests. "
 936				   "req_prod %d, req_cons %d, size %ld\n",
 937				   queue->tx.sring->req_prod, queue->tx.req_cons,
 938				   XEN_NETIF_TX_RING_SIZE);
 939			xenvif_fatal_tx_err(queue->vif);
 940			break;
 941		}
 942
 943		work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
 944		if (!work_to_do)
 945			break;
 946
 947		idx = queue->tx.req_cons;
 948		rmb(); /* Ensure that we see the request before we copy it. */
 949		RING_COPY_REQUEST(&queue->tx, idx, &txreq);
 950
 951		/* Credit-based scheduling. */
 952		if (txreq.size > queue->remaining_credit &&
 953		    tx_credit_exceeded(queue, txreq.size))
 954			break;
 955
 956		queue->remaining_credit -= txreq.size;
 957
 958		work_to_do--;
 959		queue->tx.req_cons = ++idx;
 960
 961		memset(extras, 0, sizeof(extras));
 962		extra_count = 0;
 963		if (txreq.flags & XEN_NETTXF_extra_info) {
 964			work_to_do = xenvif_get_extras(queue, extras,
 965						       &extra_count,
 966						       work_to_do);
 967			idx = queue->tx.req_cons;
 968			if (unlikely(work_to_do < 0))
 969				break;
 970		}
 971
 972		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
 973			struct xen_netif_extra_info *extra;
 974
 975			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
 976			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
 977
 978			make_tx_response(queue, &txreq, extra_count,
 979					 (ret == 0) ?
 980					 XEN_NETIF_RSP_OKAY :
 981					 XEN_NETIF_RSP_ERROR);
 982			continue;
 983		}
 984
 985		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
 986			struct xen_netif_extra_info *extra;
 987
 988			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
 989			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
 990
 991			make_tx_response(queue, &txreq, extra_count,
 992					 XEN_NETIF_RSP_OKAY);
 993			continue;
 994		}
 995
 996		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
 997			XEN_NETBACK_TX_COPY_LEN : txreq.size;
 998
 999		ret = xenvif_count_requests(queue, &txreq, extra_count,
1000					    txfrags, work_to_do);
1001
1002		if (unlikely(ret < 0))
1003			break;
1004
1005		idx += ret;
1006
1007		if (unlikely(txreq.size < ETH_HLEN)) {
1008			netdev_dbg(queue->vif->dev,
1009				   "Bad packet size: %d\n", txreq.size);
1010			xenvif_tx_err(queue, &txreq, extra_count, idx);
1011			break;
1012		}
1013
 1014		/* The request must not cross a page boundary, as the payload mustn't fragment. */
1015		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
1016			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
1017				   txreq.offset, txreq.size);
1018			xenvif_fatal_tx_err(queue->vif);
1019			break;
1020		}
1021
1022		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
1023			data_len = txreq.size;
1024
1025		skb = xenvif_alloc_skb(data_len);
1026		if (unlikely(skb == NULL)) {
1027			netdev_dbg(queue->vif->dev,
1028				   "Can't allocate a skb in start_xmit.\n");
1029			xenvif_tx_err(queue, &txreq, extra_count, idx);
1030			break;
1031		}
1032
1033		skb_shinfo(skb)->nr_frags = ret;
1034		/* At this point shinfo->nr_frags is in fact the number of
1035		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1036		 */
1037		frag_overflow = 0;
1038		nskb = NULL;
1039		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1040			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1041			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1042			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1043			nskb = xenvif_alloc_skb(0);
1044			if (unlikely(nskb == NULL)) {
1045				skb_shinfo(skb)->nr_frags = 0;
1046				kfree_skb(skb);
1047				xenvif_tx_err(queue, &txreq, extra_count, idx);
1048				if (net_ratelimit())
1049					netdev_err(queue->vif->dev,
1050						   "Can't allocate the frag_list skb.\n");
1051				break;
1052			}
1053		}
1054
1055		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1056			struct xen_netif_extra_info *gso;
1057			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1058
1059			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1060				/* Failure in xenvif_set_skb_gso is fatal. */
1061				skb_shinfo(skb)->nr_frags = 0;
1062				kfree_skb(skb);
1063				kfree_skb(nskb);
1064				break;
1065			}
1066		}
1067
1068		if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
1069			struct xen_netif_extra_info *extra;
1070			enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
1071
1072			extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
1073
1074			switch (extra->u.hash.type) {
1075			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
1076			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
1077				type = PKT_HASH_TYPE_L3;
1078				break;
1079
1080			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
1081			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
1082				type = PKT_HASH_TYPE_L4;
1083				break;
1084
1085			default:
1086				break;
1087			}
1088
1089			if (type != PKT_HASH_TYPE_NONE)
1090				skb_set_hash(skb,
1091					     *(u32 *)extra->u.hash.value,
1092					     type);
1093		}
1094
1095		xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
1096				    map_ops, frag_overflow, nskb, extra_count,
1097				    data_len);
1098
1099		__skb_queue_tail(&queue->tx_queue, skb);
1100
1101		queue->tx.req_cons = idx;
1102	}
1103
1104	return;
1105}
1106
 1107/* Consolidate an skb with a frag_list into a brand new one with local pages
 1108 * in its frags. Returns 0, or -ENOMEM if new pages can't be allocated.
1109 */
1110static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1111{
1112	unsigned int offset = skb_headlen(skb);
1113	skb_frag_t frags[MAX_SKB_FRAGS];
1114	int i, f;
1115	struct ubuf_info *uarg;
1116	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1117
1118	queue->stats.tx_zerocopy_sent += 2;
1119	queue->stats.tx_frag_overflow++;
1120
1121	xenvif_fill_frags(queue, nskb);
1122	/* Subtract frags size, we will correct it later */
1123	skb->truesize -= skb->data_len;
1124	skb->len += nskb->len;
1125	skb->data_len += nskb->len;
1126
1127	/* create a brand new frags array and coalesce there */
1128	for (i = 0; offset < skb->len; i++) {
1129		struct page *page;
1130		unsigned int len;
1131
1132		BUG_ON(i >= MAX_SKB_FRAGS);
1133		page = alloc_page(GFP_ATOMIC);
1134		if (!page) {
1135			int j;
1136			skb->truesize += skb->data_len;
1137			for (j = 0; j < i; j++)
1138				put_page(skb_frag_page(&frags[j]));
1139			return -ENOMEM;
1140		}
1141
1142		if (offset + PAGE_SIZE < skb->len)
1143			len = PAGE_SIZE;
1144		else
1145			len = skb->len - offset;
1146		if (skb_copy_bits(skb, offset, page_address(page), len))
1147			BUG();
1148
1149		offset += len;
1150		skb_frag_fill_page_desc(&frags[i], page, 0, len);
1151	}
1152
1153	/* Release all the original (foreign) frags. */
1154	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1155		skb_frag_unref(skb, f);
1156	uarg = skb_shinfo(skb)->destructor_arg;
1157	/* increase inflight counter to offset decrement in callback */
1158	atomic_inc(&queue->inflight_packets);
1159	uarg->callback(NULL, uarg, true);
1160	skb_shinfo(skb)->destructor_arg = NULL;
1161
1162	/* Fill the skb with the new (local) frags. */
1163	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1164	skb_shinfo(skb)->nr_frags = i;
1165	skb->truesize += i * PAGE_SIZE;
1166
1167	return 0;
1168}
1169
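     /* Second half of tx processing: for each skb queued by
      * xenvif_tx_build_gops(), check the grant operation results, attach the
      * mapped frags, fix up checksum/GSO state and hand the packet to the
      * network stack.
      */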
1170static int xenvif_tx_submit(struct xenvif_queue *queue)
1171{
1172	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1173	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1174	struct sk_buff *skb;
1175	int work_done = 0;
1176
1177	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1178		struct xen_netif_tx_request *txp;
1179		u16 pending_idx;
1180
1181		pending_idx = copy_pending_idx(skb, 0);
1182		txp = &queue->pending_tx_info[pending_idx].req;
1183
1184		/* Check the remap error code. */
1185		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1186			/* If there was an error, xenvif_tx_check_gop is
1187			 * expected to release all the frags which were mapped,
1188			 * so kfree_skb shouldn't do it again
1189			 */
1190			skb_shinfo(skb)->nr_frags = 0;
1191			if (skb_has_frag_list(skb)) {
1192				struct sk_buff *nskb =
1193						skb_shinfo(skb)->frag_list;
1194				skb_shinfo(nskb)->nr_frags = 0;
1195			}
1196			kfree_skb(skb);
1197			continue;
1198		}
1199
1200		if (txp->flags & XEN_NETTXF_csum_blank)
1201			skb->ip_summed = CHECKSUM_PARTIAL;
1202		else if (txp->flags & XEN_NETTXF_data_validated)
1203			skb->ip_summed = CHECKSUM_UNNECESSARY;
1204
1205		xenvif_fill_frags(queue, skb);
1206
1207		if (unlikely(skb_has_frag_list(skb))) {
1208			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1209			xenvif_skb_zerocopy_prepare(queue, nskb);
1210			if (xenvif_handle_frag_list(queue, skb)) {
1211				if (net_ratelimit())
1212					netdev_err(queue->vif->dev,
1213						   "Not enough memory to consolidate frag_list!\n");
1214				xenvif_skb_zerocopy_prepare(queue, skb);
1215				kfree_skb(skb);
1216				continue;
1217			}
1218			/* Copied all the bits from the frag list -- free it. */
1219			skb_frag_list_init(skb);
1220			kfree_skb(nskb);
1221		}
1222
1223		skb->dev      = queue->vif->dev;
1224		skb->protocol = eth_type_trans(skb, skb->dev);
1225		skb_reset_network_header(skb);
1226
1227		if (checksum_setup(queue, skb)) {
1228			netdev_dbg(queue->vif->dev,
1229				   "Can't setup checksum in net_tx_action\n");
1230			/* We have to set this flag to trigger the callback */
1231			if (skb_shinfo(skb)->destructor_arg)
1232				xenvif_skb_zerocopy_prepare(queue, skb);
1233			kfree_skb(skb);
1234			continue;
1235		}
1236
1237		skb_probe_transport_header(skb);
1238
1239		/* If the packet is GSO then we will have just set up the
1240		 * transport header offset in checksum_setup so it's now
1241		 * straightforward to calculate gso_segs.
1242		 */
1243		if (skb_is_gso(skb)) {
1244			int mss, hdrlen;
1245
1246			/* GSO implies having the L4 header. */
1247			WARN_ON_ONCE(!skb_transport_header_was_set(skb));
1248			if (unlikely(!skb_transport_header_was_set(skb))) {
1249				kfree_skb(skb);
1250				continue;
1251			}
1252
1253			mss = skb_shinfo(skb)->gso_size;
1254			hdrlen = skb_tcp_all_headers(skb);
1255
1256			skb_shinfo(skb)->gso_segs =
1257				DIV_ROUND_UP(skb->len - hdrlen, mss);
1258		}
1259
1260		queue->stats.rx_bytes += skb->len;
1261		queue->stats.rx_packets++;
1262
1263		work_done++;
1264
1265		/* Set this flag right before netif_receive_skb, otherwise
1266		 * someone might think this packet already left netback, and
1267		 * do a skb_copy_ubufs while we are still in control of the
1268		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
1269		 */
1270		if (skb_shinfo(skb)->destructor_arg) {
1271			xenvif_skb_zerocopy_prepare(queue, skb);
1272			queue->stats.tx_zerocopy_sent++;
1273		}
1274
1275		netif_receive_skb(skb);
1276	}
1277
1278	return work_done;
1279}
1280
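     /* skb zerocopy completion callback: queue the pending indices of this
      * skb's slots on the dealloc ring so the dealloc kthread can unmap the
      * grants and release the slots.
      */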
1281void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
1282			      bool zerocopy_success)
1283{
1284	unsigned long flags;
1285	pending_ring_idx_t index;
1286	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
1287	struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1288
1289	/* This is the only place where we grab this lock, to protect callbacks
1290	 * from each other.
1291	 */
1292	spin_lock_irqsave(&queue->callback_lock, flags);
1293	do {
1294		u16 pending_idx = ubuf->desc;
1295		ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
1296		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1297			MAX_PENDING_REQS);
1298		index = pending_index(queue->dealloc_prod);
1299		queue->dealloc_ring[index] = pending_idx;
1300		/* Sync with xenvif_tx_dealloc_action:
1301		 * insert idx then incr producer.
1302		 */
1303		smp_wmb();
1304		queue->dealloc_prod++;
1305	} while (ubuf);
1306	spin_unlock_irqrestore(&queue->callback_lock, flags);
1307
1308	if (likely(zerocopy_success))
1309		queue->stats.tx_zerocopy_success++;
1310	else
1311		queue->stats.tx_zerocopy_fail++;
1312	xenvif_skb_zerocopy_complete(queue);
1313}
1314
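     /* Drain the dealloc ring filled by xenvif_zerocopy_callback(): unmap the
      * corresponding grants in one batch and then release the pending slots.
      */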
1315static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1316{
1317	struct gnttab_unmap_grant_ref *gop;
1318	pending_ring_idx_t dc, dp;
1319	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1320	unsigned int i = 0;
1321
1322	dc = queue->dealloc_cons;
1323	gop = queue->tx_unmap_ops;
1324
1325	/* Free up any grants we have finished using */
1326	do {
1327		dp = queue->dealloc_prod;
1328
1329		/* Ensure we see all indices enqueued by all
1330		 * xenvif_zerocopy_callback().
1331		 */
1332		smp_rmb();
1333
1334		while (dc != dp) {
1335			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1336			pending_idx =
1337				queue->dealloc_ring[pending_index(dc++)];
1338
1339			pending_idx_release[gop - queue->tx_unmap_ops] =
1340				pending_idx;
1341			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1342				queue->mmap_pages[pending_idx];
1343			gnttab_set_unmap_op(gop,
1344					    idx_to_kaddr(queue, pending_idx),
1345					    GNTMAP_host_map,
1346					    queue->grant_tx_handle[pending_idx]);
1347			xenvif_grant_handle_reset(queue, pending_idx);
1348			++gop;
1349		}
1350
1351	} while (dp != queue->dealloc_prod);
1352
1353	queue->dealloc_cons = dc;
1354
1355	if (gop - queue->tx_unmap_ops > 0) {
1356		int ret;
1357		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1358					NULL,
1359					queue->pages_to_unmap,
1360					gop - queue->tx_unmap_ops);
1361		if (ret) {
1362			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1363				   gop - queue->tx_unmap_ops, ret);
1364			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1365				if (gop[i].status != GNTST_okay)
1366					netdev_err(queue->vif->dev,
1367						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
1368						   gop[i].host_addr,
1369						   gop[i].handle,
1370						   gop[i].status);
1371			}
1372			BUG();
1373		}
1374	}
1375
1376	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1377		xenvif_idx_release(queue, pending_idx_release[i],
1378				   XEN_NETIF_RSP_OKAY);
1379}
1380
1381
1382/* Called after netfront has transmitted */
1383int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1384{
1385	unsigned nr_mops = 0, nr_cops = 0;
1386	int work_done, ret;
1387
1388	if (unlikely(!tx_work_todo(queue)))
1389		return 0;
1390
1391	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1392
1393	if (nr_cops == 0)
1394		return 0;
1395
1396	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1397	if (nr_mops != 0) {
1398		ret = gnttab_map_refs(queue->tx_map_ops,
1399				      NULL,
1400				      queue->pages_to_map,
1401				      nr_mops);
1402		if (ret) {
1403			unsigned int i;
1404
1405			netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
1406				   nr_mops, ret);
1407			for (i = 0; i < nr_mops; ++i)
1408				WARN_ON_ONCE(queue->tx_map_ops[i].status ==
1409				             GNTST_okay);
1410		}
1411	}
1412
1413	work_done = xenvif_tx_submit(queue);
1414
1415	return work_done;
1416}
1417
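     /* Write a tx response (plus NULL responses for any extra-info slots)
      * into the shared ring; called with the queue's response_lock held.
      */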
1418static void _make_tx_response(struct xenvif_queue *queue,
1419			     const struct xen_netif_tx_request *txp,
1420			     unsigned int extra_count,
1421			     s8 status)
1422{
1423	RING_IDX i = queue->tx.rsp_prod_pvt;
1424	struct xen_netif_tx_response *resp;
1425
1426	resp = RING_GET_RESPONSE(&queue->tx, i);
1427	resp->id     = txp->id;
1428	resp->status = status;
1429
1430	while (extra_count-- != 0)
1431		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1432
1433	queue->tx.rsp_prod_pvt = ++i;
1434}
1435
1436static void push_tx_responses(struct xenvif_queue *queue)
1437{
1438	int notify;
1439
1440	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1441	if (notify)
1442		notify_remote_via_irq(queue->tx_irq);
1443}
1444
1445static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1446			       s8 status)
1447{
1448	struct pending_tx_info *pending_tx_info;
1449	pending_ring_idx_t index;
1450	unsigned long flags;
1451
1452	pending_tx_info = &queue->pending_tx_info[pending_idx];
1453
1454	spin_lock_irqsave(&queue->response_lock, flags);
1455
1456	_make_tx_response(queue, &pending_tx_info->req,
1457			  pending_tx_info->extra_count, status);
1458
 1459	/* Release the pending index before pushing the Tx response so
 1460	 * it's available before a new Tx request is pushed by the
1461	 * frontend.
1462	 */
1463	index = pending_index(queue->pending_prod++);
1464	queue->pending_ring[index] = pending_idx;
1465
1466	push_tx_responses(queue);
1467
1468	spin_unlock_irqrestore(&queue->response_lock, flags);
1469}
1470
1471static void make_tx_response(struct xenvif_queue *queue,
1472			     const struct xen_netif_tx_request *txp,
1473			     unsigned int extra_count,
1474			     s8 status)
1475{
1476	unsigned long flags;
1477
1478	spin_lock_irqsave(&queue->response_lock, flags);
1479
1480	_make_tx_response(queue, txp, extra_count, status);
1481	push_tx_responses(queue);
1482
1483	spin_unlock_irqrestore(&queue->response_lock, flags);
1484}
1485
1486static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1487{
1488	int ret;
1489	struct gnttab_unmap_grant_ref tx_unmap_op;
1490
1491	gnttab_set_unmap_op(&tx_unmap_op,
1492			    idx_to_kaddr(queue, pending_idx),
1493			    GNTMAP_host_map,
1494			    queue->grant_tx_handle[pending_idx]);
1495	xenvif_grant_handle_reset(queue, pending_idx);
1496
1497	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1498				&queue->mmap_pages[pending_idx], 1);
1499	if (ret) {
1500		netdev_err(queue->vif->dev,
1501			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1502			   ret,
1503			   pending_idx,
1504			   tx_unmap_op.host_addr,
1505			   tx_unmap_op.handle,
1506			   tx_unmap_op.status);
1507		BUG();
1508	}
1509}
1510
1511static inline int tx_work_todo(struct xenvif_queue *queue)
1512{
1513	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1514		return 1;
1515
1516	return 0;
1517}
1518
1519static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1520{
1521	return queue->dealloc_cons != queue->dealloc_prod;
1522}
1523
1524void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
1525{
1526	if (queue->tx.sring)
1527		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1528					queue->tx.sring);
1529	if (queue->rx.sring)
1530		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1531					queue->rx.sring);
1532}
1533
1534int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
1535				   grant_ref_t tx_ring_ref,
1536				   grant_ref_t rx_ring_ref)
1537{
1538	void *addr;
1539	struct xen_netif_tx_sring *txs;
1540	struct xen_netif_rx_sring *rxs;
1541	RING_IDX rsp_prod, req_prod;
1542	int err;
1543
1544	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1545				     &tx_ring_ref, 1, &addr);
1546	if (err)
1547		goto err;
1548
1549	txs = (struct xen_netif_tx_sring *)addr;
1550	rsp_prod = READ_ONCE(txs->rsp_prod);
1551	req_prod = READ_ONCE(txs->req_prod);
1552
1553	BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
1554
1555	err = -EIO;
1556	if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
1557		goto err;
1558
1559	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1560				     &rx_ring_ref, 1, &addr);
1561	if (err)
1562		goto err;
1563
1564	rxs = (struct xen_netif_rx_sring *)addr;
1565	rsp_prod = READ_ONCE(rxs->rsp_prod);
1566	req_prod = READ_ONCE(rxs->req_prod);
1567
1568	BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
1569
1570	err = -EIO;
1571	if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
1572		goto err;
1573
1574	return 0;
1575
1576err:
1577	xenvif_unmap_frontend_data_rings(queue);
1578	return err;
1579}
1580
1581static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1582{
1583	/* Dealloc thread must remain running until all inflight
1584	 * packets complete.
1585	 */
1586	return kthread_should_stop() &&
1587		!atomic_read(&queue->inflight_packets);
1588}
1589
1590int xenvif_dealloc_kthread(void *data)
1591{
1592	struct xenvif_queue *queue = data;
1593
1594	for (;;) {
1595		wait_event_interruptible(queue->dealloc_wq,
1596					 tx_dealloc_work_todo(queue) ||
1597					 xenvif_dealloc_kthread_should_stop(queue));
1598		if (xenvif_dealloc_kthread_should_stop(queue))
1599			break;
1600
1601		xenvif_tx_dealloc_action(queue);
1602		cond_resched();
1603	}
1604
 1605	/* Unmap anything remaining */
1606	if (tx_dealloc_work_todo(queue))
1607		xenvif_tx_dealloc_action(queue);
1608
1609	return 0;
1610}
1611
1612static void make_ctrl_response(struct xenvif *vif,
1613			       const struct xen_netif_ctrl_request *req,
1614			       u32 status, u32 data)
1615{
1616	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
1617	struct xen_netif_ctrl_response rsp = {
1618		.id = req->id,
1619		.type = req->type,
1620		.status = status,
1621		.data = data,
1622	};
1623
1624	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
1625	vif->ctrl.rsp_prod_pvt = ++idx;
1626}
1627
1628static void push_ctrl_response(struct xenvif *vif)
1629{
1630	int notify;
1631
1632	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
1633	if (notify)
1634		notify_remote_via_irq(vif->ctrl_irq);
1635}
1636
1637static void process_ctrl_request(struct xenvif *vif,
1638				 const struct xen_netif_ctrl_request *req)
1639{
1640	u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
1641	u32 data = 0;
1642
1643	switch (req->type) {
1644	case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
1645		status = xenvif_set_hash_alg(vif, req->data[0]);
1646		break;
1647
1648	case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
1649		status = xenvif_get_hash_flags(vif, &data);
1650		break;
1651
1652	case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
1653		status = xenvif_set_hash_flags(vif, req->data[0]);
1654		break;
1655
1656	case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
1657		status = xenvif_set_hash_key(vif, req->data[0],
1658					     req->data[1]);
1659		break;
1660
1661	case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
1662		status = XEN_NETIF_CTRL_STATUS_SUCCESS;
1663		data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
1664		break;
1665
1666	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
1667		status = xenvif_set_hash_mapping_size(vif,
1668						      req->data[0]);
1669		break;
1670
1671	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
1672		status = xenvif_set_hash_mapping(vif, req->data[0],
1673						 req->data[1],
1674						 req->data[2]);
1675		break;
1676
1677	default:
1678		break;
1679	}
1680
1681	make_ctrl_response(vif, req, status, data);
1682	push_ctrl_response(vif);
1683}
1684
1685static void xenvif_ctrl_action(struct xenvif *vif)
1686{
1687	for (;;) {
1688		RING_IDX req_prod, req_cons;
1689
1690		req_prod = vif->ctrl.sring->req_prod;
1691		req_cons = vif->ctrl.req_cons;
1692
1693		/* Make sure we can see requests before we process them. */
1694		rmb();
1695
1696		if (req_cons == req_prod)
1697			break;
1698
1699		while (req_cons != req_prod) {
1700			struct xen_netif_ctrl_request req;
1701
1702			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
1703			req_cons++;
1704
1705			process_ctrl_request(vif, &req);
1706		}
1707
1708		vif->ctrl.req_cons = req_cons;
1709		vif->ctrl.sring->req_event = req_cons + 1;
1710	}
1711}
1712
1713static bool xenvif_ctrl_work_todo(struct xenvif *vif)
1714{
1715	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
1716		return true;
1717
1718	return false;
1719}
1720
1721irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
1722{
1723	struct xenvif *vif = data;
1724	unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
1725
1726	while (xenvif_ctrl_work_todo(vif)) {
1727		xenvif_ctrl_action(vif);
1728		eoi_flag = 0;
1729	}
1730
1731	xen_irq_lateeoi(irq, eoi_flag);
1732
1733	return IRQ_HANDLED;
1734}
1735
1736static int __init netback_init(void)
1737{
1738	int rc = 0;
1739
1740	if (!xen_domain())
1741		return -ENODEV;
1742
 1743	/* Allow as many queues as there are CPUs, but at most 8 if the user has
 1744	 * not specified a value.
1745	 */
1746	if (xenvif_max_queues == 0)
1747		xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
1748					  num_online_cpus());
1749
1750	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1751		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1752			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1753		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1754	}
1755
1756	rc = xenvif_xenbus_init();
1757	if (rc)
1758		goto failed_init;
1759
1760#ifdef CONFIG_DEBUG_FS
1761	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
1762#endif /* CONFIG_DEBUG_FS */
1763
1764	return 0;
1765
1766failed_init:
1767	return rc;
1768}
1769
1770module_init(netback_init);
1771
1772static void __exit netback_fini(void)
1773{
1774#ifdef CONFIG_DEBUG_FS
1775	debugfs_remove_recursive(xen_netback_dbg_root);
1776#endif /* CONFIG_DEBUG_FS */
1777	xenvif_xenbus_fini();
1778}
1779module_exit(netback_fini);
1780
1781MODULE_DESCRIPTION("Xen backend network device module");
1782MODULE_LICENSE("Dual BSD/GPL");
1783MODULE_ALIAS("xen-backend:vif");
v6.2
  82module_param(fatal_skb_slots, uint, 0444);
  83
  84/* The amount to copy out of the first guest Tx slot into the skb's
  85 * linear area.  If the first slot has more data, it will be mapped
  86 * and put into the first frag.
  87 *
  88 * This is sized to avoid pulling headers from the frags for most
  89 * TCP/IP packets.
  90 */
  91#define XEN_NETBACK_TX_COPY_LEN 128
  92
  93/* This is the maximum number of flows in the hash cache. */
  94#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
  95unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
  96module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
  97MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
  98
  99/* This module parameter indicates that data destined for xen-netfront
 100 * must be placed at the XDP_PACKET_HEADROOM offset
 101 * required for XDP processing.
 102 */
 103bool provides_xdp_headroom = true;
 104module_param(provides_xdp_headroom, bool, 0644);
 105
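/* Illustrative usage (hypothetical values): these knobs are ordinary module
 * parameters, so a host administrator could, for example, load the module with
 *
 *     modprobe xen-netback hash_cache_size=128 provides_xdp_headroom=0
 *
 * The parameters registered with 0644 permissions can typically also be
 * adjusted at runtime under /sys/module/xen_netback/parameters/.
 */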
 106static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 107			       u8 status);
 108
 109static void make_tx_response(struct xenvif_queue *queue,
 110			     struct xen_netif_tx_request *txp,
 111			     unsigned int extra_count,
 112			     s8       st);
 113static void push_tx_responses(struct xenvif_queue *queue);
 114
 115static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
 116
 117static inline int tx_work_todo(struct xenvif_queue *queue);
 118
 119static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
 120				       u16 idx)
 121{
 122	return page_to_pfn(queue->mmap_pages[idx]);
 123}
 124
 125static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
 126					 u16 idx)
 127{
 128	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
 129}
 130
 131#define callback_param(vif, pending_idx) \
 132	(vif->pending_tx_info[pending_idx].callback_struct)
 133
 134/* Find the containing queue structure from a pointer into its pending_tx_info array
 135 */
 136static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
 137{
 138	u16 pending_idx = ubuf->desc;
 139	struct pending_tx_info *temp =
 140		container_of(ubuf, struct pending_tx_info, callback_struct);
 141	return container_of(temp - pending_idx,
 142			    struct xenvif_queue,
 143			    pending_tx_info[0]);
 144}
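/* How the lookup above works: the ubuf_info_msgzc is embedded in
 * pending_tx_info[pending_idx].callback_struct, so container_of() first
 * recovers that pending_tx_info entry; stepping back pending_idx array
 * elements then yields &pending_tx_info[0], whose enclosing structure is the
 * xenvif_queue itself.
 */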
 145
 146static u16 frag_get_pending_idx(skb_frag_t *frag)
 147{
 148	return (u16)skb_frag_off(frag);
 149}
 150
 151static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
 152{
 153	skb_frag_off_set(frag, pending_idx);
 154}
 155
 156static inline pending_ring_idx_t pending_index(unsigned i)
 157{
 158	return i & (MAX_PENDING_REQS-1);
 159}
 160
 161void xenvif_kick_thread(struct xenvif_queue *queue)
 162{
 163	wake_up(&queue->wq);
 164}
 165
 166void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
 167{
 168	int more_to_do;
 169
 170	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
 171
 172	if (more_to_do)
 173		napi_schedule(&queue->napi);
 174	else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
 175				     &queue->eoi_pending) &
 176		 (NETBK_TX_EOI | NETBK_COMMON_EOI))
 177		xen_irq_lateeoi(queue->tx_irq, 0);
 178}
 179
 180static void tx_add_credit(struct xenvif_queue *queue)
 181{
 182	unsigned long max_burst, max_credit;
 183
 184	/*
 185	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
 186	 * Otherwise the interface can seize up due to insufficient credit.
 187	 */
 188	max_burst = max(131072UL, queue->credit_bytes);
 189
 190	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
 191	max_credit = queue->remaining_credit + queue->credit_bytes;
 192	if (max_credit < queue->remaining_credit)
 193		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
 194
 195	queue->remaining_credit = min(max_credit, max_burst);
 196	queue->rate_limited = false;
 197}
 198
 199void xenvif_tx_credit_callback(struct timer_list *t)
 200{
 201	struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
 202	tx_add_credit(queue);
 203	xenvif_napi_schedule_or_enable_events(queue);
 204}
 205
 206static void xenvif_tx_err(struct xenvif_queue *queue,
 207			  struct xen_netif_tx_request *txp,
 208			  unsigned int extra_count, RING_IDX end)
 209{
 210	RING_IDX cons = queue->tx.req_cons;
 211	unsigned long flags;
 212
 213	do {
 214		spin_lock_irqsave(&queue->response_lock, flags);
 215		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
 216		push_tx_responses(queue);
 217		spin_unlock_irqrestore(&queue->response_lock, flags);
 218		if (cons == end)
 219			break;
 220		RING_COPY_REQUEST(&queue->tx, cons++, txp);
 221		extra_count = 0; /* only the first frag can have extras */
 222	} while (1);
 223	queue->tx.req_cons = cons;
 224}
 225
 226static void xenvif_fatal_tx_err(struct xenvif *vif)
 227{
 228	netdev_err(vif->dev, "fatal error; disabling device\n");
 229	vif->disabled = true;
 230	/* Disable the vif from queue 0's kthread */
 231	if (vif->num_queues)
 232		xenvif_kick_thread(&vif->queues[0]);
 233}
 234
 235static int xenvif_count_requests(struct xenvif_queue *queue,
 236				 struct xen_netif_tx_request *first,
 237				 unsigned int extra_count,
 238				 struct xen_netif_tx_request *txp,
 239				 int work_to_do)
 240{
 241	RING_IDX cons = queue->tx.req_cons;
 242	int slots = 0;
 243	int drop_err = 0;
 244	int more_data;
 245
 246	if (!(first->flags & XEN_NETTXF_more_data))
 247		return 0;
 248
 249	do {
 250		struct xen_netif_tx_request dropped_tx = { 0 };
 251
 252		if (slots >= work_to_do) {
 253			netdev_err(queue->vif->dev,
 254				   "Asked for %d slots but exceeds this limit\n",
 255				   work_to_do);
 256			xenvif_fatal_tx_err(queue->vif);
 257			return -ENODATA;
 258		}
 259
 260		/* This guest is really using too many slots and is
 261		 * considered malicious.
 262		 */
 263		if (unlikely(slots >= fatal_skb_slots)) {
 264			netdev_err(queue->vif->dev,
 265				   "Malicious frontend using %d slots, threshold %u\n",
 266				   slots, fatal_skb_slots);
 267			xenvif_fatal_tx_err(queue->vif);
 268			return -E2BIG;
 269		}
 270
 271		/* The Xen network protocol had an implicit dependency on
 272		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
 273		 * the historical MAX_SKB_FRAGS value 18 to honor the
 274		 * same behavior as before. Any packet using more than
 275		 * 18 slots but fewer than fatal_skb_slots slots is
 276		 * dropped.
 277		 */
 278		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
 279			if (net_ratelimit())
 280				netdev_dbg(queue->vif->dev,
 281					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
 282					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
 283			drop_err = -E2BIG;
 284		}
 285
 286		if (drop_err)
 287			txp = &dropped_tx;
 288
 289		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
 290
 291		/* If the guest submitted a frame >= 64 KiB then
 292		 * first->size overflowed and the following slots will
 293		 * appear to be larger than the frame.
 294		 *
 295		 * This cannot be a fatal error as there are buggy
 296		 * frontends that do this.
 297		 *
 298		 * Consume all slots and drop the packet.
 299		 */
 300		if (!drop_err && txp->size > first->size) {
 301			if (net_ratelimit())
 302				netdev_dbg(queue->vif->dev,
 303					   "Invalid tx request, slot size %u > remaining size %u\n",
 304					   txp->size, first->size);
 305			drop_err = -EIO;
 306		}
 307
 308		first->size -= txp->size;
 309		slots++;
 310
 311		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
 312			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
 313				 txp->offset, txp->size);
 314			xenvif_fatal_tx_err(queue->vif);
 315			return -EINVAL;
 316		}
 317
 318		more_data = txp->flags & XEN_NETTXF_more_data;
 319
 320		if (!drop_err)
 321			txp++;
 322
 323	} while (more_data);
 324
 325	if (drop_err) {
 326		xenvif_tx_err(queue, first, extra_count, cons + slots);
 327		return drop_err;
 328	}
 329
 330	return slots;
 331}
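/* Summary of the thresholds above: a frame needing more than
 * XEN_NETBK_LEGACY_SLOTS_MAX slots is consumed and dropped with
 * XEN_NETIF_RSP_ERROR, while one that reaches fatal_skb_slots is treated as
 * coming from a malicious frontend and the interface is disabled via
 * xenvif_fatal_tx_err().  Exceeding work_to_do (the number of unconsumed
 * requests on the ring) is likewise fatal, since the descriptors simply are
 * not there.
 */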
 332
 333
 334struct xenvif_tx_cb {
 335	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
 336	u8 copy_count;
 337};
 338
 339#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
 340#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
 341#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
 342
 343static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 344					   u16 pending_idx,
 345					   struct xen_netif_tx_request *txp,
 346					   unsigned int extra_count,
 347					   struct gnttab_map_grant_ref *mop)
 348{
 349	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
 350	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
 351			  GNTMAP_host_map | GNTMAP_readonly,
 352			  txp->gref, queue->vif->domid);
 353
 354	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
 355	       sizeof(*txp));
 356	queue->pending_tx_info[pending_idx].extra_count = extra_count;
 357}
 358
 359static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 360{
 361	struct sk_buff *skb =
 362		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 363			  GFP_ATOMIC | __GFP_NOWARN);
 364	if (unlikely(skb == NULL))
 365		return NULL;
 366
 367	/* Packets passed to netif_rx() must have some headroom. */
 368	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 369
 370	/* Initialize it here to avoid later surprises */
 371	skb_shinfo(skb)->destructor_arg = NULL;
 372
 373	return skb;
 374}
 375
 376static void xenvif_get_requests(struct xenvif_queue *queue,
 377				struct sk_buff *skb,
 378				struct xen_netif_tx_request *first,
 379				struct xen_netif_tx_request *txfrags,
 380			        unsigned *copy_ops,
 381			        unsigned *map_ops,
 382				unsigned int frag_overflow,
 383				struct sk_buff *nskb,
 384				unsigned int extra_count,
 385				unsigned int data_len)
 386{
 387	struct skb_shared_info *shinfo = skb_shinfo(skb);
 388	skb_frag_t *frags = shinfo->frags;
 389	u16 pending_idx;
 390	pending_ring_idx_t index;
 391	unsigned int nr_slots;
 392	struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
 393	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
 394	struct xen_netif_tx_request *txp = first;
 395
 396	nr_slots = shinfo->nr_frags + 1;
 397
 398	copy_count(skb) = 0;
 399
 400	/* Create copy ops for exactly data_len bytes into the skb head. */
 401	__skb_put(skb, data_len);
 402	while (data_len > 0) {
 403		int amount = data_len > txp->size ? txp->size : data_len;
 404
 405		cop->source.u.ref = txp->gref;
 406		cop->source.domid = queue->vif->domid;
 407		cop->source.offset = txp->offset;
 408
 409		cop->dest.domid = DOMID_SELF;
 410		cop->dest.offset = (offset_in_page(skb->data +
 411						   skb_headlen(skb) -
 412						   data_len)) & ~XEN_PAGE_MASK;
 413		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
 414				               - data_len);
 415
 416		cop->len = amount;
 417		cop->flags = GNTCOPY_source_gref;
 418
 419		index = pending_index(queue->pending_cons);
 420		pending_idx = queue->pending_ring[index];
 421		callback_param(queue, pending_idx).ctx = NULL;
 422		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
 423		copy_count(skb)++;
 424
 425		cop++;
 426		data_len -= amount;
 427
 428		if (amount == txp->size) {
 429			/* The copy op covered the full tx_request */
 430
 431			memcpy(&queue->pending_tx_info[pending_idx].req,
 432			       txp, sizeof(*txp));
 433			queue->pending_tx_info[pending_idx].extra_count =
 434				(txp == first) ? extra_count : 0;
 435
 436			if (txp == first)
 437				txp = txfrags;
 438			else
 439				txp++;
 440			queue->pending_cons++;
 441			nr_slots--;
 442		} else {
 443			/* The copy op partially covered the tx_request.
 444			 * The remainder will be mapped.
 445			 */
 446			txp->offset += amount;
 447			txp->size -= amount;
 448		}
 449	}
 450
 451	for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
 452	     shinfo->nr_frags++, gop++) {
 453		index = pending_index(queue->pending_cons++);
 454		pending_idx = queue->pending_ring[index];
 455		xenvif_tx_create_map_op(queue, pending_idx, txp,
 456				        txp == first ? extra_count : 0, gop);
 457		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
 458
 459		if (txp == first)
 460			txp = txfrags;
 461		else
 462			txp++;
 463	}
 464
 465	if (frag_overflow) {
 466
 467		shinfo = skb_shinfo(nskb);
 468		frags = shinfo->frags;
 469
 470		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
 471		     shinfo->nr_frags++, txp++, gop++) {
 472			index = pending_index(queue->pending_cons++);
 473			pending_idx = queue->pending_ring[index];
 474			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
 475						gop);
 476			frag_set_pending_idx(&frags[shinfo->nr_frags],
 477					     pending_idx);
 478		}
 479
 480		skb_shinfo(skb)->frag_list = nskb;
 481	}
 482
 483	(*copy_ops) = cop - queue->tx_copy_ops;
 484	(*map_ops) = gop - queue->tx_map_ops;
 485}
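/* Illustrative picture of the copy/map split above, assuming
 * XEN_NETBACK_TX_COPY_LEN of 128 as defined earlier: for a request whose
 * first slot holds 1000 bytes, the first 128 bytes are grant-copied into the
 * skb's linear area while the remaining 872 bytes of that slot (and every
 * further slot) are grant-mapped and attached as frags.  Slots beyond
 * MAX_SKB_FRAGS become frags of nskb, which is hung off
 * skb_shinfo(skb)->frag_list and coalesced later in
 * xenvif_handle_frag_list().
 */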
 486
 487static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
 488					   u16 pending_idx,
 489					   grant_handle_t handle)
 490{
 491	if (unlikely(queue->grant_tx_handle[pending_idx] !=
 492		     NETBACK_INVALID_HANDLE)) {
 493		netdev_err(queue->vif->dev,
 494			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
 495			   pending_idx);
 496		BUG();
 497	}
 498	queue->grant_tx_handle[pending_idx] = handle;
 499}
 500
 501static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
 502					     u16 pending_idx)
 503{
 504	if (unlikely(queue->grant_tx_handle[pending_idx] ==
 505		     NETBACK_INVALID_HANDLE)) {
 506		netdev_err(queue->vif->dev,
 507			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
 508			   pending_idx);
 509		BUG();
 510	}
 511	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
 512}
 513
 514static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 515			       struct sk_buff *skb,
 516			       struct gnttab_map_grant_ref **gopp_map,
 517			       struct gnttab_copy **gopp_copy)
 518{
 519	struct gnttab_map_grant_ref *gop_map = *gopp_map;
 520	u16 pending_idx;
 521	/* This always points to the shinfo of the skb being checked, which
 522	 * could be either the first or the one on the frag_list
 523	 */
 524	struct skb_shared_info *shinfo = skb_shinfo(skb);
 525	/* If this is non-NULL, we are currently checking the frag_list skb, and
 526	 * this points to the shinfo of the first one
 527	 */
 528	struct skb_shared_info *first_shinfo = NULL;
 529	int nr_frags = shinfo->nr_frags;
 530	const bool sharedslot = nr_frags &&
 531				frag_get_pending_idx(&shinfo->frags[0]) ==
 532				    copy_pending_idx(skb, copy_count(skb) - 1);
 533	int i, err = 0;
 534
 535	for (i = 0; i < copy_count(skb); i++) {
 536		int newerr;
 537
 538		/* Check status of header. */
 539		pending_idx = copy_pending_idx(skb, i);
 540
 541		newerr = (*gopp_copy)->status;
 542		if (likely(!newerr)) {
 543			/* The first frag might still have this slot mapped */
 544			if (i < copy_count(skb) - 1 || !sharedslot)
 545				xenvif_idx_release(queue, pending_idx,
 546						   XEN_NETIF_RSP_OKAY);
 547		} else {
 548			err = newerr;
 549			if (net_ratelimit())
 550				netdev_dbg(queue->vif->dev,
 551					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
 552					   (*gopp_copy)->status,
 553					   pending_idx,
 554					   (*gopp_copy)->source.u.ref);
 555			/* The first frag might still have this slot mapped */
 556			if (i < copy_count(skb) - 1 || !sharedslot)
 557				xenvif_idx_release(queue, pending_idx,
 558						   XEN_NETIF_RSP_ERROR);
 559		}
 560		(*gopp_copy)++;
 561	}
 562
 563check_frags:
 564	for (i = 0; i < nr_frags; i++, gop_map++) {
 565		int j, newerr;
 566
 567		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 568
 569		/* Check error status: if okay then remember grant handle. */
 570		newerr = gop_map->status;
 571
 572		if (likely(!newerr)) {
 573			xenvif_grant_handle_set(queue,
 574						pending_idx,
 575						gop_map->handle);
 576			/* Had a previous error? Invalidate this fragment. */
 577			if (unlikely(err)) {
 578				xenvif_idx_unmap(queue, pending_idx);
 579				/* If the mapping of the first frag was OK, but
 580				 * the header's copy failed, and they are
 581				 * sharing a slot, send an error
 582				 */
 583				if (i == 0 && !first_shinfo && sharedslot)
 584					xenvif_idx_release(queue, pending_idx,
 585							   XEN_NETIF_RSP_ERROR);
 586				else
 587					xenvif_idx_release(queue, pending_idx,
 588							   XEN_NETIF_RSP_OKAY);
 589			}
 590			continue;
 591		}
 592
 593		/* Error on this fragment: respond to client with an error. */
 594		if (net_ratelimit())
 595			netdev_dbg(queue->vif->dev,
 596				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
 597				   i,
 598				   gop_map->status,
 599				   pending_idx,
 600				   gop_map->ref);
 601
 602		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
 603
 604		/* Not the first error? Preceding frags already invalidated. */
 605		if (err)
 606			continue;
 607
 608		/* Invalidate preceding fragments of this skb. */
 609		for (j = 0; j < i; j++) {
 610			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
 611			xenvif_idx_unmap(queue, pending_idx);
 612			xenvif_idx_release(queue, pending_idx,
 613					   XEN_NETIF_RSP_OKAY);
 614		}
 615
 616		/* And if we found the error while checking the frag_list, unmap
 617		 * the first skb's frags
 618		 */
 619		if (first_shinfo) {
 620			for (j = 0; j < first_shinfo->nr_frags; j++) {
 621				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
 622				xenvif_idx_unmap(queue, pending_idx);
 623				xenvif_idx_release(queue, pending_idx,
 624						   XEN_NETIF_RSP_OKAY);
 625			}
 626		}
 627
 628		/* Remember the error: invalidate all subsequent fragments. */
 629		err = newerr;
 630	}
 631
 632	if (skb_has_frag_list(skb) && !first_shinfo) {
 633		first_shinfo = shinfo;
 634		shinfo = skb_shinfo(shinfo->frag_list);
 635		nr_frags = shinfo->nr_frags;
 636
 637		goto check_frags;
 638	}
 639
 640	*gopp_map = gop_map;
 641	return err;
 642}
 643
 644static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
 645{
 646	struct skb_shared_info *shinfo = skb_shinfo(skb);
 647	int nr_frags = shinfo->nr_frags;
 648	int i;
 649	u16 prev_pending_idx = INVALID_PENDING_IDX;
 650
 651	for (i = 0; i < nr_frags; i++) {
 652		skb_frag_t *frag = shinfo->frags + i;
 653		struct xen_netif_tx_request *txp;
 654		struct page *page;
 655		u16 pending_idx;
 656
 657		pending_idx = frag_get_pending_idx(frag);
 658
 659		/* If this is not the first frag, chain it to the previous. */
 660		if (prev_pending_idx == INVALID_PENDING_IDX)
 661			skb_shinfo(skb)->destructor_arg =
 662				&callback_param(queue, pending_idx);
 663		else
 664			callback_param(queue, prev_pending_idx).ctx =
 665				&callback_param(queue, pending_idx);
 666
 667		callback_param(queue, pending_idx).ctx = NULL;
 668		prev_pending_idx = pending_idx;
 669
 670		txp = &queue->pending_tx_info[pending_idx].req;
 671		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
 672		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
 673		skb->len += txp->size;
 674		skb->data_len += txp->size;
 675		skb->truesize += txp->size;
 676
 677		/* Take an extra reference to offset the network stack's put_page() */
 678		get_page(queue->mmap_pages[pending_idx]);
 679	}
 680}
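/* The ctx chaining above links every pending slot of the skb into a list
 * headed by skb_shinfo(skb)->destructor_arg, which is what
 * xenvif_zerocopy_callback() later walks (via ubuf->ctx) to queue each
 * pending_idx for deallocation once the skb has really left the host
 * network stack.
 */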
 681
 682static int xenvif_get_extras(struct xenvif_queue *queue,
 683			     struct xen_netif_extra_info *extras,
 684			     unsigned int *extra_count,
 685			     int work_to_do)
 686{
 687	struct xen_netif_extra_info extra;
 688	RING_IDX cons = queue->tx.req_cons;
 689
 690	do {
 691		if (unlikely(work_to_do-- <= 0)) {
 692			netdev_err(queue->vif->dev, "Missing extra info\n");
 693			xenvif_fatal_tx_err(queue->vif);
 694			return -EBADR;
 695		}
 696
 697		RING_COPY_REQUEST(&queue->tx, cons, &extra);
 698
 699		queue->tx.req_cons = ++cons;
 700		(*extra_count)++;
 701
 702		if (unlikely(!extra.type ||
 703			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 704			netdev_err(queue->vif->dev,
 705				   "Invalid extra type: %d\n", extra.type);
 706			xenvif_fatal_tx_err(queue->vif);
 707			return -EINVAL;
 708		}
 709
 710		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
 711	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 712
 713	return work_to_do;
 714}
 715
 716static int xenvif_set_skb_gso(struct xenvif *vif,
 717			      struct sk_buff *skb,
 718			      struct xen_netif_extra_info *gso)
 719{
 720	if (!gso->u.gso.size) {
 721		netdev_err(vif->dev, "GSO size must not be zero.\n");
 722		xenvif_fatal_tx_err(vif);
 723		return -EINVAL;
 724	}
 725
 726	switch (gso->u.gso.type) {
 727	case XEN_NETIF_GSO_TYPE_TCPV4:
 728		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 729		break;
 730	case XEN_NETIF_GSO_TYPE_TCPV6:
 731		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 732		break;
 733	default:
 734		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
 735		xenvif_fatal_tx_err(vif);
 736		return -EINVAL;
 737	}
 738
 739	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 740	/* gso_segs will be calculated later */
 741
 742	return 0;
 743}
 744
 745static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
 746{
 747	bool recalculate_partial_csum = false;
 748
 749	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 750	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 751	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 752	 * recalculate the partial checksum.
 753	 */
 754	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 755		queue->stats.rx_gso_checksum_fixup++;
 756		skb->ip_summed = CHECKSUM_PARTIAL;
 757		recalculate_partial_csum = true;
 758	}
 759
 760	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 761	if (skb->ip_summed != CHECKSUM_PARTIAL)
 762		return 0;
 763
 764	return skb_checksum_setup(skb, recalculate_partial_csum);
 765}
 766
 767static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
 768{
 769	u64 now = get_jiffies_64();
 770	u64 next_credit = queue->credit_window_start +
 771		msecs_to_jiffies(queue->credit_usec / 1000);
 772
 773	/* Timer could already be pending in rare cases. */
 774	if (timer_pending(&queue->credit_timeout)) {
 775		queue->rate_limited = true;
 776		return true;
 777	}
 778
 779	/* Passed the point where we can replenish credit? */
 780	if (time_after_eq64(now, next_credit)) {
 781		queue->credit_window_start = now;
 782		tx_add_credit(queue);
 783	}
 784
 785	/* Still too big to send right now? Set a callback. */
 786	if (size > queue->remaining_credit) {
 787		mod_timer(&queue->credit_timeout,
 788			  next_credit);
 789		queue->credit_window_start = next_credit;
 790		queue->rate_limited = true;
 791
 792		return true;
 793	}
 794
 795	return false;
 796}
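/* Worked example of the credit scheduler (numbers are illustrative only):
 * with credit_bytes = 100000 and credit_usec = 1000000 the queue may send
 * roughly 100 kB per one-second window.  tx_add_credit() tops the credit back
 * up at each window boundary, clamped to max(131072, credit_bytes) so a
 * single jumbo frame can always be sent, and a packet that still does not fit
 * arms credit_timeout so the queue is retried (via
 * xenvif_tx_credit_callback()) when the next window opens.
 */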
 797
 798/* No locking is required in xenvif_mcast_add/del() as they are
 799 * only ever invoked from NAPI poll. An RCU list is used because
 800 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 801 */
 802
 803static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
 804{
 805	struct xenvif_mcast_addr *mcast;
 806
 807	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
 808		if (net_ratelimit())
 809			netdev_err(vif->dev,
 810				   "Too many multicast addresses\n");
 811		return -ENOSPC;
 812	}
 813
 814	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
 815	if (!mcast)
 816		return -ENOMEM;
 817
 818	ether_addr_copy(mcast->addr, addr);
 819	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
 820	vif->fe_mcast_count++;
 821
 822	return 0;
 823}
 824
 825static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
 826{
 827	struct xenvif_mcast_addr *mcast;
 828
 829	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
 830		if (ether_addr_equal(addr, mcast->addr)) {
 831			--vif->fe_mcast_count;
 832			list_del_rcu(&mcast->entry);
 833			kfree_rcu(mcast, rcu);
 834			break;
 835		}
 836	}
 837}
 838
 839bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
 840{
 841	struct xenvif_mcast_addr *mcast;
 842
 843	rcu_read_lock();
 844	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
 845		if (ether_addr_equal(addr, mcast->addr)) {
 846			rcu_read_unlock();
 847			return true;
 848		}
 849	}
 850	rcu_read_unlock();
 851
 852	return false;
 853}
 854
 855void xenvif_mcast_addr_list_free(struct xenvif *vif)
 856{
 857	/* No need for locking or RCU here. NAPI poll and TX queue
 858	 * are stopped.
 859	 */
 860	while (!list_empty(&vif->fe_mcast_addr)) {
 861		struct xenvif_mcast_addr *mcast;
 862
 863		mcast = list_first_entry(&vif->fe_mcast_addr,
 864					 struct xenvif_mcast_addr,
 865					 entry);
 866		--vif->fe_mcast_count;
 867		list_del(&mcast->entry);
 868		kfree(mcast);
 869	}
 870}
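/* Putting the three helpers above together: the frontend announces the
 * multicast addresses it cares about with XEN_NETIF_EXTRA_TYPE_MCAST_ADD /
 * _DEL extras on the Tx ring (handled further down in
 * xenvif_tx_build_gops()), and xenvif_mcast_match() is then consulted on the
 * transmit path to decide whether a given multicast frame should be
 * forwarded to that frontend at all.
 */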
 871
 872static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 873				     int budget,
 874				     unsigned *copy_ops,
 875				     unsigned *map_ops)
 876{
 877	struct sk_buff *skb, *nskb;
 878	int ret;
 879	unsigned int frag_overflow;
 880
 881	while (skb_queue_len(&queue->tx_queue) < budget) {
 882		struct xen_netif_tx_request txreq;
 883		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 884		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
 885		unsigned int extra_count;
 886		u16 pending_idx;
 887		RING_IDX idx;
 888		int work_to_do;
 889		unsigned int data_len;
 890		pending_ring_idx_t index;
 891
 892		if (queue->tx.sring->req_prod - queue->tx.req_cons >
 893		    XEN_NETIF_TX_RING_SIZE) {
 894			netdev_err(queue->vif->dev,
 895				   "Impossible number of requests. "
 896				   "req_prod %d, req_cons %d, size %ld\n",
 897				   queue->tx.sring->req_prod, queue->tx.req_cons,
 898				   XEN_NETIF_TX_RING_SIZE);
 899			xenvif_fatal_tx_err(queue->vif);
 900			break;
 901		}
 902
 903		work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
 904		if (!work_to_do)
 905			break;
 906
 907		idx = queue->tx.req_cons;
 908		rmb(); /* Ensure that we see the request before we copy it. */
 909		RING_COPY_REQUEST(&queue->tx, idx, &txreq);
 910
 911		/* Credit-based scheduling. */
 912		if (txreq.size > queue->remaining_credit &&
 913		    tx_credit_exceeded(queue, txreq.size))
 914			break;
 915
 916		queue->remaining_credit -= txreq.size;
 917
 918		work_to_do--;
 919		queue->tx.req_cons = ++idx;
 920
 921		memset(extras, 0, sizeof(extras));
 922		extra_count = 0;
 923		if (txreq.flags & XEN_NETTXF_extra_info) {
 924			work_to_do = xenvif_get_extras(queue, extras,
 925						       &extra_count,
 926						       work_to_do);
 927			idx = queue->tx.req_cons;
 928			if (unlikely(work_to_do < 0))
 929				break;
 930		}
 931
 932		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
 933			struct xen_netif_extra_info *extra;
 934
 935			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
 936			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
 937
 938			make_tx_response(queue, &txreq, extra_count,
 939					 (ret == 0) ?
 940					 XEN_NETIF_RSP_OKAY :
 941					 XEN_NETIF_RSP_ERROR);
 942			push_tx_responses(queue);
 943			continue;
 944		}
 945
 946		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
 947			struct xen_netif_extra_info *extra;
 948
 949			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
 950			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
 951
 952			make_tx_response(queue, &txreq, extra_count,
 953					 XEN_NETIF_RSP_OKAY);
 954			push_tx_responses(queue);
 955			continue;
 956		}
 957
 958		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
 959			XEN_NETBACK_TX_COPY_LEN : txreq.size;
 960
 961		ret = xenvif_count_requests(queue, &txreq, extra_count,
 962					    txfrags, work_to_do);
 963
 964		if (unlikely(ret < 0))
 965			break;
 966
 967		idx += ret;
 968
 969		if (unlikely(txreq.size < ETH_HLEN)) {
 970			netdev_dbg(queue->vif->dev,
 971				   "Bad packet size: %d\n", txreq.size);
 972			xenvif_tx_err(queue, &txreq, extra_count, idx);
 973			break;
 974		}
 975
 976		/* No crossing a page as the payload mustn't fragment. */
 977		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
 978			netdev_err(queue->vif->dev,
 979				   "txreq.offset: %u, size: %u, end: %lu\n",
 980				   txreq.offset, txreq.size,
 981				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
 982			xenvif_fatal_tx_err(queue->vif);
 983			break;
 984		}
 985
 986		index = pending_index(queue->pending_cons);
 987		pending_idx = queue->pending_ring[index];
 988
 989		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
 990			data_len = txreq.size;
 991
 992		skb = xenvif_alloc_skb(data_len);
 993		if (unlikely(skb == NULL)) {
 994			netdev_dbg(queue->vif->dev,
 995				   "Can't allocate a skb in start_xmit.\n");
 996			xenvif_tx_err(queue, &txreq, extra_count, idx);
 997			break;
 998		}
 999
1000		skb_shinfo(skb)->nr_frags = ret;
1001		/* At this point shinfo->nr_frags is in fact the number of
1002		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1003		 */
1004		frag_overflow = 0;
1005		nskb = NULL;
1006		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1007			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1008			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1009			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1010			nskb = xenvif_alloc_skb(0);
1011			if (unlikely(nskb == NULL)) {
1012				skb_shinfo(skb)->nr_frags = 0;
1013				kfree_skb(skb);
1014				xenvif_tx_err(queue, &txreq, extra_count, idx);
1015				if (net_ratelimit())
1016					netdev_err(queue->vif->dev,
1017						   "Can't allocate the frag_list skb.\n");
1018				break;
1019			}
1020		}
1021
1022		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1023			struct xen_netif_extra_info *gso;
1024			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1025
1026			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1027				/* Failure in xenvif_set_skb_gso is fatal. */
1028				skb_shinfo(skb)->nr_frags = 0;
1029				kfree_skb(skb);
1030				kfree_skb(nskb);
1031				break;
1032			}
1033		}
1034
1035		if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
1036			struct xen_netif_extra_info *extra;
1037			enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
1038
1039			extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
1040
1041			switch (extra->u.hash.type) {
1042			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
1043			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
1044				type = PKT_HASH_TYPE_L3;
1045				break;
1046
1047			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
1048			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
1049				type = PKT_HASH_TYPE_L4;
1050				break;
1051
1052			default:
1053				break;
1054			}
1055
1056			if (type != PKT_HASH_TYPE_NONE)
1057				skb_set_hash(skb,
1058					     *(u32 *)extra->u.hash.value,
1059					     type);
1060		}
1061
1062		xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
1063				    map_ops, frag_overflow, nskb, extra_count,
1064				    data_len);
1065
1066		__skb_queue_tail(&queue->tx_queue, skb);
1067
1068		queue->tx.req_cons = idx;
1069
1070		if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
1071		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1072			break;
1073	}
1074
1075	return;
1076}
1077
1078/* Consolidate skb with a frag_list into a brand new one with local pages on
1079 * frags. Returns 0 on success, or -ENOMEM if new pages can't be allocated.
1080 */
1081static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1082{
1083	unsigned int offset = skb_headlen(skb);
1084	skb_frag_t frags[MAX_SKB_FRAGS];
1085	int i, f;
1086	struct ubuf_info *uarg;
1087	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1088
1089	queue->stats.tx_zerocopy_sent += 2;
1090	queue->stats.tx_frag_overflow++;
1091
1092	xenvif_fill_frags(queue, nskb);
1093	/* Subtract the frags' size; we will correct it later */
1094	skb->truesize -= skb->data_len;
1095	skb->len += nskb->len;
1096	skb->data_len += nskb->len;
1097
1098	/* create a brand new frags array and coalesce there */
1099	for (i = 0; offset < skb->len; i++) {
1100		struct page *page;
1101		unsigned int len;
1102
1103		BUG_ON(i >= MAX_SKB_FRAGS);
1104		page = alloc_page(GFP_ATOMIC);
1105		if (!page) {
1106			int j;
1107			skb->truesize += skb->data_len;
1108			for (j = 0; j < i; j++)
1109				put_page(skb_frag_page(&frags[j]));
1110			return -ENOMEM;
1111		}
1112
1113		if (offset + PAGE_SIZE < skb->len)
1114			len = PAGE_SIZE;
1115		else
1116			len = skb->len - offset;
1117		if (skb_copy_bits(skb, offset, page_address(page), len))
1118			BUG();
1119
1120		offset += len;
1121		__skb_frag_set_page(&frags[i], page);
1122		skb_frag_off_set(&frags[i], 0);
1123		skb_frag_size_set(&frags[i], len);
1124	}
1125
1126	/* Release all the original (foreign) frags. */
1127	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1128		skb_frag_unref(skb, f);
1129	uarg = skb_shinfo(skb)->destructor_arg;
1130	/* increase inflight counter to offset decrement in callback */
1131	atomic_inc(&queue->inflight_packets);
1132	uarg->callback(NULL, uarg, true);
1133	skb_shinfo(skb)->destructor_arg = NULL;
1134
1135	/* Fill the skb with the new (local) frags. */
1136	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1137	skb_shinfo(skb)->nr_frags = i;
1138	skb->truesize += i * PAGE_SIZE;
1139
1140	return 0;
1141}
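/* Note on the consolidation above: once every fragment has been copied into
 * freshly allocated local pages, the skb no longer references any foreign
 * (granted) memory, so the zerocopy callback can be invoked immediately
 * (uarg->callback above) and the grants released without having to wait for
 * the network stack to finish with the skb.
 */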
1142
1143static int xenvif_tx_submit(struct xenvif_queue *queue)
1144{
1145	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1146	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1147	struct sk_buff *skb;
1148	int work_done = 0;
1149
1150	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1151		struct xen_netif_tx_request *txp;
1152		u16 pending_idx;
1153
1154		pending_idx = copy_pending_idx(skb, 0);
1155		txp = &queue->pending_tx_info[pending_idx].req;
1156
1157		/* Check the remap error code. */
1158		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1159			/* If there was an error, xenvif_tx_check_gop is
1160			 * expected to release all the frags which were mapped,
1161			 * so kfree_skb shouldn't do it again
1162			 */
1163			skb_shinfo(skb)->nr_frags = 0;
1164			if (skb_has_frag_list(skb)) {
1165				struct sk_buff *nskb =
1166						skb_shinfo(skb)->frag_list;
1167				skb_shinfo(nskb)->nr_frags = 0;
1168			}
1169			kfree_skb(skb);
1170			continue;
1171		}
1172
1173		if (txp->flags & XEN_NETTXF_csum_blank)
1174			skb->ip_summed = CHECKSUM_PARTIAL;
1175		else if (txp->flags & XEN_NETTXF_data_validated)
1176			skb->ip_summed = CHECKSUM_UNNECESSARY;
1177
1178		xenvif_fill_frags(queue, skb);
1179
1180		if (unlikely(skb_has_frag_list(skb))) {
1181			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1182			xenvif_skb_zerocopy_prepare(queue, nskb);
1183			if (xenvif_handle_frag_list(queue, skb)) {
1184				if (net_ratelimit())
1185					netdev_err(queue->vif->dev,
1186						   "Not enough memory to consolidate frag_list!\n");
1187				xenvif_skb_zerocopy_prepare(queue, skb);
1188				kfree_skb(skb);
1189				continue;
1190			}
1191			/* Copied all the bits from the frag list -- free it. */
1192			skb_frag_list_init(skb);
1193			kfree_skb(nskb);
1194		}
1195
1196		skb->dev      = queue->vif->dev;
1197		skb->protocol = eth_type_trans(skb, skb->dev);
1198		skb_reset_network_header(skb);
1199
1200		if (checksum_setup(queue, skb)) {
1201			netdev_dbg(queue->vif->dev,
1202				   "Can't setup checksum in net_tx_action\n");
1203			/* We have to set this flag to trigger the callback */
1204			if (skb_shinfo(skb)->destructor_arg)
1205				xenvif_skb_zerocopy_prepare(queue, skb);
1206			kfree_skb(skb);
1207			continue;
1208		}
1209
1210		skb_probe_transport_header(skb);
1211
1212		/* If the packet is GSO then we will have just set up the
1213		 * transport header offset in checksum_setup so it's now
1214		 * straightforward to calculate gso_segs.
1215		 */
1216		if (skb_is_gso(skb)) {
1217			int mss, hdrlen;
1218
1219			/* GSO implies having the L4 header. */
1220			WARN_ON_ONCE(!skb_transport_header_was_set(skb));
1221			if (unlikely(!skb_transport_header_was_set(skb))) {
1222				kfree_skb(skb);
1223				continue;
1224			}
1225
1226			mss = skb_shinfo(skb)->gso_size;
1227			hdrlen = skb_tcp_all_headers(skb);
1228
1229			skb_shinfo(skb)->gso_segs =
1230				DIV_ROUND_UP(skb->len - hdrlen, mss);
1231		}
1232
1233		queue->stats.rx_bytes += skb->len;
1234		queue->stats.rx_packets++;
1235
1236		work_done++;
1237
1238		/* Set this flag right before netif_receive_skb, otherwise
1239		 * someone might think this packet already left netback, and
1240		 * do a skb_copy_ubufs while we are still in control of the
1241		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
1242		 */
1243		if (skb_shinfo(skb)->destructor_arg) {
1244			xenvif_skb_zerocopy_prepare(queue, skb);
1245			queue->stats.tx_zerocopy_sent++;
1246		}
1247
1248		netif_receive_skb(skb);
1249	}
1250
1251	return work_done;
1252}
1253
1254void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
1255			      bool zerocopy_success)
1256{
1257	unsigned long flags;
1258	pending_ring_idx_t index;
1259	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
1260	struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1261
1262	/* This is the only place where we grab this lock, to protect callbacks
1263	 * from each other.
1264	 */
1265	spin_lock_irqsave(&queue->callback_lock, flags);
1266	do {
1267		u16 pending_idx = ubuf->desc;
1268		ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
1269		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1270			MAX_PENDING_REQS);
1271		index = pending_index(queue->dealloc_prod);
1272		queue->dealloc_ring[index] = pending_idx;
1273		/* Sync with xenvif_tx_dealloc_action:
1274		 * insert idx then incr producer.
1275		 */
1276		smp_wmb();
1277		queue->dealloc_prod++;
1278	} while (ubuf);
1279	spin_unlock_irqrestore(&queue->callback_lock, flags);
1280
1281	if (likely(zerocopy_success))
1282		queue->stats.tx_zerocopy_success++;
1283	else
1284		queue->stats.tx_zerocopy_fail++;
1285	xenvif_skb_zerocopy_complete(queue);
1286}
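/* The dealloc ring written above works as follows: each callback publishes
 * pending_idx values and advances dealloc_prod under callback_lock, with the
 * smp_wmb() pairing with the smp_rmb() in xenvif_tx_dealloc_action() below,
 * which consumes the entries from the dealloc kthread and turns them into
 * grant unmap operations.
 */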
1287
1288static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1289{
1290	struct gnttab_unmap_grant_ref *gop;
1291	pending_ring_idx_t dc, dp;
1292	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1293	unsigned int i = 0;
1294
1295	dc = queue->dealloc_cons;
1296	gop = queue->tx_unmap_ops;
1297
1298	/* Free up any grants we have finished using */
1299	do {
1300		dp = queue->dealloc_prod;
1301
1302		/* Ensure we see all indices enqueued by all
1303		 * xenvif_zerocopy_callback().
1304		 */
1305		smp_rmb();
1306
1307		while (dc != dp) {
1308			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1309			pending_idx =
1310				queue->dealloc_ring[pending_index(dc++)];
1311
1312			pending_idx_release[gop - queue->tx_unmap_ops] =
1313				pending_idx;
1314			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1315				queue->mmap_pages[pending_idx];
1316			gnttab_set_unmap_op(gop,
1317					    idx_to_kaddr(queue, pending_idx),
1318					    GNTMAP_host_map,
1319					    queue->grant_tx_handle[pending_idx]);
1320			xenvif_grant_handle_reset(queue, pending_idx);
1321			++gop;
1322		}
1323
1324	} while (dp != queue->dealloc_prod);
1325
1326	queue->dealloc_cons = dc;
1327
1328	if (gop - queue->tx_unmap_ops > 0) {
1329		int ret;
1330		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1331					NULL,
1332					queue->pages_to_unmap,
1333					gop - queue->tx_unmap_ops);
1334		if (ret) {
1335			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1336				   gop - queue->tx_unmap_ops, ret);
1337			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1338				if (gop[i].status != GNTST_okay)
1339					netdev_err(queue->vif->dev,
1340						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
1341						   gop[i].host_addr,
1342						   gop[i].handle,
1343						   gop[i].status);
1344			}
1345			BUG();
1346		}
1347	}
1348
1349	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1350		xenvif_idx_release(queue, pending_idx_release[i],
1351				   XEN_NETIF_RSP_OKAY);
1352}
1353
1354
1355/* Called after netfront has transmitted */
1356int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1357{
1358	unsigned nr_mops = 0, nr_cops = 0;
1359	int work_done, ret;
1360
1361	if (unlikely(!tx_work_todo(queue)))
1362		return 0;
1363
1364	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1365
1366	if (nr_cops == 0)
1367		return 0;
1368
1369	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1370	if (nr_mops != 0) {
1371		ret = gnttab_map_refs(queue->tx_map_ops,
1372				      NULL,
1373				      queue->pages_to_map,
1374				      nr_mops);
1375		if (ret) {
1376			unsigned int i;
1377
1378			netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
1379				   nr_mops, ret);
1380			for (i = 0; i < nr_mops; ++i)
1381				WARN_ON_ONCE(queue->tx_map_ops[i].status ==
1382				             GNTST_okay);
1383		}
1384	}
1385
1386	work_done = xenvif_tx_submit(queue);
1387
1388	return work_done;
1389}
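/* Overall Tx flow for one NAPI poll, as implemented above: build grant copy
 * and map operations for up to 'budget' packets (xenvif_tx_build_gops), issue
 * the copies with gnttab_batch_copy() and the maps with gnttab_map_refs(),
 * then hand the completed skbs to the stack in xenvif_tx_submit().  If no
 * copy operations were generated the function returns early, since every
 * packet contributes at least the copied header.
 */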
1390
1391static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1392			       u8 status)
1393{
1394	struct pending_tx_info *pending_tx_info;
1395	pending_ring_idx_t index;
1396	unsigned long flags;
1397
1398	pending_tx_info = &queue->pending_tx_info[pending_idx];
1399
1400	spin_lock_irqsave(&queue->response_lock, flags);
1401
1402	make_tx_response(queue, &pending_tx_info->req,
1403			 pending_tx_info->extra_count, status);
1404
1405	/* Release the pending index before pushing the Tx response so
1406	 * it's available before a new Tx request is pushed by the
1407	 * frontend.
1408	 */
1409	index = pending_index(queue->pending_prod++);
1410	queue->pending_ring[index] = pending_idx;
1411
1412	push_tx_responses(queue);
1413
1414	spin_unlock_irqrestore(&queue->response_lock, flags);
1415}
1416
1417
1418static void make_tx_response(struct xenvif_queue *queue,
1419			     struct xen_netif_tx_request *txp,
1420			     unsigned int extra_count,
1421			     s8       st)
1422{
1423	RING_IDX i = queue->tx.rsp_prod_pvt;
1424	struct xen_netif_tx_response *resp;
1425
1426	resp = RING_GET_RESPONSE(&queue->tx, i);
1427	resp->id     = txp->id;
1428	resp->status = st;
1429
1430	while (extra_count-- != 0)
1431		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1432
1433	queue->tx.rsp_prod_pvt = ++i;
1434}
1435
1436static void push_tx_responses(struct xenvif_queue *queue)
1437{
1438	int notify;
1439
1440	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1441	if (notify)
1442		notify_remote_via_irq(queue->tx_irq);
1443}
1444
1445static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1446{
1447	int ret;
1448	struct gnttab_unmap_grant_ref tx_unmap_op;
1449
1450	gnttab_set_unmap_op(&tx_unmap_op,
1451			    idx_to_kaddr(queue, pending_idx),
1452			    GNTMAP_host_map,
1453			    queue->grant_tx_handle[pending_idx]);
1454	xenvif_grant_handle_reset(queue, pending_idx);
1455
1456	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1457				&queue->mmap_pages[pending_idx], 1);
1458	if (ret) {
1459		netdev_err(queue->vif->dev,
1460			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
1461			   ret,
1462			   pending_idx,
1463			   tx_unmap_op.host_addr,
1464			   tx_unmap_op.handle,
1465			   tx_unmap_op.status);
1466		BUG();
1467	}
1468}
1469
1470static inline int tx_work_todo(struct xenvif_queue *queue)
1471{
1472	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1473		return 1;
1474
1475	return 0;
1476}
1477
1478static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1479{
1480	return queue->dealloc_cons != queue->dealloc_prod;
1481}
1482
1483void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
1484{
1485	if (queue->tx.sring)
1486		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1487					queue->tx.sring);
1488	if (queue->rx.sring)
1489		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1490					queue->rx.sring);
1491}
1492
1493int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
1494				   grant_ref_t tx_ring_ref,
1495				   grant_ref_t rx_ring_ref)
1496{
1497	void *addr;
1498	struct xen_netif_tx_sring *txs;
1499	struct xen_netif_rx_sring *rxs;
1500	RING_IDX rsp_prod, req_prod;
1501	int err;
1502
1503	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1504				     &tx_ring_ref, 1, &addr);
1505	if (err)
1506		goto err;
1507
1508	txs = (struct xen_netif_tx_sring *)addr;
1509	rsp_prod = READ_ONCE(txs->rsp_prod);
1510	req_prod = READ_ONCE(txs->req_prod);
1511
1512	BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
1513
1514	err = -EIO;
1515	if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
1516		goto err;
1517
1518	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1519				     &rx_ring_ref, 1, &addr);
1520	if (err)
1521		goto err;
1522
1523	rxs = (struct xen_netif_rx_sring *)addr;
1524	rsp_prod = READ_ONCE(rxs->rsp_prod);
1525	req_prod = READ_ONCE(rxs->req_prod);
1526
1527	BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
1528
1529	err = -EIO;
1530	if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
1531		goto err;
1532
1533	return 0;
1534
1535err:
1536	xenvif_unmap_frontend_data_rings(queue);
1537	return err;
1538}
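/* The req_prod - rsp_prod sanity checks above guard against attaching to a
 * shared ring whose indices have been corrupted (or manipulated) by the
 * frontend: if more requests appear outstanding than the ring can hold, the
 * mapping is torn down again and -EIO is returned instead of letting the
 * backend chase bogus indices.
 */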
1539
1540static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1541{
1542	/* Dealloc thread must remain running until all inflight
1543	 * packets complete.
1544	 */
1545	return kthread_should_stop() &&
1546		!atomic_read(&queue->inflight_packets);
1547}
1548
1549int xenvif_dealloc_kthread(void *data)
1550{
1551	struct xenvif_queue *queue = data;
1552
1553	for (;;) {
1554		wait_event_interruptible(queue->dealloc_wq,
1555					 tx_dealloc_work_todo(queue) ||
1556					 xenvif_dealloc_kthread_should_stop(queue));
1557		if (xenvif_dealloc_kthread_should_stop(queue))
1558			break;
1559
1560		xenvif_tx_dealloc_action(queue);
1561		cond_resched();
1562	}
1563
1564	/* Unmap anything remaining */
1565	if (tx_dealloc_work_todo(queue))
1566		xenvif_tx_dealloc_action(queue);
1567
1568	return 0;
1569}
1570
1571static void make_ctrl_response(struct xenvif *vif,
1572			       const struct xen_netif_ctrl_request *req,
1573			       u32 status, u32 data)
1574{
1575	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
1576	struct xen_netif_ctrl_response rsp = {
1577		.id = req->id,
1578		.type = req->type,
1579		.status = status,
1580		.data = data,
1581	};
1582
1583	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
1584	vif->ctrl.rsp_prod_pvt = ++idx;
1585}
1586
1587static void push_ctrl_response(struct xenvif *vif)
1588{
1589	int notify;
1590
1591	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
1592	if (notify)
1593		notify_remote_via_irq(vif->ctrl_irq);
1594}
1595
1596static void process_ctrl_request(struct xenvif *vif,
1597				 const struct xen_netif_ctrl_request *req)
1598{
1599	u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
1600	u32 data = 0;
1601
1602	switch (req->type) {
1603	case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
1604		status = xenvif_set_hash_alg(vif, req->data[0]);
1605		break;
1606
1607	case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
1608		status = xenvif_get_hash_flags(vif, &data);
1609		break;
1610
1611	case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
1612		status = xenvif_set_hash_flags(vif, req->data[0]);
1613		break;
1614
1615	case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
1616		status = xenvif_set_hash_key(vif, req->data[0],
1617					     req->data[1]);
1618		break;
1619
1620	case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
1621		status = XEN_NETIF_CTRL_STATUS_SUCCESS;
1622		data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
1623		break;
1624
1625	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
1626		status = xenvif_set_hash_mapping_size(vif,
1627						      req->data[0]);
1628		break;
1629
1630	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
1631		status = xenvif_set_hash_mapping(vif, req->data[0],
1632						 req->data[1],
1633						 req->data[2]);
1634		break;
1635
1636	default:
1637		break;
1638	}
1639
1640	make_ctrl_response(vif, req, status, data);
1641	push_ctrl_response(vif);
1642}
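/* Every control-ring request is answered: unrecognised types fall through the
 * switch above with XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED, and the response
 * (status plus an optional data word, e.g. the hash mapping size) is written
 * back with make_ctrl_response() and pushed to the frontend, kicking the
 * control-ring event channel if needed.
 */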
1643
1644static void xenvif_ctrl_action(struct xenvif *vif)
1645{
1646	for (;;) {
1647		RING_IDX req_prod, req_cons;
1648
1649		req_prod = vif->ctrl.sring->req_prod;
1650		req_cons = vif->ctrl.req_cons;
1651
1652		/* Make sure we can see requests before we process them. */
1653		rmb();
1654
1655		if (req_cons == req_prod)
1656			break;
1657
1658		while (req_cons != req_prod) {
1659			struct xen_netif_ctrl_request req;
1660
1661			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
1662			req_cons++;
1663
1664			process_ctrl_request(vif, &req);
1665		}
1666
1667		vif->ctrl.req_cons = req_cons;
1668		vif->ctrl.sring->req_event = req_cons + 1;
1669	}
1670}
1671
1672static bool xenvif_ctrl_work_todo(struct xenvif *vif)
1673{
1674	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
1675		return true;
1676
1677	return false;
1678}
1679
1680irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
1681{
1682	struct xenvif *vif = data;
1683	unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
1684
1685	while (xenvif_ctrl_work_todo(vif)) {
1686		xenvif_ctrl_action(vif);
1687		eoi_flag = 0;
1688	}
1689
1690	xen_irq_lateeoi(irq, eoi_flag);
1691
1692	return IRQ_HANDLED;
1693}
1694
1695static int __init netback_init(void)
1696{
1697	int rc = 0;
1698
1699	if (!xen_domain())
1700		return -ENODEV;
1701
1702	/* Allow as many queues as there are CPUs, but at most 8, if the user
1703	 * has not specified a value.
1704	 */
1705	if (xenvif_max_queues == 0)
1706		xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
1707					  num_online_cpus());
1708
1709	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1710		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1711			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1712		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1713	}
1714
1715	rc = xenvif_xenbus_init();
1716	if (rc)
1717		goto failed_init;
1718
1719#ifdef CONFIG_DEBUG_FS
1720	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
1721#endif /* CONFIG_DEBUG_FS */
1722
1723	return 0;
1724
1725failed_init:
1726	return rc;
1727}
1728
1729module_init(netback_init);
1730
1731static void __exit netback_fini(void)
1732{
1733#ifdef CONFIG_DEBUG_FS
1734	debugfs_remove_recursive(xen_netback_dbg_root);
1735#endif /* CONFIG_DEBUG_FS */
1736	xenvif_xenbus_fini();
1737}
1738module_exit(netback_fini);
1739
1740MODULE_LICENSE("Dual BSD/GPL");
1741MODULE_ALIAS("xen-backend:vif");