v6.13.7
   1/*
   2 * Virtual network driver for conversing with remote driver backends.
   3 *
   4 * Copyright (c) 2002-2005, K A Fraser
   5 * Copyright (c) 2005, XenSource Ltd
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version 2
   9 * as published by the Free Software Foundation; or, when distributed
  10 * separately from the Linux kernel or incorporated into other
  11 * software packages, subject to the following license:
  12 *
  13 * Permission is hereby granted, free of charge, to any person obtaining a copy
  14 * of this source file (the "Software"), to deal in the Software without
  15 * restriction, including without limitation the rights to use, copy, modify,
  16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  17 * and to permit persons to whom the Software is furnished to do so, subject to
  18 * the following conditions:
  19 *
  20 * The above copyright notice and this permission notice shall be included in
  21 * all copies or substantial portions of the Software.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  29 * IN THE SOFTWARE.
  30 */
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47#include <linux/bpf.h>
  48#include <net/page_pool/types.h>
  49#include <linux/bpf_trace.h>
  50
  51#include <xen/xen.h>
  52#include <xen/xenbus.h>
  53#include <xen/events.h>
  54#include <xen/page.h>
  55#include <xen/platform_pci.h>
  56#include <xen/grant_table.h>
  57
  58#include <xen/interface/io/netif.h>
  59#include <xen/interface/memory.h>
  60#include <xen/interface/grant_table.h>
  61
  62/* Module parameters */
  63#define MAX_QUEUES_DEFAULT 8
  64static unsigned int xennet_max_queues;
  65module_param_named(max_queues, xennet_max_queues, uint, 0644);
  66MODULE_PARM_DESC(max_queues,
  67		 "Maximum number of queues per virtual interface");
  68
  69static bool __read_mostly xennet_trusted = true;
  70module_param_named(trusted, xennet_trusted, bool, 0644);
  71MODULE_PARM_DESC(trusted, "Is the backend trusted");
  72
  73#define XENNET_TIMEOUT  (5 * HZ)
  74
  75static const struct ethtool_ops xennet_ethtool_ops;
  76
  77struct netfront_cb {
  78	int pull_to;
  79};
  80
  81#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  82
  83#define RX_COPY_THRESHOLD 256
  84
  85#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
  86#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
  87
  88/* Minimum number of Rx slots (includes slot for GSO metadata). */
  89#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
  90
  91/* Queue name is interface name with "-qNNN" appended */
  92#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
  93
  94/* IRQ name is queue name with "-tx" or "-rx" appended */
  95#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
  96
  97static DECLARE_WAIT_QUEUE_HEAD(module_wq);
  98
  99struct netfront_stats {
 100	u64			packets;
 101	u64			bytes;
 102	struct u64_stats_sync	syncp;
 103};
 104
 105struct netfront_info;
 106
 107struct netfront_queue {
 108	unsigned int id; /* Queue ID, 0-based */
 109	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
 110	struct netfront_info *info;
 111
 112	struct bpf_prog __rcu *xdp_prog;
 113
 114	struct napi_struct napi;
 115
 116	/* Split event channels support, tx_* == rx_* when using
 117	 * single event channel.
 118	 */
 119	unsigned int tx_evtchn, rx_evtchn;
 120	unsigned int tx_irq, rx_irq;
 121	/* Only used when split event channels support is enabled */
 122	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 123	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 124
 125	spinlock_t   tx_lock;
 126	struct xen_netif_tx_front_ring tx;
 127	int tx_ring_ref;
 128
 129	/*
 130	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 131	 * are linked from tx_skb_freelist through tx_link.
 132	 */
 133	struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
 134	unsigned short tx_link[NET_TX_RING_SIZE];
 135#define TX_LINK_NONE 0xffff
 136#define TX_PENDING   0xfffe
 137	grant_ref_t gref_tx_head;
 138	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 139	struct page *grant_tx_page[NET_TX_RING_SIZE];
 140	unsigned tx_skb_freelist;
 141	unsigned int tx_pend_queue;
 142
 143	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 144	struct xen_netif_rx_front_ring rx;
 145	int rx_ring_ref;
 146
 147	struct timer_list rx_refill_timer;
 148
 149	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 150	grant_ref_t gref_rx_head;
 151	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 152
 153	unsigned int rx_rsp_unconsumed;
 154	spinlock_t rx_cons_lock;
 155
 156	struct page_pool *page_pool;
 157	struct xdp_rxq_info xdp_rxq;
 158};
 159
 160struct netfront_info {
 161	struct list_head list;
 162	struct net_device *netdev;
 163
 164	struct xenbus_device *xbdev;
 165
 166	/* Multi-queue support */
 167	struct netfront_queue *queues;
 168
 169	/* Statistics */
 170	struct netfront_stats __percpu *rx_stats;
 171	struct netfront_stats __percpu *tx_stats;
 172
 173	/* XDP state */
 174	bool netback_has_xdp_headroom;
 175	bool netfront_xdp_enabled;
 176
  177	/* Is the device behaving sanely? */
 178	bool broken;
 179
 180	/* Should skbs be bounced into a zeroed buffer? */
 181	bool bounce;
 182
 183	atomic_t rx_gso_checksum_fixup;
 184};
 185
 186struct netfront_rx_info {
 187	struct xen_netif_rx_response rx;
 188	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 189};
 190
 191/*
  192 * Access macros for acquiring/freeing slots in tx_skbs[].
 193 */
 194
 195static void add_id_to_list(unsigned *head, unsigned short *list,
 196			   unsigned short id)
 197{
 198	list[id] = *head;
 199	*head = id;
 200}
 201
 202static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
 203{
 204	unsigned int id = *head;
 205
 206	if (id != TX_LINK_NONE) {
 207		*head = list[id];
 208		list[id] = TX_LINK_NONE;
 209	}
 210	return id;
 211}
 212
 213static int xennet_rxidx(RING_IDX idx)
 214{
 215	return idx & (NET_RX_RING_SIZE - 1);
 216}
 217
 218static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
 219					 RING_IDX ri)
 220{
 221	int i = xennet_rxidx(ri);
 222	struct sk_buff *skb = queue->rx_skbs[i];
 223	queue->rx_skbs[i] = NULL;
 224	return skb;
 225}
 226
 227static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 228					    RING_IDX ri)
 229{
 230	int i = xennet_rxidx(ri);
 231	grant_ref_t ref = queue->grant_rx_ref[i];
 232	queue->grant_rx_ref[i] = INVALID_GRANT_REF;
 233	return ref;
 234}
 235
 236#ifdef CONFIG_SYSFS
 237static const struct attribute_group xennet_dev_group;
 238#endif
 239
 240static bool xennet_can_sg(struct net_device *dev)
 241{
 242	return dev->features & NETIF_F_SG;
 243}
 244
 245
 246static void rx_refill_timeout(struct timer_list *t)
 247{
 248	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
 249	napi_schedule(&queue->napi);
 250}
 251
 252static int netfront_tx_slot_available(struct netfront_queue *queue)
 253{
 254	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
 255		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
 256}
 257
 258static void xennet_maybe_wake_tx(struct netfront_queue *queue)
 259{
 260	struct net_device *dev = queue->info->netdev;
 261	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
 262
 263	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
 264	    netfront_tx_slot_available(queue) &&
 265	    likely(netif_running(dev)))
 266		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 267}
 268
 269
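/*
 * Allocate a single rx skb backed by one page from the queue's page pool.
 * The page is attached as frag 0 and the skb is marked for page-pool
 * recycling; returns NULL if either allocation fails.
 */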
 270static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 271{
 272	struct sk_buff *skb;
 273	struct page *page;
 274
 275	skb = __netdev_alloc_skb(queue->info->netdev,
 276				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
 277				 GFP_ATOMIC | __GFP_NOWARN);
 278	if (unlikely(!skb))
 279		return NULL;
 280
 281	page = page_pool_alloc_pages(queue->page_pool,
 282				     GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
 283	if (unlikely(!page)) {
 284		kfree_skb(skb);
 285		return NULL;
 286	}
 287	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 288	skb_mark_for_recycle(skb);
 289
 290	/* Align ip header to a 16 bytes boundary */
 291	skb_reserve(skb, NET_IP_ALIGN);
 292	skb->dev = queue->info->netdev;
 293
 294	return skb;
 295}
 296
 297
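/*
 * Refill the rx ring: allocate skbs, grant the backend access to their
 * pages and publish the requests.  If allocation fails or too few slots
 * could be filled, retry later via the rx_refill_timer.
 */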
 298static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 299{
 300	RING_IDX req_prod = queue->rx.req_prod_pvt;
 301	int notify;
 302	int err = 0;
 303
 304	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 305		return;
 306
 307	for (req_prod = queue->rx.req_prod_pvt;
 308	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
 309	     req_prod++) {
 310		struct sk_buff *skb;
 311		unsigned short id;
 312		grant_ref_t ref;
 313		struct page *page;
 314		struct xen_netif_rx_request *req;
 315
 316		skb = xennet_alloc_one_rx_buffer(queue);
 317		if (!skb) {
 318			err = -ENOMEM;
 319			break;
 320		}
 321
 322		id = xennet_rxidx(req_prod);
 323
 324		BUG_ON(queue->rx_skbs[id]);
 325		queue->rx_skbs[id] = skb;
 326
 327		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
 328		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 329		queue->grant_rx_ref[id] = ref;
 330
 331		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 332
 333		req = RING_GET_REQUEST(&queue->rx, req_prod);
 334		gnttab_page_grant_foreign_access_ref_one(ref,
 335							 queue->info->xbdev->otherend_id,
 336							 page,
 337							 0);
 338		req->id = id;
 339		req->gref = ref;
 340	}
 341
 342	queue->rx.req_prod_pvt = req_prod;
 343
 344	/* Try again later if there are not enough requests or skb allocation
 345	 * failed.
 346	 * Enough requests is quantified as the sum of newly created slots and
 347	 * the unconsumed slots at the backend.
 348	 */
 349	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
 350	    unlikely(err)) {
 351		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 352		return;
 353	}
 354
 355	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 356	if (notify)
 357		notify_remote_via_irq(queue->rx_irq);
 358}
 359
 360static int xennet_open(struct net_device *dev)
 361{
 362	struct netfront_info *np = netdev_priv(dev);
 363	unsigned int num_queues = dev->real_num_tx_queues;
 364	unsigned int i = 0;
 365	struct netfront_queue *queue = NULL;
 366
 367	if (!np->queues || np->broken)
 368		return -ENODEV;
 369
 370	for (i = 0; i < num_queues; ++i) {
 371		queue = &np->queues[i];
 372		napi_enable(&queue->napi);
 373
 374		spin_lock_bh(&queue->rx_lock);
 375		if (netif_carrier_ok(dev)) {
 376			xennet_alloc_rx_buffers(queue);
 377			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
 378			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
 379				napi_schedule(&queue->napi);
 380		}
 381		spin_unlock_bh(&queue->rx_lock);
 382	}
 383
 384	netif_tx_start_all_queues(dev);
 385
 386	return 0;
 387}
 388
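/*
 * Reclaim tx slots completed by the backend: end the grants, free the
 * skbs and return the slot ids to the free list.  Any inconsistent
 * response marks the device as broken.
 */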
 389static bool xennet_tx_buf_gc(struct netfront_queue *queue)
 390{
 391	RING_IDX cons, prod;
 392	unsigned short id;
 393	struct sk_buff *skb;
 394	bool more_to_do;
 395	bool work_done = false;
 396	const struct device *dev = &queue->info->netdev->dev;
 397
 398	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 399
 400	do {
 401		prod = queue->tx.sring->rsp_prod;
 402		if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
 403			dev_alert(dev, "Illegal number of responses %u\n",
 404				  prod - queue->tx.rsp_cons);
 405			goto err;
 406		}
 407		rmb(); /* Ensure we see responses up to 'rp'. */
 408
 409		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 410			struct xen_netif_tx_response txrsp;
 411
 412			work_done = true;
 413
 414			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
 415			if (txrsp.status == XEN_NETIF_RSP_NULL)
 416				continue;
 417
 418			id = txrsp.id;
 419			if (id >= RING_SIZE(&queue->tx)) {
 420				dev_alert(dev,
 421					  "Response has incorrect id (%u)\n",
 422					  id);
 423				goto err;
 424			}
 425			if (queue->tx_link[id] != TX_PENDING) {
 426				dev_alert(dev,
 427					  "Response for inactive request\n");
 428				goto err;
 429			}
 430
 431			queue->tx_link[id] = TX_LINK_NONE;
 432			skb = queue->tx_skbs[id];
 433			queue->tx_skbs[id] = NULL;
 434			if (unlikely(!gnttab_end_foreign_access_ref(
 435				queue->grant_tx_ref[id]))) {
 436				dev_alert(dev,
 437					  "Grant still in use by backend domain\n");
 438				goto err;
 439			}
 440			gnttab_release_grant_reference(
 441				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 442			queue->grant_tx_ref[id] = INVALID_GRANT_REF;
 443			queue->grant_tx_page[id] = NULL;
 444			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
 445			dev_kfree_skb_irq(skb);
 446		}
 447
 448		queue->tx.rsp_cons = prod;
 449
 450		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
 451	} while (more_to_do);
 452
 453	xennet_maybe_wake_tx(queue);
 454
 455	return work_done;
 456
 457 err:
 458	queue->info->broken = true;
 459	dev_alert(dev, "Disabled for further use\n");
 460
 461	return work_done;
 462}
 463
 464struct xennet_gnttab_make_txreq {
 465	struct netfront_queue *queue;
 466	struct sk_buff *skb;
 467	struct page *page;
 468	struct xen_netif_tx_request *tx;      /* Last request on ring page */
 469	struct xen_netif_tx_request tx_local; /* Last request local copy*/
 470	unsigned int size;
 471};
 472
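/*
 * Fill one tx request for a single granted frame: claim a free slot and a
 * grant reference, grant the backend read-only access to the page, and
 * queue the slot id as pending until the producer index is pushed.
 */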
 473static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 474				  unsigned int len, void *data)
 475{
 476	struct xennet_gnttab_make_txreq *info = data;
 477	unsigned int id;
 478	struct xen_netif_tx_request *tx;
 479	grant_ref_t ref;
 480	/* convenient aliases */
 481	struct page *page = info->page;
 482	struct netfront_queue *queue = info->queue;
 483	struct sk_buff *skb = info->skb;
 484
 485	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
 486	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 487	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 488	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 489
 490	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 491					gfn, GNTMAP_readonly);
 492
 493	queue->tx_skbs[id] = skb;
 494	queue->grant_tx_page[id] = page;
 495	queue->grant_tx_ref[id] = ref;
 496
 497	info->tx_local.id = id;
 498	info->tx_local.gref = ref;
 499	info->tx_local.offset = offset;
 500	info->tx_local.size = len;
 501	info->tx_local.flags = 0;
 502
 503	*tx = info->tx_local;
 504
 505	/*
 506	 * Put the request in the pending queue, it will be set to be pending
 507	 * when the producer index is about to be raised.
 508	 */
 509	add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
 510
 511	info->tx = tx;
 512	info->size += info->tx_local.size;
 513}
 514
 515static struct xen_netif_tx_request *xennet_make_first_txreq(
 516	struct xennet_gnttab_make_txreq *info,
 517	unsigned int offset, unsigned int len)
 518{
 519	info->size = 0;
 520
 521	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
 522
 523	return info->tx;
 524}
 525
 526static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 527				  unsigned int len, void *data)
 528{
 529	struct xennet_gnttab_make_txreq *info = data;
 530
 531	info->tx->flags |= XEN_NETTXF_more_data;
 532	skb_get(info->skb);
 533	xennet_tx_setup_grant(gfn, offset, len, data);
 534}
 535
 536static void xennet_make_txreqs(
 537	struct xennet_gnttab_make_txreq *info,
 538	struct page *page,
 539	unsigned int offset, unsigned int len)
 540{
 541	/* Skip unused frames from start of page */
 542	page += offset >> PAGE_SHIFT;
 543	offset &= ~PAGE_MASK;
 544
 545	while (len) {
 546		info->page = page;
 547		info->size = 0;
 548
 549		gnttab_foreach_grant_in_range(page, offset, len,
 550					      xennet_make_one_txreq,
 551					      info);
 552
 553		page++;
 554		offset = 0;
 555		len -= info->size;
 556	}
 557}
 558
 559/*
 560 * Count how many ring slots are required to send this skb. Each frag
 561 * might be a compound page.
 562 */
 563static int xennet_count_skb_slots(struct sk_buff *skb)
 564{
 565	int i, frags = skb_shinfo(skb)->nr_frags;
 566	int slots;
 567
 568	slots = gnttab_count_grant(offset_in_page(skb->data),
 569				   skb_headlen(skb));
 570
 571	for (i = 0; i < frags; i++) {
 572		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 573		unsigned long size = skb_frag_size(frag);
 574		unsigned long offset = skb_frag_off(frag);
 575
 576		/* Skip unused frames from start of page */
 577		offset &= ~PAGE_MASK;
 578
 579		slots += gnttab_count_grant(offset, size);
 580	}
 581
 582	return slots;
 583}
 584
 585static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 586			       struct net_device *sb_dev)
 587{
 588	unsigned int num_queues = dev->real_num_tx_queues;
 589	u32 hash;
 590	u16 queue_idx;
 591
 592	/* First, check if there is only one queue */
 593	if (num_queues == 1) {
 594		queue_idx = 0;
 595	} else {
 596		hash = skb_get_hash(skb);
 597		queue_idx = hash % num_queues;
 598	}
 599
 600	return queue_idx;
 601}
 602
 603static void xennet_mark_tx_pending(struct netfront_queue *queue)
 604{
 605	unsigned int i;
 606
 607	while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
 608	       TX_LINK_NONE)
 609		queue->tx_link[i] = TX_PENDING;
 610}
 611
 612static int xennet_xdp_xmit_one(struct net_device *dev,
 613			       struct netfront_queue *queue,
 614			       struct xdp_frame *xdpf)
 615{
 616	struct netfront_info *np = netdev_priv(dev);
 617	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 618	struct xennet_gnttab_make_txreq info = {
 619		.queue = queue,
 620		.skb = NULL,
 621		.page = virt_to_page(xdpf->data),
 622	};
 623	int notify;
 624
 625	xennet_make_first_txreq(&info,
 626				offset_in_page(xdpf->data),
 627				xdpf->len);
 628
 629	xennet_mark_tx_pending(queue);
 630
 631	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 632	if (notify)
 633		notify_remote_via_irq(queue->tx_irq);
 634
 635	u64_stats_update_begin(&tx_stats->syncp);
 636	tx_stats->bytes += xdpf->len;
 637	tx_stats->packets++;
 638	u64_stats_update_end(&tx_stats->syncp);
 639
 640	xennet_tx_buf_gc(queue);
 641
 642	return 0;
 643}
 644
 645static int xennet_xdp_xmit(struct net_device *dev, int n,
 646			   struct xdp_frame **frames, u32 flags)
 647{
 648	unsigned int num_queues = dev->real_num_tx_queues;
 649	struct netfront_info *np = netdev_priv(dev);
 650	struct netfront_queue *queue = NULL;
 651	unsigned long irq_flags;
 652	int nxmit = 0;
 653	int i;
 654
 655	if (unlikely(np->broken))
 656		return -ENODEV;
 657	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 658		return -EINVAL;
 659
 660	queue = &np->queues[smp_processor_id() % num_queues];
 661
 662	spin_lock_irqsave(&queue->tx_lock, irq_flags);
 663	for (i = 0; i < n; i++) {
 664		struct xdp_frame *xdpf = frames[i];
 665
 666		if (!xdpf)
 667			continue;
 668		if (xennet_xdp_xmit_one(dev, queue, xdpf))
 669			break;
 670		nxmit++;
 671	}
 672	spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
 673
 674	return nxmit;
 675}
 676
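/*
 * Copy an skb into freshly allocated, zeroed, page-aligned memory so that
 * only packet data ends up on the pages granted to the backend.  Used when
 * the backend is not trusted or the Ethernet header would cross a page
 * boundary.
 */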
 677static struct sk_buff *bounce_skb(const struct sk_buff *skb)
 678{
 679	unsigned int headerlen = skb_headroom(skb);
 680	/* Align size to allocate full pages and avoid contiguous data leaks */
 681	unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
 682				  XEN_PAGE_SIZE);
 683	struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
 684
 685	if (!n)
 686		return NULL;
 687
 688	if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
 689		WARN_ONCE(1, "misaligned skb allocated\n");
 690		kfree_skb(n);
 691		return NULL;
 692	}
 693
 694	/* Set the data pointer */
 695	skb_reserve(n, headerlen);
 696	/* Set the tail pointer and length */
 697	skb_put(n, skb->len);
 698
 699	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
 700
 701	skb_copy_header(n, skb);
 702	return n;
 703}
 704
 705#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 706
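/*
 * Main transmit path: map the linear area and all frags into tx ring
 * requests, append GSO extra info when needed, push the producer index
 * and notify the backend.
 */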
 707static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 708{
 709	struct netfront_info *np = netdev_priv(dev);
 710	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 711	struct xen_netif_tx_request *first_tx;
 712	unsigned int i;
 713	int notify;
 714	int slots;
 715	struct page *page;
 716	unsigned int offset;
 717	unsigned int len;
 718	unsigned long flags;
 719	struct netfront_queue *queue = NULL;
 720	struct xennet_gnttab_make_txreq info = { };
 721	unsigned int num_queues = dev->real_num_tx_queues;
 722	u16 queue_index;
 723	struct sk_buff *nskb;
 724
 725	/* Drop the packet if no queues are set up */
 726	if (num_queues < 1)
 727		goto drop;
 728	if (unlikely(np->broken))
 729		goto drop;
 730	/* Determine which queue to transmit this SKB on */
 731	queue_index = skb_get_queue_mapping(skb);
 732	queue = &np->queues[queue_index];
 733
 734	/* If skb->len is too big for wire format, drop skb and alert
 735	 * user about misconfiguration.
 736	 */
 737	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 738		net_alert_ratelimited(
 739			"xennet: skb->len = %u, too big for wire format\n",
 740			skb->len);
 741		goto drop;
 742	}
 743
 744	slots = xennet_count_skb_slots(skb);
 745	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
 746		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 747				    slots, skb->len);
 748		if (skb_linearize(skb))
 749			goto drop;
 750	}
 751
 752	page = virt_to_page(skb->data);
 753	offset = offset_in_page(skb->data);
 754
 755	/* The first req should be at least ETH_HLEN size or the packet will be
 756	 * dropped by netback.
 757	 *
 758	 * If the backend is not trusted bounce all data to zeroed pages to
 759	 * avoid exposing contiguous data on the granted page not belonging to
 760	 * the skb.
 761	 */
 762	if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
 763		nskb = bounce_skb(skb);
 764		if (!nskb)
 765			goto drop;
 766		dev_consume_skb_any(skb);
 767		skb = nskb;
 768		page = virt_to_page(skb->data);
 769		offset = offset_in_page(skb->data);
 770	}
 771
 772	len = skb_headlen(skb);
 773
 774	spin_lock_irqsave(&queue->tx_lock, flags);
 775
 776	if (unlikely(!netif_carrier_ok(dev) ||
 777		     (slots > 1 && !xennet_can_sg(dev)) ||
 778		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 779		spin_unlock_irqrestore(&queue->tx_lock, flags);
 780		goto drop;
 781	}
 782
 783	/* First request for the linear area. */
 784	info.queue = queue;
 785	info.skb = skb;
 786	info.page = page;
 787	first_tx = xennet_make_first_txreq(&info, offset, len);
 788	offset += info.tx_local.size;
 789	if (offset == PAGE_SIZE) {
 790		page++;
 791		offset = 0;
 792	}
 793	len -= info.tx_local.size;
 794
 795	if (skb->ip_summed == CHECKSUM_PARTIAL)
 796		/* local packet? */
 797		first_tx->flags |= XEN_NETTXF_csum_blank |
 798				   XEN_NETTXF_data_validated;
 799	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 800		/* remote but checksummed. */
 801		first_tx->flags |= XEN_NETTXF_data_validated;
 802
 803	/* Optional extra info after the first request. */
 804	if (skb_shinfo(skb)->gso_size) {
 805		struct xen_netif_extra_info *gso;
 806
 807		gso = (struct xen_netif_extra_info *)
 808			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 809
 810		first_tx->flags |= XEN_NETTXF_extra_info;
 811
 812		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 813		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 814			XEN_NETIF_GSO_TYPE_TCPV6 :
 815			XEN_NETIF_GSO_TYPE_TCPV4;
 816		gso->u.gso.pad = 0;
 817		gso->u.gso.features = 0;
 818
 819		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 820		gso->flags = 0;
 821	}
 822
 823	/* Requests for the rest of the linear area. */
 824	xennet_make_txreqs(&info, page, offset, len);
 825
 826	/* Requests for all the frags. */
 827	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 828		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 829		xennet_make_txreqs(&info, skb_frag_page(frag),
 830					skb_frag_off(frag),
 831					skb_frag_size(frag));
 832	}
 833
 834	/* First request has the packet length. */
 835	first_tx->size = skb->len;
 836
 837	/* timestamp packet in software */
 838	skb_tx_timestamp(skb);
 839
 840	xennet_mark_tx_pending(queue);
 841
 842	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 843	if (notify)
 844		notify_remote_via_irq(queue->tx_irq);
 845
 846	u64_stats_update_begin(&tx_stats->syncp);
 847	tx_stats->bytes += skb->len;
 848	tx_stats->packets++;
 849	u64_stats_update_end(&tx_stats->syncp);
 850
 851	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 852	xennet_tx_buf_gc(queue);
 853
 854	if (!netfront_tx_slot_available(queue))
 855		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
 856
 857	spin_unlock_irqrestore(&queue->tx_lock, flags);
 858
 859	return NETDEV_TX_OK;
 860
 861 drop:
 862	dev->stats.tx_dropped++;
 863	dev_kfree_skb_any(skb);
 864	return NETDEV_TX_OK;
 865}
 866
 867static int xennet_close(struct net_device *dev)
 868{
 869	struct netfront_info *np = netdev_priv(dev);
 870	unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0;
 871	unsigned int i;
 872	struct netfront_queue *queue;
 873	netif_tx_stop_all_queues(np->netdev);
 874	for (i = 0; i < num_queues; ++i) {
 875		queue = &np->queues[i];
 876		napi_disable(&queue->napi);
 877	}
 878	return 0;
 879}
 880
 881static void xennet_destroy_queues(struct netfront_info *info)
 882{
 883	unsigned int i;
 884
 885	if (!info->queues)
 886		return;
 887
 888	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
 889		struct netfront_queue *queue = &info->queues[i];
 890
 891		if (netif_running(info->netdev))
 892			napi_disable(&queue->napi);
 893		netif_napi_del(&queue->napi);
 894	}
 895
 896	kfree(info->queues);
 897	info->queues = NULL;
 898}
 899
 900static void xennet_uninit(struct net_device *dev)
 901{
 902	struct netfront_info *np = netdev_priv(dev);
 903	xennet_destroy_queues(np);
 904}
 905
 906static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
 907{
 908	unsigned long flags;
 909
 910	spin_lock_irqsave(&queue->rx_cons_lock, flags);
 911	queue->rx.rsp_cons = val;
 912	queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
 913	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
 914}
 915
 916static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 917				grant_ref_t ref)
 918{
 919	int new = xennet_rxidx(queue->rx.req_prod_pvt);
 920
 921	BUG_ON(queue->rx_skbs[new]);
 922	queue->rx_skbs[new] = skb;
 923	queue->grant_rx_ref[new] = ref;
 924	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
 925	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
 926	queue->rx.req_prod_pvt++;
 927}
 928
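/*
 * Consume the chain of extra info slots that follows an rx response,
 * recycling each slot's skb and grant back onto the rx ring.
 */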
 929static int xennet_get_extras(struct netfront_queue *queue,
 930			     struct xen_netif_extra_info *extras,
 931			     RING_IDX rp)
 932
 933{
 934	struct xen_netif_extra_info extra;
 935	struct device *dev = &queue->info->netdev->dev;
 936	RING_IDX cons = queue->rx.rsp_cons;
 937	int err = 0;
 938
 939	do {
 940		struct sk_buff *skb;
 941		grant_ref_t ref;
 942
 943		if (unlikely(cons + 1 == rp)) {
 944			if (net_ratelimit())
 945				dev_warn(dev, "Missing extra info\n");
 946			err = -EBADR;
 947			break;
 948		}
 949
 950		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
 951
 952		if (unlikely(!extra.type ||
 953			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 954			if (net_ratelimit())
 955				dev_warn(dev, "Invalid extra type: %d\n",
 956					 extra.type);
 957			err = -EINVAL;
 958		} else {
 959			extras[extra.type - 1] = extra;
 960		}
 961
 962		skb = xennet_get_rx_skb(queue, cons);
 963		ref = xennet_get_rx_ref(queue, cons);
 964		xennet_move_rx_slot(queue, skb, ref);
 965	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 966
 967	xennet_set_rx_rsp_cons(queue, cons);
 968	return err;
 969}
 970
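/*
 * Run the attached XDP program on one received page and act on the
 * verdict (XDP_TX, XDP_REDIRECT, XDP_PASS, XDP_DROP or XDP_ABORTED).
 */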
 971static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
 972		   struct xen_netif_rx_response *rx, struct bpf_prog *prog,
 973		   struct xdp_buff *xdp, bool *need_xdp_flush)
 974{
 975	struct xdp_frame *xdpf;
 976	u32 len = rx->status;
 977	u32 act;
 978	int err;
 979
 980	xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
 981		      &queue->xdp_rxq);
 982	xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
 983			 len, false);
 984
 985	act = bpf_prog_run_xdp(prog, xdp);
 986	switch (act) {
 987	case XDP_TX:
 988		get_page(pdata);
 989		xdpf = xdp_convert_buff_to_frame(xdp);
 990		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
 991		if (unlikely(!err))
 992			xdp_return_frame_rx_napi(xdpf);
 993		else if (unlikely(err < 0))
 994			trace_xdp_exception(queue->info->netdev, prog, act);
 995		break;
 996	case XDP_REDIRECT:
 997		get_page(pdata);
 998		err = xdp_do_redirect(queue->info->netdev, xdp, prog);
 999		*need_xdp_flush = true;
1000		if (unlikely(err))
1001			trace_xdp_exception(queue->info->netdev, prog, act);
1002		break;
1003	case XDP_PASS:
1004	case XDP_DROP:
1005		break;
1006
1007	case XDP_ABORTED:
1008		trace_xdp_exception(queue->info->netdev, prog, act);
1009		break;
1010
1011	default:
1012		bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act);
1013	}
1014
1015	return act;
1016}
1017
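/*
 * Collect all rx responses that make up one packet onto the provided list,
 * ending the foreign access grants and validating offsets and sizes as we
 * go.
 */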
1018static int xennet_get_responses(struct netfront_queue *queue,
1019				struct netfront_rx_info *rinfo, RING_IDX rp,
1020				struct sk_buff_head *list,
1021				bool *need_xdp_flush)
1022{
1023	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1024	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
1025	RING_IDX cons = queue->rx.rsp_cons;
1026	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
1027	struct xen_netif_extra_info *extras = rinfo->extras;
1028	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
1029	struct device *dev = &queue->info->netdev->dev;
1030	struct bpf_prog *xdp_prog;
1031	struct xdp_buff xdp;
1032	int slots = 1;
1033	int err = 0;
1034	u32 verdict;
1035
1036	if (rx->flags & XEN_NETRXF_extra_info) {
1037		err = xennet_get_extras(queue, extras, rp);
1038		if (!err) {
1039			if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
1040				struct xen_netif_extra_info *xdp;
1041
1042				xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
1043				rx->offset = xdp->u.xdp.headroom;
1044			}
1045		}
1046		cons = queue->rx.rsp_cons;
1047	}
1048
1049	for (;;) {
1050		/*
1051		 * This definitely indicates a bug, either in this driver or in
1052		 * the backend driver. In future this should flag the bad
1053		 * situation to the system controller to reboot the backend.
1054		 */
1055		if (ref == INVALID_GRANT_REF) {
1056			if (net_ratelimit())
1057				dev_warn(dev, "Bad rx response id %d.\n",
1058					 rx->id);
1059			err = -EINVAL;
1060			goto next;
1061		}
1062
1063		if (unlikely(rx->status < 0 ||
1064			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
1065			if (net_ratelimit())
1066				dev_warn(dev, "rx->offset: %u, size: %d\n",
1067					 rx->offset, rx->status);
1068			xennet_move_rx_slot(queue, skb, ref);
1069			err = -EINVAL;
1070			goto next;
1071		}
1072
1073		if (!gnttab_end_foreign_access_ref(ref)) {
1074			dev_alert(dev,
1075				  "Grant still in use by backend domain\n");
1076			queue->info->broken = true;
1077			dev_alert(dev, "Disabled for further use\n");
1078			return -EINVAL;
1079		}
1080
1081		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1082
1083		rcu_read_lock();
1084		xdp_prog = rcu_dereference(queue->xdp_prog);
1085		if (xdp_prog) {
1086			if (!(rx->flags & XEN_NETRXF_more_data)) {
1087				/* currently only a single page contains data */
1088				verdict = xennet_run_xdp(queue,
1089							 skb_frag_page(&skb_shinfo(skb)->frags[0]),
1090							 rx, xdp_prog, &xdp, need_xdp_flush);
1091				if (verdict != XDP_PASS)
1092					err = -EINVAL;
1093			} else {
1094				/* drop the frame */
1095				err = -EINVAL;
1096			}
1097		}
1098		rcu_read_unlock();
1099
1100		__skb_queue_tail(list, skb);
1101
1102next:
1103		if (!(rx->flags & XEN_NETRXF_more_data))
1104			break;
1105
1106		if (cons + slots == rp) {
1107			if (net_ratelimit())
1108				dev_warn(dev, "Need more slots\n");
1109			err = -ENOENT;
1110			break;
1111		}
1112
1113		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1114		rx = &rx_local;
1115		skb = xennet_get_rx_skb(queue, cons + slots);
1116		ref = xennet_get_rx_ref(queue, cons + slots);
1117		slots++;
1118	}
1119
1120	if (unlikely(slots > max)) {
1121		if (net_ratelimit())
1122			dev_warn(dev, "Too many slots\n");
1123		err = -E2BIG;
1124	}
1125
1126	if (unlikely(err))
1127		xennet_set_rx_rsp_cons(queue, cons + slots);
1128
1129	return err;
1130}
1131
1132static int xennet_set_skb_gso(struct sk_buff *skb,
1133			      struct xen_netif_extra_info *gso)
1134{
1135	if (!gso->u.gso.size) {
1136		if (net_ratelimit())
1137			pr_warn("GSO size must not be zero\n");
1138		return -EINVAL;
1139	}
1140
1141	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1142	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1143		if (net_ratelimit())
1144			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1145		return -EINVAL;
1146	}
1147
1148	skb_shinfo(skb)->gso_size = gso->u.gso.size;
1149	skb_shinfo(skb)->gso_type =
1150		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1151		SKB_GSO_TCPV4 :
1152		SKB_GSO_TCPV6;
1153
1154	/* Header must be checked, and gso_segs computed. */
1155	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1156	skb_shinfo(skb)->gso_segs = 0;
1157
1158	return 0;
1159}
1160
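/*
 * Attach the follow-on buffers queued by xennet_get_responses() as page
 * frags of the head skb.
 */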
1161static int xennet_fill_frags(struct netfront_queue *queue,
1162			     struct sk_buff *skb,
1163			     struct sk_buff_head *list)
1164{
1165	RING_IDX cons = queue->rx.rsp_cons;
1166	struct sk_buff *nskb;
1167
1168	while ((nskb = __skb_dequeue(list))) {
1169		struct xen_netif_rx_response rx;
1170		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1171
1172		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1173
1174		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1175			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1176
1177			BUG_ON(pull_to < skb_headlen(skb));
1178			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1179		}
1180		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1181			xennet_set_rx_rsp_cons(queue,
1182					       ++cons + skb_queue_len(list));
1183			kfree_skb(nskb);
1184			return -ENOENT;
1185		}
1186
1187		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1188				skb_frag_page(nfrag),
1189				rx.offset, rx.status, PAGE_SIZE);
1190
1191		skb_shinfo(nskb)->nr_frags = 0;
1192		kfree_skb(nskb);
1193	}
1194
1195	xennet_set_rx_rsp_cons(queue, cons);
1196
1197	return 0;
1198}
1199
1200static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1201{
1202	bool recalculate_partial_csum = false;
1203
1204	/*
1205	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1206	 * peers can fail to set NETRXF_csum_blank when sending a GSO
1207	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1208	 * recalculate the partial checksum.
1209	 */
1210	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1211		struct netfront_info *np = netdev_priv(dev);
1212		atomic_inc(&np->rx_gso_checksum_fixup);
1213		skb->ip_summed = CHECKSUM_PARTIAL;
1214		recalculate_partial_csum = true;
1215	}
1216
1217	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1218	if (skb->ip_summed != CHECKSUM_PARTIAL)
1219		return 0;
1220
1221	return skb_checksum_setup(skb, recalculate_partial_csum);
1222}
1223
1224static int handle_incoming_queue(struct netfront_queue *queue,
1225				 struct sk_buff_head *rxq)
1226{
1227	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1228	int packets_dropped = 0;
1229	struct sk_buff *skb;
1230
1231	while ((skb = __skb_dequeue(rxq)) != NULL) {
1232		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1233
1234		if (pull_to > skb_headlen(skb))
1235			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1236
1237		/* Ethernet work: Delayed to here as it peeks the header. */
1238		skb->protocol = eth_type_trans(skb, queue->info->netdev);
1239		skb_reset_network_header(skb);
1240
1241		if (checksum_setup(queue->info->netdev, skb)) {
1242			kfree_skb(skb);
1243			packets_dropped++;
1244			queue->info->netdev->stats.rx_errors++;
1245			continue;
1246		}
1247
1248		u64_stats_update_begin(&rx_stats->syncp);
1249		rx_stats->packets++;
1250		rx_stats->bytes += skb->len;
1251		u64_stats_update_end(&rx_stats->syncp);
1252
1253		/* Pass it up. */
1254		napi_gro_receive(&queue->napi, skb);
1255	}
1256
1257	return packets_dropped;
1258}
1259
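/*
 * NAPI poll handler: consume up to budget rx responses, build skbs,
 * hand them to the stack via GRO and refill the rx ring.
 */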
1260static int xennet_poll(struct napi_struct *napi, int budget)
1261{
1262	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1263	struct net_device *dev = queue->info->netdev;
1264	struct sk_buff *skb;
1265	struct netfront_rx_info rinfo;
1266	struct xen_netif_rx_response *rx = &rinfo.rx;
1267	struct xen_netif_extra_info *extras = rinfo.extras;
1268	RING_IDX i, rp;
1269	int work_done;
1270	struct sk_buff_head rxq;
1271	struct sk_buff_head errq;
1272	struct sk_buff_head tmpq;
1273	int err;
1274	bool need_xdp_flush = false;
1275
1276	spin_lock(&queue->rx_lock);
1277
1278	skb_queue_head_init(&rxq);
1279	skb_queue_head_init(&errq);
1280	skb_queue_head_init(&tmpq);
1281
1282	rp = queue->rx.sring->rsp_prod;
1283	if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1284		dev_alert(&dev->dev, "Illegal number of responses %u\n",
1285			  rp - queue->rx.rsp_cons);
1286		queue->info->broken = true;
1287		spin_unlock(&queue->rx_lock);
1288		return 0;
1289	}
1290	rmb(); /* Ensure we see queued responses up to 'rp'. */
1291
1292	i = queue->rx.rsp_cons;
1293	work_done = 0;
1294	while ((i != rp) && (work_done < budget)) {
1295		RING_COPY_RESPONSE(&queue->rx, i, rx);
1296		memset(extras, 0, sizeof(rinfo.extras));
1297
1298		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1299					   &need_xdp_flush);
1300
1301		if (unlikely(err)) {
1302			if (queue->info->broken) {
1303				spin_unlock(&queue->rx_lock);
1304				return 0;
1305			}
1306err:
1307			while ((skb = __skb_dequeue(&tmpq)))
1308				__skb_queue_tail(&errq, skb);
1309			dev->stats.rx_errors++;
1310			i = queue->rx.rsp_cons;
1311			continue;
1312		}
1313
1314		skb = __skb_dequeue(&tmpq);
1315
1316		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1317			struct xen_netif_extra_info *gso;
1318			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1319
1320			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1321				__skb_queue_head(&tmpq, skb);
1322				xennet_set_rx_rsp_cons(queue,
1323						       queue->rx.rsp_cons +
1324						       skb_queue_len(&tmpq));
1325				goto err;
1326			}
1327		}
1328
1329		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1330		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1331			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1332
1333		skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1334		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1335		skb->data_len = rx->status;
1336		skb->len += rx->status;
1337
1338		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1339			goto err;
1340
1341		if (rx->flags & XEN_NETRXF_csum_blank)
1342			skb->ip_summed = CHECKSUM_PARTIAL;
1343		else if (rx->flags & XEN_NETRXF_data_validated)
1344			skb->ip_summed = CHECKSUM_UNNECESSARY;
1345
1346		__skb_queue_tail(&rxq, skb);
1347
1348		i = queue->rx.rsp_cons + 1;
1349		xennet_set_rx_rsp_cons(queue, i);
1350		work_done++;
1351	}
1352	if (need_xdp_flush)
1353		xdp_do_flush();
1354
1355	__skb_queue_purge(&errq);
1356
1357	work_done -= handle_incoming_queue(queue, &rxq);
1358
1359	xennet_alloc_rx_buffers(queue);
1360
1361	if (work_done < budget) {
1362		int more_to_do = 0;
1363
1364		napi_complete_done(napi, work_done);
1365
1366		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1367		if (more_to_do)
1368			napi_schedule(napi);
1369	}
1370
1371	spin_unlock(&queue->rx_lock);
1372
1373	return work_done;
1374}
1375
1376static int xennet_change_mtu(struct net_device *dev, int mtu)
1377{
1378	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1379
1380	if (mtu > max)
1381		return -EINVAL;
1382	WRITE_ONCE(dev->mtu, mtu);
1383	return 0;
1384}
1385
1386static void xennet_get_stats64(struct net_device *dev,
1387			       struct rtnl_link_stats64 *tot)
1388{
1389	struct netfront_info *np = netdev_priv(dev);
1390	int cpu;
1391
1392	for_each_possible_cpu(cpu) {
1393		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1394		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1395		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1396		unsigned int start;
1397
1398		do {
1399			start = u64_stats_fetch_begin(&tx_stats->syncp);
1400			tx_packets = tx_stats->packets;
1401			tx_bytes = tx_stats->bytes;
1402		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
1403
1404		do {
1405			start = u64_stats_fetch_begin(&rx_stats->syncp);
1406			rx_packets = rx_stats->packets;
1407			rx_bytes = rx_stats->bytes;
1408		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
1409
1410		tot->rx_packets += rx_packets;
1411		tot->tx_packets += tx_packets;
1412		tot->rx_bytes   += rx_bytes;
1413		tot->tx_bytes   += tx_bytes;
1414	}
1415
1416	tot->rx_errors  = dev->stats.rx_errors;
1417	tot->tx_dropped = dev->stats.tx_dropped;
1418}
1419
1420static void xennet_release_tx_bufs(struct netfront_queue *queue)
1421{
1422	struct sk_buff *skb;
1423	int i;
1424
1425	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1426		/* Skip over entries which are actually freelist references */
1427		if (!queue->tx_skbs[i])
1428			continue;
1429
1430		skb = queue->tx_skbs[i];
1431		queue->tx_skbs[i] = NULL;
1432		get_page(queue->grant_tx_page[i]);
1433		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1434					  queue->grant_tx_page[i]);
1435		queue->grant_tx_page[i] = NULL;
1436		queue->grant_tx_ref[i] = INVALID_GRANT_REF;
1437		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1438		dev_kfree_skb_irq(skb);
1439	}
1440}
1441
1442static void xennet_release_rx_bufs(struct netfront_queue *queue)
1443{
1444	int id, ref;
1445
1446	spin_lock_bh(&queue->rx_lock);
1447
1448	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1449		struct sk_buff *skb;
1450		struct page *page;
1451
1452		skb = queue->rx_skbs[id];
1453		if (!skb)
1454			continue;
1455
1456		ref = queue->grant_rx_ref[id];
1457		if (ref == INVALID_GRANT_REF)
1458			continue;
1459
1460		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1461
1462		/* gnttab_end_foreign_access() needs a page ref until
1463		 * foreign access is ended (which may be deferred).
1464		 */
1465		get_page(page);
1466		gnttab_end_foreign_access(ref, page);
1467		queue->grant_rx_ref[id] = INVALID_GRANT_REF;
1468
1469		kfree_skb(skb);
1470	}
1471
1472	spin_unlock_bh(&queue->rx_lock);
1473}
1474
1475static netdev_features_t xennet_fix_features(struct net_device *dev,
1476	netdev_features_t features)
1477{
1478	struct netfront_info *np = netdev_priv(dev);
1479
1480	if (features & NETIF_F_SG &&
1481	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1482		features &= ~NETIF_F_SG;
1483
1484	if (features & NETIF_F_IPV6_CSUM &&
1485	    !xenbus_read_unsigned(np->xbdev->otherend,
1486				  "feature-ipv6-csum-offload", 0))
1487		features &= ~NETIF_F_IPV6_CSUM;
1488
1489	if (features & NETIF_F_TSO &&
1490	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1491		features &= ~NETIF_F_TSO;
1492
1493	if (features & NETIF_F_TSO6 &&
1494	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1495		features &= ~NETIF_F_TSO6;
1496
1497	return features;
1498}
1499
1500static int xennet_set_features(struct net_device *dev,
1501	netdev_features_t features)
1502{
1503	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1504		netdev_info(dev, "Reducing MTU because no SG offload");
1505		dev->mtu = ETH_DATA_LEN;
1506	}
1507
1508	return 0;
1509}
1510
1511static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1512{
1513	unsigned long flags;
1514
1515	if (unlikely(queue->info->broken))
1516		return false;
1517
1518	spin_lock_irqsave(&queue->tx_lock, flags);
1519	if (xennet_tx_buf_gc(queue))
1520		*eoi = 0;
1521	spin_unlock_irqrestore(&queue->tx_lock, flags);
1522
1523	return true;
1524}
1525
1526static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1527{
1528	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1529
1530	if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1531		xen_irq_lateeoi(irq, eoiflag);
1532
1533	return IRQ_HANDLED;
1534}
1535
1536static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1537{
1538	unsigned int work_queued;
1539	unsigned long flags;
1540
1541	if (unlikely(queue->info->broken))
1542		return false;
1543
1544	spin_lock_irqsave(&queue->rx_cons_lock, flags);
1545	work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
1546	if (work_queued > queue->rx_rsp_unconsumed) {
1547		queue->rx_rsp_unconsumed = work_queued;
1548		*eoi = 0;
1549	} else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1550		const struct device *dev = &queue->info->netdev->dev;
1551
1552		spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1553		dev_alert(dev, "RX producer index going backwards\n");
1554		dev_alert(dev, "Disabled for further use\n");
1555		queue->info->broken = true;
1556		return false;
1557	}
1558	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1559
1560	if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1561		napi_schedule(&queue->napi);
1562
1563	return true;
1564}
1565
1566static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1567{
1568	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1569
1570	if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1571		xen_irq_lateeoi(irq, eoiflag);
1572
1573	return IRQ_HANDLED;
1574}
1575
1576static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1577{
1578	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1579
1580	if (xennet_handle_tx(dev_id, &eoiflag) &&
1581	    xennet_handle_rx(dev_id, &eoiflag))
1582		xen_irq_lateeoi(irq, eoiflag);
1583
1584	return IRQ_HANDLED;
1585}
1586
1587#ifdef CONFIG_NET_POLL_CONTROLLER
1588static void xennet_poll_controller(struct net_device *dev)
1589{
1590	/* Poll each queue */
1591	struct netfront_info *info = netdev_priv(dev);
1592	unsigned int num_queues = dev->real_num_tx_queues;
1593	unsigned int i;
1594
1595	if (info->broken)
1596		return;
1597
1598	for (i = 0; i < num_queues; ++i)
1599		xennet_interrupt(0, &info->queues[i]);
1600}
1601#endif
1602
1603#define NETBACK_XDP_HEADROOM_DISABLE	0
1604#define NETBACK_XDP_HEADROOM_ENABLE	1
1605
1606static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1607{
1608	int err;
1609	unsigned short headroom;
1610
1611	headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1612	err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1613			    "xdp-headroom", "%hu",
1614			    headroom);
1615	if (err)
1616		pr_warn("Error writing xdp-headroom\n");
1617
1618	return err;
1619}
1620
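/*
 * Install or remove an XDP program.  The headroom request is renegotiated
 * with the backend via xenbus before the program is swapped on all queues.
 */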
1621static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1622			  struct netlink_ext_ack *extack)
1623{
1624	unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1625	struct netfront_info *np = netdev_priv(dev);
1626	struct bpf_prog *old_prog;
1627	unsigned int i, err;
1628
1629	if (dev->mtu > max_mtu) {
1630		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
1631		return -EINVAL;
1632	}
1633
1634	if (!np->netback_has_xdp_headroom)
1635		return 0;
1636
1637	xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1638
1639	err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1640				  NETBACK_XDP_HEADROOM_DISABLE);
1641	if (err)
1642		return err;
1643
1644	/* avoid the race with XDP headroom adjustment */
1645	wait_event(module_wq,
1646		   xenbus_read_driver_state(np->xbdev->otherend) ==
1647		   XenbusStateReconfigured);
1648	np->netfront_xdp_enabled = true;
1649
1650	old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1651
1652	if (prog)
1653		bpf_prog_add(prog, dev->real_num_tx_queues);
1654
1655	for (i = 0; i < dev->real_num_tx_queues; ++i)
1656		rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1657
1658	if (old_prog)
1659		for (i = 0; i < dev->real_num_tx_queues; ++i)
1660			bpf_prog_put(old_prog);
1661
1662	xenbus_switch_state(np->xbdev, XenbusStateConnected);
1663
1664	return 0;
1665}
1666
1667static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1668{
1669	struct netfront_info *np = netdev_priv(dev);
1670
1671	if (np->broken)
1672		return -ENODEV;
1673
1674	switch (xdp->command) {
1675	case XDP_SETUP_PROG:
1676		return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1677	default:
1678		return -EINVAL;
1679	}
1680}
1681
1682static const struct net_device_ops xennet_netdev_ops = {
1683	.ndo_uninit          = xennet_uninit,
1684	.ndo_open            = xennet_open,
1685	.ndo_stop            = xennet_close,
1686	.ndo_start_xmit      = xennet_start_xmit,
1687	.ndo_change_mtu	     = xennet_change_mtu,
1688	.ndo_get_stats64     = xennet_get_stats64,
1689	.ndo_set_mac_address = eth_mac_addr,
1690	.ndo_validate_addr   = eth_validate_addr,
1691	.ndo_fix_features    = xennet_fix_features,
1692	.ndo_set_features    = xennet_set_features,
1693	.ndo_select_queue    = xennet_select_queue,
1694	.ndo_bpf            = xennet_xdp,
1695	.ndo_xdp_xmit	    = xennet_xdp_xmit,
1696#ifdef CONFIG_NET_POLL_CONTROLLER
1697	.ndo_poll_controller = xennet_poll_controller,
1698#endif
1699};
1700
1701static void xennet_free_netdev(struct net_device *netdev)
1702{
1703	struct netfront_info *np = netdev_priv(netdev);
1704
1705	free_percpu(np->rx_stats);
1706	free_percpu(np->tx_stats);
1707	free_netdev(netdev);
1708}
1709
1710static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1711{
1712	int err;
1713	struct net_device *netdev;
1714	struct netfront_info *np;
1715
1716	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1717	if (!netdev)
1718		return ERR_PTR(-ENOMEM);
1719
1720	np                   = netdev_priv(netdev);
1721	np->xbdev            = dev;
1722
1723	np->queues = NULL;
1724
1725	err = -ENOMEM;
1726	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1727	if (np->rx_stats == NULL)
1728		goto exit;
1729	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1730	if (np->tx_stats == NULL)
1731		goto exit;
1732
1733	netdev->netdev_ops	= &xennet_netdev_ops;
1734
1735	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1736				  NETIF_F_GSO_ROBUST;
1737	netdev->hw_features	= NETIF_F_SG |
1738				  NETIF_F_IPV6_CSUM |
1739				  NETIF_F_TSO | NETIF_F_TSO6;
1740
1741	/*
1742         * Assume that all hw features are available for now. This set
1743         * will be adjusted by the call to netdev_update_features() in
1744         * xennet_connect() which is the earliest point where we can
1745         * negotiate with the backend regarding supported features.
1746         */
1747	netdev->features |= netdev->hw_features;
1748	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
1749			       NETDEV_XDP_ACT_NDO_XMIT;
1750
1751	netdev->ethtool_ops = &xennet_ethtool_ops;
1752	netdev->min_mtu = ETH_MIN_MTU;
1753	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1754	SET_NETDEV_DEV(netdev, &dev->dev);
1755
1756	np->netdev = netdev;
1757	np->netfront_xdp_enabled = false;
1758
1759	netif_carrier_off(netdev);
1760
1761	do {
1762		xenbus_switch_state(dev, XenbusStateInitialising);
1763		err = wait_event_timeout(module_wq,
1764				 xenbus_read_driver_state(dev->otherend) !=
1765				 XenbusStateClosed &&
1766				 xenbus_read_driver_state(dev->otherend) !=
1767				 XenbusStateUnknown, XENNET_TIMEOUT);
1768	} while (!err);
1769
1770	return netdev;
1771
1772 exit:
1773	xennet_free_netdev(netdev);
1774	return ERR_PTR(err);
1775}
1776
1777/*
1778 * Entry point to this code when a new device is created.  Allocate the basic
1779 * structures and the ring buffers for communication with the backend, and
1780 * inform the backend of the appropriate details for those.
1781 */
1782static int netfront_probe(struct xenbus_device *dev,
1783			  const struct xenbus_device_id *id)
1784{
1785	int err;
1786	struct net_device *netdev;
1787	struct netfront_info *info;
1788
1789	netdev = xennet_create_dev(dev);
1790	if (IS_ERR(netdev)) {
1791		err = PTR_ERR(netdev);
1792		xenbus_dev_fatal(dev, err, "creating netdev");
1793		return err;
1794	}
1795
1796	info = netdev_priv(netdev);
1797	dev_set_drvdata(&dev->dev, info);
1798#ifdef CONFIG_SYSFS
1799	info->netdev->sysfs_groups[0] = &xennet_dev_group;
1800#endif
1801
1802	return 0;
1803}
1804
1805static void xennet_end_access(int ref, void *page)
1806{
1807	/* This frees the page as a side-effect */
1808	if (ref != INVALID_GRANT_REF)
1809		gnttab_end_foreign_access(ref, virt_to_page(page));
1810}
1811
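/*
 * Tear down the connection to the backend: unbind IRQs and event channels,
 * release outstanding tx/rx buffers and grants, and free the shared rings.
 */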
1812static void xennet_disconnect_backend(struct netfront_info *info)
1813{
1814	unsigned int i = 0;
1815	unsigned int num_queues = info->netdev->real_num_tx_queues;
1816
1817	netif_carrier_off(info->netdev);
1818
1819	for (i = 0; i < num_queues && info->queues; ++i) {
1820		struct netfront_queue *queue = &info->queues[i];
1821
1822		del_timer_sync(&queue->rx_refill_timer);
1823
1824		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1825			unbind_from_irqhandler(queue->tx_irq, queue);
1826		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1827			unbind_from_irqhandler(queue->tx_irq, queue);
1828			unbind_from_irqhandler(queue->rx_irq, queue);
1829		}
1830		queue->tx_evtchn = queue->rx_evtchn = 0;
1831		queue->tx_irq = queue->rx_irq = 0;
1832
1833		if (netif_running(info->netdev))
1834			napi_synchronize(&queue->napi);
1835
1836		xennet_release_tx_bufs(queue);
1837		xennet_release_rx_bufs(queue);
1838		gnttab_free_grant_references(queue->gref_tx_head);
1839		gnttab_free_grant_references(queue->gref_rx_head);
1840
1841		/* End access and free the pages */
1842		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1843		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1844
1845		queue->tx_ring_ref = INVALID_GRANT_REF;
1846		queue->rx_ring_ref = INVALID_GRANT_REF;
1847		queue->tx.sring = NULL;
1848		queue->rx.sring = NULL;
1849
1850		page_pool_destroy(queue->page_pool);
1851	}
1852}
1853
1854/*
1855 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1856 * driver restart.  We tear down our netif structure and recreate it, but
1857 * leave the device-layer structures intact so that this is transparent to the
1858 * rest of the kernel.
1859 */
1860static int netfront_resume(struct xenbus_device *dev)
1861{
1862	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1863
1864	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1865
1866	netif_tx_lock_bh(info->netdev);
1867	netif_device_detach(info->netdev);
1868	netif_tx_unlock_bh(info->netdev);
1869
1870	xennet_disconnect_backend(info);
1871
1872	rtnl_lock();
1873	if (info->queues)
1874		xennet_destroy_queues(info);
1875	rtnl_unlock();
1876
1877	return 0;
1878}
1879
1880static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1881{
1882	char *s, *e, *macstr;
1883	int i;
1884
1885	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1886	if (IS_ERR(macstr))
1887		return PTR_ERR(macstr);
1888
1889	for (i = 0; i < ETH_ALEN; i++) {
1890		mac[i] = simple_strtoul(s, &e, 16);
1891		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1892			kfree(macstr);
1893			return -ENOENT;
1894		}
1895		s = e+1;
1896	}
1897
1898	kfree(macstr);
1899	return 0;
1900}
1901
1902static int setup_netfront_single(struct netfront_queue *queue)
1903{
1904	int err;
1905
1906	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1907	if (err < 0)
1908		goto fail;
1909
1910	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1911						xennet_interrupt, 0,
1912						queue->info->netdev->name,
1913						queue);
1914	if (err < 0)
1915		goto bind_fail;
1916	queue->rx_evtchn = queue->tx_evtchn;
1917	queue->rx_irq = queue->tx_irq = err;
1918
1919	return 0;
1920
1921bind_fail:
1922	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1923	queue->tx_evtchn = 0;
1924fail:
1925	return err;
1926}
1927
1928static int setup_netfront_split(struct netfront_queue *queue)
1929{
1930	int err;
1931
1932	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1933	if (err < 0)
1934		goto fail;
1935	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1936	if (err < 0)
1937		goto alloc_rx_evtchn_fail;
1938
1939	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1940		 "%s-tx", queue->name);
1941	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1942						xennet_tx_interrupt, 0,
1943						queue->tx_irq_name, queue);
1944	if (err < 0)
1945		goto bind_tx_fail;
1946	queue->tx_irq = err;
1947
1948	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1949		 "%s-rx", queue->name);
1950	err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1951						xennet_rx_interrupt, 0,
1952						queue->rx_irq_name, queue);
1953	if (err < 0)
1954		goto bind_rx_fail;
1955	queue->rx_irq = err;
1956
1957	return 0;
1958
1959bind_rx_fail:
1960	unbind_from_irqhandler(queue->tx_irq, queue);
1961	queue->tx_irq = 0;
1962bind_tx_fail:
1963	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1964	queue->rx_evtchn = 0;
1965alloc_rx_evtchn_fail:
1966	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1967	queue->tx_evtchn = 0;
1968fail:
1969	return err;
1970}
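/*
 * Illustrative note (not in the upstream source): with split event channels a
 * hypothetical device 7, queue 0 ends up with two interrupts,
 *
 *	"vif7-q0-tx" -> xennet_tx_interrupt()
 *	"vif7-q0-rx" -> xennet_rx_interrupt()
 *
 * while setup_netfront_single() registers a single xennet_interrupt() handler
 * and leaves tx_irq == rx_irq and tx_evtchn == rx_evtchn.
 */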
1971
1972static int setup_netfront(struct xenbus_device *dev,
1973			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1974{
1975	struct xen_netif_tx_sring *txs;
1976	struct xen_netif_rx_sring *rxs;
 
1977	int err;
1978
1979	queue->tx_ring_ref = INVALID_GRANT_REF;
1980	queue->rx_ring_ref = INVALID_GRANT_REF;
1981	queue->rx.sring = NULL;
1982	queue->tx.sring = NULL;
1983
1984	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
1985				1, &queue->tx_ring_ref);
1986	if (err)
1987		goto fail;
1988
1989	XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1990
1991	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
1992				1, &queue->rx_ring_ref);
1993	if (err)
1994		goto fail;
1995
1996	XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1997
1998	if (feature_split_evtchn)
1999		err = setup_netfront_split(queue);
2000	/* setup single event channel if
2001	 *  a) feature-split-event-channels == 0
2002	 *  b) feature-split-event-channels == 1 but the split setup failed
2003	 */
2004	if (!feature_split_evtchn || err)
2005		err = setup_netfront_single(queue);
2006
2007	if (err)
2008		goto fail;
2009
2010	return 0;
2011
2012 fail:
2013	xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
2014	xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
2015
2016	return err;
2017}
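/*
 * Editorial note (not part of the upstream file): each ring is one
 * XEN_PAGE_SIZE page granted to the backend by xenbus_setup_ring(), which
 * with 4 KiB pages typically gives 256 tx and 256 rx slots per queue.
 * Event-channel setup prefers the split tx/rx layout and silently falls back
 * to a single shared channel when feature-split-event-channels is absent or
 * the split setup fails; on any error both rings are torn down again before
 * returning.
 */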
2018
2019/* Queue-specific initialisation
2020 * This used to be done in xennet_create_dev() but must now
2021 * be run per-queue.
2022 */
2023static int xennet_init_queue(struct netfront_queue *queue)
2024{
2025	unsigned short i;
2026	int err = 0;
2027	char *devid;
2028
2029	spin_lock_init(&queue->tx_lock);
2030	spin_lock_init(&queue->rx_lock);
2031	spin_lock_init(&queue->rx_cons_lock);
2032
2033	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
 
2034
2035	devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
2036	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2037		 devid, queue->id);
2038
2039	/* Initialise tx_skb_freelist as a free chain containing every entry. */
2040	queue->tx_skb_freelist = 0;
2041	queue->tx_pend_queue = TX_LINK_NONE;
2042	for (i = 0; i < NET_TX_RING_SIZE; i++) {
2043		queue->tx_link[i] = i + 1;
2044		queue->grant_tx_ref[i] = INVALID_GRANT_REF;
2045		queue->grant_tx_page[i] = NULL;
2046	}
2047	queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2048
2049	/* Clear out rx_skbs */
2050	for (i = 0; i < NET_RX_RING_SIZE; i++) {
2051		queue->rx_skbs[i] = NULL;
2052		queue->grant_rx_ref[i] = INVALID_GRANT_REF;
2053	}
2054
2055	/* A grant for every tx ring slot */
2056	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2057					  &queue->gref_tx_head) < 0) {
2058		pr_alert("can't alloc tx grant refs\n");
2059		err = -ENOMEM;
2060		goto exit;
2061	}
2062
2063	/* A grant for every rx ring slot */
2064	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
2065					  &queue->gref_rx_head) < 0) {
2066		pr_alert("can't alloc rx grant refs\n");
2067		err = -ENOMEM;
2068		goto exit_free_tx;
2069	}
2070
2071	return 0;
2072
2073 exit_free_tx:
2074	gnttab_free_grant_references(queue->gref_tx_head);
2075 exit:
2076	return err;
2077}
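/*
 * Illustrative note (not in the upstream source): after xennet_init_queue()
 * the tx free list is just a chain threaded through tx_link[]:
 *
 *	tx_skb_freelist = 0
 *	tx_link[0] = 1, tx_link[1] = 2, ...,
 *	tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE
 *
 * so allocating a tx slot pops the head of the chain and completing a
 * transmission pushes the slot back.
 */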
2078
2079static int write_queue_xenstore_keys(struct netfront_queue *queue,
2080			   struct xenbus_transaction *xbt, int write_hierarchical)
2081{
2082	/* Write the queue-specific keys into XenStore in the traditional
2083	 * way for a single queue, or in a queue subkeys for multiple
2084	 * queues.
2085	 */
2086	struct xenbus_device *dev = queue->info->xbdev;
2087	int err;
2088	const char *message;
2089	char *path;
2090	size_t pathsize;
2091
2092	/* Choose the correct place to write the keys */
2093	if (write_hierarchical) {
2094		pathsize = strlen(dev->nodename) + 10;
2095		path = kzalloc(pathsize, GFP_KERNEL);
2096		if (!path) {
2097			err = -ENOMEM;
2098			message = "out of memory while writing ring references";
2099			goto error;
2100		}
2101		snprintf(path, pathsize, "%s/queue-%u",
2102				dev->nodename, queue->id);
2103	} else {
2104		path = (char *)dev->nodename;
2105	}
2106
2107	/* Write ring references */
2108	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
2109			queue->tx_ring_ref);
2110	if (err) {
2111		message = "writing tx-ring-ref";
2112		goto error;
2113	}
2114
2115	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
2116			queue->rx_ring_ref);
2117	if (err) {
2118		message = "writing rx-ring-ref";
2119		goto error;
2120	}
2121
2122	/* Write event channels; taking into account both shared
2123	 * and split event channel scenarios.
2124	 */
2125	if (queue->tx_evtchn == queue->rx_evtchn) {
2126		/* Shared event channel */
2127		err = xenbus_printf(*xbt, path,
2128				"event-channel", "%u", queue->tx_evtchn);
2129		if (err) {
2130			message = "writing event-channel";
2131			goto error;
2132		}
2133	} else {
2134		/* Split event channels */
2135		err = xenbus_printf(*xbt, path,
2136				"event-channel-tx", "%u", queue->tx_evtchn);
2137		if (err) {
2138			message = "writing event-channel-tx";
2139			goto error;
2140		}
2141
2142		err = xenbus_printf(*xbt, path,
2143				"event-channel-rx", "%u", queue->rx_evtchn);
2144		if (err) {
2145			message = "writing event-channel-rx";
2146			goto error;
2147		}
2148	}
2149
2150	if (write_hierarchical)
2151		kfree(path);
2152	return 0;
2153
2154error:
2155	if (write_hierarchical)
2156		kfree(path);
2157	xenbus_dev_fatal(dev, err, "%s", message);
2158	return err;
2159}
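/*
 * Illustrative example (not in the upstream source): for a single queue the
 * keys are written flat under the device node, e.g.
 *
 *	<nodename>/tx-ring-ref   = "8"
 *	<nodename>/rx-ring-ref   = "9"
 *	<nodename>/event-channel = "17"             (shared event channel)
 *
 * whereas write_hierarchical puts each queue in its own subdirectory:
 *
 *	<nodename>/queue-0/tx-ring-ref      = "8"
 *	<nodename>/queue-0/rx-ring-ref      = "9"
 *	<nodename>/queue-0/event-channel-tx = "17"
 *	<nodename>/queue-0/event-channel-rx = "18"  (split event channels)
 *
 * All numeric values above are made up for illustration.
 */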
2160
2161
2162
2163static int xennet_create_page_pool(struct netfront_queue *queue)
2164{
2165	int err;
2166	struct page_pool_params pp_params = {
2167		.order = 0,
2168		.flags = 0,
2169		.pool_size = NET_RX_RING_SIZE,
2170		.nid = NUMA_NO_NODE,
2171		.dev = &queue->info->netdev->dev,
2172		.offset = XDP_PACKET_HEADROOM,
2173		.max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2174	};
2175
2176	queue->page_pool = page_pool_create(&pp_params);
2177	if (IS_ERR(queue->page_pool)) {
2178		err = PTR_ERR(queue->page_pool);
2179		queue->page_pool = NULL;
2180		return err;
2181	}
2182
2183	err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2184			       queue->id, 0);
2185	if (err) {
2186		netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2187		goto err_free_pp;
2188	}
2189
2190	err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2191					 MEM_TYPE_PAGE_POOL, queue->page_pool);
2192	if (err) {
2193		netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2194		goto err_unregister_rxq;
2195	}
2196	return 0;
2197
2198err_unregister_rxq:
2199	xdp_rxq_info_unreg(&queue->xdp_rxq);
2200err_free_pp:
2201	page_pool_destroy(queue->page_pool);
2202	queue->page_pool = NULL;
2203	return err;
2204}
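/*
 * Editorial note (not part of the upstream file): the page pool is sized to
 * the rx ring (NET_RX_RING_SIZE order-0 pages) and every buffer is registered
 * with an XDP_PACKET_HEADROOM offset, so an attached XDP program can push
 * headers without copying; max_len shrinks by the same amount.  The pool is
 * then exposed to the XDP core as the memory model of this queue's rxq info.
 */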
2205
2206static int xennet_create_queues(struct netfront_info *info,
2207				unsigned int *num_queues)
2208{
2209	unsigned int i;
2210	int ret;
2211
2212	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2213			       GFP_KERNEL);
2214	if (!info->queues)
2215		return -ENOMEM;
2216
2217	for (i = 0; i < *num_queues; i++) {
2218		struct netfront_queue *queue = &info->queues[i];
2219
2220		queue->id = i;
2221		queue->info = info;
2222
2223		ret = xennet_init_queue(queue);
2224		if (ret < 0) {
2225			dev_warn(&info->xbdev->dev,
2226				 "only created %d queues\n", i);
2227			*num_queues = i;
2228			break;
2229		}
2230
2231		/* use page pool recycling instead of buddy allocator */
2232		ret = xennet_create_page_pool(queue);
2233		if (ret < 0) {
2234			dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2235			*num_queues = i;
2236			return ret;
2237		}
2238
2239		netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
2240		if (netif_running(info->netdev))
2241			napi_enable(&queue->napi);
2242	}
2243
2244	netif_set_real_num_tx_queues(info->netdev, *num_queues);
2245
2246	if (*num_queues == 0) {
2247		dev_err(&info->xbdev->dev, "no queues\n");
2248		return -EINVAL;
2249	}
2250	return 0;
2251}
2252
2253/* Common code used when first setting up, and when resuming. */
2254static int talk_to_netback(struct xenbus_device *dev,
2255			   struct netfront_info *info)
2256{
2257	const char *message;
2258	struct xenbus_transaction xbt;
2259	int err;
2260	unsigned int feature_split_evtchn;
2261	unsigned int i = 0;
2262	unsigned int max_queues = 0;
2263	struct netfront_queue *queue = NULL;
2264	unsigned int num_queues = 1;
2265	u8 addr[ETH_ALEN];
2266
2267	info->netdev->irq = 0;
2268
2269	/* Check if backend is trusted. */
2270	info->bounce = !xennet_trusted ||
2271		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
2272
2273	/* Check if backend supports multiple queues */
2274	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2275					  "multi-queue-max-queues", 1);
2276	num_queues = min(max_queues, xennet_max_queues);
2277
2278	/* Check feature-split-event-channels */
2279	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2280					"feature-split-event-channels", 0);
2281
2282	/* Read mac addr. */
2283	err = xen_net_read_mac(dev, addr);
2284	if (err) {
2285		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2286		goto out_unlocked;
2287	}
2288	eth_hw_addr_set(info->netdev, addr);
2289
2290	info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2291							      "feature-xdp-headroom", 0);
2292	if (info->netback_has_xdp_headroom) {
2293		/* set the current xen-netfront xdp state */
2294		err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2295					  NETBACK_XDP_HEADROOM_ENABLE :
2296					  NETBACK_XDP_HEADROOM_DISABLE);
2297		if (err)
2298			goto out_unlocked;
2299	}
2300
2301	rtnl_lock();
2302	if (info->queues)
2303		xennet_destroy_queues(info);
2304
2305	/* For the case of a reconnect reset the "broken" indicator. */
2306	info->broken = false;
2307
2308	err = xennet_create_queues(info, &num_queues);
2309	if (err < 0) {
2310		xenbus_dev_fatal(dev, err, "creating queues");
2311		kfree(info->queues);
2312		info->queues = NULL;
2313		goto out;
2314	}
2315	rtnl_unlock();
2316
2317	/* Create shared ring, alloc event channel -- for each queue */
2318	for (i = 0; i < num_queues; ++i) {
2319		queue = &info->queues[i];
2320		err = setup_netfront(dev, queue, feature_split_evtchn);
2321		if (err)
2322			goto destroy_ring;
2323	}
2324
2325again:
2326	err = xenbus_transaction_start(&xbt);
2327	if (err) {
2328		xenbus_dev_fatal(dev, err, "starting transaction");
2329		goto destroy_ring;
2330	}
2331
2332	if (xenbus_exists(XBT_NIL,
2333			  info->xbdev->otherend, "multi-queue-max-queues")) {
2334		/* Write the number of queues */
2335		err = xenbus_printf(xbt, dev->nodename,
2336				    "multi-queue-num-queues", "%u", num_queues);
2337		if (err) {
2338			message = "writing multi-queue-num-queues";
2339			goto abort_transaction_no_dev_fatal;
2340		}
2341	}
2342
2343	if (num_queues == 1) {
2344		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2345		if (err)
2346			goto abort_transaction_no_dev_fatal;
2347	} else {
2348		/* Write the keys for each queue */
2349		for (i = 0; i < num_queues; ++i) {
2350			queue = &info->queues[i];
2351			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2352			if (err)
2353				goto abort_transaction_no_dev_fatal;
2354		}
2355	}
2356
2357	/* The remaining keys are not queue-specific */
2358	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2359			    1);
2360	if (err) {
2361		message = "writing request-rx-copy";
2362		goto abort_transaction;
2363	}
2364
2365	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2366	if (err) {
2367		message = "writing feature-rx-notify";
2368		goto abort_transaction;
2369	}
2370
2371	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2372	if (err) {
2373		message = "writing feature-sg";
2374		goto abort_transaction;
2375	}
2376
2377	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2378	if (err) {
2379		message = "writing feature-gso-tcpv4";
2380		goto abort_transaction;
2381	}
2382
2383	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2384	if (err) {
2385		message = "writing feature-gso-tcpv6";
2386		goto abort_transaction;
2387	}
2388
2389	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2390			   "1");
2391	if (err) {
2392		message = "writing feature-ipv6-csum-offload";
2393		goto abort_transaction;
2394	}
2395
2396	err = xenbus_transaction_end(xbt, 0);
2397	if (err) {
2398		if (err == -EAGAIN)
2399			goto again;
2400		xenbus_dev_fatal(dev, err, "completing transaction");
2401		goto destroy_ring;
2402	}
2403
2404	return 0;
2405
2406 abort_transaction:
2407	xenbus_dev_fatal(dev, err, "%s", message);
2408abort_transaction_no_dev_fatal:
2409	xenbus_transaction_end(xbt, 1);
2410 destroy_ring:
2411	xennet_disconnect_backend(info);
2412	rtnl_lock();
2413	xennet_destroy_queues(info);
2414 out:
2415	rtnl_unlock();
2416out_unlocked:
2417	device_unregister(&dev->dev);
2418	return err;
2419}
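/*
 * Illustrative example (not in the upstream source): after a successful
 * transaction the frontend area contains, in addition to the per-queue keys
 * from write_queue_xenstore_keys(), roughly
 *
 *	multi-queue-num-queues    = "<n>"   (only if the backend advertises
 *	                                     multi-queue-max-queues)
 *	request-rx-copy           = "1"
 *	feature-rx-notify         = "1"
 *	feature-sg                = "1"
 *	feature-gso-tcpv4         = "1"
 *	feature-gso-tcpv6         = "1"
 *	feature-ipv6-csum-offload = "1"
 *
 * A transaction ending in -EAGAIN is simply restarted from the "again:" label.
 */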
2420
2421static int xennet_connect(struct net_device *dev)
2422{
2423	struct netfront_info *np = netdev_priv(dev);
2424	unsigned int num_queues = 0;
2425	int err;
2426	unsigned int j = 0;
2427	struct netfront_queue *queue = NULL;
2428
2429	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
2430		dev_info(&dev->dev,
2431			 "backend does not support copying receive path\n");
2432		return -ENODEV;
2433	}
2434
2435	err = talk_to_netback(np->xbdev, np);
2436	if (err)
2437		return err;
2438	if (np->netback_has_xdp_headroom)
2439		pr_info("backend supports XDP headroom\n");
2440	if (np->bounce)
2441		dev_info(&np->xbdev->dev,
2442			 "bouncing transmitted data to zeroed pages\n");
2443
2444	/* talk_to_netback() sets the correct number of queues */
2445	num_queues = dev->real_num_tx_queues;
2446
2447	if (dev->reg_state == NETREG_UNINITIALIZED) {
2448		err = register_netdev(dev);
2449		if (err) {
2450			pr_warn("%s: register_netdev err=%d\n", __func__, err);
2451			device_unregister(&np->xbdev->dev);
2452			return err;
2453		}
2454	}
2455
2456	rtnl_lock();
2457	netdev_update_features(dev);
2458	rtnl_unlock();
2459
2460	/*
2461	 * All public and private state should now be sane.  Get
2462	 * ready to start sending and receiving packets and give the driver
2463	 * domain a kick because we've probably just requeued some
2464	 * packets.
2465	 */
2466	netif_tx_lock_bh(np->netdev);
2467	netif_device_attach(np->netdev);
2468	netif_tx_unlock_bh(np->netdev);
2469
2470	netif_carrier_on(np->netdev);
2471	for (j = 0; j < num_queues; ++j) {
2472		queue = &np->queues[j];
2473
2474		notify_remote_via_irq(queue->tx_irq);
2475		if (queue->tx_irq != queue->rx_irq)
2476			notify_remote_via_irq(queue->rx_irq);
2477
2478		spin_lock_bh(&queue->rx_lock);
2479		xennet_alloc_rx_buffers(queue);
2480		spin_unlock_bh(&queue->rx_lock);
2481	}
2482
2483	return 0;
2484}
2485
2486/*
2487 * Callback received when the backend's state changes.
2488 */
2489static void netback_changed(struct xenbus_device *dev,
2490			    enum xenbus_state backend_state)
2491{
2492	struct netfront_info *np = dev_get_drvdata(&dev->dev);
2493	struct net_device *netdev = np->netdev;
2494
2495	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2496
2497	wake_up_all(&module_wq);
2498
2499	switch (backend_state) {
2500	case XenbusStateInitialising:
2501	case XenbusStateInitialised:
2502	case XenbusStateReconfiguring:
2503	case XenbusStateReconfigured:
2504	case XenbusStateUnknown:
2505		break;
2506
2507	case XenbusStateInitWait:
2508		if (dev->state != XenbusStateInitialising)
2509			break;
2510		if (xennet_connect(netdev) != 0)
2511			break;
2512		xenbus_switch_state(dev, XenbusStateConnected);
2513		break;
2514
2515	case XenbusStateConnected:
2516		netdev_notify_peers(netdev);
2517		break;
2518
2519	case XenbusStateClosed:
2520		if (dev->state == XenbusStateClosed)
2521			break;
2522		fallthrough;	/* Missed the backend's CLOSING state */
2523	case XenbusStateClosing:
2524		xenbus_frontend_closed(dev);
2525		break;
2526	}
2527}
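/*
 * Editorial note (not part of the upstream file): the common handshake as
 * seen from this callback is
 *
 *	backend InitWait  -> xennet_connect() and, on success, switch the
 *	                     frontend to XenbusStateConnected
 *	backend Connected -> netdev_notify_peers() (gratuitous ARP/ND)
 *	backend Closing/
 *	        Closed    -> xenbus_frontend_closed()
 *
 * The Initialising/Reconfiguring/Unknown states are deliberately ignored.
 */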
2528
2529static const struct xennet_stat {
2530	char name[ETH_GSTRING_LEN];
2531	u16 offset;
2532} xennet_stats[] = {
2533	{
2534		"rx_gso_checksum_fixup",
2535		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2536	},
2537};
2538
2539static int xennet_get_sset_count(struct net_device *dev, int string_set)
2540{
2541	switch (string_set) {
2542	case ETH_SS_STATS:
2543		return ARRAY_SIZE(xennet_stats);
2544	default:
2545		return -EINVAL;
2546	}
2547}
2548
2549static void xennet_get_ethtool_stats(struct net_device *dev,
2550				     struct ethtool_stats *stats, u64 * data)
2551{
2552	void *np = netdev_priv(dev);
2553	int i;
2554
2555	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2556		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2557}
2558
2559static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2560{
2561	int i;
2562
2563	switch (stringset) {
2564	case ETH_SS_STATS:
2565		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2566			memcpy(data + i * ETH_GSTRING_LEN,
2567			       xennet_stats[i].name, ETH_GSTRING_LEN);
2568		break;
2569	}
2570}
2571
2572static const struct ethtool_ops xennet_ethtool_ops =
2573{
2574	.get_link = ethtool_op_get_link,
2575
2576	.get_sset_count = xennet_get_sset_count,
2577	.get_ethtool_stats = xennet_get_ethtool_stats,
2578	.get_strings = xennet_get_strings,
2579	.get_ts_info = ethtool_op_get_ts_info,
2580};
2581
2582#ifdef CONFIG_SYSFS
2583static ssize_t show_rxbuf(struct device *dev,
2584			  struct device_attribute *attr, char *buf)
2585{
2586	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2587}
2588
2589static ssize_t store_rxbuf(struct device *dev,
2590			   struct device_attribute *attr,
2591			   const char *buf, size_t len)
2592{
2593	char *endp;
 
2594
2595	if (!capable(CAP_NET_ADMIN))
2596		return -EPERM;
2597
2598	simple_strtoul(buf, &endp, 0);
2599	if (endp == buf)
2600		return -EBADMSG;
2601
2602	/* rxbuf_min and rxbuf_max are no longer configurable. */
2603
2604	return len;
2605}
2606
2607static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2608static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2609static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2610
2611static struct attribute *xennet_dev_attrs[] = {
2612	&dev_attr_rxbuf_min.attr,
2613	&dev_attr_rxbuf_max.attr,
2614	&dev_attr_rxbuf_cur.attr,
2615	NULL
2616};
2617
2618static const struct attribute_group xennet_dev_group = {
2619	.attrs = xennet_dev_attrs
2620};
2621#endif /* CONFIG_SYSFS */
2622
2623static void xennet_bus_close(struct xenbus_device *dev)
2624{
2625	int ret;
2626
2627	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2628		return;
2629	do {
2630		xenbus_switch_state(dev, XenbusStateClosing);
2631		ret = wait_event_timeout(module_wq,
2632				   xenbus_read_driver_state(dev->otherend) ==
2633				   XenbusStateClosing ||
2634				   xenbus_read_driver_state(dev->otherend) ==
2635				   XenbusStateClosed ||
2636				   xenbus_read_driver_state(dev->otherend) ==
2637				   XenbusStateUnknown,
2638				   XENNET_TIMEOUT);
2639	} while (!ret);
2640
2641	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2642		return;
2643
2644	do {
2645		xenbus_switch_state(dev, XenbusStateClosed);
2646		ret = wait_event_timeout(module_wq,
2647				   xenbus_read_driver_state(dev->otherend) ==
2648				   XenbusStateClosed ||
2649				   xenbus_read_driver_state(dev->otherend) ==
2650				   XenbusStateUnknown,
2651				   XENNET_TIMEOUT);
2652	} while (!ret);
2653}
2654
2655static void xennet_remove(struct xenbus_device *dev)
2656{
2657	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2658
2659	xennet_bus_close(dev);
 
2660	xennet_disconnect_backend(info);
2661
2662	if (info->netdev->reg_state == NETREG_REGISTERED)
2663		unregister_netdev(info->netdev);
2664
2665	if (info->queues) {
2666		rtnl_lock();
2667		xennet_destroy_queues(info);
2668		rtnl_unlock();
2669	}
2670	xennet_free_netdev(info->netdev);
2671}
2672
2673static const struct xenbus_device_id netfront_ids[] = {
2674	{ "vif" },
2675	{ "" }
2676};
2677
2678static struct xenbus_driver netfront_driver = {
2679	.ids = netfront_ids,
2680	.probe = netfront_probe,
2681	.remove = xennet_remove,
2682	.resume = netfront_resume,
2683	.otherend_changed = netback_changed,
2684};
2685
2686static int __init netif_init(void)
2687{
2688	if (!xen_domain())
2689		return -ENODEV;
2690
2691	if (!xen_has_pv_nic_devices())
2692		return -ENODEV;
2693
2694	pr_info("Initialising Xen virtual ethernet driver\n");
2695
2696	/* Allow as many queues as there are CPUs, but at most 8, if the user has
2697	 * not specified a value.
2698	 */
2699	if (xennet_max_queues == 0)
2700		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2701					  num_online_cpus());
2702
2703	return xenbus_register_frontend(&netfront_driver);
2704}
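/*
 * Illustrative example (not in the upstream source): with max_queues left at
 * 0, a guest with 4 online vCPUs gets xennet_max_queues = 4, while one with
 * 32 vCPUs is capped at MAX_QUEUES_DEFAULT (8).  An explicit value, e.g.
 * booting with xen_netfront.max_queues=2, is taken as-is (subject to the
 * backend's multi-queue-max-queues limit at connect time).
 */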
2705module_init(netif_init);
2706
2707
2708static void __exit netif_exit(void)
2709{
2710	xenbus_unregister_driver(&netfront_driver);
2711}
2712module_exit(netif_exit);
2713
2714MODULE_DESCRIPTION("Xen virtual network device frontend");
2715MODULE_LICENSE("GPL");
2716MODULE_ALIAS("xen:vif");
2717MODULE_ALIAS("xennet");
v4.10.11
   1/*
   2 * Virtual network driver for conversing with remote driver backends.
   3 *
   4 * Copyright (c) 2002-2005, K A Fraser
   5 * Copyright (c) 2005, XenSource Ltd
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version 2
   9 * as published by the Free Software Foundation; or, when distributed
  10 * separately from the Linux kernel or incorporated into other
  11 * software packages, subject to the following license:
  12 *
  13 * Permission is hereby granted, free of charge, to any person obtaining a copy
  14 * of this source file (the "Software"), to deal in the Software without
  15 * restriction, including without limitation the rights to use, copy, modify,
  16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  17 * and to permit persons to whom the Software is furnished to do so, subject to
  18 * the following conditions:
  19 *
  20 * The above copyright notice and this permission notice shall be included in
  21 * all copies or substantial portions of the Software.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  29 * IN THE SOFTWARE.
  30 */
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47
  48#include <xen/xen.h>
  49#include <xen/xenbus.h>
  50#include <xen/events.h>
  51#include <xen/page.h>
  52#include <xen/platform_pci.h>
  53#include <xen/grant_table.h>
  54
  55#include <xen/interface/io/netif.h>
  56#include <xen/interface/memory.h>
  57#include <xen/interface/grant_table.h>
  58
  59/* Module parameters */
 
  60static unsigned int xennet_max_queues;
  61module_param_named(max_queues, xennet_max_queues, uint, 0644);
  62MODULE_PARM_DESC(max_queues,
  63		 "Maximum number of queues per virtual interface");
  64
  65static const struct ethtool_ops xennet_ethtool_ops;
  66
  67struct netfront_cb {
  68	int pull_to;
  69};
  70
  71#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  72
  73#define RX_COPY_THRESHOLD 256
  74
  75#define GRANT_INVALID_REF	0
  76
  77#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
  78#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
  79
  80/* Minimum number of Rx slots (includes slot for GSO metadata). */
  81#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
  82
  83/* Queue name is interface name with "-qNNN" appended */
  84#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
  85
  86/* IRQ name is queue name with "-tx" or "-rx" appended */
  87#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
  88
 
 
  89struct netfront_stats {
  90	u64			packets;
  91	u64			bytes;
  92	struct u64_stats_sync	syncp;
  93};
  94
  95struct netfront_info;
  96
  97struct netfront_queue {
  98	unsigned int id; /* Queue ID, 0-based */
  99	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
 100	struct netfront_info *info;
 101
 
 
 102	struct napi_struct napi;
 103
 104	/* Split event channels support, tx_* == rx_* when using
 105	 * single event channel.
 106	 */
 107	unsigned int tx_evtchn, rx_evtchn;
 108	unsigned int tx_irq, rx_irq;
 109	/* Only used when split event channels support is enabled */
 110	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 111	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 112
 113	spinlock_t   tx_lock;
 114	struct xen_netif_tx_front_ring tx;
 115	int tx_ring_ref;
 116
 117	/*
 118	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 119	 * are linked from tx_skb_freelist through skb_entry.link.
 120	 *
 121	 *  NB. Freelist index entries are always going to be less than
 122	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
 123	 *  greater than PAGE_OFFSET: we use this property to distinguish
 124	 *  them.
 125	 */
 126	union skb_entry {
 127		struct sk_buff *skb;
 128		unsigned long link;
 129	} tx_skbs[NET_TX_RING_SIZE];
 130	grant_ref_t gref_tx_head;
 131	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 132	struct page *grant_tx_page[NET_TX_RING_SIZE];
 133	unsigned tx_skb_freelist;
 
 134
 135	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 136	struct xen_netif_rx_front_ring rx;
 137	int rx_ring_ref;
 138
 139	struct timer_list rx_refill_timer;
 140
 141	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 142	grant_ref_t gref_rx_head;
 143	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 144};
 145
 146struct netfront_info {
 147	struct list_head list;
 148	struct net_device *netdev;
 149
 150	struct xenbus_device *xbdev;
 151
 152	/* Multi-queue support */
 153	struct netfront_queue *queues;
 154
 155	/* Statistics */
 156	struct netfront_stats __percpu *rx_stats;
 157	struct netfront_stats __percpu *tx_stats;
 158
 159	atomic_t rx_gso_checksum_fixup;
 160};
 161
 162struct netfront_rx_info {
 163	struct xen_netif_rx_response rx;
 164	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 165};
 166
 167static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 168{
 169	list->link = id;
 170}
 171
 172static int skb_entry_is_link(const union skb_entry *list)
 173{
 174	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
 175	return (unsigned long)list->skb < PAGE_OFFSET;
 176}
 177
 178/*
  179 * Access macros for acquiring and freeing slots in tx_skbs[].
 180 */
 181
 182static void add_id_to_freelist(unsigned *head, union skb_entry *list,
 183			       unsigned short id)
 184{
 185	skb_entry_set_link(&list[id], *head);
 186	*head = id;
 187}
 188
 189static unsigned short get_id_from_freelist(unsigned *head,
 190					   union skb_entry *list)
 191{
 192	unsigned int id = *head;
 193	*head = list[id].link;
 194	return id;
 195}
 196
 197static int xennet_rxidx(RING_IDX idx)
 198{
 199	return idx & (NET_RX_RING_SIZE - 1);
 200}
 201
 202static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
 203					 RING_IDX ri)
 204{
 205	int i = xennet_rxidx(ri);
 206	struct sk_buff *skb = queue->rx_skbs[i];
 207	queue->rx_skbs[i] = NULL;
 208	return skb;
 209}
 210
 211static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 212					    RING_IDX ri)
 213{
 214	int i = xennet_rxidx(ri);
 215	grant_ref_t ref = queue->grant_rx_ref[i];
 216	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
 217	return ref;
 218}
 219
 220#ifdef CONFIG_SYSFS
 221static const struct attribute_group xennet_dev_group;
 222#endif
 223
 224static bool xennet_can_sg(struct net_device *dev)
 225{
 226	return dev->features & NETIF_F_SG;
 227}
 228
 229
 230static void rx_refill_timeout(unsigned long data)
 231{
 232	struct netfront_queue *queue = (struct netfront_queue *)data;
 233	napi_schedule(&queue->napi);
 234}
 235
 236static int netfront_tx_slot_available(struct netfront_queue *queue)
 237{
 238	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
 239		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
 240}
 241
 242static void xennet_maybe_wake_tx(struct netfront_queue *queue)
 243{
 244	struct net_device *dev = queue->info->netdev;
 245	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
 246
 247	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
 248	    netfront_tx_slot_available(queue) &&
 249	    likely(netif_running(dev)))
 250		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 251}
 252
 253
 254static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 255{
 256	struct sk_buff *skb;
 257	struct page *page;
 258
 259	skb = __netdev_alloc_skb(queue->info->netdev,
 260				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
 261				 GFP_ATOMIC | __GFP_NOWARN);
 262	if (unlikely(!skb))
 263		return NULL;
 264
 265	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 266	if (!page) {
 
 267		kfree_skb(skb);
 268		return NULL;
 269	}
 270	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 
 271
  272	/* Align the IP header to a 16-byte boundary */
 273	skb_reserve(skb, NET_IP_ALIGN);
 274	skb->dev = queue->info->netdev;
 275
 276	return skb;
 277}
 278
 279
 280static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 281{
 282	RING_IDX req_prod = queue->rx.req_prod_pvt;
 283	int notify;
 284	int err = 0;
 285
 286	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 287		return;
 288
 289	for (req_prod = queue->rx.req_prod_pvt;
 290	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
 291	     req_prod++) {
 292		struct sk_buff *skb;
 293		unsigned short id;
 294		grant_ref_t ref;
 295		struct page *page;
 296		struct xen_netif_rx_request *req;
 297
 298		skb = xennet_alloc_one_rx_buffer(queue);
 299		if (!skb) {
 300			err = -ENOMEM;
 301			break;
 302		}
 303
 304		id = xennet_rxidx(req_prod);
 305
 306		BUG_ON(queue->rx_skbs[id]);
 307		queue->rx_skbs[id] = skb;
 308
 309		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
 310		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 311		queue->grant_rx_ref[id] = ref;
 312
 313		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 314
 315		req = RING_GET_REQUEST(&queue->rx, req_prod);
 316		gnttab_page_grant_foreign_access_ref_one(ref,
 317							 queue->info->xbdev->otherend_id,
 318							 page,
 319							 0);
 320		req->id = id;
 321		req->gref = ref;
 322	}
 323
 324	queue->rx.req_prod_pvt = req_prod;
 325
 326	/* Try again later if there are not enough requests or skb allocation
 327	 * failed.
 328	 * Enough requests is quantified as the sum of newly created slots and
 329	 * the unconsumed slots at the backend.
 330	 */
 331	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
 332	    unlikely(err)) {
 333		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 334		return;
 335	}
 336
  337	wmb();		/* barrier so backend sees requests */
 338
 339	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 340	if (notify)
 341		notify_remote_via_irq(queue->rx_irq);
 342}
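/*
 * Editorial note (not part of the upstream file): if fewer than
 * NET_RX_SLOTS_MIN requests are outstanding after this pass (typically
 * because skb or page allocation failed), the refill is retried roughly
 * 100 ms later (HZ/10): rx_refill_timeout() reschedules NAPI and the poll
 * loop calls xennet_alloc_rx_buffers() again.  Otherwise the new requests are
 * pushed and the backend is notified through the rx event channel.
 */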
 343
 344static int xennet_open(struct net_device *dev)
 345{
 346	struct netfront_info *np = netdev_priv(dev);
 347	unsigned int num_queues = dev->real_num_tx_queues;
 348	unsigned int i = 0;
 349	struct netfront_queue *queue = NULL;
 350
 351	for (i = 0; i < num_queues; ++i) {
 352		queue = &np->queues[i];
 353		napi_enable(&queue->napi);
 354
 355		spin_lock_bh(&queue->rx_lock);
 356		if (netif_carrier_ok(dev)) {
 357			xennet_alloc_rx_buffers(queue);
 358			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
 359			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
 360				napi_schedule(&queue->napi);
 361		}
 362		spin_unlock_bh(&queue->rx_lock);
 363	}
 364
 365	netif_tx_start_all_queues(dev);
 366
 367	return 0;
 368}
 369
 370static void xennet_tx_buf_gc(struct netfront_queue *queue)
 371{
 372	RING_IDX cons, prod;
 373	unsigned short id;
 374	struct sk_buff *skb;
 375	bool more_to_do;
 
 
 376
 377	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 378
 379	do {
 380		prod = queue->tx.sring->rsp_prod;
 381		rmb(); /* Ensure we see responses up to 'rp'. */
 382
 383		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 384			struct xen_netif_tx_response *txrsp;
 
 
 385
 386			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
 387			if (txrsp->status == XEN_NETIF_RSP_NULL)
 388				continue;
 389
 390			id  = txrsp->id;
 391			skb = queue->tx_skbs[id].skb;
 392			if (unlikely(gnttab_query_foreign_access(
 393				queue->grant_tx_ref[id]) != 0)) {
 394				pr_alert("%s: warning -- grant still in use by backend domain\n",
 395					 __func__);
 396				BUG();
 397			}
 398			gnttab_end_foreign_access_ref(
 399				queue->grant_tx_ref[id], GNTMAP_readonly);
 400			gnttab_release_grant_reference(
 401				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 402			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
 403			queue->grant_tx_page[id] = NULL;
 404			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
 405			dev_kfree_skb_irq(skb);
 406		}
 407
 408		queue->tx.rsp_cons = prod;
 409
 410		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
 411	} while (more_to_do);
 412
 413	xennet_maybe_wake_tx(queue);
 414}
 415
 416struct xennet_gnttab_make_txreq {
 417	struct netfront_queue *queue;
 418	struct sk_buff *skb;
 419	struct page *page;
 420	struct xen_netif_tx_request *tx; /* Last request */
 
 421	unsigned int size;
 422};
 423
 424static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 425				  unsigned int len, void *data)
 426{
 427	struct xennet_gnttab_make_txreq *info = data;
 428	unsigned int id;
 429	struct xen_netif_tx_request *tx;
 430	grant_ref_t ref;
 431	/* convenient aliases */
 432	struct page *page = info->page;
 433	struct netfront_queue *queue = info->queue;
 434	struct sk_buff *skb = info->skb;
 435
 436	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
 437	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 438	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 439	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 440
 441	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 442					gfn, GNTMAP_readonly);
 443
 444	queue->tx_skbs[id].skb = skb;
 445	queue->grant_tx_page[id] = page;
 446	queue->grant_tx_ref[id] = ref;
 447
 448	tx->id = id;
 449	tx->gref = ref;
 450	tx->offset = offset;
 451	tx->size = len;
 452	tx->flags = 0;
 453
 454	info->tx = tx;
 455	info->size += tx->size;
 456}
 457
 458static struct xen_netif_tx_request *xennet_make_first_txreq(
 459	struct netfront_queue *queue, struct sk_buff *skb,
 460	struct page *page, unsigned int offset, unsigned int len)
 461{
 462	struct xennet_gnttab_make_txreq info = {
 463		.queue = queue,
 464		.skb = skb,
 465		.page = page,
 466		.size = 0,
 467	};
 468
 469	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
 470
 471	return info.tx;
 472}
 473
 474static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 475				  unsigned int len, void *data)
 476{
 477	struct xennet_gnttab_make_txreq *info = data;
 478
 479	info->tx->flags |= XEN_NETTXF_more_data;
 480	skb_get(info->skb);
 481	xennet_tx_setup_grant(gfn, offset, len, data);
 482}
 483
 484static struct xen_netif_tx_request *xennet_make_txreqs(
 485	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
 486	struct sk_buff *skb, struct page *page,
 487	unsigned int offset, unsigned int len)
 488{
 489	struct xennet_gnttab_make_txreq info = {
 490		.queue = queue,
 491		.skb = skb,
 492		.tx = tx,
 493	};
 494
 495	/* Skip unused frames from start of page */
 496	page += offset >> PAGE_SHIFT;
 497	offset &= ~PAGE_MASK;
 498
 499	while (len) {
 500		info.page = page;
 501		info.size = 0;
 502
 503		gnttab_foreach_grant_in_range(page, offset, len,
 504					      xennet_make_one_txreq,
 505					      &info);
 506
 507		page++;
 508		offset = 0;
 509		len -= info.size;
 510	}
 511
 512	return info.tx;
 513}
 514
 515/*
 516 * Count how many ring slots are required to send this skb. Each frag
 517 * might be a compound page.
 518 */
 519static int xennet_count_skb_slots(struct sk_buff *skb)
 520{
 521	int i, frags = skb_shinfo(skb)->nr_frags;
 522	int slots;
 523
 524	slots = gnttab_count_grant(offset_in_page(skb->data),
 525				   skb_headlen(skb));
 526
 527	for (i = 0; i < frags; i++) {
 528		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 529		unsigned long size = skb_frag_size(frag);
 530		unsigned long offset = frag->page_offset;
 531
 532		/* Skip unused frames from start of page */
 533		offset &= ~PAGE_MASK;
 534
 535		slots += gnttab_count_grant(offset, size);
 536	}
 537
 538	return slots;
 539}
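/*
 * Illustrative, made-up example (not in the upstream source): a linear area
 * of 2000 bytes that starts 3000 bytes into a 4 KiB page straddles two grant
 * pages and therefore needs two slots; a page-aligned 16 KiB compound-page
 * frag needs four.  The resulting slot count is compared against
 * MAX_XEN_SKB_FRAGS + 1 in xennet_start_xmit() below.
 */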
 540
 541static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 542			       void *accel_priv, select_queue_fallback_t fallback)
 543{
 544	unsigned int num_queues = dev->real_num_tx_queues;
 545	u32 hash;
 546	u16 queue_idx;
 547
 548	/* First, check if there is only one queue */
 549	if (num_queues == 1) {
 550		queue_idx = 0;
 551	} else {
 552		hash = skb_get_hash(skb);
 553		queue_idx = hash % num_queues;
 554	}
 555
 556	return queue_idx;
 557}
 558
 559#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 560
 561static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 562{
 563	struct netfront_info *np = netdev_priv(dev);
 564	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 565	struct xen_netif_tx_request *tx, *first_tx;
 566	unsigned int i;
 567	int notify;
 568	int slots;
 569	struct page *page;
 570	unsigned int offset;
 571	unsigned int len;
 572	unsigned long flags;
 573	struct netfront_queue *queue = NULL;
 
 574	unsigned int num_queues = dev->real_num_tx_queues;
 575	u16 queue_index;
 576	struct sk_buff *nskb;
 577
 578	/* Drop the packet if no queues are set up */
 579	if (num_queues < 1)
 580		goto drop;
 
 
 581	/* Determine which queue to transmit this SKB on */
 582	queue_index = skb_get_queue_mapping(skb);
 583	queue = &np->queues[queue_index];
 584
 585	/* If skb->len is too big for wire format, drop skb and alert
 586	 * user about misconfiguration.
 587	 */
 588	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 589		net_alert_ratelimited(
 590			"xennet: skb->len = %u, too big for wire format\n",
 591			skb->len);
 592		goto drop;
 593	}
 594
 595	slots = xennet_count_skb_slots(skb);
 596	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
 597		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 598				    slots, skb->len);
 599		if (skb_linearize(skb))
 600			goto drop;
 601	}
 602
 603	page = virt_to_page(skb->data);
 604	offset = offset_in_page(skb->data);
 605
 606	/* The first req should be at least ETH_HLEN size or the packet will be
 607	 * dropped by netback.
 608	 */
 609	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
 610		nskb = skb_copy(skb, GFP_ATOMIC);
 611		if (!nskb)
 612			goto drop;
 613		dev_kfree_skb_any(skb);
 614		skb = nskb;
 615		page = virt_to_page(skb->data);
 616		offset = offset_in_page(skb->data);
 617	}
 618
 619	len = skb_headlen(skb);
 620
 621	spin_lock_irqsave(&queue->tx_lock, flags);
 622
 623	if (unlikely(!netif_carrier_ok(dev) ||
 624		     (slots > 1 && !xennet_can_sg(dev)) ||
 625		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 626		spin_unlock_irqrestore(&queue->tx_lock, flags);
 627		goto drop;
 628	}
 629
 630	/* First request for the linear area. */
 631	first_tx = tx = xennet_make_first_txreq(queue, skb,
 632						page, offset, len);
 633	offset += tx->size;
 
 
 634	if (offset == PAGE_SIZE) {
 635		page++;
 636		offset = 0;
 637	}
 638	len -= tx->size;
 639
 640	if (skb->ip_summed == CHECKSUM_PARTIAL)
 641		/* local packet? */
 642		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
 
 643	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 644		/* remote but checksummed. */
 645		tx->flags |= XEN_NETTXF_data_validated;
 646
 647	/* Optional extra info after the first request. */
 648	if (skb_shinfo(skb)->gso_size) {
 649		struct xen_netif_extra_info *gso;
 650
 651		gso = (struct xen_netif_extra_info *)
 652			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 653
 654		tx->flags |= XEN_NETTXF_extra_info;
 655
 656		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 657		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 658			XEN_NETIF_GSO_TYPE_TCPV6 :
 659			XEN_NETIF_GSO_TYPE_TCPV4;
 660		gso->u.gso.pad = 0;
 661		gso->u.gso.features = 0;
 662
 663		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 664		gso->flags = 0;
 665	}
 666
 667	/* Requests for the rest of the linear area. */
 668	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
 669
 670	/* Requests for all the frags. */
 671	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 672		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 673		tx = xennet_make_txreqs(queue, tx, skb,
 674					skb_frag_page(frag), frag->page_offset,
 675					skb_frag_size(frag));
 676	}
 677
 678	/* First request has the packet length. */
 679	first_tx->size = skb->len;
 680
 681	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 682	if (notify)
 683		notify_remote_via_irq(queue->tx_irq);
 684
 685	u64_stats_update_begin(&tx_stats->syncp);
 686	tx_stats->bytes += skb->len;
 687	tx_stats->packets++;
 688	u64_stats_update_end(&tx_stats->syncp);
 689
 690	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 691	xennet_tx_buf_gc(queue);
 692
 693	if (!netfront_tx_slot_available(queue))
 694		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
 695
 696	spin_unlock_irqrestore(&queue->tx_lock, flags);
 697
 698	return NETDEV_TX_OK;
 699
 700 drop:
 701	dev->stats.tx_dropped++;
 702	dev_kfree_skb_any(skb);
 703	return NETDEV_TX_OK;
 704}
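/*
 * Editorial note (not part of the upstream file): the transmit path above
 * emits one ring request per grant-sized chunk.  The first request covers the
 * start of the linear area and, once the whole chain is built, has its size
 * field rewritten to the total skb->len; an optional GSO extra-info slot may
 * follow it directly; every subsequent chunk of the linear area and of each
 * frag gets its own request flagged with XEN_NETTXF_more_data.
 */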
 705
 706static int xennet_close(struct net_device *dev)
 707{
 708	struct netfront_info *np = netdev_priv(dev);
 709	unsigned int num_queues = dev->real_num_tx_queues;
 710	unsigned int i;
 711	struct netfront_queue *queue;
 712	netif_tx_stop_all_queues(np->netdev);
 713	for (i = 0; i < num_queues; ++i) {
 714		queue = &np->queues[i];
 715		napi_disable(&queue->napi);
 716	}
 717	return 0;
 718}
 719
 720static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 721				grant_ref_t ref)
 722{
 723	int new = xennet_rxidx(queue->rx.req_prod_pvt);
 724
 725	BUG_ON(queue->rx_skbs[new]);
 726	queue->rx_skbs[new] = skb;
 727	queue->grant_rx_ref[new] = ref;
 728	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
 729	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
 730	queue->rx.req_prod_pvt++;
 731}
 732
 733static int xennet_get_extras(struct netfront_queue *queue,
 734			     struct xen_netif_extra_info *extras,
 735			     RING_IDX rp)
 736
 737{
 738	struct xen_netif_extra_info *extra;
 739	struct device *dev = &queue->info->netdev->dev;
 740	RING_IDX cons = queue->rx.rsp_cons;
 741	int err = 0;
 742
 743	do {
 744		struct sk_buff *skb;
 745		grant_ref_t ref;
 746
 747		if (unlikely(cons + 1 == rp)) {
 748			if (net_ratelimit())
 749				dev_warn(dev, "Missing extra info\n");
 750			err = -EBADR;
 751			break;
 752		}
 753
 754		extra = (struct xen_netif_extra_info *)
 755			RING_GET_RESPONSE(&queue->rx, ++cons);
 756
 757		if (unlikely(!extra->type ||
 758			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 759			if (net_ratelimit())
 760				dev_warn(dev, "Invalid extra type: %d\n",
 761					extra->type);
 762			err = -EINVAL;
 763		} else {
 764			memcpy(&extras[extra->type - 1], extra,
 765			       sizeof(*extra));
 766		}
 767
 768		skb = xennet_get_rx_skb(queue, cons);
 769		ref = xennet_get_rx_ref(queue, cons);
 770		xennet_move_rx_slot(queue, skb, ref);
 771	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 772
 773	queue->rx.rsp_cons = cons;
 774	return err;
 775}
 776
 777static int xennet_get_responses(struct netfront_queue *queue,
 778				struct netfront_rx_info *rinfo, RING_IDX rp,
 779				struct sk_buff_head *list)
 
 780{
 781	struct xen_netif_rx_response *rx = &rinfo->rx;
 782	struct xen_netif_extra_info *extras = rinfo->extras;
 783	struct device *dev = &queue->info->netdev->dev;
 784	RING_IDX cons = queue->rx.rsp_cons;
 785	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
 
 786	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
 787	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
 
 
 788	int slots = 1;
 789	int err = 0;
 790	unsigned long ret;
 791
 792	if (rx->flags & XEN_NETRXF_extra_info) {
 793		err = xennet_get_extras(queue, extras, rp);
 794		cons = queue->rx.rsp_cons;
 795	}
 796
 797	for (;;) {
 798		if (unlikely(rx->status < 0 ||
 799			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
 800			if (net_ratelimit())
 801				dev_warn(dev, "rx->offset: %u, size: %d\n",
 802					 rx->offset, rx->status);
 803			xennet_move_rx_slot(queue, skb, ref);
 804			err = -EINVAL;
 805			goto next;
 806		}
 807
 808		/*
 809		 * This definitely indicates a bug, either in this driver or in
 810		 * the backend driver. In future this should flag the bad
 811		 * situation to the system controller to reboot the backend.
 812		 */
 813		if (ref == GRANT_INVALID_REF) {
 814			if (net_ratelimit())
 815				dev_warn(dev, "Bad rx response id %d.\n",
 816					 rx->id);
 817			err = -EINVAL;
 818			goto next;
 819		}
 820
 821		ret = gnttab_end_foreign_access_ref(ref, 0);
 822		BUG_ON(!ret);
 823
 824		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 825
 826		__skb_queue_tail(list, skb);
 827
 828next:
 829		if (!(rx->flags & XEN_NETRXF_more_data))
 830			break;
 831
 832		if (cons + slots == rp) {
 833			if (net_ratelimit())
 834				dev_warn(dev, "Need more slots\n");
 835			err = -ENOENT;
 836			break;
 837		}
 838
 839		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
 
 840		skb = xennet_get_rx_skb(queue, cons + slots);
 841		ref = xennet_get_rx_ref(queue, cons + slots);
 842		slots++;
 843	}
 844
 845	if (unlikely(slots > max)) {
 846		if (net_ratelimit())
 847			dev_warn(dev, "Too many slots\n");
 848		err = -E2BIG;
 849	}
 850
 851	if (unlikely(err))
 852		queue->rx.rsp_cons = cons + slots;
 853
 854	return err;
 855}
 856
 857static int xennet_set_skb_gso(struct sk_buff *skb,
 858			      struct xen_netif_extra_info *gso)
 859{
 860	if (!gso->u.gso.size) {
 861		if (net_ratelimit())
 862			pr_warn("GSO size must not be zero\n");
 863		return -EINVAL;
 864	}
 865
 866	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
 867	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
 868		if (net_ratelimit())
 869			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
 870		return -EINVAL;
 871	}
 872
 873	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 874	skb_shinfo(skb)->gso_type =
 875		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
 876		SKB_GSO_TCPV4 :
 877		SKB_GSO_TCPV6;
 878
 879	/* Header must be checked, and gso_segs computed. */
 880	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
 881	skb_shinfo(skb)->gso_segs = 0;
 882
 883	return 0;
 884}
 885
 886static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 887				  struct sk_buff *skb,
 888				  struct sk_buff_head *list)
 889{
 890	struct skb_shared_info *shinfo = skb_shinfo(skb);
 891	RING_IDX cons = queue->rx.rsp_cons;
 892	struct sk_buff *nskb;
 893
 894	while ((nskb = __skb_dequeue(list))) {
 895		struct xen_netif_rx_response *rx =
 896			RING_GET_RESPONSE(&queue->rx, ++cons);
 897		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 898
 899		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
 
 
 900			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 901
 902			BUG_ON(pull_to <= skb_headlen(skb));
 903			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 904		}
 905		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
 906
 907		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
 908				rx->offset, rx->status, PAGE_SIZE);
 
 909
 910		skb_shinfo(nskb)->nr_frags = 0;
 911		kfree_skb(nskb);
 912	}
 913
 914	return cons;
 
 
 915}
 916
 917static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 918{
 919	bool recalculate_partial_csum = false;
 920
 921	/*
 922	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 923	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 924	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 925	 * recalculate the partial checksum.
 926	 */
 927	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 928		struct netfront_info *np = netdev_priv(dev);
 929		atomic_inc(&np->rx_gso_checksum_fixup);
 930		skb->ip_summed = CHECKSUM_PARTIAL;
 931		recalculate_partial_csum = true;
 932	}
 933
 934	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 935	if (skb->ip_summed != CHECKSUM_PARTIAL)
 936		return 0;
 937
 938	return skb_checksum_setup(skb, recalculate_partial_csum);
 939}
 940
 941static int handle_incoming_queue(struct netfront_queue *queue,
 942				 struct sk_buff_head *rxq)
 943{
 944	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
 945	int packets_dropped = 0;
 946	struct sk_buff *skb;
 947
 948	while ((skb = __skb_dequeue(rxq)) != NULL) {
 949		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 950
 951		if (pull_to > skb_headlen(skb))
 952			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 953
 954		/* Ethernet work: Delayed to here as it peeks the header. */
 955		skb->protocol = eth_type_trans(skb, queue->info->netdev);
 956		skb_reset_network_header(skb);
 957
 958		if (checksum_setup(queue->info->netdev, skb)) {
 959			kfree_skb(skb);
 960			packets_dropped++;
 961			queue->info->netdev->stats.rx_errors++;
 962			continue;
 963		}
 964
 965		u64_stats_update_begin(&rx_stats->syncp);
 966		rx_stats->packets++;
 967		rx_stats->bytes += skb->len;
 968		u64_stats_update_end(&rx_stats->syncp);
 969
 970		/* Pass it up. */
 971		napi_gro_receive(&queue->napi, skb);
 972	}
 973
 974	return packets_dropped;
 975}
 976
 977static int xennet_poll(struct napi_struct *napi, int budget)
 978{
 979	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
 980	struct net_device *dev = queue->info->netdev;
 981	struct sk_buff *skb;
 982	struct netfront_rx_info rinfo;
 983	struct xen_netif_rx_response *rx = &rinfo.rx;
 984	struct xen_netif_extra_info *extras = rinfo.extras;
 985	RING_IDX i, rp;
 986	int work_done;
 987	struct sk_buff_head rxq;
 988	struct sk_buff_head errq;
 989	struct sk_buff_head tmpq;
 990	int err;
 
 991
 992	spin_lock(&queue->rx_lock);
 993
 994	skb_queue_head_init(&rxq);
 995	skb_queue_head_init(&errq);
 996	skb_queue_head_init(&tmpq);
 997
 998	rp = queue->rx.sring->rsp_prod;
 999	rmb(); /* Ensure we see queued responses up to 'rp'. */
1000
1001	i = queue->rx.rsp_cons;
1002	work_done = 0;
1003	while ((i != rp) && (work_done < budget)) {
1004		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
1005		memset(extras, 0, sizeof(rinfo.extras));
1006
1007		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
 
1008
1009		if (unlikely(err)) {
1010err:
1011			while ((skb = __skb_dequeue(&tmpq)))
1012				__skb_queue_tail(&errq, skb);
1013			dev->stats.rx_errors++;
1014			i = queue->rx.rsp_cons;
1015			continue;
1016		}
1017
1018		skb = __skb_dequeue(&tmpq);
1019
1020		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1021			struct xen_netif_extra_info *gso;
1022			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1023
1024			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1025				__skb_queue_head(&tmpq, skb);
1026				queue->rx.rsp_cons += skb_queue_len(&tmpq);
 
 
1027				goto err;
1028			}
1029		}
1030
1031		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1032		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1033			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1034
1035		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1036		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1037		skb->data_len = rx->status;
1038		skb->len += rx->status;
1039
1040		i = xennet_fill_frags(queue, skb, &tmpq);
 
1041
1042		if (rx->flags & XEN_NETRXF_csum_blank)
1043			skb->ip_summed = CHECKSUM_PARTIAL;
1044		else if (rx->flags & XEN_NETRXF_data_validated)
1045			skb->ip_summed = CHECKSUM_UNNECESSARY;
1046
1047		__skb_queue_tail(&rxq, skb);
1048
1049		queue->rx.rsp_cons = ++i;
 
1050		work_done++;
1051	}
 
 
1052
1053	__skb_queue_purge(&errq);
1054
1055	work_done -= handle_incoming_queue(queue, &rxq);
1056
1057	xennet_alloc_rx_buffers(queue);
1058
1059	if (work_done < budget) {
1060		int more_to_do = 0;
1061
1062		napi_complete(napi);
1063
1064		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1065		if (more_to_do)
1066			napi_schedule(napi);
1067	}
1068
1069	spin_unlock(&queue->rx_lock);
1070
1071	return work_done;
1072}
1073
1074static int xennet_change_mtu(struct net_device *dev, int mtu)
1075{
1076	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1077
1078	if (mtu > max)
1079		return -EINVAL;
1080	dev->mtu = mtu;
1081	return 0;
1082}
1083
1084static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1085						    struct rtnl_link_stats64 *tot)
1086{
1087	struct netfront_info *np = netdev_priv(dev);
1088	int cpu;
1089
1090	for_each_possible_cpu(cpu) {
1091		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1092		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1093		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1094		unsigned int start;
1095
1096		do {
1097			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1098			tx_packets = tx_stats->packets;
1099			tx_bytes = tx_stats->bytes;
1100		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1101
1102		do {
1103			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1104			rx_packets = rx_stats->packets;
1105			rx_bytes = rx_stats->bytes;
1106		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1107
1108		tot->rx_packets += rx_packets;
1109		tot->tx_packets += tx_packets;
1110		tot->rx_bytes   += rx_bytes;
1111		tot->tx_bytes   += tx_bytes;
1112	}
1113
1114	tot->rx_errors  = dev->stats.rx_errors;
1115	tot->tx_dropped = dev->stats.tx_dropped;
1116
1117	return tot;
1118}
1119
1120static void xennet_release_tx_bufs(struct netfront_queue *queue)
1121{
1122	struct sk_buff *skb;
1123	int i;
1124
1125	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1126		/* Skip over entries which are actually freelist references */
1127		if (skb_entry_is_link(&queue->tx_skbs[i]))
1128			continue;
1129
1130		skb = queue->tx_skbs[i].skb;
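		/* Take a page reference first: ending foreign access may be
		 * deferred and frees the page as a side effect, as in the Rx
		 * path below.
		 */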
1131		get_page(queue->grant_tx_page[i]);
1132		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1133					  GNTMAP_readonly,
1134					  (unsigned long)page_address(queue->grant_tx_page[i]));
1135		queue->grant_tx_page[i] = NULL;
1136		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1137		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1138		dev_kfree_skb_irq(skb);
1139	}
1140}
1141
1142static void xennet_release_rx_bufs(struct netfront_queue *queue)
1143{
1144	int id, ref;
1145
1146	spin_lock_bh(&queue->rx_lock);
1147
1148	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1149		struct sk_buff *skb;
1150		struct page *page;
1151
1152		skb = queue->rx_skbs[id];
1153		if (!skb)
1154			continue;
1155
1156		ref = queue->grant_rx_ref[id];
1157		if (ref == GRANT_INVALID_REF)
1158			continue;
1159
1160		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1161
1162		/* gnttab_end_foreign_access() needs a page ref until
1163		 * foreign access is ended (which may be deferred).
1164		 */
1165		get_page(page);
1166		gnttab_end_foreign_access(ref, 0,
1167					  (unsigned long)page_address(page));
1168		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1169
1170		kfree_skb(skb);
1171	}
1172
1173	spin_unlock_bh(&queue->rx_lock);
1174}
1175
1176static netdev_features_t xennet_fix_features(struct net_device *dev,
1177	netdev_features_t features)
1178{
1179	struct netfront_info *np = netdev_priv(dev);
1180
1181	if (features & NETIF_F_SG &&
1182	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1183		features &= ~NETIF_F_SG;
1184
1185	if (features & NETIF_F_IPV6_CSUM &&
1186	    !xenbus_read_unsigned(np->xbdev->otherend,
1187				  "feature-ipv6-csum-offload", 0))
1188		features &= ~NETIF_F_IPV6_CSUM;
1189
1190	if (features & NETIF_F_TSO &&
1191	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1192		features &= ~NETIF_F_TSO;
1193
1194	if (features & NETIF_F_TSO6 &&
1195	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1196		features &= ~NETIF_F_TSO6;
1197
1198	return features;
1199}
1200
1201static int xennet_set_features(struct net_device *dev,
1202	netdev_features_t features)
1203{
1204	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1205		netdev_info(dev, "Reducing MTU because no SG offload\n");
1206		dev->mtu = ETH_DATA_LEN;
1207	}
1208
1209	return 0;
1210}
1211
1212static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1213{
1214	struct netfront_queue *queue = dev_id;
1215	unsigned long flags;
1216
1217	spin_lock_irqsave(&queue->tx_lock, flags);
1218	xennet_tx_buf_gc(queue);
1219	spin_unlock_irqrestore(&queue->tx_lock, flags);
1220
1221	return IRQ_HANDLED;
1222}
1223
1224static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1225{
1226	struct netfront_queue *queue = dev_id;
1227	struct net_device *dev = queue->info->netdev;
1228
1229	if (likely(netif_carrier_ok(dev) &&
1230		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1231		napi_schedule(&queue->napi);
1232
1233	return IRQ_HANDLED;
1234}
1235
1236static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1237{
1238	xennet_tx_interrupt(irq, dev_id);
1239	xennet_rx_interrupt(irq, dev_id);
1240	return IRQ_HANDLED;
1241}
1242
1243#ifdef CONFIG_NET_POLL_CONTROLLER
1244static void xennet_poll_controller(struct net_device *dev)
1245{
1246	/* Poll each queue */
1247	struct netfront_info *info = netdev_priv(dev);
1248	unsigned int num_queues = dev->real_num_tx_queues;
1249	unsigned int i;
1250	for (i = 0; i < num_queues; ++i)
1251		xennet_interrupt(0, &info->queues[i]);
1252}
1253#endif
1254
1255static const struct net_device_ops xennet_netdev_ops = {
1256	.ndo_open            = xennet_open,
1257	.ndo_stop            = xennet_close,
1258	.ndo_start_xmit      = xennet_start_xmit,
1259	.ndo_change_mtu	     = xennet_change_mtu,
1260	.ndo_get_stats64     = xennet_get_stats64,
1261	.ndo_set_mac_address = eth_mac_addr,
1262	.ndo_validate_addr   = eth_validate_addr,
1263	.ndo_fix_features    = xennet_fix_features,
1264	.ndo_set_features    = xennet_set_features,
1265	.ndo_select_queue    = xennet_select_queue,
1266#ifdef CONFIG_NET_POLL_CONTROLLER
1267	.ndo_poll_controller = xennet_poll_controller,
1268#endif
1269};
1270
1271static void xennet_free_netdev(struct net_device *netdev)
1272{
1273	struct netfront_info *np = netdev_priv(netdev);
1274
1275	free_percpu(np->rx_stats);
1276	free_percpu(np->tx_stats);
1277	free_netdev(netdev);
1278}
1279
1280static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1281{
1282	int err;
1283	struct net_device *netdev;
1284	struct netfront_info *np;
1285
1286	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1287	if (!netdev)
1288		return ERR_PTR(-ENOMEM);
1289
1290	np                   = netdev_priv(netdev);
1291	np->xbdev            = dev;
1292
1293	np->queues = NULL;
1294
1295	err = -ENOMEM;
1296	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1297	if (np->rx_stats == NULL)
1298		goto exit;
1299	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1300	if (np->tx_stats == NULL)
1301		goto exit;
1302
1303	netdev->netdev_ops	= &xennet_netdev_ops;
1304
1305	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1306				  NETIF_F_GSO_ROBUST;
1307	netdev->hw_features	= NETIF_F_SG |
1308				  NETIF_F_IPV6_CSUM |
1309				  NETIF_F_TSO | NETIF_F_TSO6;
1310
1311	/*
1312	 * Assume that all hw features are available for now. This set
1313	 * will be adjusted by the call to netdev_update_features() in
1314	 * xennet_connect(), which is the earliest point where we can
1315	 * negotiate with the backend regarding supported features.
1316	 */
1317	netdev->features |= netdev->hw_features;
1318
1319	netdev->ethtool_ops = &xennet_ethtool_ops;
1320	netdev->min_mtu = 0;
1321	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1322	SET_NETDEV_DEV(netdev, &dev->dev);
1323
1324	np->netdev = netdev;
1325
1326	netif_carrier_off(netdev);
1327
1328	return netdev;
1329
1330 exit:
1331	xennet_free_netdev(netdev);
1332	return ERR_PTR(err);
1333}
1334
1335/**
1336 * Entry point to this code when a new device is created.  Allocate the basic
1337 * structures and the ring buffers for communication with the backend, and
1338 * inform the backend of the appropriate details for those.
1339 */
1340static int netfront_probe(struct xenbus_device *dev,
1341			  const struct xenbus_device_id *id)
1342{
1343	int err;
1344	struct net_device *netdev;
1345	struct netfront_info *info;
1346
1347	netdev = xennet_create_dev(dev);
1348	if (IS_ERR(netdev)) {
1349		err = PTR_ERR(netdev);
1350		xenbus_dev_fatal(dev, err, "creating netdev");
1351		return err;
1352	}
1353
1354	info = netdev_priv(netdev);
1355	dev_set_drvdata(&dev->dev, info);
1356#ifdef CONFIG_SYSFS
1357	info->netdev->sysfs_groups[0] = &xennet_dev_group;
1358#endif
1359	err = register_netdev(info->netdev);
1360	if (err) {
1361		pr_warn("%s: register_netdev err=%d\n", __func__, err);
1362		goto fail;
1363	}
1364
1365	return 0;
1366
1367 fail:
1368	xennet_free_netdev(netdev);
1369	dev_set_drvdata(&dev->dev, NULL);
1370	return err;
1371}
1372
1373static void xennet_end_access(int ref, void *page)
1374{
1375	/* This frees the page as a side-effect */
1376	if (ref != GRANT_INVALID_REF)
1377		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1378}
1379
1380static void xennet_disconnect_backend(struct netfront_info *info)
1381{
1382	unsigned int i = 0;
1383	unsigned int num_queues = info->netdev->real_num_tx_queues;
1384
1385	netif_carrier_off(info->netdev);
1386
1387	for (i = 0; i < num_queues && info->queues; ++i) {
1388		struct netfront_queue *queue = &info->queues[i];
1389
1390		del_timer_sync(&queue->rx_refill_timer);
1391
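		/* A shared event channel has tx_irq == rx_irq and is unbound
		 * once; split channels are unbound individually.
		 */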
1392		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1393			unbind_from_irqhandler(queue->tx_irq, queue);
1394		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1395			unbind_from_irqhandler(queue->tx_irq, queue);
1396			unbind_from_irqhandler(queue->rx_irq, queue);
1397		}
1398		queue->tx_evtchn = queue->rx_evtchn = 0;
1399		queue->tx_irq = queue->rx_irq = 0;
1400
1401		if (netif_running(info->netdev))
1402			napi_synchronize(&queue->napi);
1403
1404		xennet_release_tx_bufs(queue);
1405		xennet_release_rx_bufs(queue);
1406		gnttab_free_grant_references(queue->gref_tx_head);
1407		gnttab_free_grant_references(queue->gref_rx_head);
1408
1409		/* End access and free the pages */
1410		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1411		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1412
1413		queue->tx_ring_ref = GRANT_INVALID_REF;
1414		queue->rx_ring_ref = GRANT_INVALID_REF;
1415		queue->tx.sring = NULL;
1416		queue->rx.sring = NULL;
1417	}
1418}
1419
1420/**
1421 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1422 * driver restart.  We tear down our netif structure and recreate it, but
1423 * leave the device-layer structures intact so that this is transparent to the
1424 * rest of the kernel.
1425 */
1426static int netfront_resume(struct xenbus_device *dev)
1427{
1428	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1429
1430	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1431
1432	xennet_disconnect_backend(info);
1433	return 0;
1434}
1435
1436static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1437{
1438	char *s, *e, *macstr;
1439	int i;
1440
1441	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1442	if (IS_ERR(macstr))
1443		return PTR_ERR(macstr);
1444
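	/* The "mac" node holds a colon-separated hex string
	 * ("xx:xx:xx:xx:xx:xx"); parse it a byte at a time and reject
	 * anything malformed.
	 */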
1445	for (i = 0; i < ETH_ALEN; i++) {
1446		mac[i] = simple_strtoul(s, &e, 16);
1447		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1448			kfree(macstr);
1449			return -ENOENT;
1450		}
1451		s = e+1;
1452	}
1453
1454	kfree(macstr);
1455	return 0;
1456}
1457
1458static int setup_netfront_single(struct netfront_queue *queue)
1459{
1460	int err;
1461
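	/* A single event channel is shared by Tx and Rx: one irq whose
	 * handler (xennet_interrupt) services both directions.
	 */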
1462	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1463	if (err < 0)
1464		goto fail;
1465
1466	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1467					xennet_interrupt,
1468					0, queue->info->netdev->name, queue);
1469	if (err < 0)
1470		goto bind_fail;
1471	queue->rx_evtchn = queue->tx_evtchn;
1472	queue->rx_irq = queue->tx_irq = err;
1473
1474	return 0;
1475
1476bind_fail:
1477	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1478	queue->tx_evtchn = 0;
1479fail:
1480	return err;
1481}
1482
1483static int setup_netfront_split(struct netfront_queue *queue)
1484{
1485	int err;
1486
1487	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1488	if (err < 0)
1489		goto fail;
1490	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1491	if (err < 0)
1492		goto alloc_rx_evtchn_fail;
1493
1494	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1495		 "%s-tx", queue->name);
1496	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1497					xennet_tx_interrupt,
1498					0, queue->tx_irq_name, queue);
1499	if (err < 0)
1500		goto bind_tx_fail;
1501	queue->tx_irq = err;
1502
1503	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1504		 "%s-rx", queue->name);
1505	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1506					xennet_rx_interrupt,
1507					0, queue->rx_irq_name, queue);
1508	if (err < 0)
1509		goto bind_rx_fail;
1510	queue->rx_irq = err;
1511
1512	return 0;
1513
1514bind_rx_fail:
1515	unbind_from_irqhandler(queue->tx_irq, queue);
1516	queue->tx_irq = 0;
1517bind_tx_fail:
1518	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1519	queue->rx_evtchn = 0;
1520alloc_rx_evtchn_fail:
1521	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1522	queue->tx_evtchn = 0;
1523fail:
1524	return err;
1525}
1526
1527static int setup_netfront(struct xenbus_device *dev,
1528			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1529{
1530	struct xen_netif_tx_sring *txs;
1531	struct xen_netif_rx_sring *rxs;
1532	grant_ref_t gref;
1533	int err;
1534
1535	queue->tx_ring_ref = GRANT_INVALID_REF;
1536	queue->rx_ring_ref = GRANT_INVALID_REF;
1537	queue->rx.sring = NULL;
1538	queue->tx.sring = NULL;
1539
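	/* Allocate the Tx and Rx shared rings, grant the backend access to
	 * each, then bind either split or single event channels.
	 */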
1540	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1541	if (!txs) {
1542		err = -ENOMEM;
1543		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1544		goto fail;
1545	}
1546	SHARED_RING_INIT(txs);
1547	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1548
1549	err = xenbus_grant_ring(dev, txs, 1, &gref);
1550	if (err < 0)
1551		goto grant_tx_ring_fail;
1552	queue->tx_ring_ref = gref;
1553
1554	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1555	if (!rxs) {
1556		err = -ENOMEM;
1557		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1558		goto alloc_rx_ring_fail;
1559	}
1560	SHARED_RING_INIT(rxs);
1561	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1562
1563	err = xenbus_grant_ring(dev, rxs, 1, &gref);
1564	if (err < 0)
1565		goto grant_rx_ring_fail;
1566	queue->rx_ring_ref = gref;
1567
1568	if (feature_split_evtchn)
1569		err = setup_netfront_split(queue);
1570	/* Set up a single event channel if
1571	 *  a) feature-split-event-channels == 0
1572	 *  b) feature-split-event-channels == 1 but the split setup failed
1573	 */
1574	if (!feature_split_evtchn || (feature_split_evtchn && err))
1575		err = setup_netfront_single(queue);
1576
1577	if (err)
1578		goto alloc_evtchn_fail;
1579
1580	return 0;
1581
1582	/* If we fail to set up netfront, it is safe to just revoke access to
1583	 * the granted pages: the backend is not accessing them at this point.
1584	 */
1585alloc_evtchn_fail:
1586	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1587grant_rx_ring_fail:
1588	free_page((unsigned long)rxs);
1589alloc_rx_ring_fail:
1590	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1591grant_tx_ring_fail:
1592	free_page((unsigned long)txs);
1593fail:
1594	return err;
1595}
1596
1597/* Queue-specific initialisation
1598 * This used to be done in xennet_create_dev() but must now
1599 * be run per-queue.
1600 */
1601static int xennet_init_queue(struct netfront_queue *queue)
1602{
1603	unsigned short i;
1604	int err = 0;
1605
1606	spin_lock_init(&queue->tx_lock);
1607	spin_lock_init(&queue->rx_lock);
1608
1609	setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
1610		    (unsigned long)queue);
1611
1612	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
1613		 queue->info->netdev->name, queue->id);
1614
1615	/* Initialise tx_skbs as a free chain containing every entry. */
1616	queue->tx_skb_freelist = 0;
1617	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1618		skb_entry_set_link(&queue->tx_skbs[i], i+1);
1619		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1620		queue->grant_tx_page[i] = NULL;
1621	}
1622
1623	/* Clear out rx_skbs */
1624	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1625		queue->rx_skbs[i] = NULL;
1626		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1627	}
1628
1629	/* A grant for every tx ring slot */
1630	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1631					  &queue->gref_tx_head) < 0) {
1632		pr_alert("can't alloc tx grant refs\n");
1633		err = -ENOMEM;
1634		goto exit;
1635	}
1636
1637	/* A grant for every rx ring slot */
1638	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1639					  &queue->gref_rx_head) < 0) {
1640		pr_alert("can't alloc rx grant refs\n");
1641		err = -ENOMEM;
1642		goto exit_free_tx;
1643	}
1644
1645	return 0;
1646
1647 exit_free_tx:
1648	gnttab_free_grant_references(queue->gref_tx_head);
1649 exit:
1650	return err;
1651}
1652
1653static int write_queue_xenstore_keys(struct netfront_queue *queue,
1654			   struct xenbus_transaction *xbt, int write_hierarchical)
1655{
1656	/* Write the queue-specific keys into XenStore in the traditional
1657	 * way for a single queue, or in a queue subkeys for multiple
1658	 * way for a single queue, or under per-queue subkeys for multiple
1659	 */
1660	struct xenbus_device *dev = queue->info->xbdev;
1661	int err;
1662	const char *message;
1663	char *path;
1664	size_t pathsize;
1665
1666	/* Choose the correct place to write the keys */
1667	if (write_hierarchical) {
1668		pathsize = strlen(dev->nodename) + 10;
1669		path = kzalloc(pathsize, GFP_KERNEL);
1670		if (!path) {
1671			err = -ENOMEM;
1672			message = "out of memory while writing ring references";
1673			goto error;
1674		}
1675		snprintf(path, pathsize, "%s/queue-%u",
1676				dev->nodename, queue->id);
1677	} else {
1678		path = (char *)dev->nodename;
1679	}
1680
1681	/* Write ring references */
1682	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1683			queue->tx_ring_ref);
1684	if (err) {
1685		message = "writing tx-ring-ref";
1686		goto error;
1687	}
1688
1689	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1690			queue->rx_ring_ref);
1691	if (err) {
1692		message = "writing rx-ring-ref";
1693		goto error;
1694	}
1695
1696	/* Write event channels, taking into account both shared
1697	 * and split event channel scenarios.
1698	 */
1699	if (queue->tx_evtchn == queue->rx_evtchn) {
1700		/* Shared event channel */
1701		err = xenbus_printf(*xbt, path,
1702				"event-channel", "%u", queue->tx_evtchn);
1703		if (err) {
1704			message = "writing event-channel";
1705			goto error;
1706		}
1707	} else {
1708		/* Split event channels */
1709		err = xenbus_printf(*xbt, path,
1710				"event-channel-tx", "%u", queue->tx_evtchn);
1711		if (err) {
1712			message = "writing event-channel-tx";
1713			goto error;
1714		}
1715
1716		err = xenbus_printf(*xbt, path,
1717				"event-channel-rx", "%u", queue->rx_evtchn);
1718		if (err) {
1719			message = "writing event-channel-rx";
1720			goto error;
1721		}
1722	}
1723
1724	if (write_hierarchical)
1725		kfree(path);
1726	return 0;
1727
1728error:
1729	if (write_hierarchical)
1730		kfree(path);
1731	xenbus_dev_fatal(dev, err, "%s", message);
1732	return err;
1733}
1734
1735static void xennet_destroy_queues(struct netfront_info *info)
1736{
1737	unsigned int i;
1738
1739	rtnl_lock();
1740
1741	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1742		struct netfront_queue *queue = &info->queues[i];
1743
1744		if (netif_running(info->netdev))
1745			napi_disable(&queue->napi);
1746		netif_napi_del(&queue->napi);
1747	}
1748
1749	rtnl_unlock();
1750
1751	kfree(info->queues);
1752	info->queues = NULL;
1753}
1754
1755static int xennet_create_queues(struct netfront_info *info,
1756				unsigned int *num_queues)
1757{
1758	unsigned int i;
1759	int ret;
1760
1761	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
1762			       GFP_KERNEL);
1763	if (!info->queues)
1764		return -ENOMEM;
1765
1766	rtnl_lock();
1767
1768	for (i = 0; i < *num_queues; i++) {
1769		struct netfront_queue *queue = &info->queues[i];
1770
1771		queue->id = i;
1772		queue->info = info;
1773
1774		ret = xennet_init_queue(queue);
1775		if (ret < 0) {
1776			dev_warn(&info->netdev->dev,
1777				 "only created %d queues\n", i);
1778			*num_queues = i;
1779			break;
1780		}
1781
1782		netif_napi_add(queue->info->netdev, &queue->napi,
1783			       xennet_poll, 64);
1784		if (netif_running(info->netdev))
1785			napi_enable(&queue->napi);
1786	}
1787
1788	netif_set_real_num_tx_queues(info->netdev, *num_queues);
1789
1790	rtnl_unlock();
1791
1792	if (*num_queues == 0) {
1793		dev_err(&info->netdev->dev, "no queues\n");
1794		return -EINVAL;
1795	}
1796	return 0;
1797}
1798
1799/* Common code used when first setting up, and when resuming. */
1800static int talk_to_netback(struct xenbus_device *dev,
1801			   struct netfront_info *info)
1802{
1803	const char *message;
1804	struct xenbus_transaction xbt;
1805	int err;
1806	unsigned int feature_split_evtchn;
1807	unsigned int i = 0;
1808	unsigned int max_queues = 0;
1809	struct netfront_queue *queue = NULL;
1810	unsigned int num_queues = 1;
1811
1812	info->netdev->irq = 0;
1813
1814	/* Check if backend supports multiple queues */
1815	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1816					  "multi-queue-max-queues", 1);
1817	num_queues = min(max_queues, xennet_max_queues);
1818
1819	/* Check feature-split-event-channels */
1820	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
1821					"feature-split-event-channels", 0);
1822
1823	/* Read mac addr. */
1824	err = xen_net_read_mac(dev, info->netdev->dev_addr);
1825	if (err) {
1826		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1827		goto out;
1828	}
1829
1830	if (info->queues)
1831		xennet_destroy_queues(info);
1832
1833	err = xennet_create_queues(info, &num_queues);
1834	if (err < 0) {
1835		xenbus_dev_fatal(dev, err, "creating queues");
1836		kfree(info->queues);
1837		info->queues = NULL;
1838		goto out;
1839	}
1840
1841	/* Create shared ring, alloc event channel -- for each queue */
1842	for (i = 0; i < num_queues; ++i) {
1843		queue = &info->queues[i];
1844		err = setup_netfront(dev, queue, feature_split_evtchn);
1845		if (err)
1846			goto destroy_ring;
1847	}
1848
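	/* All keys are written in one xenbus transaction; if it ends with
	 * -EAGAIN (another transaction raced with ours), retry from here.
	 */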
1849again:
1850	err = xenbus_transaction_start(&xbt);
1851	if (err) {
1852		xenbus_dev_fatal(dev, err, "starting transaction");
1853		goto destroy_ring;
1854	}
1855
1856	if (xenbus_exists(XBT_NIL,
1857			  info->xbdev->otherend, "multi-queue-max-queues")) {
1858		/* Write the number of queues */
1859		err = xenbus_printf(xbt, dev->nodename,
1860				    "multi-queue-num-queues", "%u", num_queues);
1861		if (err) {
1862			message = "writing multi-queue-num-queues";
1863			goto abort_transaction_no_dev_fatal;
1864		}
1865	}
1866
1867	if (num_queues == 1) {
1868		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1869		if (err)
1870			goto abort_transaction_no_dev_fatal;
1871	} else {
1872		/* Write the keys for each queue */
1873		for (i = 0; i < num_queues; ++i) {
1874			queue = &info->queues[i];
1875			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1876			if (err)
1877				goto abort_transaction_no_dev_fatal;
1878		}
1879	}
1880
1881	/* The remaining keys are not queue-specific */
1882	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1883			    1);
1884	if (err) {
1885		message = "writing request-rx-copy";
1886		goto abort_transaction;
1887	}
1888
1889	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1890	if (err) {
1891		message = "writing feature-rx-notify";
1892		goto abort_transaction;
1893	}
1894
1895	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1896	if (err) {
1897		message = "writing feature-sg";
1898		goto abort_transaction;
1899	}
1900
1901	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1902	if (err) {
1903		message = "writing feature-gso-tcpv4";
1904		goto abort_transaction;
1905	}
1906
1907	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1908	if (err) {
1909		message = "writing feature-gso-tcpv6";
1910		goto abort_transaction;
1911	}
1912
1913	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1914			   "1");
1915	if (err) {
1916		message = "writing feature-ipv6-csum-offload";
1917		goto abort_transaction;
1918	}
1919
1920	err = xenbus_transaction_end(xbt, 0);
1921	if (err) {
1922		if (err == -EAGAIN)
1923			goto again;
1924		xenbus_dev_fatal(dev, err, "completing transaction");
1925		goto destroy_ring;
1926	}
1927
1928	return 0;
1929
1930 abort_transaction:
1931	xenbus_dev_fatal(dev, err, "%s", message);
1932abort_transaction_no_dev_fatal:
1933	xenbus_transaction_end(xbt, 1);
1934 destroy_ring:
1935	xennet_disconnect_backend(info);
1936	xennet_destroy_queues(info);
1937 out:
1938	unregister_netdev(info->netdev);
1939	xennet_free_netdev(info->netdev);
1940	return err;
1941}
1942
1943static int xennet_connect(struct net_device *dev)
1944{
1945	struct netfront_info *np = netdev_priv(dev);
1946	unsigned int num_queues = 0;
1947	int err;
1948	unsigned int j = 0;
1949	struct netfront_queue *queue = NULL;
1950
1951	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
1952		dev_info(&dev->dev,
1953			 "backend does not support copying receive path\n");
1954		return -ENODEV;
1955	}
1956
1957	err = talk_to_netback(np->xbdev, np);
1958	if (err)
1959		return err;
1960
1961	/* talk_to_netback() sets the correct number of queues */
1962	num_queues = dev->real_num_tx_queues;
1963
1964	rtnl_lock();
1965	netdev_update_features(dev);
1966	rtnl_unlock();
1967
1968	/*
1969	 * All public and private state should now be sane.  Get
1970	 * ready to start sending and receiving packets and give the driver
1971	 * domain a kick because we've probably just requeued some
1972	 * packets.
1973	 */
1974	netif_carrier_on(np->netdev);
1975	for (j = 0; j < num_queues; ++j) {
1976		queue = &np->queues[j];
1977
1978		notify_remote_via_irq(queue->tx_irq);
1979		if (queue->tx_irq != queue->rx_irq)
1980			notify_remote_via_irq(queue->rx_irq);
1981
1982		spin_lock_irq(&queue->tx_lock);
1983		xennet_tx_buf_gc(queue);
1984		spin_unlock_irq(&queue->tx_lock);
1985
1986		spin_lock_bh(&queue->rx_lock);
1987		xennet_alloc_rx_buffers(queue);
1988		spin_unlock_bh(&queue->rx_lock);
1989	}
1990
1991	return 0;
1992}
1993
1994/**
1995 * Callback received when the backend's state changes.
1996 */
1997static void netback_changed(struct xenbus_device *dev,
1998			    enum xenbus_state backend_state)
1999{
2000	struct netfront_info *np = dev_get_drvdata(&dev->dev);
2001	struct net_device *netdev = np->netdev;
2002
2003	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2004
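	/* Follow the backend's state machine: connect once it reaches
	 * InitWait, notify peers when it is Connected, and close the frontend
	 * when it moves through Closing/Closed.
	 */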
2005	switch (backend_state) {
2006	case XenbusStateInitialising:
2007	case XenbusStateInitialised:
2008	case XenbusStateReconfiguring:
2009	case XenbusStateReconfigured:
2010	case XenbusStateUnknown:
2011		break;
2012
2013	case XenbusStateInitWait:
2014		if (dev->state != XenbusStateInitialising)
2015			break;
2016		if (xennet_connect(netdev) != 0)
2017			break;
2018		xenbus_switch_state(dev, XenbusStateConnected);
2019		break;
2020
2021	case XenbusStateConnected:
2022		netdev_notify_peers(netdev);
2023		break;
2024
2025	case XenbusStateClosed:
2026		if (dev->state == XenbusStateClosed)
2027			break;
2028		/* Missed the backend's CLOSING state -- fallthrough */
2029	case XenbusStateClosing:
2030		xenbus_frontend_closed(dev);
2031		break;
2032	}
2033}
2034
2035static const struct xennet_stat {
2036	char name[ETH_GSTRING_LEN];
2037	u16 offset;
2038} xennet_stats[] = {
2039	{
2040		"rx_gso_checksum_fixup",
2041		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2042	},
2043};
2044
2045static int xennet_get_sset_count(struct net_device *dev, int string_set)
2046{
2047	switch (string_set) {
2048	case ETH_SS_STATS:
2049		return ARRAY_SIZE(xennet_stats);
2050	default:
2051		return -EINVAL;
2052	}
2053}
2054
2055static void xennet_get_ethtool_stats(struct net_device *dev,
2056				     struct ethtool_stats *stats, u64 * data)
2057{
2058	void *np = netdev_priv(dev);
2059	int i;
2060
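	/* Each entry's offset points at a counter embedded in struct
	 * netfront_info; read it with atomic_read() into the ethtool buffer.
	 */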
2061	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2062		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2063}
2064
2065static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2066{
2067	int i;
2068
2069	switch (stringset) {
2070	case ETH_SS_STATS:
2071		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2072			memcpy(data + i * ETH_GSTRING_LEN,
2073			       xennet_stats[i].name, ETH_GSTRING_LEN);
2074		break;
2075	}
2076}
2077
2078static const struct ethtool_ops xennet_ethtool_ops =
2079{
2080	.get_link = ethtool_op_get_link,
2081
2082	.get_sset_count = xennet_get_sset_count,
2083	.get_ethtool_stats = xennet_get_ethtool_stats,
2084	.get_strings = xennet_get_strings,
2085};
2086
2087#ifdef CONFIG_SYSFS
2088static ssize_t show_rxbuf(struct device *dev,
2089			  struct device_attribute *attr, char *buf)
2090{
2091	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2092}
2093
2094static ssize_t store_rxbuf(struct device *dev,
2095			   struct device_attribute *attr,
2096			   const char *buf, size_t len)
2097{
2098	char *endp;
2099	unsigned long target;
2100
2101	if (!capable(CAP_NET_ADMIN))
2102		return -EPERM;
2103
2104	target = simple_strtoul(buf, &endp, 0);
2105	if (endp == buf)
2106		return -EBADMSG;
2107
2108	/* rxbuf_min and rxbuf_max are no longer configurable. */
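	/* The written value is only validated and then discarded; show_rxbuf()
	 * always reports the fixed NET_RX_RING_SIZE.
	 */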
2109
2110	return len;
2111}
2112
2113static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2114static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2115static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
2116
2117static struct attribute *xennet_dev_attrs[] = {
2118	&dev_attr_rxbuf_min.attr,
2119	&dev_attr_rxbuf_max.attr,
2120	&dev_attr_rxbuf_cur.attr,
2121	NULL
2122};
2123
2124static const struct attribute_group xennet_dev_group = {
2125	.attrs = xennet_dev_attrs
2126};
2127#endif /* CONFIG_SYSFS */
2128
2129static int xennet_remove(struct xenbus_device *dev)
2130{
2131	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2132
2133	dev_dbg(&dev->dev, "%s\n", dev->nodename);
2134
2135	xennet_disconnect_backend(info);
2136
2137	unregister_netdev(info->netdev);
2138
2139	if (info->queues)
2140		xennet_destroy_queues(info);
2141	xennet_free_netdev(info->netdev);
2142
2143	return 0;
2144}
2145
2146static const struct xenbus_device_id netfront_ids[] = {
2147	{ "vif" },
2148	{ "" }
2149};
2150
2151static struct xenbus_driver netfront_driver = {
2152	.ids = netfront_ids,
2153	.probe = netfront_probe,
2154	.remove = xennet_remove,
2155	.resume = netfront_resume,
2156	.otherend_changed = netback_changed,
2157};
2158
2159static int __init netif_init(void)
2160{
2161	if (!xen_domain())
2162		return -ENODEV;
2163
2164	if (!xen_has_pv_nic_devices())
2165		return -ENODEV;
2166
2167	pr_info("Initialising Xen virtual ethernet driver\n");
2168
2169	/* Allow as many queues as there are CPUs if the user has not
2170	 * specified a value.
2171	 */
2172	if (xennet_max_queues == 0)
2173		xennet_max_queues = num_online_cpus();
2174
2175	return xenbus_register_frontend(&netfront_driver);
2176}
2177module_init(netif_init);
2178
2179
2180static void __exit netif_exit(void)
2181{
2182	xenbus_unregister_driver(&netfront_driver);
2183}
2184module_exit(netif_exit);
2185
2186MODULE_DESCRIPTION("Xen virtual network device frontend");
2187MODULE_LICENSE("GPL");
2188MODULE_ALIAS("xen:vif");
2189MODULE_ALIAS("xennet");