v4.17
   1/*
   2 * Virtual network driver for conversing with remote driver backends.
   3 *
   4 * Copyright (c) 2002-2005, K A Fraser
   5 * Copyright (c) 2005, XenSource Ltd
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version 2
   9 * as published by the Free Software Foundation; or, when distributed
  10 * separately from the Linux kernel or incorporated into other
  11 * software packages, subject to the following license:
  12 *
  13 * Permission is hereby granted, free of charge, to any person obtaining a copy
  14 * of this source file (the "Software"), to deal in the Software without
  15 * restriction, including without limitation the rights to use, copy, modify,
  16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  17 * and to permit persons to whom the Software is furnished to do so, subject to
  18 * the following conditions:
  19 *
  20 * The above copyright notice and this permission notice shall be included in
  21 * all copies or substantial portions of the Software.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  29 * IN THE SOFTWARE.
  30 */
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47
  48#include <xen/xen.h>
  49#include <xen/xenbus.h>
  50#include <xen/events.h>
  51#include <xen/page.h>
  52#include <xen/platform_pci.h>
  53#include <xen/grant_table.h>
  54
  55#include <xen/interface/io/netif.h>
  56#include <xen/interface/memory.h>
  57#include <xen/interface/grant_table.h>
  58
  59/* Module parameters */
  60#define MAX_QUEUES_DEFAULT 8
  61static unsigned int xennet_max_queues;
  62module_param_named(max_queues, xennet_max_queues, uint, 0644);
  63MODULE_PARM_DESC(max_queues,
  64		 "Maximum number of queues per virtual interface");
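/*
 * Note: mode 0644 on module_param_named() also exposes this knob read-write
 * (for root) under /sys/module/.../parameters/max_queues.  The value actually
 * used per device is the minimum of this and the backend's advertised
 * "multi-queue-max-queues" (see talk_to_netback() below).
 */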
  65
  66static const struct ethtool_ops xennet_ethtool_ops;
  67
  68struct netfront_cb {
  69	int pull_to;
  70};
  71
  72#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  73
  74#define RX_COPY_THRESHOLD 256
  75
  76#define GRANT_INVALID_REF	0
  77
  78#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
  79#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
  80
  81/* Minimum number of Rx slots (includes slot for GSO metadata). */
  82#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
  83
  84/* Queue name is interface name with "-qNNN" appended */
  85#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
  86
  87/* IRQ name is queue name with "-tx" or "-rx" appended */
  88#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
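/*
 * Size arithmetic: IFNAMSIZ already includes the terminating NUL, the extra
 * 6 bytes leave room for "-q" plus the queue number, and the extra 3 bytes
 * cover the "-tx"/"-rx" suffix used when split event channels are in effect.
 */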
  89
  90static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
  91
  92struct netfront_stats {
  93	u64			packets;
  94	u64			bytes;
  95	struct u64_stats_sync	syncp;
  96};
  97
  98struct netfront_info;
  99
 100struct netfront_queue {
 101	unsigned int id; /* Queue ID, 0-based */
 102	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
 103	struct netfront_info *info;
 104
 105	struct napi_struct napi;
 106
 107	/* Split event channels support, tx_* == rx_* when using
 108	 * single event channel.
 109	 */
 110	unsigned int tx_evtchn, rx_evtchn;
 111	unsigned int tx_irq, rx_irq;
 112	/* Only used when split event channels support is enabled */
 113	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 114	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 115
 116	spinlock_t   tx_lock;
 117	struct xen_netif_tx_front_ring tx;
 118	int tx_ring_ref;
 119
 120	/*
 121	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 122	 * are linked from tx_skb_freelist through skb_entry.link.
 123	 *
 124	 *  NB. Freelist index entries are always going to be less than
  125	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal to or
  126	 *  greater than PAGE_OFFSET: we use this property to distinguish
 127	 *  them.
 128	 */
 129	union skb_entry {
 130		struct sk_buff *skb;
 131		unsigned long link;
 132	} tx_skbs[NET_TX_RING_SIZE];
 133	grant_ref_t gref_tx_head;
 134	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 135	struct page *grant_tx_page[NET_TX_RING_SIZE];
 136	unsigned tx_skb_freelist;
 137
 138	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 139	struct xen_netif_rx_front_ring rx;
 140	int rx_ring_ref;
 141
 142	struct timer_list rx_refill_timer;
 143
 144	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 145	grant_ref_t gref_rx_head;
 146	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 147};
 148
 149struct netfront_info {
 150	struct list_head list;
 151	struct net_device *netdev;
 152
 153	struct xenbus_device *xbdev;
 154
 155	/* Multi-queue support */
 156	struct netfront_queue *queues;
 157
 158	/* Statistics */
 159	struct netfront_stats __percpu *rx_stats;
 160	struct netfront_stats __percpu *tx_stats;
 161
 162	atomic_t rx_gso_checksum_fixup;
 163};
 164
 165struct netfront_rx_info {
 166	struct xen_netif_rx_response rx;
 167	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 168};
 169
 170static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 171{
 172	list->link = id;
 173}
 174
 175static int skb_entry_is_link(const union skb_entry *list)
 176{
 177	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
 178	return (unsigned long)list->skb < PAGE_OFFSET;
 179}
 180
 181/*
  182 * Access macros for acquiring/freeing slots in tx_skbs[].
 183 */
 184
 185static void add_id_to_freelist(unsigned *head, union skb_entry *list,
 186			       unsigned short id)
 187{
 188	skb_entry_set_link(&list[id], *head);
 189	*head = id;
 190}
 191
 192static unsigned short get_id_from_freelist(unsigned *head,
 193					   union skb_entry *list)
 194{
 195	unsigned int id = *head;
 196	*head = list[id].link;
 197	return id;
 198}
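/*
 * The freelist is a simple LIFO threaded through tx_skbs[] itself: each free
 * entry's .link field holds the index of the next free entry and
 * tx_skb_freelist holds the index of the first one.  A minimal sketch of the
 * behaviour, assuming an initially empty head H:
 *
 *	add_id_to_freelist(&H, list, 5);   // list[5].link = old H, H = 5
 *	add_id_to_freelist(&H, list, 9);   // list[9].link = 5,     H = 9
 *	get_id_from_freelist(&H, list);    // returns 9, H becomes 5
 *	get_id_from_freelist(&H, list);    // returns 5
 */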
 199
 200static int xennet_rxidx(RING_IDX idx)
 201{
 202	return idx & (NET_RX_RING_SIZE - 1);
 203}
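/*
 * NET_RX_RING_SIZE is a power of two, so the mask above is equivalent to
 * idx % NET_RX_RING_SIZE; it folds a free-running ring index onto a slot in
 * rx_skbs[]/grant_rx_ref[].
 */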
 204
 205static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
 206					 RING_IDX ri)
 207{
 208	int i = xennet_rxidx(ri);
 209	struct sk_buff *skb = queue->rx_skbs[i];
 210	queue->rx_skbs[i] = NULL;
 211	return skb;
 212}
 213
 214static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 215					    RING_IDX ri)
 216{
 217	int i = xennet_rxidx(ri);
 218	grant_ref_t ref = queue->grant_rx_ref[i];
 219	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
 220	return ref;
 221}
 222
 223#ifdef CONFIG_SYSFS
 224static const struct attribute_group xennet_dev_group;
 225#endif
 226
 227static bool xennet_can_sg(struct net_device *dev)
 228{
 229	return dev->features & NETIF_F_SG;
 230}
 231
 232
 233static void rx_refill_timeout(struct timer_list *t)
 234{
 235	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
 236	napi_schedule(&queue->napi);
 237}
 238
 239static int netfront_tx_slot_available(struct netfront_queue *queue)
 240{
 241	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
 242		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
 243}
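/*
 * I.e. treat the queue as full once fewer than MAX_SKB_FRAGS + 2 slots
 * remain, keeping headroom for one more maximally fragmented packet.
 */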
 244
 245static void xennet_maybe_wake_tx(struct netfront_queue *queue)
 246{
 247	struct net_device *dev = queue->info->netdev;
 248	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
 249
 250	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
 251	    netfront_tx_slot_available(queue) &&
 252	    likely(netif_running(dev)))
 253		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 254}
 255
 256
 257static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 258{
 259	struct sk_buff *skb;
 260	struct page *page;
 261
 262	skb = __netdev_alloc_skb(queue->info->netdev,
 263				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
 264				 GFP_ATOMIC | __GFP_NOWARN);
 265	if (unlikely(!skb))
 266		return NULL;
 267
 268	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 269	if (!page) {
 270		kfree_skb(skb);
 271		return NULL;
 272	}
 273	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 274
  275	/* Align IP header to a 16-byte boundary */
 276	skb_reserve(skb, NET_IP_ALIGN);
 277	skb->dev = queue->info->netdev;
 278
 279	return skb;
 280}
 281
 282
 283static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 284{
 285	RING_IDX req_prod = queue->rx.req_prod_pvt;
 286	int notify;
 287	int err = 0;
 288
 289	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 290		return;
 291
 292	for (req_prod = queue->rx.req_prod_pvt;
 293	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
 294	     req_prod++) {
 295		struct sk_buff *skb;
 296		unsigned short id;
 297		grant_ref_t ref;
 298		struct page *page;
 299		struct xen_netif_rx_request *req;
 300
 301		skb = xennet_alloc_one_rx_buffer(queue);
 302		if (!skb) {
 303			err = -ENOMEM;
 304			break;
 305		}
 306
 307		id = xennet_rxidx(req_prod);
 308
 309		BUG_ON(queue->rx_skbs[id]);
 310		queue->rx_skbs[id] = skb;
 311
 312		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
 313		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 314		queue->grant_rx_ref[id] = ref;
 315
 316		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 317
 318		req = RING_GET_REQUEST(&queue->rx, req_prod);
 319		gnttab_page_grant_foreign_access_ref_one(ref,
 320							 queue->info->xbdev->otherend_id,
 321							 page,
 322							 0);
 323		req->id = id;
 324		req->gref = ref;
 325	}
 326
 327	queue->rx.req_prod_pvt = req_prod;
 328
 329	/* Try again later if there are not enough requests or skb allocation
 330	 * failed.
 331	 * Enough requests is quantified as the sum of newly created slots and
 332	 * the unconsumed slots at the backend.
 333	 */
 334	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
 335	    unlikely(err)) {
 336		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 337		return;
 338	}
 339
  340	wmb();		/* barrier so backend sees requests */
 341
 342	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 343	if (notify)
 344		notify_remote_via_irq(queue->rx_irq);
 345}
 346
 347static int xennet_open(struct net_device *dev)
 348{
 349	struct netfront_info *np = netdev_priv(dev);
 350	unsigned int num_queues = dev->real_num_tx_queues;
 351	unsigned int i = 0;
 352	struct netfront_queue *queue = NULL;
 353
 354	if (!np->queues)
 355		return -ENODEV;
 356
 357	for (i = 0; i < num_queues; ++i) {
 358		queue = &np->queues[i];
 359		napi_enable(&queue->napi);
 360
 361		spin_lock_bh(&queue->rx_lock);
 362		if (netif_carrier_ok(dev)) {
 363			xennet_alloc_rx_buffers(queue);
 364			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
 365			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
 366				napi_schedule(&queue->napi);
 367		}
 368		spin_unlock_bh(&queue->rx_lock);
 369	}
 370
 371	netif_tx_start_all_queues(dev);
 372
 373	return 0;
 374}
 375
 376static void xennet_tx_buf_gc(struct netfront_queue *queue)
 377{
 378	RING_IDX cons, prod;
 379	unsigned short id;
 380	struct sk_buff *skb;
 381	bool more_to_do;
 382
 383	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 384
 385	do {
 386		prod = queue->tx.sring->rsp_prod;
 387		rmb(); /* Ensure we see responses up to 'rp'. */
 388
 389		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 390			struct xen_netif_tx_response *txrsp;
 391
 392			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
 393			if (txrsp->status == XEN_NETIF_RSP_NULL)
 394				continue;
 395
 396			id  = txrsp->id;
 397			skb = queue->tx_skbs[id].skb;
 398			if (unlikely(gnttab_query_foreign_access(
 399				queue->grant_tx_ref[id]) != 0)) {
 400				pr_alert("%s: warning -- grant still in use by backend domain\n",
 401					 __func__);
 402				BUG();
 403			}
 404			gnttab_end_foreign_access_ref(
 405				queue->grant_tx_ref[id], GNTMAP_readonly);
 406			gnttab_release_grant_reference(
 407				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 408			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
 409			queue->grant_tx_page[id] = NULL;
 410			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
 411			dev_kfree_skb_irq(skb);
 412		}
 413
 414		queue->tx.rsp_cons = prod;
 415
 416		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
 417	} while (more_to_do);
 418
 419	xennet_maybe_wake_tx(queue);
 420}
 421
 422struct xennet_gnttab_make_txreq {
 423	struct netfront_queue *queue;
 424	struct sk_buff *skb;
 425	struct page *page;
 426	struct xen_netif_tx_request *tx; /* Last request */
 427	unsigned int size;
 428};
 429
 430static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 431				  unsigned int len, void *data)
 432{
 433	struct xennet_gnttab_make_txreq *info = data;
 434	unsigned int id;
 435	struct xen_netif_tx_request *tx;
 436	grant_ref_t ref;
 437	/* convenient aliases */
 438	struct page *page = info->page;
 439	struct netfront_queue *queue = info->queue;
 440	struct sk_buff *skb = info->skb;
 441
 442	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
 443	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 444	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 445	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 446
 447	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 448					gfn, GNTMAP_readonly);
 449
 450	queue->tx_skbs[id].skb = skb;
 451	queue->grant_tx_page[id] = page;
 452	queue->grant_tx_ref[id] = ref;
 453
 454	tx->id = id;
 455	tx->gref = ref;
 456	tx->offset = offset;
 457	tx->size = len;
 458	tx->flags = 0;
 459
 460	info->tx = tx;
 461	info->size += tx->size;
 462}
 463
 464static struct xen_netif_tx_request *xennet_make_first_txreq(
 465	struct netfront_queue *queue, struct sk_buff *skb,
 466	struct page *page, unsigned int offset, unsigned int len)
 467{
 468	struct xennet_gnttab_make_txreq info = {
 469		.queue = queue,
 470		.skb = skb,
 471		.page = page,
 472		.size = 0,
 473	};
 474
 475	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
 476
 477	return info.tx;
 478}
 479
 480static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 481				  unsigned int len, void *data)
 482{
 483	struct xennet_gnttab_make_txreq *info = data;
 484
 485	info->tx->flags |= XEN_NETTXF_more_data;
 486	skb_get(info->skb);
 487	xennet_tx_setup_grant(gfn, offset, len, data);
 488}
 489
 490static struct xen_netif_tx_request *xennet_make_txreqs(
 491	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
 492	struct sk_buff *skb, struct page *page,
 493	unsigned int offset, unsigned int len)
 494{
 495	struct xennet_gnttab_make_txreq info = {
 496		.queue = queue,
 497		.skb = skb,
 498		.tx = tx,
 499	};
 500
  501	/* Skip fully unused page frames at the start of the compound page */
 502	page += offset >> PAGE_SHIFT;
 503	offset &= ~PAGE_MASK;
 504
 505	while (len) {
 506		info.page = page;
 507		info.size = 0;
 508
 509		gnttab_foreach_grant_in_range(page, offset, len,
 510					      xennet_make_one_txreq,
 511					      &info);
 512
 513		page++;
 514		offset = 0;
 515		len -= info.size;
 516	}
 517
 518	return info.tx;
 519}
 520
 521/*
 522 * Count how many ring slots are required to send this skb. Each frag
 523 * might be a compound page.
 524 */
 525static int xennet_count_skb_slots(struct sk_buff *skb)
 526{
 527	int i, frags = skb_shinfo(skb)->nr_frags;
 528	int slots;
 529
 530	slots = gnttab_count_grant(offset_in_page(skb->data),
 531				   skb_headlen(skb));
 532
 533	for (i = 0; i < frags; i++) {
 534		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 535		unsigned long size = skb_frag_size(frag);
 536		unsigned long offset = frag->page_offset;
 537
  538		/* Skip fully unused page frames at the start of the compound page */
 539		offset &= ~PAGE_MASK;
 540
 541		slots += gnttab_count_grant(offset, size);
 542	}
 543
 544	return slots;
 545}
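/*
 * Worked example, assuming 4 KiB Xen pages: a 6000-byte linear area that
 * starts at offset 1000 within its first page crosses one page boundary and
 * therefore needs two grants/slots; each frag then adds however many grants
 * its own offset and size span, so a compound-page frag can contribute
 * several slots on its own.
 */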
 546
 547static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 548			       void *accel_priv, select_queue_fallback_t fallback)
 549{
 550	unsigned int num_queues = dev->real_num_tx_queues;
 551	u32 hash;
 552	u16 queue_idx;
 553
 554	/* First, check if there is only one queue */
 555	if (num_queues == 1) {
 556		queue_idx = 0;
 557	} else {
 558		hash = skb_get_hash(skb);
 559		queue_idx = hash % num_queues;
 560	}
 561
 562	return queue_idx;
 563}
 564
 565#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
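/* With 4 KiB Xen pages this works out to 65536 / 4096 + 1 = 17 slots: one
 * page-sized slot per page of a maximum-length (64 KiB) packet, plus one
 * extra to allow for a misaligned start.
 */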
 566
 567static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 568{
 569	struct netfront_info *np = netdev_priv(dev);
 570	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 571	struct xen_netif_tx_request *tx, *first_tx;
 572	unsigned int i;
 573	int notify;
 574	int slots;
 575	struct page *page;
 576	unsigned int offset;
 577	unsigned int len;
 578	unsigned long flags;
 579	struct netfront_queue *queue = NULL;
 580	unsigned int num_queues = dev->real_num_tx_queues;
 581	u16 queue_index;
 582	struct sk_buff *nskb;
 583
 584	/* Drop the packet if no queues are set up */
 585	if (num_queues < 1)
 586		goto drop;
 587	/* Determine which queue to transmit this SKB on */
 588	queue_index = skb_get_queue_mapping(skb);
 589	queue = &np->queues[queue_index];
 590
 591	/* If skb->len is too big for wire format, drop skb and alert
 592	 * user about misconfiguration.
 593	 */
 594	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 595		net_alert_ratelimited(
 596			"xennet: skb->len = %u, too big for wire format\n",
 597			skb->len);
 598		goto drop;
 599	}
 600
 601	slots = xennet_count_skb_slots(skb);
 602	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
 603		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 604				    slots, skb->len);
 605		if (skb_linearize(skb))
 606			goto drop;
 607	}
 608
 609	page = virt_to_page(skb->data);
 610	offset = offset_in_page(skb->data);
 611
 612	/* The first req should be at least ETH_HLEN size or the packet will be
 613	 * dropped by netback.
 614	 */
 615	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
 616		nskb = skb_copy(skb, GFP_ATOMIC);
 617		if (!nskb)
 618			goto drop;
 619		dev_consume_skb_any(skb);
 620		skb = nskb;
 621		page = virt_to_page(skb->data);
 622		offset = offset_in_page(skb->data);
 623	}
 624
 625	len = skb_headlen(skb);
 626
 627	spin_lock_irqsave(&queue->tx_lock, flags);
 628
 629	if (unlikely(!netif_carrier_ok(dev) ||
 630		     (slots > 1 && !xennet_can_sg(dev)) ||
 631		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 632		spin_unlock_irqrestore(&queue->tx_lock, flags);
 633		goto drop;
 634	}
 635
 636	/* First request for the linear area. */
 637	first_tx = tx = xennet_make_first_txreq(queue, skb,
 638						page, offset, len);
 639	offset += tx->size;
 640	if (offset == PAGE_SIZE) {
 641		page++;
 642		offset = 0;
 643	}
 644	len -= tx->size;
 645
 646	if (skb->ip_summed == CHECKSUM_PARTIAL)
 647		/* local packet? */
 648		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
 649	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 650		/* remote but checksummed. */
 651		tx->flags |= XEN_NETTXF_data_validated;
 652
 653	/* Optional extra info after the first request. */
 654	if (skb_shinfo(skb)->gso_size) {
 655		struct xen_netif_extra_info *gso;
 656
 657		gso = (struct xen_netif_extra_info *)
 658			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 659
 660		tx->flags |= XEN_NETTXF_extra_info;
 661
 662		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 663		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 664			XEN_NETIF_GSO_TYPE_TCPV6 :
 665			XEN_NETIF_GSO_TYPE_TCPV4;
 666		gso->u.gso.pad = 0;
 667		gso->u.gso.features = 0;
 668
 669		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 670		gso->flags = 0;
 671	}
 672
 673	/* Requests for the rest of the linear area. */
 674	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
 675
 676	/* Requests for all the frags. */
 677	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 678		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 679		tx = xennet_make_txreqs(queue, tx, skb,
 680					skb_frag_page(frag), frag->page_offset,
 681					skb_frag_size(frag));
 682	}
 683
 684	/* First request has the packet length. */
 685	first_tx->size = skb->len;
 686
 687	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 688	if (notify)
 689		notify_remote_via_irq(queue->tx_irq);
 690
 691	u64_stats_update_begin(&tx_stats->syncp);
 692	tx_stats->bytes += skb->len;
 693	tx_stats->packets++;
 694	u64_stats_update_end(&tx_stats->syncp);
 695
 696	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 697	xennet_tx_buf_gc(queue);
 698
 699	if (!netfront_tx_slot_available(queue))
 700		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
 701
 702	spin_unlock_irqrestore(&queue->tx_lock, flags);
 703
 704	return NETDEV_TX_OK;
 705
 706 drop:
 707	dev->stats.tx_dropped++;
 708	dev_kfree_skb_any(skb);
 709	return NETDEV_TX_OK;
 710}
 711
 712static int xennet_close(struct net_device *dev)
 713{
 714	struct netfront_info *np = netdev_priv(dev);
 715	unsigned int num_queues = dev->real_num_tx_queues;
 716	unsigned int i;
 717	struct netfront_queue *queue;
 718	netif_tx_stop_all_queues(np->netdev);
 719	for (i = 0; i < num_queues; ++i) {
 720		queue = &np->queues[i];
 721		napi_disable(&queue->napi);
 722	}
 723	return 0;
 724}
 725
 726static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 727				grant_ref_t ref)
 728{
 729	int new = xennet_rxidx(queue->rx.req_prod_pvt);
 730
 731	BUG_ON(queue->rx_skbs[new]);
 732	queue->rx_skbs[new] = skb;
 733	queue->grant_rx_ref[new] = ref;
 734	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
 735	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
 736	queue->rx.req_prod_pvt++;
 737}
 738
 739static int xennet_get_extras(struct netfront_queue *queue,
 740			     struct xen_netif_extra_info *extras,
 741			     RING_IDX rp)
 742
 743{
 744	struct xen_netif_extra_info *extra;
 745	struct device *dev = &queue->info->netdev->dev;
 746	RING_IDX cons = queue->rx.rsp_cons;
 747	int err = 0;
 748
 749	do {
 750		struct sk_buff *skb;
 751		grant_ref_t ref;
 752
 753		if (unlikely(cons + 1 == rp)) {
 754			if (net_ratelimit())
 755				dev_warn(dev, "Missing extra info\n");
 756			err = -EBADR;
 757			break;
 758		}
 759
 760		extra = (struct xen_netif_extra_info *)
 761			RING_GET_RESPONSE(&queue->rx, ++cons);
 762
 763		if (unlikely(!extra->type ||
 764			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 765			if (net_ratelimit())
 766				dev_warn(dev, "Invalid extra type: %d\n",
 767					extra->type);
 768			err = -EINVAL;
 769		} else {
 770			memcpy(&extras[extra->type - 1], extra,
 771			       sizeof(*extra));
 772		}
 773
 774		skb = xennet_get_rx_skb(queue, cons);
 775		ref = xennet_get_rx_ref(queue, cons);
 776		xennet_move_rx_slot(queue, skb, ref);
 777	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 778
 779	queue->rx.rsp_cons = cons;
 780	return err;
 781}
 782
 783static int xennet_get_responses(struct netfront_queue *queue,
 784				struct netfront_rx_info *rinfo, RING_IDX rp,
 785				struct sk_buff_head *list)
 786{
 787	struct xen_netif_rx_response *rx = &rinfo->rx;
 788	struct xen_netif_extra_info *extras = rinfo->extras;
 789	struct device *dev = &queue->info->netdev->dev;
 790	RING_IDX cons = queue->rx.rsp_cons;
 791	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
 792	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
 793	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
 794	int slots = 1;
 795	int err = 0;
 796	unsigned long ret;
 797
 798	if (rx->flags & XEN_NETRXF_extra_info) {
 799		err = xennet_get_extras(queue, extras, rp);
 800		cons = queue->rx.rsp_cons;
 801	}
 802
 803	for (;;) {
 804		if (unlikely(rx->status < 0 ||
 805			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
 806			if (net_ratelimit())
 807				dev_warn(dev, "rx->offset: %u, size: %d\n",
 808					 rx->offset, rx->status);
 809			xennet_move_rx_slot(queue, skb, ref);
 810			err = -EINVAL;
 811			goto next;
 812		}
 813
 814		/*
 815		 * This definitely indicates a bug, either in this driver or in
 816		 * the backend driver. In future this should flag the bad
 817		 * situation to the system controller to reboot the backend.
 818		 */
 819		if (ref == GRANT_INVALID_REF) {
 820			if (net_ratelimit())
 821				dev_warn(dev, "Bad rx response id %d.\n",
 822					 rx->id);
 823			err = -EINVAL;
 824			goto next;
 825		}
 826
 827		ret = gnttab_end_foreign_access_ref(ref, 0);
 828		BUG_ON(!ret);
 829
 830		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 831
 832		__skb_queue_tail(list, skb);
 833
 834next:
 835		if (!(rx->flags & XEN_NETRXF_more_data))
 836			break;
 837
 838		if (cons + slots == rp) {
 839			if (net_ratelimit())
 840				dev_warn(dev, "Need more slots\n");
 841			err = -ENOENT;
 842			break;
 843		}
 844
 845		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
 846		skb = xennet_get_rx_skb(queue, cons + slots);
 847		ref = xennet_get_rx_ref(queue, cons + slots);
 848		slots++;
 849	}
 850
 851	if (unlikely(slots > max)) {
 852		if (net_ratelimit())
 853			dev_warn(dev, "Too many slots\n");
 854		err = -E2BIG;
 855	}
 856
 857	if (unlikely(err))
 858		queue->rx.rsp_cons = cons + slots;
 859
 860	return err;
 861}
 862
 863static int xennet_set_skb_gso(struct sk_buff *skb,
 864			      struct xen_netif_extra_info *gso)
 865{
 866	if (!gso->u.gso.size) {
 867		if (net_ratelimit())
 868			pr_warn("GSO size must not be zero\n");
 869		return -EINVAL;
 870	}
 871
 872	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
 873	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
 874		if (net_ratelimit())
 875			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
 876		return -EINVAL;
 877	}
 878
 879	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 880	skb_shinfo(skb)->gso_type =
 881		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
 882		SKB_GSO_TCPV4 :
 883		SKB_GSO_TCPV6;
 884
 885	/* Header must be checked, and gso_segs computed. */
 886	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
 887	skb_shinfo(skb)->gso_segs = 0;
 888
 889	return 0;
 890}
 891
 892static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 893				  struct sk_buff *skb,
 894				  struct sk_buff_head *list)
 895{
 896	struct skb_shared_info *shinfo = skb_shinfo(skb);
 897	RING_IDX cons = queue->rx.rsp_cons;
 898	struct sk_buff *nskb;
 899
 900	while ((nskb = __skb_dequeue(list))) {
 901		struct xen_netif_rx_response *rx =
 902			RING_GET_RESPONSE(&queue->rx, ++cons);
 903		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 904
 905		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
 906			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 907
 908			BUG_ON(pull_to <= skb_headlen(skb));
 909			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 910		}
 911		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
 912
 913		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
 914				rx->offset, rx->status, PAGE_SIZE);
 915
 916		skb_shinfo(nskb)->nr_frags = 0;
 917		kfree_skb(nskb);
 918	}
 919
 920	return cons;
 921}
 922
 923static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 924{
 925	bool recalculate_partial_csum = false;
 926
 927	/*
 928	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 929	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 930	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 931	 * recalculate the partial checksum.
 932	 */
 933	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 934		struct netfront_info *np = netdev_priv(dev);
 935		atomic_inc(&np->rx_gso_checksum_fixup);
 936		skb->ip_summed = CHECKSUM_PARTIAL;
 937		recalculate_partial_csum = true;
 938	}
 939
 940	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 941	if (skb->ip_summed != CHECKSUM_PARTIAL)
 942		return 0;
 943
 944	return skb_checksum_setup(skb, recalculate_partial_csum);
 945}
 946
 947static int handle_incoming_queue(struct netfront_queue *queue,
 948				 struct sk_buff_head *rxq)
 949{
 950	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
 951	int packets_dropped = 0;
 952	struct sk_buff *skb;
 953
 954	while ((skb = __skb_dequeue(rxq)) != NULL) {
 955		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 956
 957		if (pull_to > skb_headlen(skb))
 958			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 959
 960		/* Ethernet work: Delayed to here as it peeks the header. */
 961		skb->protocol = eth_type_trans(skb, queue->info->netdev);
 962		skb_reset_network_header(skb);
 963
 964		if (checksum_setup(queue->info->netdev, skb)) {
 965			kfree_skb(skb);
 966			packets_dropped++;
 967			queue->info->netdev->stats.rx_errors++;
 968			continue;
 969		}
 970
 971		u64_stats_update_begin(&rx_stats->syncp);
 972		rx_stats->packets++;
 973		rx_stats->bytes += skb->len;
 974		u64_stats_update_end(&rx_stats->syncp);
 975
 976		/* Pass it up. */
 977		napi_gro_receive(&queue->napi, skb);
 978	}
 979
 980	return packets_dropped;
 981}
 982
 983static int xennet_poll(struct napi_struct *napi, int budget)
 984{
 985	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
 986	struct net_device *dev = queue->info->netdev;
 987	struct sk_buff *skb;
 988	struct netfront_rx_info rinfo;
 989	struct xen_netif_rx_response *rx = &rinfo.rx;
 990	struct xen_netif_extra_info *extras = rinfo.extras;
 991	RING_IDX i, rp;
 992	int work_done;
 993	struct sk_buff_head rxq;
 994	struct sk_buff_head errq;
 995	struct sk_buff_head tmpq;
 996	int err;
 997
 998	spin_lock(&queue->rx_lock);
 999
1000	skb_queue_head_init(&rxq);
1001	skb_queue_head_init(&errq);
1002	skb_queue_head_init(&tmpq);
1003
1004	rp = queue->rx.sring->rsp_prod;
1005	rmb(); /* Ensure we see queued responses up to 'rp'. */
1006
1007	i = queue->rx.rsp_cons;
1008	work_done = 0;
1009	while ((i != rp) && (work_done < budget)) {
1010		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
1011		memset(extras, 0, sizeof(rinfo.extras));
1012
1013		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
1014
1015		if (unlikely(err)) {
1016err:
1017			while ((skb = __skb_dequeue(&tmpq)))
1018				__skb_queue_tail(&errq, skb);
1019			dev->stats.rx_errors++;
1020			i = queue->rx.rsp_cons;
1021			continue;
1022		}
1023
1024		skb = __skb_dequeue(&tmpq);
1025
1026		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1027			struct xen_netif_extra_info *gso;
1028			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1029
1030			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1031				__skb_queue_head(&tmpq, skb);
1032				queue->rx.rsp_cons += skb_queue_len(&tmpq);
1033				goto err;
1034			}
1035		}
1036
1037		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1038		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1039			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1040
1041		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1042		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1043		skb->data_len = rx->status;
1044		skb->len += rx->status;
1045
1046		i = xennet_fill_frags(queue, skb, &tmpq);
1047
1048		if (rx->flags & XEN_NETRXF_csum_blank)
1049			skb->ip_summed = CHECKSUM_PARTIAL;
1050		else if (rx->flags & XEN_NETRXF_data_validated)
1051			skb->ip_summed = CHECKSUM_UNNECESSARY;
1052
1053		__skb_queue_tail(&rxq, skb);
1054
1055		queue->rx.rsp_cons = ++i;
1056		work_done++;
1057	}
1058
1059	__skb_queue_purge(&errq);
1060
1061	work_done -= handle_incoming_queue(queue, &rxq);
1062
1063	xennet_alloc_rx_buffers(queue);
1064
1065	if (work_done < budget) {
1066		int more_to_do = 0;
1067
1068		napi_complete_done(napi, work_done);
1069
1070		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1071		if (more_to_do)
1072			napi_schedule(napi);
1073	}
1074
1075	spin_unlock(&queue->rx_lock);
1076
1077	return work_done;
1078}
1079
1080static int xennet_change_mtu(struct net_device *dev, int mtu)
1081{
1082	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1083
1084	if (mtu > max)
1085		return -EINVAL;
1086	dev->mtu = mtu;
1087	return 0;
1088}
1089
1090static void xennet_get_stats64(struct net_device *dev,
1091			       struct rtnl_link_stats64 *tot)
1092{
1093	struct netfront_info *np = netdev_priv(dev);
1094	int cpu;
1095
1096	for_each_possible_cpu(cpu) {
1097		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1098		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1099		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1100		unsigned int start;
1101
1102		do {
1103			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1104			tx_packets = tx_stats->packets;
1105			tx_bytes = tx_stats->bytes;
1106		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1107
1108		do {
1109			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1110			rx_packets = rx_stats->packets;
1111			rx_bytes = rx_stats->bytes;
1112		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1113
1114		tot->rx_packets += rx_packets;
1115		tot->tx_packets += tx_packets;
1116		tot->rx_bytes   += rx_bytes;
1117		tot->tx_bytes   += tx_bytes;
1118	}
1119
1120	tot->rx_errors  = dev->stats.rx_errors;
1121	tot->tx_dropped = dev->stats.tx_dropped;
1122}
1123
1124static void xennet_release_tx_bufs(struct netfront_queue *queue)
1125{
1126	struct sk_buff *skb;
1127	int i;
1128
1129	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1130		/* Skip over entries which are actually freelist references */
1131		if (skb_entry_is_link(&queue->tx_skbs[i]))
1132			continue;
1133
1134		skb = queue->tx_skbs[i].skb;
1135		get_page(queue->grant_tx_page[i]);
1136		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1137					  GNTMAP_readonly,
1138					  (unsigned long)page_address(queue->grant_tx_page[i]));
1139		queue->grant_tx_page[i] = NULL;
1140		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1141		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1142		dev_kfree_skb_irq(skb);
1143	}
1144}
1145
1146static void xennet_release_rx_bufs(struct netfront_queue *queue)
1147{
1148	int id, ref;
1149
1150	spin_lock_bh(&queue->rx_lock);
1151
1152	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1153		struct sk_buff *skb;
1154		struct page *page;
1155
1156		skb = queue->rx_skbs[id];
1157		if (!skb)
1158			continue;
1159
1160		ref = queue->grant_rx_ref[id];
1161		if (ref == GRANT_INVALID_REF)
1162			continue;
1163
1164		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1165
1166		/* gnttab_end_foreign_access() needs a page ref until
1167		 * foreign access is ended (which may be deferred).
1168		 */
1169		get_page(page);
1170		gnttab_end_foreign_access(ref, 0,
1171					  (unsigned long)page_address(page));
1172		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1173
1174		kfree_skb(skb);
1175	}
1176
1177	spin_unlock_bh(&queue->rx_lock);
1178}
1179
1180static netdev_features_t xennet_fix_features(struct net_device *dev,
1181	netdev_features_t features)
1182{
1183	struct netfront_info *np = netdev_priv(dev);
1184
1185	if (features & NETIF_F_SG &&
1186	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1187		features &= ~NETIF_F_SG;
1188
1189	if (features & NETIF_F_IPV6_CSUM &&
1190	    !xenbus_read_unsigned(np->xbdev->otherend,
1191				  "feature-ipv6-csum-offload", 0))
1192		features &= ~NETIF_F_IPV6_CSUM;
1193
1194	if (features & NETIF_F_TSO &&
1195	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1196		features &= ~NETIF_F_TSO;
1197
1198	if (features & NETIF_F_TSO6 &&
1199	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1200		features &= ~NETIF_F_TSO6;
1201
1202	return features;
1203}
1204
1205static int xennet_set_features(struct net_device *dev,
1206	netdev_features_t features)
1207{
1208	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1209		netdev_info(dev, "Reducing MTU because no SG offload");
1210		dev->mtu = ETH_DATA_LEN;
1211	}
1212
1213	return 0;
1214}
1215
1216static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1217{
1218	struct netfront_queue *queue = dev_id;
1219	unsigned long flags;
1220
1221	spin_lock_irqsave(&queue->tx_lock, flags);
1222	xennet_tx_buf_gc(queue);
1223	spin_unlock_irqrestore(&queue->tx_lock, flags);
1224
1225	return IRQ_HANDLED;
1226}
1227
1228static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1229{
1230	struct netfront_queue *queue = dev_id;
1231	struct net_device *dev = queue->info->netdev;
1232
1233	if (likely(netif_carrier_ok(dev) &&
1234		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1235		napi_schedule(&queue->napi);
1236
1237	return IRQ_HANDLED;
1238}
1239
1240static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1241{
1242	xennet_tx_interrupt(irq, dev_id);
1243	xennet_rx_interrupt(irq, dev_id);
1244	return IRQ_HANDLED;
1245}
1246
1247#ifdef CONFIG_NET_POLL_CONTROLLER
1248static void xennet_poll_controller(struct net_device *dev)
1249{
1250	/* Poll each queue */
1251	struct netfront_info *info = netdev_priv(dev);
1252	unsigned int num_queues = dev->real_num_tx_queues;
1253	unsigned int i;
1254	for (i = 0; i < num_queues; ++i)
1255		xennet_interrupt(0, &info->queues[i]);
1256}
1257#endif
1258
1259static const struct net_device_ops xennet_netdev_ops = {
1260	.ndo_open            = xennet_open,
1261	.ndo_stop            = xennet_close,
1262	.ndo_start_xmit      = xennet_start_xmit,
1263	.ndo_change_mtu	     = xennet_change_mtu,
1264	.ndo_get_stats64     = xennet_get_stats64,
1265	.ndo_set_mac_address = eth_mac_addr,
1266	.ndo_validate_addr   = eth_validate_addr,
1267	.ndo_fix_features    = xennet_fix_features,
1268	.ndo_set_features    = xennet_set_features,
1269	.ndo_select_queue    = xennet_select_queue,
1270#ifdef CONFIG_NET_POLL_CONTROLLER
1271	.ndo_poll_controller = xennet_poll_controller,
1272#endif
1273};
1274
1275static void xennet_free_netdev(struct net_device *netdev)
1276{
1277	struct netfront_info *np = netdev_priv(netdev);
1278
1279	free_percpu(np->rx_stats);
1280	free_percpu(np->tx_stats);
1281	free_netdev(netdev);
1282}
1283
1284static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1285{
1286	int err;
1287	struct net_device *netdev;
1288	struct netfront_info *np;
1289
1290	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1291	if (!netdev)
1292		return ERR_PTR(-ENOMEM);
1293
1294	np                   = netdev_priv(netdev);
1295	np->xbdev            = dev;
1296
1297	np->queues = NULL;
1298
1299	err = -ENOMEM;
1300	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1301	if (np->rx_stats == NULL)
1302		goto exit;
1303	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1304	if (np->tx_stats == NULL)
1305		goto exit;
1306
1307	netdev->netdev_ops	= &xennet_netdev_ops;
1308
1309	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1310				  NETIF_F_GSO_ROBUST;
1311	netdev->hw_features	= NETIF_F_SG |
1312				  NETIF_F_IPV6_CSUM |
1313				  NETIF_F_TSO | NETIF_F_TSO6;
1314
 1315	/*
 1316	 * Assume that all hw features are available for now. This set
 1317	 * will be adjusted by the call to netdev_update_features() in
 1318	 * xennet_connect() which is the earliest point where we can
 1319	 * negotiate with the backend regarding supported features.
 1320	 */
1321	netdev->features |= netdev->hw_features;
1322
1323	netdev->ethtool_ops = &xennet_ethtool_ops;
1324	netdev->min_mtu = ETH_MIN_MTU;
1325	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1326	SET_NETDEV_DEV(netdev, &dev->dev);
1327
1328	np->netdev = netdev;
1329
1330	netif_carrier_off(netdev);
1331
1332	xenbus_switch_state(dev, XenbusStateInitialising);
1333	return netdev;
1334
1335 exit:
1336	xennet_free_netdev(netdev);
1337	return ERR_PTR(err);
1338}
1339
1340/**
1341 * Entry point to this code when a new device is created.  Allocate the basic
1342 * structures and the ring buffers for communication with the backend, and
1343 * inform the backend of the appropriate details for those.
1344 */
1345static int netfront_probe(struct xenbus_device *dev,
1346			  const struct xenbus_device_id *id)
1347{
1348	int err;
1349	struct net_device *netdev;
1350	struct netfront_info *info;
1351
1352	netdev = xennet_create_dev(dev);
1353	if (IS_ERR(netdev)) {
1354		err = PTR_ERR(netdev);
1355		xenbus_dev_fatal(dev, err, "creating netdev");
1356		return err;
1357	}
1358
1359	info = netdev_priv(netdev);
1360	dev_set_drvdata(&dev->dev, info);
1361#ifdef CONFIG_SYSFS
1362	info->netdev->sysfs_groups[0] = &xennet_dev_group;
1363#endif
1364
1365	return 0;
1366}
1367
1368static void xennet_end_access(int ref, void *page)
1369{
1370	/* This frees the page as a side-effect */
1371	if (ref != GRANT_INVALID_REF)
1372		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1373}
1374
1375static void xennet_disconnect_backend(struct netfront_info *info)
1376{
1377	unsigned int i = 0;
1378	unsigned int num_queues = info->netdev->real_num_tx_queues;
1379
1380	netif_carrier_off(info->netdev);
1381
1382	for (i = 0; i < num_queues && info->queues; ++i) {
1383		struct netfront_queue *queue = &info->queues[i];
1384
1385		del_timer_sync(&queue->rx_refill_timer);
1386
1387		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1388			unbind_from_irqhandler(queue->tx_irq, queue);
1389		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1390			unbind_from_irqhandler(queue->tx_irq, queue);
1391			unbind_from_irqhandler(queue->rx_irq, queue);
1392		}
1393		queue->tx_evtchn = queue->rx_evtchn = 0;
1394		queue->tx_irq = queue->rx_irq = 0;
1395
1396		if (netif_running(info->netdev))
1397			napi_synchronize(&queue->napi);
1398
1399		xennet_release_tx_bufs(queue);
1400		xennet_release_rx_bufs(queue);
1401		gnttab_free_grant_references(queue->gref_tx_head);
1402		gnttab_free_grant_references(queue->gref_rx_head);
1403
1404		/* End access and free the pages */
1405		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1406		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1407
1408		queue->tx_ring_ref = GRANT_INVALID_REF;
1409		queue->rx_ring_ref = GRANT_INVALID_REF;
1410		queue->tx.sring = NULL;
1411		queue->rx.sring = NULL;
1412	}
1413}
1414
1415/**
1416 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1417 * driver restart.  We tear down our netif structure and recreate it, but
1418 * leave the device-layer structures intact so that this is transparent to the
1419 * rest of the kernel.
1420 */
1421static int netfront_resume(struct xenbus_device *dev)
1422{
1423	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1424
1425	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1426
1427	xennet_disconnect_backend(info);
1428	return 0;
1429}
1430
1431static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1432{
1433	char *s, *e, *macstr;
1434	int i;
1435
1436	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1437	if (IS_ERR(macstr))
1438		return PTR_ERR(macstr);
1439
1440	for (i = 0; i < ETH_ALEN; i++) {
1441		mac[i] = simple_strtoul(s, &e, 16);
1442		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1443			kfree(macstr);
1444			return -ENOENT;
1445		}
1446		s = e+1;
1447	}
1448
1449	kfree(macstr);
1450	return 0;
1451}
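/*
 * The "mac" node is expected to hold six colon-separated hex octets, e.g.
 * "00:16:3e:12:34:56"; anything that does not parse that way makes this
 * return -ENOENT.
 */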
1452
1453static int setup_netfront_single(struct netfront_queue *queue)
1454{
1455	int err;
1456
1457	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1458	if (err < 0)
1459		goto fail;
1460
1461	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1462					xennet_interrupt,
1463					0, queue->info->netdev->name, queue);
1464	if (err < 0)
1465		goto bind_fail;
1466	queue->rx_evtchn = queue->tx_evtchn;
1467	queue->rx_irq = queue->tx_irq = err;
1468
1469	return 0;
1470
1471bind_fail:
1472	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1473	queue->tx_evtchn = 0;
1474fail:
1475	return err;
1476}
1477
1478static int setup_netfront_split(struct netfront_queue *queue)
1479{
1480	int err;
1481
1482	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1483	if (err < 0)
1484		goto fail;
1485	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1486	if (err < 0)
1487		goto alloc_rx_evtchn_fail;
1488
1489	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1490		 "%s-tx", queue->name);
1491	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1492					xennet_tx_interrupt,
1493					0, queue->tx_irq_name, queue);
1494	if (err < 0)
1495		goto bind_tx_fail;
1496	queue->tx_irq = err;
1497
1498	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1499		 "%s-rx", queue->name);
1500	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1501					xennet_rx_interrupt,
1502					0, queue->rx_irq_name, queue);
1503	if (err < 0)
1504		goto bind_rx_fail;
1505	queue->rx_irq = err;
1506
1507	return 0;
1508
1509bind_rx_fail:
1510	unbind_from_irqhandler(queue->tx_irq, queue);
1511	queue->tx_irq = 0;
1512bind_tx_fail:
1513	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1514	queue->rx_evtchn = 0;
1515alloc_rx_evtchn_fail:
1516	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1517	queue->tx_evtchn = 0;
1518fail:
1519	return err;
1520}
1521
1522static int setup_netfront(struct xenbus_device *dev,
1523			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1524{
1525	struct xen_netif_tx_sring *txs;
1526	struct xen_netif_rx_sring *rxs;
1527	grant_ref_t gref;
1528	int err;
1529
1530	queue->tx_ring_ref = GRANT_INVALID_REF;
1531	queue->rx_ring_ref = GRANT_INVALID_REF;
1532	queue->rx.sring = NULL;
1533	queue->tx.sring = NULL;
1534
1535	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1536	if (!txs) {
1537		err = -ENOMEM;
1538		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1539		goto fail;
1540	}
1541	SHARED_RING_INIT(txs);
1542	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1543
1544	err = xenbus_grant_ring(dev, txs, 1, &gref);
1545	if (err < 0)
1546		goto grant_tx_ring_fail;
1547	queue->tx_ring_ref = gref;
1548
1549	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1550	if (!rxs) {
1551		err = -ENOMEM;
1552		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1553		goto alloc_rx_ring_fail;
1554	}
1555	SHARED_RING_INIT(rxs);
1556	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1557
1558	err = xenbus_grant_ring(dev, rxs, 1, &gref);
1559	if (err < 0)
1560		goto grant_rx_ring_fail;
1561	queue->rx_ring_ref = gref;
1562
1563	if (feature_split_evtchn)
1564		err = setup_netfront_split(queue);
 1565	/* Set up a single event channel if
 1566	 *  a) feature-split-event-channels == 0, or
 1567	 *  b) feature-split-event-channels == 1 but setting it up failed.
1568	 */
1569	if (!feature_split_evtchn || (feature_split_evtchn && err))
1570		err = setup_netfront_single(queue);
1571
1572	if (err)
1573		goto alloc_evtchn_fail;
1574
1575	return 0;
1576
 1577	/* If we fail to set up netfront, it is safe to just revoke access to
 1578	 * granted pages because the backend is not accessing them at this point.
1579	 */
1580alloc_evtchn_fail:
1581	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1582grant_rx_ring_fail:
1583	free_page((unsigned long)rxs);
1584alloc_rx_ring_fail:
1585	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1586grant_tx_ring_fail:
1587	free_page((unsigned long)txs);
1588fail:
1589	return err;
1590}
1591
1592/* Queue-specific initialisation
1593 * This used to be done in xennet_create_dev() but must now
1594 * be run per-queue.
1595 */
1596static int xennet_init_queue(struct netfront_queue *queue)
1597{
1598	unsigned short i;
1599	int err = 0;
1600
1601	spin_lock_init(&queue->tx_lock);
1602	spin_lock_init(&queue->rx_lock);
1603
1604	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
1605
1606	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
1607		 queue->info->netdev->name, queue->id);
1608
1609	/* Initialise tx_skbs as a free chain containing every entry. */
1610	queue->tx_skb_freelist = 0;
1611	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1612		skb_entry_set_link(&queue->tx_skbs[i], i+1);
1613		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1614		queue->grant_tx_page[i] = NULL;
1615	}
1616
1617	/* Clear out rx_skbs */
1618	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1619		queue->rx_skbs[i] = NULL;
1620		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1621	}
1622
1623	/* A grant for every tx ring slot */
1624	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1625					  &queue->gref_tx_head) < 0) {
1626		pr_alert("can't alloc tx grant refs\n");
1627		err = -ENOMEM;
1628		goto exit;
1629	}
1630
1631	/* A grant for every rx ring slot */
1632	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1633					  &queue->gref_rx_head) < 0) {
1634		pr_alert("can't alloc rx grant refs\n");
1635		err = -ENOMEM;
1636		goto exit_free_tx;
1637	}
1638
1639	return 0;
1640
1641 exit_free_tx:
1642	gnttab_free_grant_references(queue->gref_tx_head);
1643 exit:
1644	return err;
1645}
1646
1647static int write_queue_xenstore_keys(struct netfront_queue *queue,
1648			   struct xenbus_transaction *xbt, int write_hierarchical)
1649{
1650	/* Write the queue-specific keys into XenStore in the traditional
1651	 * way for a single queue, or in a queue subkeys for multiple
1652	 * queues.
1653	 */
1654	struct xenbus_device *dev = queue->info->xbdev;
1655	int err;
1656	const char *message;
1657	char *path;
1658	size_t pathsize;
1659
1660	/* Choose the correct place to write the keys */
1661	if (write_hierarchical) {
1662		pathsize = strlen(dev->nodename) + 10;
1663		path = kzalloc(pathsize, GFP_KERNEL);
1664		if (!path) {
1665			err = -ENOMEM;
1666			message = "out of memory while writing ring references";
1667			goto error;
1668		}
1669		snprintf(path, pathsize, "%s/queue-%u",
1670				dev->nodename, queue->id);
1671	} else {
1672		path = (char *)dev->nodename;
1673	}
1674
1675	/* Write ring references */
1676	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1677			queue->tx_ring_ref);
1678	if (err) {
1679		message = "writing tx-ring-ref";
1680		goto error;
1681	}
1682
1683	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1684			queue->rx_ring_ref);
1685	if (err) {
1686		message = "writing rx-ring-ref";
1687		goto error;
1688	}
1689
 1690	/* Write event channels, taking into account both shared
1691	 * and split event channel scenarios.
1692	 */
1693	if (queue->tx_evtchn == queue->rx_evtchn) {
1694		/* Shared event channel */
1695		err = xenbus_printf(*xbt, path,
1696				"event-channel", "%u", queue->tx_evtchn);
1697		if (err) {
1698			message = "writing event-channel";
1699			goto error;
1700		}
1701	} else {
1702		/* Split event channels */
1703		err = xenbus_printf(*xbt, path,
1704				"event-channel-tx", "%u", queue->tx_evtchn);
1705		if (err) {
1706			message = "writing event-channel-tx";
1707			goto error;
1708		}
1709
1710		err = xenbus_printf(*xbt, path,
1711				"event-channel-rx", "%u", queue->rx_evtchn);
1712		if (err) {
1713			message = "writing event-channel-rx";
1714			goto error;
1715		}
1716	}
1717
1718	if (write_hierarchical)
1719		kfree(path);
1720	return 0;
1721
1722error:
1723	if (write_hierarchical)
1724		kfree(path);
1725	xenbus_dev_fatal(dev, err, "%s", message);
1726	return err;
1727}
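/*
 * Resulting XenStore layout (illustrative): with a single queue the keys sit
 * directly under the device node, e.g. .../tx-ring-ref and .../event-channel;
 * with multiple queues each queue gets its own subdirectory, e.g.
 * .../queue-0/tx-ring-ref and .../queue-0/event-channel-tx (the latter only
 * when split event channels are used).
 */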
1728
1729static void xennet_destroy_queues(struct netfront_info *info)
1730{
1731	unsigned int i;
1732
1733	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1734		struct netfront_queue *queue = &info->queues[i];
1735
1736		if (netif_running(info->netdev))
1737			napi_disable(&queue->napi);
1738		netif_napi_del(&queue->napi);
1739	}
1740
1741	kfree(info->queues);
1742	info->queues = NULL;
1743}
1744
1745static int xennet_create_queues(struct netfront_info *info,
1746				unsigned int *num_queues)
1747{
1748	unsigned int i;
1749	int ret;
1750
1751	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
1752			       GFP_KERNEL);
1753	if (!info->queues)
1754		return -ENOMEM;
1755
1756	for (i = 0; i < *num_queues; i++) {
1757		struct netfront_queue *queue = &info->queues[i];
1758
1759		queue->id = i;
1760		queue->info = info;
1761
1762		ret = xennet_init_queue(queue);
1763		if (ret < 0) {
1764			dev_warn(&info->xbdev->dev,
1765				 "only created %d queues\n", i);
1766			*num_queues = i;
1767			break;
1768		}
1769
1770		netif_napi_add(queue->info->netdev, &queue->napi,
1771			       xennet_poll, 64);
1772		if (netif_running(info->netdev))
1773			napi_enable(&queue->napi);
1774	}
1775
1776	netif_set_real_num_tx_queues(info->netdev, *num_queues);
1777
1778	if (*num_queues == 0) {
1779		dev_err(&info->xbdev->dev, "no queues\n");
1780		return -EINVAL;
1781	}
1782	return 0;
1783}
1784
1785/* Common code used when first setting up, and when resuming. */
1786static int talk_to_netback(struct xenbus_device *dev,
1787			   struct netfront_info *info)
1788{
1789	const char *message;
1790	struct xenbus_transaction xbt;
1791	int err;
1792	unsigned int feature_split_evtchn;
1793	unsigned int i = 0;
1794	unsigned int max_queues = 0;
1795	struct netfront_queue *queue = NULL;
1796	unsigned int num_queues = 1;
1797
1798	info->netdev->irq = 0;
1799
1800	/* Check if backend supports multiple queues */
1801	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1802					  "multi-queue-max-queues", 1);
1803	num_queues = min(max_queues, xennet_max_queues);
1804
1805	/* Check feature-split-event-channels */
1806	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
1807					"feature-split-event-channels", 0);
1808
1809	/* Read mac addr. */
1810	err = xen_net_read_mac(dev, info->netdev->dev_addr);
1811	if (err) {
1812		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1813		goto out;
1814	}
1815
1816	rtnl_lock();
1817	if (info->queues)
1818		xennet_destroy_queues(info);
1819
1820	err = xennet_create_queues(info, &num_queues);
1821	if (err < 0) {
1822		xenbus_dev_fatal(dev, err, "creating queues");
1823		kfree(info->queues);
1824		info->queues = NULL;
1825		goto out;
1826	}
1827	rtnl_unlock();
1828
1829	/* Create shared ring, alloc event channel -- for each queue */
1830	for (i = 0; i < num_queues; ++i) {
1831		queue = &info->queues[i];
1832		err = setup_netfront(dev, queue, feature_split_evtchn);
1833		if (err)
1834			goto destroy_ring;
1835	}
1836
1837again:
1838	err = xenbus_transaction_start(&xbt);
1839	if (err) {
1840		xenbus_dev_fatal(dev, err, "starting transaction");
1841		goto destroy_ring;
1842	}
1843
1844	if (xenbus_exists(XBT_NIL,
1845			  info->xbdev->otherend, "multi-queue-max-queues")) {
1846		/* Write the number of queues */
1847		err = xenbus_printf(xbt, dev->nodename,
1848				    "multi-queue-num-queues", "%u", num_queues);
1849		if (err) {
1850			message = "writing multi-queue-num-queues";
1851			goto abort_transaction_no_dev_fatal;
1852		}
1853	}
1854
1855	if (num_queues == 1) {
1856		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1857		if (err)
1858			goto abort_transaction_no_dev_fatal;
1859	} else {
1860		/* Write the keys for each queue */
1861		for (i = 0; i < num_queues; ++i) {
1862			queue = &info->queues[i];
1863			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1864			if (err)
1865				goto abort_transaction_no_dev_fatal;
1866		}
1867	}
1868
1869	/* The remaining keys are not queue-specific */
1870	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1871			    1);
1872	if (err) {
1873		message = "writing request-rx-copy";
1874		goto abort_transaction;
1875	}
1876
1877	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1878	if (err) {
1879		message = "writing feature-rx-notify";
1880		goto abort_transaction;
1881	}
1882
1883	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1884	if (err) {
1885		message = "writing feature-sg";
1886		goto abort_transaction;
1887	}
1888
1889	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1890	if (err) {
1891		message = "writing feature-gso-tcpv4";
1892		goto abort_transaction;
1893	}
1894
1895	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1896	if (err) {
1897		message = "writing feature-gso-tcpv6";
1898		goto abort_transaction;
1899	}
1900
1901	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1902			   "1");
1903	if (err) {
1904		message = "writing feature-ipv6-csum-offload";
1905		goto abort_transaction;
1906	}
1907
1908	err = xenbus_transaction_end(xbt, 0);
1909	if (err) {
1910		if (err == -EAGAIN)
1911			goto again;
1912		xenbus_dev_fatal(dev, err, "completing transaction");
1913		goto destroy_ring;
1914	}
1915
1916	return 0;
1917
1918 abort_transaction:
1919	xenbus_dev_fatal(dev, err, "%s", message);
1920abort_transaction_no_dev_fatal:
1921	xenbus_transaction_end(xbt, 1);
 
1922 destroy_ring:
1923	xennet_disconnect_backend(info);
1924	rtnl_lock();
1925	xennet_destroy_queues(info);
1926 out:
1927	rtnl_unlock();
1928	device_unregister(&dev->dev);
1929	return err;
1930}
1931
1932static int xennet_connect(struct net_device *dev)
1933{
1934	struct netfront_info *np = netdev_priv(dev);
1935	unsigned int num_queues = 0;
1936	int err;
1937	unsigned int j = 0;
1938	struct netfront_queue *queue = NULL;
 
1939
1940	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
1941		dev_info(&dev->dev,
1942			 "backend does not support copying receive path\n");
1943		return -ENODEV;
1944	}
1945
1946	err = talk_to_netback(np->xbdev, np);
1947	if (err)
1948		return err;
1949
1950	/* talk_to_netback() sets the correct number of queues */
1951	num_queues = dev->real_num_tx_queues;
1952
1953	rtnl_lock();
1954	netdev_update_features(dev);
1955	rtnl_unlock();
1956
1957	if (dev->reg_state == NETREG_UNINITIALIZED) {
1958		err = register_netdev(dev);
1959		if (err) {
1960			pr_warn("%s: register_netdev err=%d\n", __func__, err);
1961			device_unregister(&np->xbdev->dev);
1962			return err;
1963		}
1964	}
1965
1966	/*
1967	 * All public and private state should now be sane.  Get
1968	 * ready to start sending and receiving packets and give the driver
1969	 * domain a kick because we've probably just requeued some
1970	 * packets.
1971	 */
1972	netif_carrier_on(np->netdev);
1973	for (j = 0; j < num_queues; ++j) {
1974		queue = &np->queues[j];
1975
1976		notify_remote_via_irq(queue->tx_irq);
1977		if (queue->tx_irq != queue->rx_irq)
1978			notify_remote_via_irq(queue->rx_irq);
1979
1980		spin_lock_irq(&queue->tx_lock);
1981		xennet_tx_buf_gc(queue);
1982		spin_unlock_irq(&queue->tx_lock);
1983
1984		spin_lock_bh(&queue->rx_lock);
1985		xennet_alloc_rx_buffers(queue);
1986		spin_unlock_bh(&queue->rx_lock);
1987	}
1988
1989	return 0;
1990}
1991
1992/**
1993 * Callback received when the backend's state changes.
1994 */
1995static void netback_changed(struct xenbus_device *dev,
1996			    enum xenbus_state backend_state)
1997{
1998	struct netfront_info *np = dev_get_drvdata(&dev->dev);
1999	struct net_device *netdev = np->netdev;
2000
2001	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2002
2003	switch (backend_state) {
2004	case XenbusStateInitialising:
2005	case XenbusStateInitialised:
2006	case XenbusStateReconfiguring:
2007	case XenbusStateReconfigured:
2008		break;
2009
2010	case XenbusStateUnknown:
2011		wake_up_all(&module_unload_q);
2012		break;
2013
2014	case XenbusStateInitWait:
2015		if (dev->state != XenbusStateInitialising)
2016			break;
2017		if (xennet_connect(netdev) != 0)
2018			break;
2019		xenbus_switch_state(dev, XenbusStateConnected);
2020		break;
2021
2022	case XenbusStateConnected:
2023		netdev_notify_peers(netdev);
2024		break;
2025
2026	case XenbusStateClosed:
2027		wake_up_all(&module_unload_q);
2028		if (dev->state == XenbusStateClosed)
2029			break;
2030		/* Missed the backend's CLOSING state -- fallthrough */
2031	case XenbusStateClosing:
2032		wake_up_all(&module_unload_q);
2033		xenbus_frontend_closed(dev);
2034		break;
2035	}
2036}
2037
2038static const struct xennet_stat {
2039	char name[ETH_GSTRING_LEN];
2040	u16 offset;
2041} xennet_stats[] = {
2042	{
2043		"rx_gso_checksum_fixup",
2044		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2045	},
2046};
2047
2048static int xennet_get_sset_count(struct net_device *dev, int string_set)
2049{
2050	switch (string_set) {
2051	case ETH_SS_STATS:
2052		return ARRAY_SIZE(xennet_stats);
2053	default:
2054		return -EINVAL;
2055	}
2056}
2057
2058static void xennet_get_ethtool_stats(struct net_device *dev,
2059				     struct ethtool_stats *stats, u64 * data)
2060{
2061	void *np = netdev_priv(dev);
2062	int i;
2063
2064	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2065		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2066}
2067
2068static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2069{
2070	int i;
2071
2072	switch (stringset) {
2073	case ETH_SS_STATS:
2074		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2075			memcpy(data + i * ETH_GSTRING_LEN,
2076			       xennet_stats[i].name, ETH_GSTRING_LEN);
2077		break;
2078	}
2079}
2080
2081static const struct ethtool_ops xennet_ethtool_ops =
2082{
2083	.get_link = ethtool_op_get_link,
2084
2085	.get_sset_count = xennet_get_sset_count,
2086	.get_ethtool_stats = xennet_get_ethtool_stats,
2087	.get_strings = xennet_get_strings,
2088};
2089
2090#ifdef CONFIG_SYSFS
2091static ssize_t show_rxbuf(struct device *dev,
2092			  struct device_attribute *attr, char *buf)
2093{
2094	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2095}
2096
2097static ssize_t store_rxbuf(struct device *dev,
2098			   struct device_attribute *attr,
2099			   const char *buf, size_t len)
2100{
2101	char *endp;
2102	unsigned long target;
2103
2104	if (!capable(CAP_NET_ADMIN))
2105		return -EPERM;
2106
2107	target = simple_strtoul(buf, &endp, 0);
2108	if (endp == buf)
2109		return -EBADMSG;
2110
2111	/* rxbuf_min and rxbuf_max are no longer configurable. */
2112
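	/* The value is parsed but otherwise ignored; the attribute remains
	 * only for compatibility with existing tooling.
	 */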
2113	return len;
2114}
2115
2116static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2117static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2118static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2119
2120static struct attribute *xennet_dev_attrs[] = {
2121	&dev_attr_rxbuf_min.attr,
2122	&dev_attr_rxbuf_max.attr,
2123	&dev_attr_rxbuf_cur.attr,
2124	NULL
2125};
2126
2127static const struct attribute_group xennet_dev_group = {
2128	.attrs = xennet_dev_attrs
2129};
2130#endif /* CONFIG_SYSFS */
2131
2132static int xennet_remove(struct xenbus_device *dev)
2133{
2134	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2135
2136	dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
2137
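	/* Coordinate an orderly shutdown with the backend: request Closing,
	 * wait for it to follow, then move to Closed and wait again.
	 */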
2138	if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
2139		xenbus_switch_state(dev, XenbusStateClosing);
2140		wait_event(module_unload_q,
2141			   xenbus_read_driver_state(dev->otherend) ==
2142			   XenbusStateClosing ||
2143			   xenbus_read_driver_state(dev->otherend) ==
2144			   XenbusStateUnknown);
2145
2146		xenbus_switch_state(dev, XenbusStateClosed);
2147		wait_event(module_unload_q,
2148			   xenbus_read_driver_state(dev->otherend) ==
2149			   XenbusStateClosed ||
2150			   xenbus_read_driver_state(dev->otherend) ==
2151			   XenbusStateUnknown);
2152	}
2153
2154	xennet_disconnect_backend(info);
2155
2156	if (info->netdev->reg_state == NETREG_REGISTERED)
2157		unregister_netdev(info->netdev);
2158
2159	if (info->queues) {
2160		rtnl_lock();
2161		xennet_destroy_queues(info);
2162		rtnl_unlock();
2163	}
2164	xennet_free_netdev(info->netdev);
2165
2166	return 0;
2167}
2168
2169static const struct xenbus_device_id netfront_ids[] = {
2170	{ "vif" },
2171	{ "" }
2172};
2173
2174static struct xenbus_driver netfront_driver = {
2175	.ids = netfront_ids,
2176	.probe = netfront_probe,
2177	.remove = xennet_remove,
2178	.resume = netfront_resume,
2179	.otherend_changed = netback_changed,
2180};
2181
2182static int __init netif_init(void)
2183{
2184	if (!xen_domain())
2185		return -ENODEV;
2186
2187	if (!xen_has_pv_nic_devices())
2188		return -ENODEV;
2189
2190	pr_info("Initialising Xen virtual ethernet driver\n");
2191
2192	/* Allow as many queues as there are CPUs but max. 8 if user has not
2193	 * specified a value.
2194	 */
2195	if (xennet_max_queues == 0)
2196		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2197					  num_online_cpus());
2198
2199	return xenbus_register_frontend(&netfront_driver);
2200}
2201module_init(netif_init);
2202
2203
2204static void __exit netif_exit(void)
2205{
2206	xenbus_unregister_driver(&netfront_driver);
2207}
2208module_exit(netif_exit);
2209
2210MODULE_DESCRIPTION("Xen virtual network device frontend");
2211MODULE_LICENSE("GPL");
2212MODULE_ALIAS("xen:vif");
2213MODULE_ALIAS("xennet");
v3.15
   1/*
   2 * Virtual network driver for conversing with remote driver backends.
   3 *
   4 * Copyright (c) 2002-2005, K A Fraser
   5 * Copyright (c) 2005, XenSource Ltd
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version 2
   9 * as published by the Free Software Foundation; or, when distributed
  10 * separately from the Linux kernel or incorporated into other
  11 * software packages, subject to the following license:
  12 *
  13 * Permission is hereby granted, free of charge, to any person obtaining a copy
  14 * of this source file (the "Software"), to deal in the Software without
  15 * restriction, including without limitation the rights to use, copy, modify,
  16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  17 * and to permit persons to whom the Software is furnished to do so, subject to
  18 * the following conditions:
  19 *
  20 * The above copyright notice and this permission notice shall be included in
  21 * all copies or substantial portions of the Software.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  29 * IN THE SOFTWARE.
  30 */
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47
  48#include <asm/xen/page.h>
  49#include <xen/xen.h>
  50#include <xen/xenbus.h>
  51#include <xen/events.h>
  52#include <xen/page.h>
  53#include <xen/platform_pci.h>
  54#include <xen/grant_table.h>
  55
  56#include <xen/interface/io/netif.h>
  57#include <xen/interface/memory.h>
  58#include <xen/interface/grant_table.h>
  59
  60static const struct ethtool_ops xennet_ethtool_ops;
  61
  62struct netfront_cb {
  63	int pull_to;
  64};
  65
  66#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  67
  68#define RX_COPY_THRESHOLD 256
  69
  70#define GRANT_INVALID_REF	0
  71
  72#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
  73#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
  74#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
  75
  76struct netfront_stats {
  77	u64			rx_packets;
  78	u64			tx_packets;
  79	u64			rx_bytes;
  80	u64			tx_bytes;
  81	struct u64_stats_sync	syncp;
  82};
  83
  84struct netfront_info {
  85	struct list_head list;
  86	struct net_device *netdev;
  87
  88	struct napi_struct napi;
  89
  90	/* Split event channels support, tx_* == rx_* when using
  91	 * single event channel.
  92	 */
  93	unsigned int tx_evtchn, rx_evtchn;
  94	unsigned int tx_irq, rx_irq;
  95	/* Only used when split event channels support is enabled */
  96	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
  97	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
  98
  99	struct xenbus_device *xbdev;
 100
 101	spinlock_t   tx_lock;
 102	struct xen_netif_tx_front_ring tx;
 103	int tx_ring_ref;
 104
 105	/*
 106	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 107	 * are linked from tx_skb_freelist through skb_entry.link.
 108	 *
 109	 *  NB. Freelist index entries are always going to be less than
 110	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
 111	 *  greater than PAGE_OFFSET: we use this property to distinguish
 112	 *  them.
 113	 */
 114	union skb_entry {
 115		struct sk_buff *skb;
 116		unsigned long link;
 117	} tx_skbs[NET_TX_RING_SIZE];
 118	grant_ref_t gref_tx_head;
 119	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 120	struct page *grant_tx_page[NET_TX_RING_SIZE];
 121	unsigned tx_skb_freelist;
 122
 123	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 124	struct xen_netif_rx_front_ring rx;
 125	int rx_ring_ref;
 126
 127	/* Receive-ring batched refills. */
 128#define RX_MIN_TARGET 8
 129#define RX_DFL_MIN_TARGET 64
 130#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
 131	unsigned rx_min_target, rx_max_target, rx_target;
 132	struct sk_buff_head rx_batch;
 133
 134	struct timer_list rx_refill_timer;
 135
 136	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 137	grant_ref_t gref_rx_head;
 138	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 139
 140	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 141	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 142	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
 143
 144	/* Statistics */
 145	struct netfront_stats __percpu *stats;
 
 146
 147	unsigned long rx_gso_checksum_fixup;
 148};
 149
 150struct netfront_rx_info {
 151	struct xen_netif_rx_response rx;
 152	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 153};
 154
 155static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 156{
 157	list->link = id;
 158}
 159
 160static int skb_entry_is_link(const union skb_entry *list)
 161{
 162	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
 163	return (unsigned long)list->skb < PAGE_OFFSET;
 164}
 165
 166/*
 167 * Access macros for acquiring/freeing slots in tx_skbs[].
 168 */
 169
 170static void add_id_to_freelist(unsigned *head, union skb_entry *list,
 171			       unsigned short id)
 172{
 173	skb_entry_set_link(&list[id], *head);
 174	*head = id;
 175}
 176
 177static unsigned short get_id_from_freelist(unsigned *head,
 178					   union skb_entry *list)
 179{
 180	unsigned int id = *head;
 181	*head = list[id].link;
 182	return id;
 183}
 184
 185static int xennet_rxidx(RING_IDX idx)
 186{
 187	return idx & (NET_RX_RING_SIZE - 1);
 188}
 189
 190static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
 191					 RING_IDX ri)
 192{
 193	int i = xennet_rxidx(ri);
 194	struct sk_buff *skb = np->rx_skbs[i];
 195	np->rx_skbs[i] = NULL;
 196	return skb;
 197}
 198
 199static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
 200					    RING_IDX ri)
 201{
 202	int i = xennet_rxidx(ri);
 203	grant_ref_t ref = np->grant_rx_ref[i];
 204	np->grant_rx_ref[i] = GRANT_INVALID_REF;
 205	return ref;
 206}
 207
 208#ifdef CONFIG_SYSFS
 209static int xennet_sysfs_addif(struct net_device *netdev);
 210static void xennet_sysfs_delif(struct net_device *netdev);
 211#else /* !CONFIG_SYSFS */
 212#define xennet_sysfs_addif(dev) (0)
 213#define xennet_sysfs_delif(dev) do { } while (0)
 214#endif
 215
 216static bool xennet_can_sg(struct net_device *dev)
 217{
 218	return dev->features & NETIF_F_SG;
 219}
 220
 221
 222static void rx_refill_timeout(unsigned long data)
 223{
 224	struct net_device *dev = (struct net_device *)data;
 225	struct netfront_info *np = netdev_priv(dev);
 226	napi_schedule(&np->napi);
 227}
 228
 229static int netfront_tx_slot_available(struct netfront_info *np)
 230{
 231	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
 232		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
 233}
 234
 235static void xennet_maybe_wake_tx(struct net_device *dev)
 236{
 237	struct netfront_info *np = netdev_priv(dev);
 
 238
 239	if (unlikely(netif_queue_stopped(dev)) &&
 240	    netfront_tx_slot_available(np) &&
 241	    likely(netif_running(dev)))
 242		netif_wake_queue(dev);
 243}
 244
 245static void xennet_alloc_rx_buffers(struct net_device *dev)
 
 246{
 247	unsigned short id;
 248	struct netfront_info *np = netdev_priv(dev);
 249	struct sk_buff *skb;
 250	struct page *page;
 251	int i, batch_target, notify;
 252	RING_IDX req_prod = np->rx.req_prod_pvt;
 253	grant_ref_t ref;
 254	unsigned long pfn;
 255	void *vaddr;
 256	struct xen_netif_rx_request *req;
 257
 258	if (unlikely(!netif_carrier_ok(dev)))
 259		return;
 260
 261	/*
 262	 * Allocate skbuffs greedily, even though we batch updates to the
 263	 * receive ring. This creates a less bursty demand on the memory
 264	 * allocator, so should reduce the chance of failed allocation requests
 265	 * both for ourself and for other kernel subsystems.
 266	 */
 267	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
 268	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
 269		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
 270					 GFP_ATOMIC | __GFP_NOWARN);
 271		if (unlikely(!skb))
 272			goto no_skb;
 273
 274		/* Align IP header to a 16-byte boundary */
 275		skb_reserve(skb, NET_IP_ALIGN);
 276
 277		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 278		if (!page) {
 279			kfree_skb(skb);
 280no_skb:
 281			/* Could not allocate any skbuffs. Try again later. */
 282			mod_timer(&np->rx_refill_timer,
 283				  jiffies + (HZ/10));
 284
 285			/* Any skbuffs queued for refill? Force them out. */
 286			if (i != 0)
 287				goto refill;
 288			break;
 289		}
 290
 291		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 292		__skb_queue_tail(&np->rx_batch, skb);
 293	}
 294
 295	/* Is the batch large enough to be worthwhile? */
 296	if (i < (np->rx_target/2)) {
 297		if (req_prod > np->rx.sring->req_prod)
 298			goto push;
 299		return;
 300	}
 301
 302	/* Adjust our fill target if we risked running out of buffers. */
 303	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
 304	    ((np->rx_target *= 2) > np->rx_max_target))
 305		np->rx_target = np->rx_max_target;
 306
 307 refill:
 308	for (i = 0; ; i++) {
 309		skb = __skb_dequeue(&np->rx_batch);
 310		if (skb == NULL)
 311			break;
 
 312
 313		skb->dev = dev;
 314
 315		id = xennet_rxidx(req_prod + i);
 
 316
 317		BUG_ON(np->rx_skbs[id]);
 318		np->rx_skbs[id] = skb;
 
 319
 320		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
 321		BUG_ON((signed short)ref < 0);
 322		np->grant_rx_ref[id] = ref;
 323
 324		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 325		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 326
 327		req = RING_GET_REQUEST(&np->rx, req_prod + i);
 328		gnttab_grant_foreign_access_ref(ref,
 329						np->xbdev->otherend_id,
 330						pfn_to_mfn(pfn),
 331						0);
 332
 333		req->id = id;
 334		req->gref = ref;
 335	}
 336
 337	wmb();		/* barrier so backend sees requests */
 338
 339	/* Above is a suitable barrier to ensure backend will see requests. */
 340	np->rx.req_prod_pvt = req_prod + i;
 341 push:
 342	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
 343	if (notify)
 344		notify_remote_via_irq(np->rx_irq);
 345}
 346
 347static int xennet_open(struct net_device *dev)
 348{
 349	struct netfront_info *np = netdev_priv(dev);
 350
 351	napi_enable(&np->napi);
 
 352
 353	spin_lock_bh(&np->rx_lock);
 354	if (netif_carrier_ok(dev)) {
 355		xennet_alloc_rx_buffers(dev);
 356		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
 357		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
 358			napi_schedule(&np->napi);
 359	}
 360	spin_unlock_bh(&np->rx_lock);
 361
 362	netif_start_queue(dev);
 363
 364	return 0;
 365}
 366
 367static void xennet_tx_buf_gc(struct net_device *dev)
 368{
 369	RING_IDX cons, prod;
 370	unsigned short id;
 371	struct netfront_info *np = netdev_priv(dev);
 372	struct sk_buff *skb;
 
 373
 374	BUG_ON(!netif_carrier_ok(dev));
 375
 376	do {
 377		prod = np->tx.sring->rsp_prod;
 378		rmb(); /* Ensure we see responses up to 'rp'. */
 379
 380		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
 381			struct xen_netif_tx_response *txrsp;
 382
 383			txrsp = RING_GET_RESPONSE(&np->tx, cons);
 384			if (txrsp->status == XEN_NETIF_RSP_NULL)
 385				continue;
 386
 387			id  = txrsp->id;
 388			skb = np->tx_skbs[id].skb;
 389			if (unlikely(gnttab_query_foreign_access(
 390				np->grant_tx_ref[id]) != 0)) {
 391				pr_alert("%s: warning -- grant still in use by backend domain\n",
 392					 __func__);
 393				BUG();
 394			}
 395			gnttab_end_foreign_access_ref(
 396				np->grant_tx_ref[id], GNTMAP_readonly);
 397			gnttab_release_grant_reference(
 398				&np->gref_tx_head, np->grant_tx_ref[id]);
 399			np->grant_tx_ref[id] = GRANT_INVALID_REF;
 400			np->grant_tx_page[id] = NULL;
 401			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 402			dev_kfree_skb_irq(skb);
 403		}
 404
 405		np->tx.rsp_cons = prod;
 406
 407		/*
 408		 * Set a new event, then check for race with update of tx_cons.
 409		 * Note that it is essential to schedule a callback, no matter
 410		 * how few buffers are pending. Even if there is space in the
 411		 * transmit ring, higher layers may be blocked because too much
 412		 * data is outstanding: in such cases notification from Xen is
 413		 * likely to be the only kick that we'll get.
 414		 */
 415		np->tx.sring->rsp_event =
 416			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
 417		mb();		/* update shared area */
 418	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
 419
 420	xennet_maybe_wake_tx(dev);
 421}
 422
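/*
 * Grant the backend access to the rest of the skb: any part of the linear
 * header that spills past the first page, plus every fragment page, emitting
 * one tx request per page-sized chunk.
 */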
 423static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 424			      struct xen_netif_tx_request *tx)
 425{
 426	struct netfront_info *np = netdev_priv(dev);
 427	char *data = skb->data;
 428	unsigned long mfn;
 429	RING_IDX prod = np->tx.req_prod_pvt;
 430	int frags = skb_shinfo(skb)->nr_frags;
 431	unsigned int offset = offset_in_page(data);
 432	unsigned int len = skb_headlen(skb);
 433	unsigned int id;
 
 434	grant_ref_t ref;
 435	int i;
 436
 437	/* While the header overlaps a page boundary (including being
 438	   larger than a page), split it into page-sized chunks. */
 439	while (len > PAGE_SIZE - offset) {
 440		tx->size = PAGE_SIZE - offset;
 441		tx->flags |= XEN_NETTXF_more_data;
 442		len -= tx->size;
 443		data += tx->size;
 444		offset = 0;
 445
 446		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 447		np->tx_skbs[id].skb = skb_get(skb);
 448		tx = RING_GET_REQUEST(&np->tx, prod++);
 449		tx->id = id;
 450		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 451		BUG_ON((signed short)ref < 0);
 452
 453		mfn = virt_to_mfn(data);
 454		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 455						mfn, GNTMAP_readonly);
 456
 457		np->grant_tx_page[id] = virt_to_page(data);
 458		tx->gref = np->grant_tx_ref[id] = ref;
 459		tx->offset = offset;
 460		tx->size = len;
 461		tx->flags = 0;
 462	}
 463
 464	/* Grant backend access to each skb fragment page. */
 465	for (i = 0; i < frags; i++) {
 466		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 467		struct page *page = skb_frag_page(frag);
 468
 469		len = skb_frag_size(frag);
 470		offset = frag->page_offset;
 471
 472		/* Data must not cross a page boundary. */
 473		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
 474
 475		/* Skip unused frames from start of page */
 476		page += offset >> PAGE_SHIFT;
 477		offset &= ~PAGE_MASK;
 478
 479		while (len > 0) {
 480			unsigned long bytes;
 
 481
 482			BUG_ON(offset >= PAGE_SIZE);
 483
 484			bytes = PAGE_SIZE - offset;
 485			if (bytes > len)
 486				bytes = len;
 487
 488			tx->flags |= XEN_NETTXF_more_data;
 489
 490			id = get_id_from_freelist(&np->tx_skb_freelist,
 491						  np->tx_skbs);
 492			np->tx_skbs[id].skb = skb_get(skb);
 493			tx = RING_GET_REQUEST(&np->tx, prod++);
 494			tx->id = id;
 495			ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 496			BUG_ON((signed short)ref < 0);
 497
 498			mfn = pfn_to_mfn(page_to_pfn(page));
 499			gnttab_grant_foreign_access_ref(ref,
 500							np->xbdev->otherend_id,
 501							mfn, GNTMAP_readonly);
 502
 503			np->grant_tx_page[id] = page;
 504			tx->gref = np->grant_tx_ref[id] = ref;
 505			tx->offset = offset;
 506			tx->size = bytes;
 507			tx->flags = 0;
 508
 509			offset += bytes;
 510			len -= bytes;
 511
 512			/* Next frame */
 513			if (offset == PAGE_SIZE && len) {
 514				BUG_ON(!PageCompound(page));
 515				page++;
 516				offset = 0;
 517			}
 518		}
 519	}
 520
 521	np->tx.req_prod_pvt = prod;
 522}
 523
 524/*
 525 * Count how many ring slots are required to send the frags of this
 526 * skb. Each frag might be a compound page.
 527 */
 528static int xennet_count_skb_frag_slots(struct sk_buff *skb)
 529{
 530	int i, frags = skb_shinfo(skb)->nr_frags;
 531	int pages = 0;
 532
 533	for (i = 0; i < frags; i++) {
 534		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 535		unsigned long size = skb_frag_size(frag);
 536		unsigned long offset = frag->page_offset;
 537
 538		/* Skip unused frames from start of page */
 539		offset &= ~PAGE_MASK;
 540
 541		pages += PFN_UP(offset + size);
 542	}
 543
 544	return pages;
 545}
 546
 547static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 548{
 549	unsigned short id;
 550	struct netfront_info *np = netdev_priv(dev);
 551	struct netfront_stats *stats = this_cpu_ptr(np->stats);
 552	struct xen_netif_tx_request *tx;
 553	char *data = skb->data;
 554	RING_IDX i;
 555	grant_ref_t ref;
 556	unsigned long mfn;
 557	int notify;
 558	int slots;
 559	unsigned int offset = offset_in_page(data);
 560	unsigned int len = skb_headlen(skb);
 
 561	unsigned long flags;
 562
 563	/* If skb->len is too big for wire format, drop skb and alert
 564	 * user about misconfiguration.
 565	 */
 566	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 567		net_alert_ratelimited(
 568			"xennet: skb->len = %u, too big for wire format\n",
 569			skb->len);
 570		goto drop;
 571	}
 572
 573	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
 574		xennet_count_skb_frag_slots(skb);
 575	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
 576		net_alert_ratelimited(
 577			"xennet: skb rides the rocket: %d slots\n", slots);
 578		goto drop;
 579	}
 580
 581	spin_lock_irqsave(&np->tx_lock, flags);
 582
 583	if (unlikely(!netif_carrier_ok(dev) ||
 584		     (slots > 1 && !xennet_can_sg(dev)) ||
 585		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 586		spin_unlock_irqrestore(&np->tx_lock, flags);
 587		goto drop;
 588	}
 589
 590	i = np->tx.req_prod_pvt;
 591
 592	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 593	np->tx_skbs[id].skb = skb;
 594
 595	tx = RING_GET_REQUEST(&np->tx, i);
 596
 597	tx->id   = id;
 598	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 599	BUG_ON((signed short)ref < 0);
 600	mfn = virt_to_mfn(data);
 601	gnttab_grant_foreign_access_ref(
 602		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
 603	np->grant_tx_page[id] = virt_to_page(data);
 604	tx->gref = np->grant_tx_ref[id] = ref;
 605	tx->offset = offset;
 606	tx->size = len;
 607
 608	tx->flags = 0;
 609	if (skb->ip_summed == CHECKSUM_PARTIAL)
 610		/* local packet? */
 611		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
 612	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 613		/* remote but checksummed. */
 614		tx->flags |= XEN_NETTXF_data_validated;
 615
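	/* For GSO packets, add an extra-info request describing the
	 * segmentation parameters to the backend.
	 */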
 616	if (skb_shinfo(skb)->gso_size) {
 617		struct xen_netif_extra_info *gso;
 618
 619		gso = (struct xen_netif_extra_info *)
 620			RING_GET_REQUEST(&np->tx, ++i);
 621
 622		tx->flags |= XEN_NETTXF_extra_info;
 623
 624		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 625		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 626			XEN_NETIF_GSO_TYPE_TCPV6 :
 627			XEN_NETIF_GSO_TYPE_TCPV4;
 628		gso->u.gso.pad = 0;
 629		gso->u.gso.features = 0;
 630
 631		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 632		gso->flags = 0;
 633	}
 634
 635	np->tx.req_prod_pvt = i + 1;
 
 636
 637	xennet_make_frags(skb, dev, tx);
 638	tx->size = skb->len;
 639
 640	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
 641	if (notify)
 642		notify_remote_via_irq(np->tx_irq);
 643
 644	u64_stats_update_begin(&stats->syncp);
 645	stats->tx_bytes += skb->len;
 646	stats->tx_packets++;
 647	u64_stats_update_end(&stats->syncp);
 648
 649	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 650	xennet_tx_buf_gc(dev);
 651
 652	if (!netfront_tx_slot_available(np))
 653		netif_stop_queue(dev);
 654
 655	spin_unlock_irqrestore(&np->tx_lock, flags);
 656
 657	return NETDEV_TX_OK;
 658
 659 drop:
 660	dev->stats.tx_dropped++;
 661	dev_kfree_skb_any(skb);
 662	return NETDEV_TX_OK;
 663}
 664
 665static int xennet_close(struct net_device *dev)
 666{
 667	struct netfront_info *np = netdev_priv(dev);
 668	netif_stop_queue(np->netdev);
 669	napi_disable(&np->napi);
 670	return 0;
 671}
 672
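/* Re-post a receive slot (skb and its grant reference) on the rx ring. */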
 673static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
 674				grant_ref_t ref)
 675{
 676	int new = xennet_rxidx(np->rx.req_prod_pvt);
 677
 678	BUG_ON(np->rx_skbs[new]);
 679	np->rx_skbs[new] = skb;
 680	np->grant_rx_ref[new] = ref;
 681	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
 682	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
 683	np->rx.req_prod_pvt++;
 684}
 685
 686static int xennet_get_extras(struct netfront_info *np,
 687			     struct xen_netif_extra_info *extras,
 688			     RING_IDX rp)
 689
 690{
 691	struct xen_netif_extra_info *extra;
 692	struct device *dev = &np->netdev->dev;
 693	RING_IDX cons = np->rx.rsp_cons;
 694	int err = 0;
 695
 696	do {
 697		struct sk_buff *skb;
 698		grant_ref_t ref;
 699
 700		if (unlikely(cons + 1 == rp)) {
 701			if (net_ratelimit())
 702				dev_warn(dev, "Missing extra info\n");
 703			err = -EBADR;
 704			break;
 705		}
 706
 707		extra = (struct xen_netif_extra_info *)
 708			RING_GET_RESPONSE(&np->rx, ++cons);
 709
 710		if (unlikely(!extra->type ||
 711			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 712			if (net_ratelimit())
 713				dev_warn(dev, "Invalid extra type: %d\n",
 714					extra->type);
 715			err = -EINVAL;
 716		} else {
 717			memcpy(&extras[extra->type - 1], extra,
 718			       sizeof(*extra));
 719		}
 720
 721		skb = xennet_get_rx_skb(np, cons);
 722		ref = xennet_get_rx_ref(np, cons);
 723		xennet_move_rx_slot(np, skb, ref);
 724	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 725
 726	np->rx.rsp_cons = cons;
 727	return err;
 728}
 729
 730static int xennet_get_responses(struct netfront_info *np,
 731				struct netfront_rx_info *rinfo, RING_IDX rp,
 732				struct sk_buff_head *list)
 733{
 734	struct xen_netif_rx_response *rx = &rinfo->rx;
 735	struct xen_netif_extra_info *extras = rinfo->extras;
 736	struct device *dev = &np->netdev->dev;
 737	RING_IDX cons = np->rx.rsp_cons;
 738	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
 739	grant_ref_t ref = xennet_get_rx_ref(np, cons);
 740	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
 741	int slots = 1;
 742	int err = 0;
 743	unsigned long ret;
 744
 745	if (rx->flags & XEN_NETRXF_extra_info) {
 746		err = xennet_get_extras(np, extras, rp);
 747		cons = np->rx.rsp_cons;
 748	}
 749
 750	for (;;) {
 751		if (unlikely(rx->status < 0 ||
 752			     rx->offset + rx->status > PAGE_SIZE)) {
 753			if (net_ratelimit())
 754				dev_warn(dev, "rx->offset: %x, size: %u\n",
 755					 rx->offset, rx->status);
 756			xennet_move_rx_slot(np, skb, ref);
 757			err = -EINVAL;
 758			goto next;
 759		}
 760
 761		/*
 762		 * This definitely indicates a bug, either in this driver or in
 763		 * the backend driver. In future this should flag the bad
 764		 * situation to the system controller to reboot the backend.
 765		 */
 766		if (ref == GRANT_INVALID_REF) {
 767			if (net_ratelimit())
 768				dev_warn(dev, "Bad rx response id %d.\n",
 769					 rx->id);
 770			err = -EINVAL;
 771			goto next;
 772		}
 773
 774		ret = gnttab_end_foreign_access_ref(ref, 0);
 775		BUG_ON(!ret);
 776
 777		gnttab_release_grant_reference(&np->gref_rx_head, ref);
 778
 779		__skb_queue_tail(list, skb);
 780
 781next:
 782		if (!(rx->flags & XEN_NETRXF_more_data))
 783			break;
 784
 785		if (cons + slots == rp) {
 786			if (net_ratelimit())
 787				dev_warn(dev, "Need more slots\n");
 788			err = -ENOENT;
 789			break;
 790		}
 791
 792		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
 793		skb = xennet_get_rx_skb(np, cons + slots);
 794		ref = xennet_get_rx_ref(np, cons + slots);
 795		slots++;
 796	}
 797
 798	if (unlikely(slots > max)) {
 799		if (net_ratelimit())
 800			dev_warn(dev, "Too many slots\n");
 801		err = -E2BIG;
 802	}
 803
 804	if (unlikely(err))
 805		np->rx.rsp_cons = cons + slots;
 806
 807	return err;
 808}
 809
 810static int xennet_set_skb_gso(struct sk_buff *skb,
 811			      struct xen_netif_extra_info *gso)
 812{
 813	if (!gso->u.gso.size) {
 814		if (net_ratelimit())
 815			pr_warn("GSO size must not be zero\n");
 816		return -EINVAL;
 817	}
 818
 819	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
 820	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
 821		if (net_ratelimit())
 822			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
 823		return -EINVAL;
 824	}
 825
 826	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 827	skb_shinfo(skb)->gso_type =
 828		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
 829		SKB_GSO_TCPV4 :
 830		SKB_GSO_TCPV6;
 831
 832	/* Header must be checked, and gso_segs computed. */
 833	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
 834	skb_shinfo(skb)->gso_segs = 0;
 835
 836	return 0;
 837}
 838
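/*
 * Attach the pages carried by the skbs on 'list' as fragments of 'skb',
 * consuming one rx response per page, and return the updated consumer index.
 */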
 839static RING_IDX xennet_fill_frags(struct netfront_info *np,
 840				  struct sk_buff *skb,
 841				  struct sk_buff_head *list)
 842{
 843	struct skb_shared_info *shinfo = skb_shinfo(skb);
 844	RING_IDX cons = np->rx.rsp_cons;
 845	struct sk_buff *nskb;
 846
 847	while ((nskb = __skb_dequeue(list))) {
 848		struct xen_netif_rx_response *rx =
 849			RING_GET_RESPONSE(&np->rx, ++cons);
 850		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 851
 852		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
 853			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 854
 855			BUG_ON(pull_to <= skb_headlen(skb));
 856			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 857		}
 858		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
 859
 860		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
 861				rx->offset, rx->status, PAGE_SIZE);
 862
 863		skb_shinfo(nskb)->nr_frags = 0;
 864		kfree_skb(nskb);
 865	}
 866
 867	return cons;
 868}
 869
 870static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 871{
 872	bool recalculate_partial_csum = false;
 873
 874	/*
 875	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 876	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 877	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 878	 * recalculate the partial checksum.
 879	 */
 880	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 881		struct netfront_info *np = netdev_priv(dev);
 882		np->rx_gso_checksum_fixup++;
 883		skb->ip_summed = CHECKSUM_PARTIAL;
 884		recalculate_partial_csum = true;
 885	}
 886
 887	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 888	if (skb->ip_summed != CHECKSUM_PARTIAL)
 889		return 0;
 890
 891	return skb_checksum_setup(skb, recalculate_partial_csum);
 892}
 893
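/*
 * Hand a queue of completed receive skbs to the network stack; returns the
 * number of packets dropped (e.g. because checksum setup failed).
 */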
 894static int handle_incoming_queue(struct net_device *dev,
 895				 struct sk_buff_head *rxq)
 896{
 897	struct netfront_info *np = netdev_priv(dev);
 898	struct netfront_stats *stats = this_cpu_ptr(np->stats);
 899	int packets_dropped = 0;
 900	struct sk_buff *skb;
 901
 902	while ((skb = __skb_dequeue(rxq)) != NULL) {
 903		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 904
 905		if (pull_to > skb_headlen(skb))
 906			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 907
 908		/* Ethernet work: Delayed to here as it peeks the header. */
 909		skb->protocol = eth_type_trans(skb, dev);
 910		skb_reset_network_header(skb);
 911
 912		if (checksum_setup(dev, skb)) {
 913			kfree_skb(skb);
 914			packets_dropped++;
 915			dev->stats.rx_errors++;
 916			continue;
 917		}
 918
 919		u64_stats_update_begin(&stats->syncp);
 920		stats->rx_packets++;
 921		stats->rx_bytes += skb->len;
 922		u64_stats_update_end(&stats->syncp);
 923
 924		/* Pass it up. */
 925		napi_gro_receive(&np->napi, skb);
 926	}
 927
 928	return packets_dropped;
 929}
 930
 931static int xennet_poll(struct napi_struct *napi, int budget)
 932{
 933	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
 934	struct net_device *dev = np->netdev;
 935	struct sk_buff *skb;
 936	struct netfront_rx_info rinfo;
 937	struct xen_netif_rx_response *rx = &rinfo.rx;
 938	struct xen_netif_extra_info *extras = rinfo.extras;
 939	RING_IDX i, rp;
 940	int work_done;
 941	struct sk_buff_head rxq;
 942	struct sk_buff_head errq;
 943	struct sk_buff_head tmpq;
 944	unsigned long flags;
 945	int err;
 946
 947	spin_lock(&np->rx_lock);
 948
 949	skb_queue_head_init(&rxq);
 950	skb_queue_head_init(&errq);
 951	skb_queue_head_init(&tmpq);
 952
 953	rp = np->rx.sring->rsp_prod;
 954	rmb(); /* Ensure we see queued responses up to 'rp'. */
 955
 956	i = np->rx.rsp_cons;
 957	work_done = 0;
 958	while ((i != rp) && (work_done < budget)) {
 959		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
 960		memset(extras, 0, sizeof(rinfo.extras));
 961
 962		err = xennet_get_responses(np, &rinfo, rp, &tmpq);
 963
 964		if (unlikely(err)) {
 965err:
 966			while ((skb = __skb_dequeue(&tmpq)))
 967				__skb_queue_tail(&errq, skb);
 968			dev->stats.rx_errors++;
 969			i = np->rx.rsp_cons;
 970			continue;
 971		}
 972
 973		skb = __skb_dequeue(&tmpq);
 974
 975		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
 976			struct xen_netif_extra_info *gso;
 977			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 978
 979			if (unlikely(xennet_set_skb_gso(skb, gso))) {
 980				__skb_queue_head(&tmpq, skb);
 981				np->rx.rsp_cons += skb_queue_len(&tmpq);
 982				goto err;
 983			}
 984		}
 985
 986		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
 987		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
 988			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
 989
 990		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
 991		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
 992		skb->data_len = rx->status;
 993		skb->len += rx->status;
 994
 995		i = xennet_fill_frags(np, skb, &tmpq);
 996
 997		if (rx->flags & XEN_NETRXF_csum_blank)
 998			skb->ip_summed = CHECKSUM_PARTIAL;
 999		else if (rx->flags & XEN_NETRXF_data_validated)
1000			skb->ip_summed = CHECKSUM_UNNECESSARY;
1001
1002		__skb_queue_tail(&rxq, skb);
1003
1004		np->rx.rsp_cons = ++i;
1005		work_done++;
1006	}
1007
1008	__skb_queue_purge(&errq);
1009
1010	work_done -= handle_incoming_queue(dev, &rxq);
1011
1012	/* If we get a callback with very few responses, reduce fill target. */
1013	/* NB. Note exponential increase, linear decrease. */
1014	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1015	     ((3*np->rx_target) / 4)) &&
1016	    (--np->rx_target < np->rx_min_target))
1017		np->rx_target = np->rx_min_target;
1018
1019	xennet_alloc_rx_buffers(dev);
1020
1021	if (work_done < budget) {
1022		int more_to_do = 0;
1023
1024		napi_gro_flush(napi, false);
1025
1026		local_irq_save(flags);
1027
1028		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1029		if (!more_to_do)
1030			__napi_complete(napi);
1031
1032		local_irq_restore(flags);
1033	}
1034
1035	spin_unlock(&np->rx_lock);
1036
1037	return work_done;
1038}
1039
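/* The largest allowed MTU depends on whether scatter-gather is available. */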
1040static int xennet_change_mtu(struct net_device *dev, int mtu)
1041{
1042	int max = xennet_can_sg(dev) ?
1043		XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
1044
1045	if (mtu > max)
1046		return -EINVAL;
1047	dev->mtu = mtu;
1048	return 0;
1049}
1050
1051static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1052						    struct rtnl_link_stats64 *tot)
1053{
1054	struct netfront_info *np = netdev_priv(dev);
1055	int cpu;
1056
1057	for_each_possible_cpu(cpu) {
1058		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
 
1059		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1060		unsigned int start;
1061
1062		do {
1063			start = u64_stats_fetch_begin_irq(&stats->syncp);
1064
1065			rx_packets = stats->rx_packets;
1066			tx_packets = stats->tx_packets;
1067			rx_bytes = stats->rx_bytes;
1068			tx_bytes = stats->tx_bytes;
1069		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1070
1071		tot->rx_packets += rx_packets;
1072		tot->tx_packets += tx_packets;
1073		tot->rx_bytes   += rx_bytes;
1074		tot->tx_bytes   += tx_bytes;
1075	}
1076
1077	tot->rx_errors  = dev->stats.rx_errors;
1078	tot->tx_dropped = dev->stats.tx_dropped;
1079
1080	return tot;
1081}
1082
1083static void xennet_release_tx_bufs(struct netfront_info *np)
1084{
1085	struct sk_buff *skb;
1086	int i;
1087
1088	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1089		/* Skip over entries which are actually freelist references */
1090		if (skb_entry_is_link(&np->tx_skbs[i]))
1091			continue;
1092
1093		skb = np->tx_skbs[i].skb;
1094		get_page(np->grant_tx_page[i]);
1095		gnttab_end_foreign_access(np->grant_tx_ref[i],
1096					  GNTMAP_readonly,
1097					  (unsigned long)page_address(np->grant_tx_page[i]));
1098		np->grant_tx_page[i] = NULL;
1099		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1100		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
1101		dev_kfree_skb_irq(skb);
1102	}
1103}
1104
1105static void xennet_release_rx_bufs(struct netfront_info *np)
1106{
1107	int id, ref;
1108
1109	spin_lock_bh(&np->rx_lock);
1110
1111	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1112		struct sk_buff *skb;
1113		struct page *page;
1114
1115		skb = np->rx_skbs[id];
1116		if (!skb)
1117			continue;
1118
1119		ref = np->grant_rx_ref[id];
1120		if (ref == GRANT_INVALID_REF)
1121			continue;
1122
1123		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1124
1125		/* gnttab_end_foreign_access() needs a page ref until
1126		 * foreign access is ended (which may be deferred).
1127		 */
1128		get_page(page);
1129		gnttab_end_foreign_access(ref, 0,
1130					  (unsigned long)page_address(page));
1131		np->grant_rx_ref[id] = GRANT_INVALID_REF;
1132
1133		kfree_skb(skb);
1134	}
1135
1136	spin_unlock_bh(&np->rx_lock);
1137}
1138
1139static void xennet_uninit(struct net_device *dev)
1140{
1141	struct netfront_info *np = netdev_priv(dev);
1142	xennet_release_tx_bufs(np);
1143	xennet_release_rx_bufs(np);
1144	gnttab_free_grant_references(np->gref_tx_head);
1145	gnttab_free_grant_references(np->gref_rx_head);
1146}
1147
1148static netdev_features_t xennet_fix_features(struct net_device *dev,
1149	netdev_features_t features)
1150{
1151	struct netfront_info *np = netdev_priv(dev);
1152	int val;
1153
1154	if (features & NETIF_F_SG) {
1155		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1156				 "%d", &val) < 0)
1157			val = 0;
1158
1159		if (!val)
1160			features &= ~NETIF_F_SG;
1161	}
1162
1163	if (features & NETIF_F_IPV6_CSUM) {
1164		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1165				 "feature-ipv6-csum-offload", "%d", &val) < 0)
1166			val = 0;
1167
1168		if (!val)
1169			features &= ~NETIF_F_IPV6_CSUM;
1170	}
1171
1172	if (features & NETIF_F_TSO) {
1173		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1174				 "feature-gso-tcpv4", "%d", &val) < 0)
1175			val = 0;
1176
1177		if (!val)
1178			features &= ~NETIF_F_TSO;
1179	}
1180
1181	if (features & NETIF_F_TSO6) {
1182		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1183				 "feature-gso-tcpv6", "%d", &val) < 0)
1184			val = 0;
1185
1186		if (!val)
1187			features &= ~NETIF_F_TSO6;
1188	}
1189
1190	return features;
1191}
1192
1193static int xennet_set_features(struct net_device *dev,
1194	netdev_features_t features)
1195{
1196	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1197		netdev_info(dev, "Reducing MTU because no SG offload");
1198		dev->mtu = ETH_DATA_LEN;
1199	}
1200
1201	return 0;
1202}
1203
1204static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1205{
1206	struct netfront_info *np = dev_id;
1207	struct net_device *dev = np->netdev;
1208	unsigned long flags;
1209
1210	spin_lock_irqsave(&np->tx_lock, flags);
1211	xennet_tx_buf_gc(dev);
1212	spin_unlock_irqrestore(&np->tx_lock, flags);
1213
1214	return IRQ_HANDLED;
1215}
1216
1217static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1218{
1219	struct netfront_info *np = dev_id;
1220	struct net_device *dev = np->netdev;
1221
1222	if (likely(netif_carrier_ok(dev) &&
1223		   RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
1224			napi_schedule(&np->napi);
1225
1226	return IRQ_HANDLED;
1227}
1228
1229static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1230{
1231	xennet_tx_interrupt(irq, dev_id);
1232	xennet_rx_interrupt(irq, dev_id);
1233	return IRQ_HANDLED;
1234}
1235
1236#ifdef CONFIG_NET_POLL_CONTROLLER
1237static void xennet_poll_controller(struct net_device *dev)
1238{
1239	xennet_interrupt(0, dev);
1240}
1241#endif
1242
1243static const struct net_device_ops xennet_netdev_ops = {
1244	.ndo_open            = xennet_open,
1245	.ndo_uninit          = xennet_uninit,
1246	.ndo_stop            = xennet_close,
1247	.ndo_start_xmit      = xennet_start_xmit,
1248	.ndo_change_mtu	     = xennet_change_mtu,
1249	.ndo_get_stats64     = xennet_get_stats64,
1250	.ndo_set_mac_address = eth_mac_addr,
1251	.ndo_validate_addr   = eth_validate_addr,
1252	.ndo_fix_features    = xennet_fix_features,
1253	.ndo_set_features    = xennet_set_features,
 
1254#ifdef CONFIG_NET_POLL_CONTROLLER
1255	.ndo_poll_controller = xennet_poll_controller,
1256#endif
1257};
1258
1259static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1260{
1261	int i, err;
1262	struct net_device *netdev;
1263	struct netfront_info *np;
1264
1265	netdev = alloc_etherdev(sizeof(struct netfront_info));
1266	if (!netdev)
1267		return ERR_PTR(-ENOMEM);
1268
1269	np                   = netdev_priv(netdev);
1270	np->xbdev            = dev;
1271
1272	spin_lock_init(&np->tx_lock);
1273	spin_lock_init(&np->rx_lock);
1274
1275	skb_queue_head_init(&np->rx_batch);
1276	np->rx_target     = RX_DFL_MIN_TARGET;
1277	np->rx_min_target = RX_DFL_MIN_TARGET;
1278	np->rx_max_target = RX_MAX_TARGET;
1279
1280	init_timer(&np->rx_refill_timer);
1281	np->rx_refill_timer.data = (unsigned long)netdev;
1282	np->rx_refill_timer.function = rx_refill_timeout;
1283
1284	err = -ENOMEM;
1285	np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1286	if (np->stats == NULL)
1287		goto exit;
1288
1289	/* Initialise tx_skbs as a free chain containing every entry. */
1290	np->tx_skb_freelist = 0;
1291	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1292		skb_entry_set_link(&np->tx_skbs[i], i+1);
1293		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1294		np->grant_tx_page[i] = NULL;
1295	}
1296
1297	/* Clear out rx_skbs */
1298	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1299		np->rx_skbs[i] = NULL;
1300		np->grant_rx_ref[i] = GRANT_INVALID_REF;
1301	}
1302
1303	/* A grant for every tx ring slot */
1304	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1305					  &np->gref_tx_head) < 0) {
1306		pr_alert("can't alloc tx grant refs\n");
1307		err = -ENOMEM;
1308		goto exit_free_stats;
1309	}
1310	/* A grant for every rx ring slot */
1311	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1312					  &np->gref_rx_head) < 0) {
1313		pr_alert("can't alloc rx grant refs\n");
1314		err = -ENOMEM;
1315		goto exit_free_tx;
1316	}
1317
1318	netdev->netdev_ops	= &xennet_netdev_ops;
1319
1320	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1321	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1322				  NETIF_F_GSO_ROBUST;
1323	netdev->hw_features	= NETIF_F_SG |
1324				  NETIF_F_IPV6_CSUM |
1325				  NETIF_F_TSO | NETIF_F_TSO6;
1326
1327	/*
1328         * Assume that all hw features are available for now. This set
1329         * will be adjusted by the call to netdev_update_features() in
1330         * xennet_connect() which is the earliest point where we can
1331         * negotiate with the backend regarding supported features.
1332         */
1333	netdev->features |= netdev->hw_features;
1334
1335	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
1336	SET_NETDEV_DEV(netdev, &dev->dev);
1337
1338	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
1339
1340	np->netdev = netdev;
1341
1342	netif_carrier_off(netdev);
1343
 
1344	return netdev;
1345
1346 exit_free_tx:
1347	gnttab_free_grant_references(np->gref_tx_head);
1348 exit_free_stats:
1349	free_percpu(np->stats);
1350 exit:
1351	free_netdev(netdev);
1352	return ERR_PTR(err);
1353}
1354
1355/**
1356 * Entry point to this code when a new device is created.  Allocate the basic
1357 * structures and the ring buffers for communication with the backend, and
1358 * inform the backend of the appropriate details for those.
1359 */
1360static int netfront_probe(struct xenbus_device *dev,
1361			  const struct xenbus_device_id *id)
1362{
1363	int err;
1364	struct net_device *netdev;
1365	struct netfront_info *info;
1366
1367	netdev = xennet_create_dev(dev);
1368	if (IS_ERR(netdev)) {
1369		err = PTR_ERR(netdev);
1370		xenbus_dev_fatal(dev, err, "creating netdev");
1371		return err;
1372	}
1373
1374	info = netdev_priv(netdev);
1375	dev_set_drvdata(&dev->dev, info);
1376
1377	err = register_netdev(info->netdev);
1378	if (err) {
1379		pr_warn("%s: register_netdev err=%d\n", __func__, err);
1380		goto fail;
1381	}
1382
1383	err = xennet_sysfs_addif(info->netdev);
1384	if (err) {
1385		unregister_netdev(info->netdev);
1386		pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
1387		goto fail;
1388	}
1389
1390	return 0;
1391
1392 fail:
1393	free_netdev(netdev);
1394	dev_set_drvdata(&dev->dev, NULL);
1395	return err;
1396}
1397
1398static void xennet_end_access(int ref, void *page)
1399{
1400	/* This frees the page as a side-effect */
1401	if (ref != GRANT_INVALID_REF)
1402		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1403}
1404
1405static void xennet_disconnect_backend(struct netfront_info *info)
1406{
1407	/* Stop old i/f to prevent errors whilst we rebuild the state. */
1408	spin_lock_bh(&info->rx_lock);
1409	spin_lock_irq(&info->tx_lock);
1410	netif_carrier_off(info->netdev);
1411	spin_unlock_irq(&info->tx_lock);
1412	spin_unlock_bh(&info->rx_lock);
1413
1414	if (info->tx_irq && (info->tx_irq == info->rx_irq))
1415		unbind_from_irqhandler(info->tx_irq, info);
1416	if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
1417		unbind_from_irqhandler(info->tx_irq, info);
1418		unbind_from_irqhandler(info->rx_irq, info);
1419	}
1420	info->tx_evtchn = info->rx_evtchn = 0;
1421	info->tx_irq = info->rx_irq = 0;
1422
1423	/* End access and free the pages */
1424	xennet_end_access(info->tx_ring_ref, info->tx.sring);
1425	xennet_end_access(info->rx_ring_ref, info->rx.sring);
1426
1427	info->tx_ring_ref = GRANT_INVALID_REF;
1428	info->rx_ring_ref = GRANT_INVALID_REF;
1429	info->tx.sring = NULL;
1430	info->rx.sring = NULL;
1431}
1432
1433/**
1434 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1435 * driver restart.  We tear down our netif structure and recreate it, but
1436 * leave the device-layer structures intact so that this is transparent to the
1437 * rest of the kernel.
1438 */
1439static int netfront_resume(struct xenbus_device *dev)
1440{
1441	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1442
1443	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1444
1445	xennet_disconnect_backend(info);
1446	return 0;
1447}
1448
1449static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1450{
1451	char *s, *e, *macstr;
1452	int i;
1453
1454	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1455	if (IS_ERR(macstr))
1456		return PTR_ERR(macstr);
1457
1458	for (i = 0; i < ETH_ALEN; i++) {
1459		mac[i] = simple_strtoul(s, &e, 16);
1460		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1461			kfree(macstr);
1462			return -ENOENT;
1463		}
1464		s = e+1;
1465	}
1466
1467	kfree(macstr);
1468	return 0;
1469}
1470
1471static int setup_netfront_single(struct netfront_info *info)
1472{
1473	int err;
1474
1475	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1476	if (err < 0)
1477		goto fail;
1478
1479	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1480					xennet_interrupt,
1481					0, info->netdev->name, info);
1482	if (err < 0)
1483		goto bind_fail;
1484	info->rx_evtchn = info->tx_evtchn;
1485	info->rx_irq = info->tx_irq = err;
1486
1487	return 0;
1488
1489bind_fail:
1490	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1491	info->tx_evtchn = 0;
1492fail:
1493	return err;
1494}
1495
1496static int setup_netfront_split(struct netfront_info *info)
1497{
1498	int err;
1499
1500	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1501	if (err < 0)
1502		goto fail;
1503	err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
1504	if (err < 0)
1505		goto alloc_rx_evtchn_fail;
1506
1507	snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
1508		 "%s-tx", info->netdev->name);
1509	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1510					xennet_tx_interrupt,
1511					0, info->tx_irq_name, info);
1512	if (err < 0)
1513		goto bind_tx_fail;
1514	info->tx_irq = err;
1515
1516	snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
1517		 "%s-rx", info->netdev->name);
1518	err = bind_evtchn_to_irqhandler(info->rx_evtchn,
1519					xennet_rx_interrupt,
1520					0, info->rx_irq_name, info);
1521	if (err < 0)
1522		goto bind_rx_fail;
1523	info->rx_irq = err;
1524
1525	return 0;
1526
1527bind_rx_fail:
1528	unbind_from_irqhandler(info->tx_irq, info);
1529	info->tx_irq = 0;
1530bind_tx_fail:
1531	xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
1532	info->rx_evtchn = 0;
1533alloc_rx_evtchn_fail:
1534	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1535	info->tx_evtchn = 0;
1536fail:
1537	return err;
1538}
1539
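/*
 * Allocate and grant the shared tx/rx rings and bind the event channel(s),
 * falling back to a single combined channel if split channels cannot be
 * set up.
 */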
1540static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 
1541{
1542	struct xen_netif_tx_sring *txs;
1543	struct xen_netif_rx_sring *rxs;
 
1544	int err;
1545	struct net_device *netdev = info->netdev;
1546	unsigned int feature_split_evtchn;
1547
1548	info->tx_ring_ref = GRANT_INVALID_REF;
1549	info->rx_ring_ref = GRANT_INVALID_REF;
1550	info->rx.sring = NULL;
1551	info->tx.sring = NULL;
1552	netdev->irq = 0;
1553
1554	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1555			   "feature-split-event-channels", "%u",
1556			   &feature_split_evtchn);
1557	if (err < 0)
1558		feature_split_evtchn = 0;
1559
1560	err = xen_net_read_mac(dev, netdev->dev_addr);
1561	if (err) {
1562		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1563		goto fail;
1564	}
1565
1566	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1567	if (!txs) {
1568		err = -ENOMEM;
1569		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1570		goto fail;
1571	}
1572	SHARED_RING_INIT(txs);
1573	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1574
1575	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1576	if (err < 0)
1577		goto grant_tx_ring_fail;
 
1578
1579	info->tx_ring_ref = err;
1580	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1581	if (!rxs) {
1582		err = -ENOMEM;
1583		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1584		goto alloc_rx_ring_fail;
1585	}
1586	SHARED_RING_INIT(rxs);
1587	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1588
1589	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1590	if (err < 0)
1591		goto grant_rx_ring_fail;
1592	info->rx_ring_ref = err;
1593
1594	if (feature_split_evtchn)
1595		err = setup_netfront_split(info);
1596	/* Set up a single event channel if
1597	 *  a) feature-split-event-channels == 0, or
1598	 *  b) feature-split-event-channels == 1 but the split setup failed.
1599	 */
1600	if (!feature_split_evtchn || (feature_split_evtchn && err))
1601		err = setup_netfront_single(info);
1602
1603	if (err)
1604		goto alloc_evtchn_fail;
1605
1606	return 0;
1607
1608	/* If we fail to set up netfront, it is safe to simply revoke access to
1609	 * the granted pages because the backend is not accessing them yet.
1610	 */
1611alloc_evtchn_fail:
1612	gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
1613grant_rx_ring_fail:
1614	free_page((unsigned long)rxs);
1615alloc_rx_ring_fail:
1616	gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
1617grant_tx_ring_fail:
1618	free_page((unsigned long)txs);
1619fail:
1620	return err;
1621}
1622
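/*
 * The xenbus transaction in talk_to_netback() publishes the frontend's
 * half of the protocol under dev->nodename.  A connected vif ends up
 * with entries roughly like the following (values are illustrative):
 *
 *	tx-ring-ref = "8"
 *	rx-ring-ref = "9"
 *	event-channel-tx = "15"		(or a single "event-channel")
 *	event-channel-rx = "16"
 *	request-rx-copy = "1"
 *	feature-rx-notify = "1"
 *	feature-sg = "1"
 *	feature-gso-tcpv4 = "1"
 *	feature-gso-tcpv6 = "1"
 *	feature-ipv6-csum-offload = "1"
 */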
1623/* Common code used when first setting up, and when resuming. */
1624static int talk_to_netback(struct xenbus_device *dev,
1625			   struct netfront_info *info)
1626{
1627	const char *message;
1628	struct xenbus_transaction xbt;
1629	int err;
1630
1631	/* Create shared ring, alloc event channel. */
1632	err = setup_netfront(dev, info);
1633	if (err)
1634		goto out;
1635
1636again:
1637	err = xenbus_transaction_start(&xbt);
1638	if (err) {
1639		xenbus_dev_fatal(dev, err, "starting transaction");
1640		goto destroy_ring;
1641	}
1642
1643	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
1644			    info->tx_ring_ref);
1645	if (err) {
1646		message = "writing tx ring-ref";
1647		goto abort_transaction;
1648	}
1649	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1650			    info->rx_ring_ref);
1651	if (err) {
1652		message = "writing rx ring-ref";
1653		goto abort_transaction;
1654	}
1655
1656	if (info->tx_evtchn == info->rx_evtchn) {
1657		err = xenbus_printf(xbt, dev->nodename,
1658				    "event-channel", "%u", info->tx_evtchn);
1659		if (err) {
1660			message = "writing event-channel";
1661			goto abort_transaction;
1662		}
1663	} else {
1664		err = xenbus_printf(xbt, dev->nodename,
1665				    "event-channel-tx", "%u", info->tx_evtchn);
1666		if (err) {
1667			message = "writing event-channel-tx";
1668			goto abort_transaction;
1669		}
1670		err = xenbus_printf(xbt, dev->nodename,
1671				    "event-channel-rx", "%u", info->rx_evtchn);
1672		if (err) {
1673			message = "writing event-channel-rx";
1674			goto abort_transaction;
1675		}
1676	}
1677
1678	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1679			    1);
1680	if (err) {
1681		message = "writing request-rx-copy";
1682		goto abort_transaction;
1683	}
1684
1685	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1686	if (err) {
1687		message = "writing feature-rx-notify";
1688		goto abort_transaction;
1689	}
1690
1691	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1692	if (err) {
1693		message = "writing feature-sg";
1694		goto abort_transaction;
1695	}
1696
1697	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1698	if (err) {
1699		message = "writing feature-gso-tcpv4";
1700		goto abort_transaction;
1701	}
1702
1703	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1704	if (err) {
1705		message = "writing feature-gso-tcpv6";
1706		goto abort_transaction;
1707	}
1708
1709	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1710			   "1");
1711	if (err) {
1712		message = "writing feature-ipv6-csum-offload";
1713		goto abort_transaction;
1714	}
1715
1716	err = xenbus_transaction_end(xbt, 0);
1717	if (err) {
1718		if (err == -EAGAIN)
1719			goto again;
1720		xenbus_dev_fatal(dev, err, "completing transaction");
1721		goto destroy_ring;
1722	}
1723
1724	return 0;
1725
1726 abort_transaction:
1727	xenbus_transaction_end(xbt, 1);
1728	xenbus_dev_fatal(dev, err, "%s", message);
1729 destroy_ring:
1730	xennet_disconnect_backend(info);
1731 out:
1732	return err;
1733}
1734
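/*
 * (Re)establish the connection to the backend.  Used both for the
 * initial bring-up and after resume/migration: the backend must support
 * rx-copy, rings and event channels are rebuilt via talk_to_netback(),
 * and any RX buffers that survived the disconnect are re-granted and
 * re-posted before the carrier is turned back on.
 */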
1735static int xennet_connect(struct net_device *dev)
1736{
1737	struct netfront_info *np = netdev_priv(dev);
1738	int i, requeue_idx, err;
1739	struct sk_buff *skb;
1740	grant_ref_t ref;
1741	struct xen_netif_rx_request *req;
1742	unsigned int feature_rx_copy;
1743
1744	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1745			   "feature-rx-copy", "%u", &feature_rx_copy);
1746	if (err != 1)
1747		feature_rx_copy = 0;
1748
1749	if (!feature_rx_copy) {
1750		dev_info(&dev->dev,
1751			 "backend does not support copying receive path\n");
1752		return -ENODEV;
1753	}
1754
1755	err = talk_to_netback(np->xbdev, np);
1756	if (err)
1757		return err;
1758
1759	rtnl_lock();
1760	netdev_update_features(dev);
1761	rtnl_unlock();
1762
1763	spin_lock_bh(&np->rx_lock);
1764	spin_lock_irq(&np->tx_lock);
1765
1766	/* Step 1: Discard all pending TX packet fragments. */
1767	xennet_release_tx_bufs(np);
1768
1769	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
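	/* After a migration the peer is a new backend domain, so each
	 * surviving RX skb's page must be granted to otherend_id afresh
	 * and a matching request re-posted; slots freed by buffers that
	 * were in flight are compacted out via requeue_idx.
	 */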
1770	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1771		skb_frag_t *frag;
1772		const struct page *page;
1773		if (!np->rx_skbs[i])
1774			continue;
1775
1776		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1777		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1778		req = RING_GET_REQUEST(&np->rx, requeue_idx);
1779
1780		frag = &skb_shinfo(skb)->frags[0];
1781		page = skb_frag_page(frag);
1782		gnttab_grant_foreign_access_ref(
1783			ref, np->xbdev->otherend_id,
1784			pfn_to_mfn(page_to_pfn(page)),
1785			0);
1786		req->gref = ref;
1787		req->id   = requeue_idx;
1788
1789		requeue_idx++;
1790	}
1791
1792	np->rx.req_prod_pvt = requeue_idx;
1793
1794	/*
1795	 * Step 3: All public and private state should now be sane.  Get
1796	 * ready to start sending and receiving packets and give the driver
1797	 * domain a kick because we've probably just requeued some
1798	 * packets.
1799	 */
1800	netif_carrier_on(np->netdev);
1801	notify_remote_via_irq(np->tx_irq);
1802	if (np->tx_irq != np->rx_irq)
1803		notify_remote_via_irq(np->rx_irq);
1804	xennet_tx_buf_gc(dev);
1805	xennet_alloc_rx_buffers(dev);
1806
1807	spin_unlock_irq(&np->tx_lock);
1808	spin_unlock_bh(&np->rx_lock);
1809
1810	return 0;
1811}
1812
1813/**
1814 * Callback received when the backend's state changes.
1815 */
1816static void netback_changed(struct xenbus_device *dev,
1817			    enum xenbus_state backend_state)
1818{
1819	struct netfront_info *np = dev_get_drvdata(&dev->dev);
1820	struct net_device *netdev = np->netdev;
1821
1822	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
1823
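	/* The backend drives the handshake: once it reaches InitWait the
	 * frontend (still Initialising) connects and switches itself to
	 * Connected; a Closing/Closed backend is acknowledged by moving
	 * the frontend to Closed via xenbus_frontend_closed().
	 */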
1824	switch (backend_state) {
1825	case XenbusStateInitialising:
1826	case XenbusStateInitialised:
1827	case XenbusStateReconfiguring:
1828	case XenbusStateReconfigured:
1829	case XenbusStateUnknown:
1830		break;
1831
1832	case XenbusStateInitWait:
1833		if (dev->state != XenbusStateInitialising)
1834			break;
1835		if (xennet_connect(netdev) != 0)
1836			break;
1837		xenbus_switch_state(dev, XenbusStateConnected);
1838		break;
1839
1840	case XenbusStateConnected:
1841		netdev_notify_peers(netdev);
1842		break;
1843
1844	case XenbusStateClosed:
1845		if (dev->state == XenbusStateClosed)
1846			break;
1847		/* Missed the backend's CLOSING state -- fallthrough */
1848	case XenbusStateClosing:
1849		xenbus_frontend_closed(dev);
1850		break;
1851	}
1852}
1853
1854static const struct xennet_stat {
1855	char name[ETH_GSTRING_LEN];
1856	u16 offset;
1857} xennet_stats[] = {
1858	{
1859		"rx_gso_checksum_fixup",
1860		offsetof(struct netfront_info, rx_gso_checksum_fixup)
1861	},
1862};
1863
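/*
 * Each entry above names a counter and its byte offset inside struct
 * netfront_info; xennet_get_ethtool_stats() reads the counters
 * generically through these offsets, so adding a statistic only needs a
 * new table entry.
 */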
1864static int xennet_get_sset_count(struct net_device *dev, int string_set)
1865{
1866	switch (string_set) {
1867	case ETH_SS_STATS:
1868		return ARRAY_SIZE(xennet_stats);
1869	default:
1870		return -EINVAL;
1871	}
1872}
1873
1874static void xennet_get_ethtool_stats(struct net_device *dev,
1875				     struct ethtool_stats *stats, u64 * data)
1876{
1877	void *np = netdev_priv(dev);
1878	int i;
1879
1880	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1881		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
1882}
1883
1884static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
1885{
1886	int i;
1887
1888	switch (stringset) {
1889	case ETH_SS_STATS:
1890		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1891			memcpy(data + i * ETH_GSTRING_LEN,
1892			       xennet_stats[i].name, ETH_GSTRING_LEN);
1893		break;
1894	}
1895}
1896
1897static const struct ethtool_ops xennet_ethtool_ops =
1898{
1899	.get_link = ethtool_op_get_link,
1900
1901	.get_sset_count = xennet_get_sset_count,
1902	.get_ethtool_stats = xennet_get_ethtool_stats,
1903	.get_strings = xennet_get_strings,
1904};
1905
1906#ifdef CONFIG_SYSFS
1907static ssize_t show_rxbuf_min(struct device *dev,
1908			      struct device_attribute *attr, char *buf)
1909{
1910	struct net_device *netdev = to_net_dev(dev);
1911	struct netfront_info *info = netdev_priv(netdev);
1912
1913	return sprintf(buf, "%u\n", info->rx_min_target);
1914}
1915
1916static ssize_t store_rxbuf_min(struct device *dev,
1917			       struct device_attribute *attr,
1918			       const char *buf, size_t len)
1919{
1920	struct net_device *netdev = to_net_dev(dev);
1921	struct netfront_info *np = netdev_priv(netdev);
1922	char *endp;
1923	unsigned long target;
1924
1925	if (!capable(CAP_NET_ADMIN))
1926		return -EPERM;
1927
1928	target = simple_strtoul(buf, &endp, 0);
1929	if (endp == buf)
1930		return -EBADMSG;
1931
1932	if (target < RX_MIN_TARGET)
1933		target = RX_MIN_TARGET;
1934	if (target > RX_MAX_TARGET)
1935		target = RX_MAX_TARGET;
1936
1937	spin_lock_bh(&np->rx_lock);
1938	if (target > np->rx_max_target)
1939		np->rx_max_target = target;
1940	np->rx_min_target = target;
1941	if (target > np->rx_target)
1942		np->rx_target = target;
1943
1944	xennet_alloc_rx_buffers(netdev);
1945
1946	spin_unlock_bh(&np->rx_lock);
1947	return len;
1948}
1949
1950static ssize_t show_rxbuf_max(struct device *dev,
1951			      struct device_attribute *attr, char *buf)
1952{
1953	struct net_device *netdev = to_net_dev(dev);
1954	struct netfront_info *info = netdev_priv(netdev);
1955
1956	return sprintf(buf, "%u\n", info->rx_max_target);
1957}
1958
1959static ssize_t store_rxbuf_max(struct device *dev,
1960			       struct device_attribute *attr,
1961			       const char *buf, size_t len)
1962{
1963	struct net_device *netdev = to_net_dev(dev);
1964	struct netfront_info *np = netdev_priv(netdev);
1965	char *endp;
1966	unsigned long target;
1967
1968	if (!capable(CAP_NET_ADMIN))
1969		return -EPERM;
1970
1971	target = simple_strtoul(buf, &endp, 0);
1972	if (endp == buf)
1973		return -EBADMSG;
1974
1975	if (target < RX_MIN_TARGET)
1976		target = RX_MIN_TARGET;
1977	if (target > RX_MAX_TARGET)
1978		target = RX_MAX_TARGET;
1979
1980	spin_lock_bh(&np->rx_lock);
1981	if (target < np->rx_min_target)
1982		np->rx_min_target = target;
1983	np->rx_max_target = target;
1984	if (target < np->rx_target)
1985		np->rx_target = target;
1986
1987	xennet_alloc_rx_buffers(netdev);
1988
1989	spin_unlock_bh(&np->rx_lock);
1990	return len;
1991}
1992
1993static ssize_t show_rxbuf_cur(struct device *dev,
1994			      struct device_attribute *attr, char *buf)
1995{
1996	struct net_device *netdev = to_net_dev(dev);
1997	struct netfront_info *info = netdev_priv(netdev);
1998
1999	return sprintf(buf, "%u\n", info->rx_target);
2000}
2001
2002static struct device_attribute xennet_attrs[] = {
2003	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
2004	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
2005	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
2006};
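/*
 * These attributes appear directly under the interface's sysfs node,
 * e.g. (assuming the interface is named eth0):
 *
 *	# cat /sys/class/net/eth0/rxbuf_cur
 *	# echo 256 > /sys/class/net/eth0/rxbuf_min
 *
 * Written values are clamped to [RX_MIN_TARGET, RX_MAX_TARGET] by the
 * store handlers above.
 */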
2007
2008static int xennet_sysfs_addif(struct net_device *netdev)
2009{
2010	int i;
2011	int err;
2012
2013	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2014		err = device_create_file(&netdev->dev,
2015					   &xennet_attrs[i]);
2016		if (err)
2017			goto fail;
2018	}
2019	return 0;
2020
2021 fail:
2022	while (--i >= 0)
2023		device_remove_file(&netdev->dev, &xennet_attrs[i]);
2024	return err;
2025}
2026
2027static void xennet_sysfs_delif(struct net_device *netdev)
2028{
2029	int i;
2030
2031	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
2032		device_remove_file(&netdev->dev, &xennet_attrs[i]);
2033}
2034
2035#endif /* CONFIG_SYSFS */
2036
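/* Match "vif" devices, i.e. xenstore nodes of the form device/vif/<id>. */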
2037static const struct xenbus_device_id netfront_ids[] = {
2038	{ "vif" },
2039	{ "" }
2040};
2041
2042
2043static int xennet_remove(struct xenbus_device *dev)
2044{
2045	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2046
2047	dev_dbg(&dev->dev, "%s\n", dev->nodename);
2048
2049	xennet_disconnect_backend(info);
2050
2051	xennet_sysfs_delif(info->netdev);
2052
2053	unregister_netdev(info->netdev);
2054
2055	del_timer_sync(&info->rx_refill_timer);
2056
2057	free_percpu(info->stats);
2058
2059	free_netdev(info->netdev);
2060
2061	return 0;
2062}
2063
2064static DEFINE_XENBUS_DRIVER(netfront, ,
2065	.probe = netfront_probe,
2066	.remove = xennet_remove,
2067	.resume = netfront_resume,
2068	.otherend_changed = netback_changed,
2069);
2070
2071static int __init netif_init(void)
2072{
2073	if (!xen_domain())
2074		return -ENODEV;
2075
2076	if (!xen_has_pv_nic_devices())
2077		return -ENODEV;
2078
2079	pr_info("Initialising Xen virtual ethernet driver\n");
2080
2081	return xenbus_register_frontend(&netfront_driver);
2082}
2083module_init(netif_init);
2084
2085
2086static void __exit netif_exit(void)
2087{
2088	xenbus_unregister_driver(&netfront_driver);
2089}
2090module_exit(netif_exit);
2091
2092MODULE_DESCRIPTION("Xen virtual network device frontend");
2093MODULE_LICENSE("GPL");
2094MODULE_ALIAS("xen:vif");
2095MODULE_ALIAS("xennet");