v3.15
   1/*
   2 * Virtual network driver for conversing with remote driver backends.
   3 *
   4 * Copyright (c) 2002-2005, K A Fraser
   5 * Copyright (c) 2005, XenSource Ltd
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version 2
   9 * as published by the Free Software Foundation; or, when distributed
  10 * separately from the Linux kernel or incorporated into other
  11 * software packages, subject to the following license:
  12 *
  13 * Permission is hereby granted, free of charge, to any person obtaining a copy
  14 * of this source file (the "Software"), to deal in the Software without
  15 * restriction, including without limitation the rights to use, copy, modify,
  16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  17 * and to permit persons to whom the Software is furnished to do so, subject to
  18 * the following conditions:
  19 *
  20 * The above copyright notice and this permission notice shall be included in
  21 * all copies or substantial portions of the Software.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  29 * IN THE SOFTWARE.
  30 */
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47
  48#include <asm/xen/page.h>
  49#include <xen/xen.h>
  50#include <xen/xenbus.h>
  51#include <xen/events.h>
  52#include <xen/page.h>
  53#include <xen/platform_pci.h>
  54#include <xen/grant_table.h>
  55
  56#include <xen/interface/io/netif.h>
  57#include <xen/interface/memory.h>
  58#include <xen/interface/grant_table.h>
  59
  60static const struct ethtool_ops xennet_ethtool_ops;
  61
  62struct netfront_cb {
  63	int pull_to;
  64};
  65
  66#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  67
  68#define RX_COPY_THRESHOLD 256
  69
  70#define GRANT_INVALID_REF	0
  71
  72#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
  73#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
  74#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
  75
  76struct netfront_stats {
  77	u64			rx_packets;
  78	u64			tx_packets;
  79	u64			rx_bytes;
  80	u64			tx_bytes;
  81	struct u64_stats_sync	syncp;
  82};
  83
  84struct netfront_info {
  85	struct list_head list;
  86	struct net_device *netdev;
  87
  88	struct napi_struct napi;
  89
  90	/* Split event channels support, tx_* == rx_* when using
  91	 * single event channel.
  92	 */
  93	unsigned int tx_evtchn, rx_evtchn;
  94	unsigned int tx_irq, rx_irq;
  95	/* Only used when split event channels support is enabled */
  96	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
  97	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
  98
  99	struct xenbus_device *xbdev;
 100
 101	spinlock_t   tx_lock;
 102	struct xen_netif_tx_front_ring tx;
 103	int tx_ring_ref;
 104
 105	/*
 106	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 107	 * are linked from tx_skb_freelist through skb_entry.link.
 108	 *
 109	 *  NB. Freelist index entries are always going to be less than
 110	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
 111	 *  greater than PAGE_OFFSET: we use this property to distinguish
 112	 *  them.
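	 *  (A free entry's .link field holds the index of the next free slot,
	 *  so the freelist is a singly-linked list threaded through tx_skbs[].)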
 113	 */
 114	union skb_entry {
 115		struct sk_buff *skb;
 116		unsigned long link;
 117	} tx_skbs[NET_TX_RING_SIZE];
 118	grant_ref_t gref_tx_head;
 119	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 120	struct page *grant_tx_page[NET_TX_RING_SIZE];
 121	unsigned tx_skb_freelist;
 122
 123	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 124	struct xen_netif_rx_front_ring rx;
 125	int rx_ring_ref;
 126
 127	/* Receive-ring batched refills. */
 128#define RX_MIN_TARGET 8
 129#define RX_DFL_MIN_TARGET 64
 130#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
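	/* rx_target floats between rx_min_target and rx_max_target: it is grown
	 * in xennet_alloc_rx_buffers() and shrunk in xennet_poll(). */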
 131	unsigned rx_min_target, rx_max_target, rx_target;
 132	struct sk_buff_head rx_batch;
 133
 134	struct timer_list rx_refill_timer;
 135
 136	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 137	grant_ref_t gref_rx_head;
 138	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 
 139
 140	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 141	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 142	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
 143
 144	/* Statistics */
 145	struct netfront_stats __percpu *stats;
 
 146
 147	unsigned long rx_gso_checksum_fixup;
 148};
 149
 150struct netfront_rx_info {
 151	struct xen_netif_rx_response rx;
 152	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 153};
 154
 155static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 156{
 157	list->link = id;
 158}
 159
 160static int skb_entry_is_link(const union skb_entry *list)
 161{
 162	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
 163	return (unsigned long)list->skb < PAGE_OFFSET;
 164}
 165
 166/*
 167 * Access macros for acquiring/freeing slots in tx_skbs[].
 168 */
 169
 170static void add_id_to_freelist(unsigned *head, union skb_entry *list,
 171			       unsigned short id)
 172{
 173	skb_entry_set_link(&list[id], *head);
 174	*head = id;
 175}
 176
 177static unsigned short get_id_from_freelist(unsigned *head,
 178					   union skb_entry *list)
 179{
 180	unsigned int id = *head;
 181	*head = list[id].link;
 182	return id;
 183}
 184
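/* The ring sizes are powers of two, so masking with (size - 1) maps a ring
 * index onto a slot in the rx_skbs[]/grant_rx_ref[] arrays. */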
 185static int xennet_rxidx(RING_IDX idx)
 186{
 187	return idx & (NET_RX_RING_SIZE - 1);
 188}
 189
 190static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
 191					 RING_IDX ri)
 192{
 193	int i = xennet_rxidx(ri);
 194	struct sk_buff *skb = np->rx_skbs[i];
 195	np->rx_skbs[i] = NULL;
 196	return skb;
 197}
 198
 199static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
 200					    RING_IDX ri)
 201{
 202	int i = xennet_rxidx(ri);
 203	grant_ref_t ref = np->grant_rx_ref[i];
 204	np->grant_rx_ref[i] = GRANT_INVALID_REF;
 205	return ref;
 206}
 207
 208#ifdef CONFIG_SYSFS
 209static int xennet_sysfs_addif(struct net_device *netdev);
 210static void xennet_sysfs_delif(struct net_device *netdev);
 211#else /* !CONFIG_SYSFS */
 212#define xennet_sysfs_addif(dev) (0)
 213#define xennet_sysfs_delif(dev) do { } while (0)
 214#endif
 215
 216static bool xennet_can_sg(struct net_device *dev)
 217{
 218	return dev->features & NETIF_F_SG;
 219}
 220
 221
 222static void rx_refill_timeout(unsigned long data)
 223{
 224	struct net_device *dev = (struct net_device *)data;
 225	struct netfront_info *np = netdev_priv(dev);
 226	napi_schedule(&np->napi);
 227}
 228
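/* Report the queue as having space only while enough tx ring slots remain for
 * another maximally-fragmented skb (linear header plus MAX_SKB_FRAGS frags)
 * with some slack. */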
 229static int netfront_tx_slot_available(struct netfront_info *np)
 230{
 231	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
 232		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
 233}
 234
 235static void xennet_maybe_wake_tx(struct net_device *dev)
 236{
 237	struct netfront_info *np = netdev_priv(dev);
 
 238
 239	if (unlikely(netif_queue_stopped(dev)) &&
 240	    netfront_tx_slot_available(np) &&
 241	    likely(netif_running(dev)))
 242		netif_wake_queue(dev);
 243}
 244
 245static void xennet_alloc_rx_buffers(struct net_device *dev)
 
 246{
 247	unsigned short id;
 248	struct netfront_info *np = netdev_priv(dev);
 249	struct sk_buff *skb;
 250	struct page *page;
 251	int i, batch_target, notify;
 252	RING_IDX req_prod = np->rx.req_prod_pvt;
 253	grant_ref_t ref;
 254	unsigned long pfn;
 255	void *vaddr;
 256	struct xen_netif_rx_request *req;
 257
 258	if (unlikely(!netif_carrier_ok(dev)))
 259		return;
 260
 261	/*
 262	 * Allocate skbuffs greedily, even though we batch updates to the
 263	 * receive ring. This creates a less bursty demand on the memory
 264	 * allocator, so should reduce the chance of failed allocation requests
 265	 * both for ourselves and for other kernel subsystems.
 266	 */
 267	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
 268	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
 269		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
 270					 GFP_ATOMIC | __GFP_NOWARN);
 271		if (unlikely(!skb))
 272			goto no_skb;
 273
 274		/* Align the IP header to a 16-byte boundary */
 275		skb_reserve(skb, NET_IP_ALIGN);
 
 276
 277		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 278		if (!page) {
 279			kfree_skb(skb);
 280no_skb:
 281			/* Could not allocate any skbuffs. Try again later. */
 282			mod_timer(&np->rx_refill_timer,
 283				  jiffies + (HZ/10));
 284
 285			/* Any skbuffs queued for refill? Force them out. */
 286			if (i != 0)
 287				goto refill;
 288			break;
 289		}
 290
 291		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 292		__skb_queue_tail(&np->rx_batch, skb);
 293	}
 294
 295	/* Is the batch large enough to be worthwhile? */
 296	if (i < (np->rx_target/2)) {
 297		if (req_prod > np->rx.sring->req_prod)
 298			goto push;
 299		return;
 300	}
 301
 302	/* Adjust our fill target if we risked running out of buffers. */
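	/* (Exponential increase, clamped at rx_max_target; xennet_poll() performs
	 * the matching linear decrease.) */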
 303	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
 304	    ((np->rx_target *= 2) > np->rx_max_target))
 305		np->rx_target = np->rx_max_target;
 306
 307 refill:
 308	for (i = 0; ; i++) {
 309		skb = __skb_dequeue(&np->rx_batch);
 310		if (skb == NULL)
 311			break;
 312
 313		skb->dev = dev;
 314
 315		id = xennet_rxidx(req_prod + i);
 316
 317		BUG_ON(np->rx_skbs[id]);
 318		np->rx_skbs[id] = skb;
 319
 320		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
 321		BUG_ON((signed short)ref < 0);
 322		np->grant_rx_ref[id] = ref;
 323
 324		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 325		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 
 326
 327		req = RING_GET_REQUEST(&np->rx, req_prod + i);
 328		gnttab_grant_foreign_access_ref(ref,
 329						np->xbdev->otherend_id,
 330						pfn_to_mfn(pfn),
 331						0);
 332
 333		req->id = id;
 334		req->gref = ref;
 335	}
 336
 337	wmb();		/* barrier so backend sees requests */
 338
 339	/* Above is a suitable barrier to ensure backend will see requests. */
 340	np->rx.req_prod_pvt = req_prod + i;
 341 push:
 342	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
 343	if (notify)
 344		notify_remote_via_irq(np->rx_irq);
 345}
 346
 347static int xennet_open(struct net_device *dev)
 348{
 349	struct netfront_info *np = netdev_priv(dev);
 350
 351	napi_enable(&np->napi);
 
 352
 353	spin_lock_bh(&np->rx_lock);
 354	if (netif_carrier_ok(dev)) {
 355		xennet_alloc_rx_buffers(dev);
 356		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
 357		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
 358			napi_schedule(&np->napi);
 359	}
 360	spin_unlock_bh(&np->rx_lock);
 361
 362	netif_start_queue(dev);
 363
 364	return 0;
 365}
 366
 367static void xennet_tx_buf_gc(struct net_device *dev)
 368{
 369	RING_IDX cons, prod;
 370	unsigned short id;
 371	struct netfront_info *np = netdev_priv(dev);
 372	struct sk_buff *skb;
 
 373
 374	BUG_ON(!netif_carrier_ok(dev));
 375
 376	do {
 377		prod = np->tx.sring->rsp_prod;
 378		rmb(); /* Ensure we see responses up to 'rp'. */
 379
 380		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
 381			struct xen_netif_tx_response *txrsp;
 382
 383			txrsp = RING_GET_RESPONSE(&np->tx, cons);
 384			if (txrsp->status == XEN_NETIF_RSP_NULL)
 385				continue;
 386
 387			id  = txrsp->id;
 388			skb = np->tx_skbs[id].skb;
 389			if (unlikely(gnttab_query_foreign_access(
 390				np->grant_tx_ref[id]) != 0)) {
 391				pr_alert("%s: warning -- grant still in use by backend domain\n",
 392					 __func__);
 393				BUG();
 394			}
 395			gnttab_end_foreign_access_ref(
 396				np->grant_tx_ref[id], GNTMAP_readonly);
 397			gnttab_release_grant_reference(
 398				&np->gref_tx_head, np->grant_tx_ref[id]);
 399			np->grant_tx_ref[id] = GRANT_INVALID_REF;
 400			np->grant_tx_page[id] = NULL;
 401			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 402			dev_kfree_skb_irq(skb);
 403		}
 404
 405		np->tx.rsp_cons = prod;
 406
 407		/*
 408		 * Set a new event, then check for race with update of tx_cons.
 409		 * Note that it is essential to schedule a callback, no matter
 410		 * how few buffers are pending. Even if there is space in the
 411		 * transmit ring, higher layers may be blocked because too much
 412		 * data is outstanding: in such cases notification from Xen is
 413		 * likely to be the only kick that we'll get.
 414		 */
 415		np->tx.sring->rsp_event =
 416			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
 417		mb();		/* update shared area */
 418	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
 419
 420	xennet_maybe_wake_tx(dev);
 421}
 422
 423static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 424			      struct xen_netif_tx_request *tx)
 425{
 426	struct netfront_info *np = netdev_priv(dev);
 427	char *data = skb->data;
 428	unsigned long mfn;
 429	RING_IDX prod = np->tx.req_prod_pvt;
 430	int frags = skb_shinfo(skb)->nr_frags;
 431	unsigned int offset = offset_in_page(data);
 432	unsigned int len = skb_headlen(skb);
 433	unsigned int id;
 
 434	grant_ref_t ref;
 435	int i;
 436
 437	/* While the header overlaps a page boundary (including being
 438	   larger than a page), split it into page-sized chunks. */
 439	while (len > PAGE_SIZE - offset) {
 440		tx->size = PAGE_SIZE - offset;
 441		tx->flags |= XEN_NETTXF_more_data;
 442		len -= tx->size;
 443		data += tx->size;
 444		offset = 0;
 445
 446		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 447		np->tx_skbs[id].skb = skb_get(skb);
 448		tx = RING_GET_REQUEST(&np->tx, prod++);
 449		tx->id = id;
 450		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 451		BUG_ON((signed short)ref < 0);
 452
 453		mfn = virt_to_mfn(data);
 454		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 455						mfn, GNTMAP_readonly);
 456
 457		np->grant_tx_page[id] = virt_to_page(data);
 458		tx->gref = np->grant_tx_ref[id] = ref;
 459		tx->offset = offset;
 460		tx->size = len;
 461		tx->flags = 0;
 462	}
 463
 464	/* Grant backend access to each skb fragment page. */
 465	for (i = 0; i < frags; i++) {
 466		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 467		struct page *page = skb_frag_page(frag);
 468
 469		len = skb_frag_size(frag);
 470		offset = frag->page_offset;
 471
 472		/* Data must not cross a page boundary. */
 473		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
 474
 475		/* Skip unused frames from start of page */
 476		page += offset >> PAGE_SHIFT;
 477		offset &= ~PAGE_MASK;
 
 478
 479		while (len > 0) {
 480			unsigned long bytes;
 481
 482			BUG_ON(offset >= PAGE_SIZE);
 483
 484			bytes = PAGE_SIZE - offset;
 485			if (bytes > len)
 486				bytes = len;
 487
 488			tx->flags |= XEN_NETTXF_more_data;
 489
 490			id = get_id_from_freelist(&np->tx_skb_freelist,
 491						  np->tx_skbs);
 492			np->tx_skbs[id].skb = skb_get(skb);
 493			tx = RING_GET_REQUEST(&np->tx, prod++);
 494			tx->id = id;
 495			ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 496			BUG_ON((signed short)ref < 0);
 497
 498			mfn = pfn_to_mfn(page_to_pfn(page));
 499			gnttab_grant_foreign_access_ref(ref,
 500							np->xbdev->otherend_id,
 501							mfn, GNTMAP_readonly);
 502
 503			np->grant_tx_page[id] = page;
 504			tx->gref = np->grant_tx_ref[id] = ref;
 505			tx->offset = offset;
 506			tx->size = bytes;
 507			tx->flags = 0;
 508
 509			offset += bytes;
 510			len -= bytes;
 511
 512			/* Next frame */
 513			if (offset == PAGE_SIZE && len) {
 514				BUG_ON(!PageCompound(page));
 515				page++;
 516				offset = 0;
 517			}
 518		}
 519	}
 520
 521	np->tx.req_prod_pvt = prod;
 522}
 523
 524/*
 525 * Count how many ring slots are required to send the frags of this
 526 * skb. Each frag might be a compound page.
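 * For example, with 4 KiB pages a 5000-byte frag starting at in-page offset
 * 2048 needs PFN_UP(2048 + 5000) = 2 slots.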
 527 */
 528static int xennet_count_skb_frag_slots(struct sk_buff *skb)
 529{
 530	int i, frags = skb_shinfo(skb)->nr_frags;
 531	int pages = 0;
 532
 533	for (i = 0; i < frags; i++) {
 534		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 535		unsigned long size = skb_frag_size(frag);
 536		unsigned long offset = frag->page_offset;
 537
 538		/* Skip unused frames from start of page */
 539		offset &= ~PAGE_MASK;
 540
 541		pages += PFN_UP(offset + size);
 542	}
 543
 544	return pages;
 545}
 546
 547static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 548{
 549	unsigned short id;
 550	struct netfront_info *np = netdev_priv(dev);
 551	struct netfront_stats *stats = this_cpu_ptr(np->stats);
 552	struct xen_netif_tx_request *tx;
 553	char *data = skb->data;
 554	RING_IDX i;
 555	grant_ref_t ref;
 556	unsigned long mfn;
 557	int notify;
 558	int slots;
 559	unsigned int offset = offset_in_page(data);
 560	unsigned int len = skb_headlen(skb);
 
 561	unsigned long flags;
 562
 563	/* If skb->len is too big for wire format, drop skb and alert
 564	 * user about misconfiguration.
 565	 */
 566	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 567		net_alert_ratelimited(
 568			"xennet: skb->len = %u, too big for wire format\n",
 569			skb->len);
 570		goto drop;
 571	}
 572
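	/* One or more slots for the (possibly page-crossing) linear header, plus
	 * one or more per fragment page. */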
 573	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
 574		xennet_count_skb_frag_slots(skb);
 575	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
 576		net_alert_ratelimited(
 577			"xennet: skb rides the rocket: %d slots\n", slots);
 578		goto drop;
 579	}
 580
 581	spin_lock_irqsave(&np->tx_lock, flags);
 582
 583	if (unlikely(!netif_carrier_ok(dev) ||
 584		     (slots > 1 && !xennet_can_sg(dev)) ||
 585		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 586		spin_unlock_irqrestore(&np->tx_lock, flags);
 587		goto drop;
 588	}
 589
 590	i = np->tx.req_prod_pvt;
 591
 592	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 593	np->tx_skbs[id].skb = skb;
 594
 595	tx = RING_GET_REQUEST(&np->tx, i);
 596
 597	tx->id   = id;
 598	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 599	BUG_ON((signed short)ref < 0);
 600	mfn = virt_to_mfn(data);
 601	gnttab_grant_foreign_access_ref(
 602		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
 603	np->grant_tx_page[id] = virt_to_page(data);
 604	tx->gref = np->grant_tx_ref[id] = ref;
 605	tx->offset = offset;
 606	tx->size = len;
 607
 608	tx->flags = 0;
 609	if (skb->ip_summed == CHECKSUM_PARTIAL)
 610		/* local packet? */
 611		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
 612	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 613		/* remote but checksummed. */
 614		tx->flags |= XEN_NETTXF_data_validated;
 615
 
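	/* A GSO packet consumes one extra ring slot carrying a
	 * xen_netif_extra_info descriptor instead of packet data. */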
 616	if (skb_shinfo(skb)->gso_size) {
 617		struct xen_netif_extra_info *gso;
 618
 619		gso = (struct xen_netif_extra_info *)
 620			RING_GET_REQUEST(&np->tx, ++i);
 621
 622		tx->flags |= XEN_NETTXF_extra_info;
 623
 624		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 625		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 626			XEN_NETIF_GSO_TYPE_TCPV6 :
 627			XEN_NETIF_GSO_TYPE_TCPV4;
 628		gso->u.gso.pad = 0;
 629		gso->u.gso.features = 0;
 630
 631		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 632		gso->flags = 0;
 633	}
 634
 635	np->tx.req_prod_pvt = i + 1;
 
 636
 637	xennet_make_frags(skb, dev, tx);
 638	tx->size = skb->len;
 639
 640	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
 641	if (notify)
 642		notify_remote_via_irq(np->tx_irq);
 643
 644	u64_stats_update_begin(&stats->syncp);
 645	stats->tx_bytes += skb->len;
 646	stats->tx_packets++;
 647	u64_stats_update_end(&stats->syncp);
 648
 649	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 650	xennet_tx_buf_gc(dev);
 651
 652	if (!netfront_tx_slot_available(np))
 653		netif_stop_queue(dev);
 654
 655	spin_unlock_irqrestore(&np->tx_lock, flags);
 656
 657	return NETDEV_TX_OK;
 658
 659 drop:
 660	dev->stats.tx_dropped++;
 661	dev_kfree_skb_any(skb);
 662	return NETDEV_TX_OK;
 663}
 664
 665static int xennet_close(struct net_device *dev)
 666{
 667	struct netfront_info *np = netdev_priv(dev);
 668	netif_stop_queue(np->netdev);
 669	napi_disable(&np->napi);
 670	return 0;
 671}
 672
 673static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
 674				grant_ref_t ref)
 675{
 676	int new = xennet_rxidx(np->rx.req_prod_pvt);
 677
 678	BUG_ON(np->rx_skbs[new]);
 679	np->rx_skbs[new] = skb;
 680	np->grant_rx_ref[new] = ref;
 681	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
 682	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
 683	np->rx.req_prod_pvt++;
 684}
 685
 686static int xennet_get_extras(struct netfront_info *np,
 687			     struct xen_netif_extra_info *extras,
 688			     RING_IDX rp)
 689
 690{
 691	struct xen_netif_extra_info *extra;
 692	struct device *dev = &np->netdev->dev;
 693	RING_IDX cons = np->rx.rsp_cons;
 694	int err = 0;
 695
 696	do {
 697		struct sk_buff *skb;
 698		grant_ref_t ref;
 699
 700		if (unlikely(cons + 1 == rp)) {
 701			if (net_ratelimit())
 702				dev_warn(dev, "Missing extra info\n");
 703			err = -EBADR;
 704			break;
 705		}
 706
 707		extra = (struct xen_netif_extra_info *)
 708			RING_GET_RESPONSE(&np->rx, ++cons);
 709
 710		if (unlikely(!extra->type ||
 711			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 712			if (net_ratelimit())
 713				dev_warn(dev, "Invalid extra type: %d\n",
 714					extra->type);
 715			err = -EINVAL;
 716		} else {
 717			memcpy(&extras[extra->type - 1], extra,
 718			       sizeof(*extra));
 719		}
 720
 721		skb = xennet_get_rx_skb(np, cons);
 722		ref = xennet_get_rx_ref(np, cons);
 723		xennet_move_rx_slot(np, skb, ref);
 724	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 725
 726	np->rx.rsp_cons = cons;
 727	return err;
 728}
 729
 730static int xennet_get_responses(struct netfront_info *np,
 731				struct netfront_rx_info *rinfo, RING_IDX rp,
 732				struct sk_buff_head *list)
 733{
 734	struct xen_netif_rx_response *rx = &rinfo->rx;
 735	struct xen_netif_extra_info *extras = rinfo->extras;
 736	struct device *dev = &np->netdev->dev;
 737	RING_IDX cons = np->rx.rsp_cons;
 738	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
 739	grant_ref_t ref = xennet_get_rx_ref(np, cons);
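	/* Allow one slot beyond MAX_SKB_FRAGS when the first chunk is small
	 * enough (<= RX_COPY_THRESHOLD) to be pulled entirely into the linear
	 * area later, which frees up its frag slot. */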
 740	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
 741	int slots = 1;
 742	int err = 0;
 743	unsigned long ret;
 744
 745	if (rx->flags & XEN_NETRXF_extra_info) {
 746		err = xennet_get_extras(np, extras, rp);
 747		cons = np->rx.rsp_cons;
 748	}
 749
 750	for (;;) {
 751		if (unlikely(rx->status < 0 ||
 752			     rx->offset + rx->status > PAGE_SIZE)) {
 753			if (net_ratelimit())
 754				dev_warn(dev, "rx->offset: %x, size: %u\n",
 755					 rx->offset, rx->status);
 756			xennet_move_rx_slot(np, skb, ref);
 757			err = -EINVAL;
 758			goto next;
 759		}
 760
 761		/*
 762		 * This definitely indicates a bug, either in this driver or in
 763		 * the backend driver. In future this should flag the bad
 764		 * situation to the system controller to reboot the backend.
 765		 */
 766		if (ref == GRANT_INVALID_REF) {
 767			if (net_ratelimit())
 768				dev_warn(dev, "Bad rx response id %d.\n",
 769					 rx->id);
 770			err = -EINVAL;
 771			goto next;
 772		}
 773
 774		ret = gnttab_end_foreign_access_ref(ref, 0);
 775		BUG_ON(!ret);
 776
 777		gnttab_release_grant_reference(&np->gref_rx_head, ref);
 778
 779		__skb_queue_tail(list, skb);
 780
 781next:
 782		if (!(rx->flags & XEN_NETRXF_more_data))
 783			break;
 784
 785		if (cons + slots == rp) {
 786			if (net_ratelimit())
 787				dev_warn(dev, "Need more slots\n");
 788			err = -ENOENT;
 789			break;
 790		}
 791
 792		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
 793		skb = xennet_get_rx_skb(np, cons + slots);
 794		ref = xennet_get_rx_ref(np, cons + slots);
 795		slots++;
 796	}
 797
 798	if (unlikely(slots > max)) {
 799		if (net_ratelimit())
 800			dev_warn(dev, "Too many slots\n");
 801		err = -E2BIG;
 802	}
 803
 804	if (unlikely(err))
 805		np->rx.rsp_cons = cons + slots;
 806
 807	return err;
 808}
 809
 810static int xennet_set_skb_gso(struct sk_buff *skb,
 811			      struct xen_netif_extra_info *gso)
 812{
 813	if (!gso->u.gso.size) {
 814		if (net_ratelimit())
 815			pr_warn("GSO size must not be zero\n");
 816		return -EINVAL;
 817	}
 818
 819	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
 820	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
 821		if (net_ratelimit())
 822			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
 823		return -EINVAL;
 824	}
 825
 826	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 827	skb_shinfo(skb)->gso_type =
 828		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
 829		SKB_GSO_TCPV4 :
 830		SKB_GSO_TCPV6;
 831
 832	/* Header must be checked, and gso_segs computed. */
 833	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
 834	skb_shinfo(skb)->gso_segs = 0;
 835
 836	return 0;
 837}
 838
 839static RING_IDX xennet_fill_frags(struct netfront_info *np,
 840				  struct sk_buff *skb,
 841				  struct sk_buff_head *list)
 842{
 843	struct skb_shared_info *shinfo = skb_shinfo(skb);
 844	RING_IDX cons = np->rx.rsp_cons;
 845	struct sk_buff *nskb;
 846
 847	while ((nskb = __skb_dequeue(list))) {
 848		struct xen_netif_rx_response *rx =
 849			RING_GET_RESPONSE(&np->rx, ++cons);
 850		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 851
 852		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
 853			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 854
 855			BUG_ON(pull_to <= skb_headlen(skb));
 856			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 857		}
 858		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
 859
 860		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
 
 861				rx->offset, rx->status, PAGE_SIZE);
 862
 863		skb_shinfo(nskb)->nr_frags = 0;
 864		kfree_skb(nskb);
 865	}
 866
 867	return cons;
 868}
 869
 870static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 871{
 872	bool recalculate_partial_csum = false;
 873
 874	/*
 875	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 876	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 877	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 878	 * recalculate the partial checksum.
 879	 */
 880	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 881		struct netfront_info *np = netdev_priv(dev);
 882		np->rx_gso_checksum_fixup++;
 883		skb->ip_summed = CHECKSUM_PARTIAL;
 884		recalculate_partial_csum = true;
 885	}
 886
 887	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 888	if (skb->ip_summed != CHECKSUM_PARTIAL)
 889		return 0;
 890
 891	return skb_checksum_setup(skb, recalculate_partial_csum);
 892}
 893
 894static int handle_incoming_queue(struct net_device *dev,
 895				 struct sk_buff_head *rxq)
 896{
 897	struct netfront_info *np = netdev_priv(dev);
 898	struct netfront_stats *stats = this_cpu_ptr(np->stats);
 899	int packets_dropped = 0;
 900	struct sk_buff *skb;
 901
 902	while ((skb = __skb_dequeue(rxq)) != NULL) {
 903		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 904
 905		if (pull_to > skb_headlen(skb))
 906			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 907
 908		/* Ethernet work: Delayed to here as it peeks the header. */
 909		skb->protocol = eth_type_trans(skb, dev);
 910		skb_reset_network_header(skb);
 911
 912		if (checksum_setup(dev, skb)) {
 913			kfree_skb(skb);
 914			packets_dropped++;
 915			dev->stats.rx_errors++;
 916			continue;
 917		}
 918
 919		u64_stats_update_begin(&stats->syncp);
 920		stats->rx_packets++;
 921		stats->rx_bytes += skb->len;
 922		u64_stats_update_end(&stats->syncp);
 923
 924		/* Pass it up. */
 925		napi_gro_receive(&np->napi, skb);
 926	}
 927
 928	return packets_dropped;
 929}
 930
 931static int xennet_poll(struct napi_struct *napi, int budget)
 932{
 933	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
 934	struct net_device *dev = np->netdev;
 935	struct sk_buff *skb;
 936	struct netfront_rx_info rinfo;
 937	struct xen_netif_rx_response *rx = &rinfo.rx;
 938	struct xen_netif_extra_info *extras = rinfo.extras;
 939	RING_IDX i, rp;
 940	int work_done;
 941	struct sk_buff_head rxq;
 942	struct sk_buff_head errq;
 943	struct sk_buff_head tmpq;
 944	unsigned long flags;
 945	int err;
 946
 947	spin_lock(&np->rx_lock);
 948
 949	skb_queue_head_init(&rxq);
 950	skb_queue_head_init(&errq);
 951	skb_queue_head_init(&tmpq);
 952
 953	rp = np->rx.sring->rsp_prod;
 954	rmb(); /* Ensure we see queued responses up to 'rp'. */
 955
 956	i = np->rx.rsp_cons;
 957	work_done = 0;
 958	while ((i != rp) && (work_done < budget)) {
 959		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
 960		memset(extras, 0, sizeof(rinfo.extras));
 961
 962		err = xennet_get_responses(np, &rinfo, rp, &tmpq);
 963
 964		if (unlikely(err)) {
 965err:
 966			while ((skb = __skb_dequeue(&tmpq)))
 967				__skb_queue_tail(&errq, skb);
 968			dev->stats.rx_errors++;
 969			i = np->rx.rsp_cons;
 970			continue;
 971		}
 972
 973		skb = __skb_dequeue(&tmpq);
 974
 975		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
 976			struct xen_netif_extra_info *gso;
 977			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 978
 979			if (unlikely(xennet_set_skb_gso(skb, gso))) {
 980				__skb_queue_head(&tmpq, skb);
 981				np->rx.rsp_cons += skb_queue_len(&tmpq);
 982				goto err;
 983			}
 984		}
 985
 986		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
 987		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
 988			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
 989
 990		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
 991		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
 992		skb->data_len = rx->status;
 993		skb->len += rx->status;
 994
 995		i = xennet_fill_frags(np, skb, &tmpq);
 
 996
 997		if (rx->flags & XEN_NETRXF_csum_blank)
 998			skb->ip_summed = CHECKSUM_PARTIAL;
 999		else if (rx->flags & XEN_NETRXF_data_validated)
1000			skb->ip_summed = CHECKSUM_UNNECESSARY;
1001
1002		__skb_queue_tail(&rxq, skb);
1003
1004		np->rx.rsp_cons = ++i;
1005		work_done++;
1006	}
1007
1008	__skb_queue_purge(&errq);
1009
1010	work_done -= handle_incoming_queue(dev, &rxq);
1011
1012	/* If we get a callback with very few responses, reduce fill target. */
1013	/* NB. Note exponential increase, linear decrease. */
1014	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1015	     ((3*np->rx_target) / 4)) &&
1016	    (--np->rx_target < np->rx_min_target))
1017		np->rx_target = np->rx_min_target;
1018
1019	xennet_alloc_rx_buffers(dev);
1020
1021	if (work_done < budget) {
1022		int more_to_do = 0;
1023
1024		napi_gro_flush(napi, false);
1025
1026		local_irq_save(flags);
1027
1028		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1029		if (!more_to_do)
1030			__napi_complete(napi);
1031
1032		local_irq_restore(flags);
1033	}
1034
1035	spin_unlock(&np->rx_lock);
1036
1037	return work_done;
1038}
1039
1040static int xennet_change_mtu(struct net_device *dev, int mtu)
1041{
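	/* With SG the wire format allows up to XEN_NETIF_MAX_TX_SIZE bytes per
	 * packet, minus worst-case TCP/IP headers; without SG, limit the MTU to
	 * a standard Ethernet payload. */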
1042	int max = xennet_can_sg(dev) ?
1043		XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
1044
1045	if (mtu > max)
1046		return -EINVAL;
1047	dev->mtu = mtu;
1048	return 0;
1049}
1050
1051static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1052						    struct rtnl_link_stats64 *tot)
1053{
1054	struct netfront_info *np = netdev_priv(dev);
1055	int cpu;
1056
1057	for_each_possible_cpu(cpu) {
1058		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
 
1059		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1060		unsigned int start;
1061
1062		do {
1063			start = u64_stats_fetch_begin_irq(&stats->syncp);
1064
1065			rx_packets = stats->rx_packets;
1066			tx_packets = stats->tx_packets;
1067			rx_bytes = stats->rx_bytes;
1068			tx_bytes = stats->tx_bytes;
1069		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1070
1071		tot->rx_packets += rx_packets;
1072		tot->tx_packets += tx_packets;
1073		tot->rx_bytes   += rx_bytes;
1074		tot->tx_bytes   += tx_bytes;
1075	}
1076
1077	tot->rx_errors  = dev->stats.rx_errors;
1078	tot->tx_dropped = dev->stats.tx_dropped;
1079
1080	return tot;
1081}
1082
1083static void xennet_release_tx_bufs(struct netfront_info *np)
1084{
1085	struct sk_buff *skb;
1086	int i;
1087
1088	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1089		/* Skip over entries which are actually freelist references */
1090		if (skb_entry_is_link(&np->tx_skbs[i]))
1091			continue;
1092
1093		skb = np->tx_skbs[i].skb;
1094		get_page(np->grant_tx_page[i]);
1095		gnttab_end_foreign_access(np->grant_tx_ref[i],
1096					  GNTMAP_readonly,
1097					  (unsigned long)page_address(np->grant_tx_page[i]));
1098		np->grant_tx_page[i] = NULL;
1099		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1100		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
1101		dev_kfree_skb_irq(skb);
1102	}
1103}
1104
1105static void xennet_release_rx_bufs(struct netfront_info *np)
1106{
1107	int id, ref;
1108
1109	spin_lock_bh(&np->rx_lock);
1110
1111	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1112		struct sk_buff *skb;
1113		struct page *page;
1114
1115		skb = np->rx_skbs[id];
1116		if (!skb)
1117			continue;
1118
1119		ref = np->grant_rx_ref[id];
1120		if (ref == GRANT_INVALID_REF)
1121			continue;
1122
1123		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1124
1125		/* gnttab_end_foreign_access() needs a page ref until
1126		 * foreign access is ended (which may be deferred).
1127		 */
1128		get_page(page);
1129		gnttab_end_foreign_access(ref, 0,
1130					  (unsigned long)page_address(page));
1131		np->grant_rx_ref[id] = GRANT_INVALID_REF;
1132
1133		kfree_skb(skb);
1134	}
1135
1136	spin_unlock_bh(&np->rx_lock);
1137}
1138
1139static void xennet_uninit(struct net_device *dev)
1140{
1141	struct netfront_info *np = netdev_priv(dev);
1142	xennet_release_tx_bufs(np);
1143	xennet_release_rx_bufs(np);
1144	gnttab_free_grant_references(np->gref_tx_head);
1145	gnttab_free_grant_references(np->gref_rx_head);
1146}
1147
1148static netdev_features_t xennet_fix_features(struct net_device *dev,
1149	netdev_features_t features)
1150{
1151	struct netfront_info *np = netdev_priv(dev);
1152	int val;
1153
1154	if (features & NETIF_F_SG) {
1155		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1156				 "%d", &val) < 0)
1157			val = 0;
1158
1159		if (!val)
1160			features &= ~NETIF_F_SG;
1161	}
1162
1163	if (features & NETIF_F_IPV6_CSUM) {
1164		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1165				 "feature-ipv6-csum-offload", "%d", &val) < 0)
1166			val = 0;
1167
1168		if (!val)
1169			features &= ~NETIF_F_IPV6_CSUM;
1170	}
1171
1172	if (features & NETIF_F_TSO) {
1173		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1174				 "feature-gso-tcpv4", "%d", &val) < 0)
1175			val = 0;
1176
1177		if (!val)
1178			features &= ~NETIF_F_TSO;
1179	}
1180
1181	if (features & NETIF_F_TSO6) {
1182		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1183				 "feature-gso-tcpv6", "%d", &val) < 0)
1184			val = 0;
1185
1186		if (!val)
1187			features &= ~NETIF_F_TSO6;
1188	}
1189
1190	return features;
1191}
1192
1193static int xennet_set_features(struct net_device *dev,
1194	netdev_features_t features)
1195{
1196	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1197		netdev_info(dev, "Reducing MTU because no SG offload");
1198		dev->mtu = ETH_DATA_LEN;
1199	}
1200
1201	return 0;
1202}
1203
1204static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1205{
1206	struct netfront_info *np = dev_id;
1207	struct net_device *dev = np->netdev;
1208	unsigned long flags;
1209
1210	spin_lock_irqsave(&np->tx_lock, flags);
1211	xennet_tx_buf_gc(dev);
1212	spin_unlock_irqrestore(&np->tx_lock, flags);
1213
1214	return IRQ_HANDLED;
1215}
1216
1217static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1218{
1219	struct netfront_info *np = dev_id;
1220	struct net_device *dev = np->netdev;
1221
1222	if (likely(netif_carrier_ok(dev) &&
1223		   RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
1224			napi_schedule(&np->napi);
1225
1226	return IRQ_HANDLED;
1227}
1228
1229static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1230{
1231	xennet_tx_interrupt(irq, dev_id);
1232	xennet_rx_interrupt(irq, dev_id);
1233	return IRQ_HANDLED;
1234}
1235
1236#ifdef CONFIG_NET_POLL_CONTROLLER
1237static void xennet_poll_controller(struct net_device *dev)
1238{
1239	xennet_interrupt(0, dev);
1240}
1241#endif
1242
1243static const struct net_device_ops xennet_netdev_ops = {
1244	.ndo_open            = xennet_open,
1245	.ndo_uninit          = xennet_uninit,
1246	.ndo_stop            = xennet_close,
1247	.ndo_start_xmit      = xennet_start_xmit,
1248	.ndo_change_mtu	     = xennet_change_mtu,
1249	.ndo_get_stats64     = xennet_get_stats64,
1250	.ndo_set_mac_address = eth_mac_addr,
1251	.ndo_validate_addr   = eth_validate_addr,
1252	.ndo_fix_features    = xennet_fix_features,
1253	.ndo_set_features    = xennet_set_features,
 
1254#ifdef CONFIG_NET_POLL_CONTROLLER
1255	.ndo_poll_controller = xennet_poll_controller,
1256#endif
1257};
1258
1259static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1260{
1261	int i, err;
1262	struct net_device *netdev;
1263	struct netfront_info *np;
1264
1265	netdev = alloc_etherdev(sizeof(struct netfront_info));
1266	if (!netdev)
1267		return ERR_PTR(-ENOMEM);
1268
1269	np                   = netdev_priv(netdev);
1270	np->xbdev            = dev;
1271
1272	spin_lock_init(&np->tx_lock);
1273	spin_lock_init(&np->rx_lock);
1274
1275	skb_queue_head_init(&np->rx_batch);
1276	np->rx_target     = RX_DFL_MIN_TARGET;
1277	np->rx_min_target = RX_DFL_MIN_TARGET;
1278	np->rx_max_target = RX_MAX_TARGET;
1279
1280	init_timer(&np->rx_refill_timer);
1281	np->rx_refill_timer.data = (unsigned long)netdev;
1282	np->rx_refill_timer.function = rx_refill_timeout;
1283
1284	err = -ENOMEM;
1285	np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1286	if (np->stats == NULL)
1287		goto exit;
1288
1289	/* Initialise tx_skbs as a free chain containing every entry. */
1290	np->tx_skb_freelist = 0;
1291	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1292		skb_entry_set_link(&np->tx_skbs[i], i+1);
1293		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1294		np->grant_tx_page[i] = NULL;
1295	}
1296
1297	/* Clear out rx_skbs */
1298	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1299		np->rx_skbs[i] = NULL;
1300		np->grant_rx_ref[i] = GRANT_INVALID_REF;
1301	}
1302
1303	/* A grant for every tx ring slot */
1304	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1305					  &np->gref_tx_head) < 0) {
1306		pr_alert("can't alloc tx grant refs\n");
1307		err = -ENOMEM;
1308		goto exit_free_stats;
1309	}
1310	/* A grant for every rx ring slot */
1311	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1312					  &np->gref_rx_head) < 0) {
1313		pr_alert("can't alloc rx grant refs\n");
1314		err = -ENOMEM;
1315		goto exit_free_tx;
1316	}
1317
1318	netdev->netdev_ops	= &xennet_netdev_ops;
1319
1320	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1321	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1322				  NETIF_F_GSO_ROBUST;
1323	netdev->hw_features	= NETIF_F_SG |
1324				  NETIF_F_IPV6_CSUM |
1325				  NETIF_F_TSO | NETIF_F_TSO6;
1326
1327	/*
1328	 * Assume that all hw features are available for now. This set
1329	 * will be adjusted by the call to netdev_update_features() in
1330	 * xennet_connect() which is the earliest point where we can
1331	 * negotiate with the backend regarding supported features.
1332	 */
1333	netdev->features |= netdev->hw_features;
1334
1335	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
1336	SET_NETDEV_DEV(netdev, &dev->dev);
1337
1338	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
1339
1340	np->netdev = netdev;
1341
1342	netif_carrier_off(netdev);
1343
1344	return netdev;
1345
1346 exit_free_tx:
1347	gnttab_free_grant_references(np->gref_tx_head);
1348 exit_free_stats:
1349	free_percpu(np->stats);
1350 exit:
1351	free_netdev(netdev);
1352	return ERR_PTR(err);
1353}
1354
1355/**
1356 * Entry point to this code when a new device is created.  Allocate the basic
1357 * structures and the ring buffers for communication with the backend, and
1358 * inform the backend of the appropriate details for those.
1359 */
1360static int netfront_probe(struct xenbus_device *dev,
1361			  const struct xenbus_device_id *id)
1362{
1363	int err;
1364	struct net_device *netdev;
1365	struct netfront_info *info;
1366
1367	netdev = xennet_create_dev(dev);
1368	if (IS_ERR(netdev)) {
1369		err = PTR_ERR(netdev);
1370		xenbus_dev_fatal(dev, err, "creating netdev");
1371		return err;
1372	}
1373
1374	info = netdev_priv(netdev);
1375	dev_set_drvdata(&dev->dev, info);
1376
1377	err = register_netdev(info->netdev);
1378	if (err) {
1379		pr_warn("%s: register_netdev err=%d\n", __func__, err);
1380		goto fail;
1381	}
1382
1383	err = xennet_sysfs_addif(info->netdev);
1384	if (err) {
1385		unregister_netdev(info->netdev);
1386		pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
1387		goto fail;
1388	}
1389
1390	return 0;
1391
1392 fail:
1393	free_netdev(netdev);
1394	dev_set_drvdata(&dev->dev, NULL);
1395	return err;
1396}
1397
1398static void xennet_end_access(int ref, void *page)
1399{
1400	/* This frees the page as a side-effect */
1401	if (ref != GRANT_INVALID_REF)
1402		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1403}
1404
1405static void xennet_disconnect_backend(struct netfront_info *info)
1406{
1407	/* Stop old i/f to prevent errors whilst we rebuild the state. */
1408	spin_lock_bh(&info->rx_lock);
1409	spin_lock_irq(&info->tx_lock);
1410	netif_carrier_off(info->netdev);
1411	spin_unlock_irq(&info->tx_lock);
1412	spin_unlock_bh(&info->rx_lock);
1413
1414	if (info->tx_irq && (info->tx_irq == info->rx_irq))
1415		unbind_from_irqhandler(info->tx_irq, info);
1416	if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
1417		unbind_from_irqhandler(info->tx_irq, info);
1418		unbind_from_irqhandler(info->rx_irq, info);
1419	}
1420	info->tx_evtchn = info->rx_evtchn = 0;
1421	info->tx_irq = info->rx_irq = 0;
1422
1423	/* End access and free the pages */
1424	xennet_end_access(info->tx_ring_ref, info->tx.sring);
1425	xennet_end_access(info->rx_ring_ref, info->rx.sring);
1426
1427	info->tx_ring_ref = GRANT_INVALID_REF;
1428	info->rx_ring_ref = GRANT_INVALID_REF;
1429	info->tx.sring = NULL;
1430	info->rx.sring = NULL;
1431}
1432
1433/**
1434 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1435 * driver restart.  We tear down our netif structure and recreate it, but
1436 * leave the device-layer structures intact so that this is transparent to the
1437 * rest of the kernel.
1438 */
1439static int netfront_resume(struct xenbus_device *dev)
1440{
1441	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1442
1443	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1444
1445	xennet_disconnect_backend(info);
1446	return 0;
1447}
1448
1449static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1450{
1451	char *s, *e, *macstr;
1452	int i;
1453
1454	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1455	if (IS_ERR(macstr))
1456		return PTR_ERR(macstr);
1457
1458	for (i = 0; i < ETH_ALEN; i++) {
1459		mac[i] = simple_strtoul(s, &e, 16);
1460		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1461			kfree(macstr);
1462			return -ENOENT;
1463		}
1464		s = e+1;
1465	}
1466
1467	kfree(macstr);
1468	return 0;
1469}
1470
1471static int setup_netfront_single(struct netfront_info *info)
1472{
1473	int err;
1474
1475	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1476	if (err < 0)
1477		goto fail;
1478
1479	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1480					xennet_interrupt,
1481					0, info->netdev->name, info);
1482	if (err < 0)
1483		goto bind_fail;
1484	info->rx_evtchn = info->tx_evtchn;
1485	info->rx_irq = info->tx_irq = err;
1486
1487	return 0;
1488
1489bind_fail:
1490	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1491	info->tx_evtchn = 0;
1492fail:
1493	return err;
1494}
1495
1496static int setup_netfront_split(struct netfront_info *info)
1497{
1498	int err;
1499
1500	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1501	if (err < 0)
1502		goto fail;
1503	err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
1504	if (err < 0)
1505		goto alloc_rx_evtchn_fail;
1506
1507	snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
1508		 "%s-tx", info->netdev->name);
1509	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1510					xennet_tx_interrupt,
1511					0, info->tx_irq_name, info);
1512	if (err < 0)
1513		goto bind_tx_fail;
1514	info->tx_irq = err;
1515
1516	snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
1517		 "%s-rx", info->netdev->name);
1518	err = bind_evtchn_to_irqhandler(info->rx_evtchn,
1519					xennet_rx_interrupt,
1520					0, info->rx_irq_name, info);
1521	if (err < 0)
1522		goto bind_rx_fail;
1523	info->rx_irq = err;
1524
1525	return 0;
1526
1527bind_rx_fail:
1528	unbind_from_irqhandler(info->tx_irq, info);
1529	info->tx_irq = 0;
1530bind_tx_fail:
1531	xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
1532	info->rx_evtchn = 0;
1533alloc_rx_evtchn_fail:
1534	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1535	info->tx_evtchn = 0;
1536fail:
1537	return err;
1538}
1539
1540static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 
1541{
1542	struct xen_netif_tx_sring *txs;
1543	struct xen_netif_rx_sring *rxs;
 
1544	int err;
1545	struct net_device *netdev = info->netdev;
1546	unsigned int feature_split_evtchn;
1547
1548	info->tx_ring_ref = GRANT_INVALID_REF;
1549	info->rx_ring_ref = GRANT_INVALID_REF;
1550	info->rx.sring = NULL;
1551	info->tx.sring = NULL;
1552	netdev->irq = 0;
1553
1554	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1555			   "feature-split-event-channels", "%u",
1556			   &feature_split_evtchn);
1557	if (err < 0)
1558		feature_split_evtchn = 0;
1559
1560	err = xen_net_read_mac(dev, netdev->dev_addr);
1561	if (err) {
1562		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1563		goto fail;
1564	}
1565
1566	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1567	if (!txs) {
1568		err = -ENOMEM;
1569		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1570		goto fail;
1571	}
1572	SHARED_RING_INIT(txs);
1573	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1574
1575	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1576	if (err < 0)
1577		goto grant_tx_ring_fail;
 
1578
1579	info->tx_ring_ref = err;
1580	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1581	if (!rxs) {
1582		err = -ENOMEM;
1583		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1584		goto alloc_rx_ring_fail;
1585	}
1586	SHARED_RING_INIT(rxs);
1587	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1588
1589	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1590	if (err < 0)
1591		goto grant_rx_ring_fail;
1592	info->rx_ring_ref = err;
1593
1594	if (feature_split_evtchn)
1595		err = setup_netfront_split(info);
1596	/* Set up a single event channel if
1597	 *  a) feature-split-event-channels == 0
1598	 *  b) feature-split-event-channels == 1 but setting them up failed
1599	 */
1600	if (!feature_split_evtchn || (feature_split_evtchn && err))
1601		err = setup_netfront_single(info);
1602
1603	if (err)
1604		goto alloc_evtchn_fail;
1605
1606	return 0;
1607
1608	/* If we fail to setup netfront, it is safe to just revoke access to
1609	 * granted pages because backend is not accessing it at this point.
1610	 */
1611alloc_evtchn_fail:
1612	gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
1613grant_rx_ring_fail:
1614	free_page((unsigned long)rxs);
1615alloc_rx_ring_fail:
1616	gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
1617grant_tx_ring_fail:
1618	free_page((unsigned long)txs);
1619fail:
1620	return err;
1621}
1622
1623/* Common code used when first setting up, and when resuming. */
1624static int talk_to_netback(struct xenbus_device *dev,
1625			   struct netfront_info *info)
1626{
1627	const char *message;
1628	struct xenbus_transaction xbt;
1629	int err;
1630
1631	/* Create shared ring, alloc event channel. */
1632	err = setup_netfront(dev, info);
1633	if (err)
1634		goto out;
1635
1636again:
1637	err = xenbus_transaction_start(&xbt);
1638	if (err) {
1639		xenbus_dev_fatal(dev, err, "starting transaction");
1640		goto destroy_ring;
1641	}
1642
1643	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
1644			    info->tx_ring_ref);
1645	if (err) {
1646		message = "writing tx ring-ref";
1647		goto abort_transaction;
1648	}
1649	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1650			    info->rx_ring_ref);
1651	if (err) {
1652		message = "writing rx ring-ref";
1653		goto abort_transaction;
1654	}
1655
1656	if (info->tx_evtchn == info->rx_evtchn) {
1657		err = xenbus_printf(xbt, dev->nodename,
1658				    "event-channel", "%u", info->tx_evtchn);
1659		if (err) {
1660			message = "writing event-channel";
1661			goto abort_transaction;
1662		}
1663	} else {
1664		err = xenbus_printf(xbt, dev->nodename,
1665				    "event-channel-tx", "%u", info->tx_evtchn);
1666		if (err) {
1667			message = "writing event-channel-tx";
1668			goto abort_transaction;
1669		}
1670		err = xenbus_printf(xbt, dev->nodename,
1671				    "event-channel-rx", "%u", info->rx_evtchn);
1672		if (err) {
1673			message = "writing event-channel-rx";
1674			goto abort_transaction;
1675		}
1676	}
1677
 
1678	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1679			    1);
1680	if (err) {
1681		message = "writing request-rx-copy";
1682		goto abort_transaction;
1683	}
1684
1685	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1686	if (err) {
1687		message = "writing feature-rx-notify";
1688		goto abort_transaction;
1689	}
1690
1691	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1692	if (err) {
1693		message = "writing feature-sg";
1694		goto abort_transaction;
1695	}
1696
1697	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1698	if (err) {
1699		message = "writing feature-gso-tcpv4";
1700		goto abort_transaction;
1701	}
1702
1703	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1704	if (err) {
1705		message = "writing feature-gso-tcpv6";
1706		goto abort_transaction;
1707	}
1708
1709	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1710			   "1");
1711	if (err) {
1712		message = "writing feature-ipv6-csum-offload";
1713		goto abort_transaction;
1714	}
1715
1716	err = xenbus_transaction_end(xbt, 0);
1717	if (err) {
1718		if (err == -EAGAIN)
1719			goto again;
1720		xenbus_dev_fatal(dev, err, "completing transaction");
1721		goto destroy_ring;
1722	}
1723
1724	return 0;
1725
1726 abort_transaction:
1727	xenbus_transaction_end(xbt, 1);
1728	xenbus_dev_fatal(dev, err, "%s", message);
1729 destroy_ring:
1730	xennet_disconnect_backend(info);
1731 out:
1732	return err;
1733}
1734
1735static int xennet_connect(struct net_device *dev)
1736{
1737	struct netfront_info *np = netdev_priv(dev);
1738	int i, requeue_idx, err;
1739	struct sk_buff *skb;
1740	grant_ref_t ref;
1741	struct xen_netif_rx_request *req;
1742	unsigned int feature_rx_copy;
1743
1744	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1745			   "feature-rx-copy", "%u", &feature_rx_copy);
1746	if (err != 1)
1747		feature_rx_copy = 0;
1748
1749	if (!feature_rx_copy) {
1750		dev_info(&dev->dev,
1751			 "backend does not support copying receive path\n");
1752		return -ENODEV;
1753	}
1754
1755	err = talk_to_netback(np->xbdev, np);
1756	if (err)
1757		return err;
1758
1759	rtnl_lock();
1760	netdev_update_features(dev);
1761	rtnl_unlock();
1762
1763	spin_lock_bh(&np->rx_lock);
1764	spin_lock_irq(&np->tx_lock);
1765
1766	/* Step 1: Discard all pending TX packet fragments. */
1767	xennet_release_tx_bufs(np);
1768
1769	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1770	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1771		skb_frag_t *frag;
1772		const struct page *page;
1773		if (!np->rx_skbs[i])
1774			continue;
1775
1776		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1777		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1778		req = RING_GET_REQUEST(&np->rx, requeue_idx);
1779
1780		frag = &skb_shinfo(skb)->frags[0];
1781		page = skb_frag_page(frag);
1782		gnttab_grant_foreign_access_ref(
1783			ref, np->xbdev->otherend_id,
1784			pfn_to_mfn(page_to_pfn(page)),
1785			0);
1786		req->gref = ref;
1787		req->id   = requeue_idx;
1788
1789		requeue_idx++;
1790	}
1791
1792	np->rx.req_prod_pvt = requeue_idx;
1793
1794	/*
1795	 * Step 3: All public and private state should now be sane.  Get
1796	 * ready to start sending and receiving packets and give the driver
1797	 * domain a kick because we've probably just requeued some
1798	 * packets.
1799	 */
1800	netif_carrier_on(np->netdev);
1801	notify_remote_via_irq(np->tx_irq);
1802	if (np->tx_irq != np->rx_irq)
1803		notify_remote_via_irq(np->rx_irq);
1804	xennet_tx_buf_gc(dev);
1805	xennet_alloc_rx_buffers(dev);
1806
1807	spin_unlock_irq(&np->tx_lock);
1808	spin_unlock_bh(&np->rx_lock);
1809
1810	return 0;
1811}
1812
1813/**
1814 * Callback received when the backend's state changes.
1815 */
1816static void netback_changed(struct xenbus_device *dev,
1817			    enum xenbus_state backend_state)
1818{
1819	struct netfront_info *np = dev_get_drvdata(&dev->dev);
1820	struct net_device *netdev = np->netdev;
1821
1822	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
1823
1824	switch (backend_state) {
1825	case XenbusStateInitialising:
1826	case XenbusStateInitialised:
1827	case XenbusStateReconfiguring:
1828	case XenbusStateReconfigured:
1829	case XenbusStateUnknown:
1830		break;
1831
1832	case XenbusStateInitWait:
1833		if (dev->state != XenbusStateInitialising)
1834			break;
1835		if (xennet_connect(netdev) != 0)
1836			break;
1837		xenbus_switch_state(dev, XenbusStateConnected);
1838		break;
1839
1840	case XenbusStateConnected:
1841		netdev_notify_peers(netdev);
1842		break;
1843
1844	case XenbusStateClosed:
1845		if (dev->state == XenbusStateClosed)
1846			break;
1847		/* Missed the backend's CLOSING state -- fallthrough */
1848	case XenbusStateClosing:
1849		xenbus_frontend_closed(dev);
1850		break;
1851	}
1852}
1853
1854static const struct xennet_stat {
1855	char name[ETH_GSTRING_LEN];
1856	u16 offset;
1857} xennet_stats[] = {
1858	{
1859		"rx_gso_checksum_fixup",
1860		offsetof(struct netfront_info, rx_gso_checksum_fixup)
1861	},
1862};
1863
1864static int xennet_get_sset_count(struct net_device *dev, int string_set)
1865{
1866	switch (string_set) {
1867	case ETH_SS_STATS:
1868		return ARRAY_SIZE(xennet_stats);
1869	default:
1870		return -EINVAL;
1871	}
1872}
1873
1874static void xennet_get_ethtool_stats(struct net_device *dev,
1875				     struct ethtool_stats *stats, u64 * data)
1876{
1877	void *np = netdev_priv(dev);
1878	int i;
1879
1880	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1881		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
1882}
1883
1884static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
1885{
1886	int i;
1887
1888	switch (stringset) {
1889	case ETH_SS_STATS:
1890		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1891			memcpy(data + i * ETH_GSTRING_LEN,
1892			       xennet_stats[i].name, ETH_GSTRING_LEN);
1893		break;
1894	}
1895}
1896
1897static const struct ethtool_ops xennet_ethtool_ops =
1898{
1899	.get_link = ethtool_op_get_link,
1900
1901	.get_sset_count = xennet_get_sset_count,
1902	.get_ethtool_stats = xennet_get_ethtool_stats,
1903	.get_strings = xennet_get_strings,
1904};
1905
1906#ifdef CONFIG_SYSFS
1907static ssize_t show_rxbuf_min(struct device *dev,
1908			      struct device_attribute *attr, char *buf)
1909{
1910	struct net_device *netdev = to_net_dev(dev);
1911	struct netfront_info *info = netdev_priv(netdev);
1912
1913	return sprintf(buf, "%u\n", info->rx_min_target);
1914}
1915
1916static ssize_t store_rxbuf_min(struct device *dev,
1917			       struct device_attribute *attr,
1918			       const char *buf, size_t len)
1919{
1920	struct net_device *netdev = to_net_dev(dev);
1921	struct netfront_info *np = netdev_priv(netdev);
1922	char *endp;
1923	unsigned long target;
1924
1925	if (!capable(CAP_NET_ADMIN))
1926		return -EPERM;
1927
1928	target = simple_strtoul(buf, &endp, 0);
1929	if (endp == buf)
1930		return -EBADMSG;
1931
1932	if (target < RX_MIN_TARGET)
1933		target = RX_MIN_TARGET;
1934	if (target > RX_MAX_TARGET)
1935		target = RX_MAX_TARGET;
1936
1937	spin_lock_bh(&np->rx_lock);
1938	if (target > np->rx_max_target)
1939		np->rx_max_target = target;
1940	np->rx_min_target = target;
1941	if (target > np->rx_target)
1942		np->rx_target = target;
1943
1944	xennet_alloc_rx_buffers(netdev);
1945
1946	spin_unlock_bh(&np->rx_lock);
1947	return len;
1948}
1949
1950static ssize_t show_rxbuf_max(struct device *dev,
1951			      struct device_attribute *attr, char *buf)
1952{
1953	struct net_device *netdev = to_net_dev(dev);
1954	struct netfront_info *info = netdev_priv(netdev);
1955
1956	return sprintf(buf, "%u\n", info->rx_max_target);
1957}
1958
1959static ssize_t store_rxbuf_max(struct device *dev,
1960			       struct device_attribute *attr,
1961			       const char *buf, size_t len)
1962{
1963	struct net_device *netdev = to_net_dev(dev);
1964	struct netfront_info *np = netdev_priv(netdev);
1965	char *endp;
1966	unsigned long target;
1967
1968	if (!capable(CAP_NET_ADMIN))
1969		return -EPERM;
1970
1971	target = simple_strtoul(buf, &endp, 0);
1972	if (endp == buf)
1973		return -EBADMSG;
1974
1975	if (target < RX_MIN_TARGET)
1976		target = RX_MIN_TARGET;
1977	if (target > RX_MAX_TARGET)
1978		target = RX_MAX_TARGET;
1979
1980	spin_lock_bh(&np->rx_lock);
1981	if (target < np->rx_min_target)
1982		np->rx_min_target = target;
1983	np->rx_max_target = target;
1984	if (target < np->rx_target)
1985		np->rx_target = target;
1986
1987	xennet_alloc_rx_buffers(netdev);
1988
1989	spin_unlock_bh(&np->rx_lock);
1990	return len;
1991}
1992
1993static ssize_t show_rxbuf_cur(struct device *dev,
1994			      struct device_attribute *attr, char *buf)
1995{
1996	struct net_device *netdev = to_net_dev(dev);
1997	struct netfront_info *info = netdev_priv(netdev);
1998
1999	return sprintf(buf, "%u\n", info->rx_target);
2000}
2001
2002static struct device_attribute xennet_attrs[] = {
2003	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
2004	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
2005	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
2006};
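/*
 * Usage sketch (illustrative): these attributes appear under the net
 * device's sysfs directory, e.g. /sys/class/net/eth0/rxbuf_min and
 * rxbuf_max (writable with CAP_NET_ADMIN) plus the read-only rxbuf_cur.
 * Written values are clamped to [RX_MIN_TARGET, RX_MAX_TARGET] and the
 * min/max pair is kept consistent, as implemented in the store functions
 * above.
 */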
2007
2008static int xennet_sysfs_addif(struct net_device *netdev)
2009{
2010	int i;
2011	int err;
2012
2013	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2014		err = device_create_file(&netdev->dev,
2015					   &xennet_attrs[i]);
2016		if (err)
2017			goto fail;
2018	}
2019	return 0;
2020
2021 fail:
2022	while (--i >= 0)
2023		device_remove_file(&netdev->dev, &xennet_attrs[i]);
2024	return err;
2025}
2026
2027static void xennet_sysfs_delif(struct net_device *netdev)
2028{
2029	int i;
2030
2031	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
2032		device_remove_file(&netdev->dev, &xennet_attrs[i]);
2033}
2034
2035#endif /* CONFIG_SYSFS */
2036
2037static const struct xenbus_device_id netfront_ids[] = {
2038	{ "vif" },
2039	{ "" }
2040};
2041
2042
2043static int xennet_remove(struct xenbus_device *dev)
2044{
2045	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2046
2047	dev_dbg(&dev->dev, "%s\n", dev->nodename);
2048
2049	xennet_disconnect_backend(info);
2050
2051	xennet_sysfs_delif(info->netdev);
2052
2053	unregister_netdev(info->netdev);
2054
2055	del_timer_sync(&info->rx_refill_timer);
2056
2057	free_percpu(info->stats);
 
2058
2059	free_netdev(info->netdev);
2060
2061	return 0;
2062}
2063
2064static DEFINE_XENBUS_DRIVER(netfront, ,
2065	.probe = netfront_probe,
2066	.remove = xennet_remove,
2067	.resume = netfront_resume,
2068	.otherend_changed = netback_changed,
2069);
2070
2071static int __init netif_init(void)
2072{
2073	if (!xen_domain())
2074		return -ENODEV;
2075
2076	if (!xen_has_pv_nic_devices())
2077		return -ENODEV;
2078
2079	pr_info("Initialising Xen virtual ethernet driver\n");
2080
2081	return xenbus_register_frontend(&netfront_driver);
2082}
2083module_init(netif_init);
2084
2085
2086static void __exit netif_exit(void)
2087{
2088	xenbus_unregister_driver(&netfront_driver);
2089}
2090module_exit(netif_exit);
2091
2092MODULE_DESCRIPTION("Xen virtual network device frontend");
2093MODULE_LICENSE("GPL");
2094MODULE_ALIAS("xen:vif");
2095MODULE_ALIAS("xennet");
v5.4
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47
 
  48#include <xen/xen.h>
  49#include <xen/xenbus.h>
  50#include <xen/events.h>
  51#include <xen/page.h>
  52#include <xen/platform_pci.h>
  53#include <xen/grant_table.h>
  54
  55#include <xen/interface/io/netif.h>
  56#include <xen/interface/memory.h>
  57#include <xen/interface/grant_table.h>
  58
  59/* Module parameters */
  60#define MAX_QUEUES_DEFAULT 8
  61static unsigned int xennet_max_queues;
  62module_param_named(max_queues, xennet_max_queues, uint, 0644);
  63MODULE_PARM_DESC(max_queues,
  64		 "Maximum number of queues per virtual interface");
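/*
 * Usage sketch (illustrative): the limit can be set at load time, e.g.
 *   modprobe xen-netfront max_queues=4
 * or, when the driver is built in, via the kernel command line:
 *   xen_netfront.max_queues=4
 * The per-device queue count is additionally capped by the backend's
 * "multi-queue-max-queues" xenstore key (see talk_to_netback() below).
 */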
  65
  66static const struct ethtool_ops xennet_ethtool_ops;
  67
  68struct netfront_cb {
  69	int pull_to;
  70};
  71
  72#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  73
  74#define RX_COPY_THRESHOLD 256
  75
  76#define GRANT_INVALID_REF	0
  77
  78#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
  79#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
  80
  81/* Minimum number of Rx slots (includes slot for GSO metadata). */
  82#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
  83
  84/* Queue name is interface name with "-qNNN" appended */
  85#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
  86
  87/* IRQ name is queue name with "-tx" or "-rx" appended */
  88#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
  89
  90static DECLARE_WAIT_QUEUE_HEAD(module_wq);
  91
  92struct netfront_stats {
  93	u64			packets;
  94	u64			bytes;
 
 
  95	struct u64_stats_sync	syncp;
  96};
  97
  98struct netfront_info;
  99
 100struct netfront_queue {
 101	unsigned int id; /* Queue ID, 0-based */
 102	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
 103	struct netfront_info *info;
 104
 105	struct napi_struct napi;
 106
 107	/* Split event channels support, tx_* == rx_* when using
 108	 * single event channel.
 109	 */
 110	unsigned int tx_evtchn, rx_evtchn;
 111	unsigned int tx_irq, rx_irq;
 112	/* Only used when split event channels support is enabled */
 113	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 114	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 
 
 115
 116	spinlock_t   tx_lock;
 117	struct xen_netif_tx_front_ring tx;
 118	int tx_ring_ref;
 119
 120	/*
 121	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 122	 * are linked from tx_skb_freelist through skb_entry.link.
 123	 *
 124	 *  NB. Freelist index entries are always going to be less than
 125	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
 126	 *  greater than PAGE_OFFSET: we use this property to distinguish
 127	 *  them.
 128	 */
 129	union skb_entry {
 130		struct sk_buff *skb;
 131		unsigned long link;
 132	} tx_skbs[NET_TX_RING_SIZE];
 133	grant_ref_t gref_tx_head;
 134	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 135	struct page *grant_tx_page[NET_TX_RING_SIZE];
 136	unsigned tx_skb_freelist;
 137
 138	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 139	struct xen_netif_rx_front_ring rx;
 140	int rx_ring_ref;
 141
 142	struct timer_list rx_refill_timer;
 143
 144	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 145	grant_ref_t gref_rx_head;
 146	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 147};
 148
 149struct netfront_info {
 150	struct list_head list;
 151	struct net_device *netdev;
 152
 153	struct xenbus_device *xbdev;
 154
 155	/* Multi-queue support */
 156	struct netfront_queue *queues;
 157
 158	/* Statistics */
 159	struct netfront_stats __percpu *rx_stats;
 160	struct netfront_stats __percpu *tx_stats;
 161
 162	atomic_t rx_gso_checksum_fixup;
 163};
 164
 165struct netfront_rx_info {
 166	struct xen_netif_rx_response rx;
 167	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 168};
 169
 170static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 171{
 172	list->link = id;
 173}
 174
 175static int skb_entry_is_link(const union skb_entry *list)
 176{
 177	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
 178	return (unsigned long)list->skb < PAGE_OFFSET;
 179}
 180
 181/*
 182 * Access macros for acquiring freeing slots in tx_skbs[].
 183 */
 184
 185static void add_id_to_freelist(unsigned *head, union skb_entry *list,
 186			       unsigned short id)
 187{
 188	skb_entry_set_link(&list[id], *head);
 189	*head = id;
 190}
 191
 192static unsigned short get_id_from_freelist(unsigned *head,
 193					   union skb_entry *list)
 194{
 195	unsigned int id = *head;
 196	*head = list[id].link;
 197	return id;
 198}
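/*
 * Standalone sketch (illustrative, not driver code) of the freelist trick
 * used by tx_skbs[]: a free entry stores the index of the next free entry,
 * so allocation and release are O(1) pops/pushes on a singly linked list
 * threaded through the array itself.  All example_* names are hypothetical.
 */
#define EXAMPLE_RING_SIZE 8

struct example_entry { unsigned long link_or_ptr; };

static unsigned short example_get_id(unsigned int *head,
				     struct example_entry *list)
{
	unsigned int id = *head;

	*head = list[id].link_or_ptr;	/* pop: head follows the stored link */
	return id;
}

static void example_put_id(unsigned int *head, struct example_entry *list,
			   unsigned short id)
{
	list[id].link_or_ptr = *head;	/* push: entry points at the old head */
	*head = id;
}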
 199
 200static int xennet_rxidx(RING_IDX idx)
 201{
 202	return idx & (NET_RX_RING_SIZE - 1);
 203}
 204
 205static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
 206					 RING_IDX ri)
 207{
 208	int i = xennet_rxidx(ri);
 209	struct sk_buff *skb = queue->rx_skbs[i];
 210	queue->rx_skbs[i] = NULL;
 211	return skb;
 212}
 213
 214static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 215					    RING_IDX ri)
 216{
 217	int i = xennet_rxidx(ri);
 218	grant_ref_t ref = queue->grant_rx_ref[i];
 219	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
 220	return ref;
 221}
 222
 223#ifdef CONFIG_SYSFS
 224static const struct attribute_group xennet_dev_group;
 225#endif
 226
 227static bool xennet_can_sg(struct net_device *dev)
 228{
 229	return dev->features & NETIF_F_SG;
 230}
 231
 232
 233static void rx_refill_timeout(struct timer_list *t)
 234{
 235	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
 236	napi_schedule(&queue->napi);
 
 237}
 238
 239static int netfront_tx_slot_available(struct netfront_queue *queue)
 240{
 241	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
 242		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
 243}
 244
 245static void xennet_maybe_wake_tx(struct netfront_queue *queue)
 246{
 247	struct net_device *dev = queue->info->netdev;
 248	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
 249
 250	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
 251	    netfront_tx_slot_available(queue) &&
 252	    likely(netif_running(dev)))
 253		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 254}
 255
 256
 257static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 258{
 
 
 259	struct sk_buff *skb;
 260	struct page *page;
 261
 262	skb = __netdev_alloc_skb(queue->info->netdev,
 263				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
 264				 GFP_ATOMIC | __GFP_NOWARN);
 265	if (unlikely(!skb))
 266		return NULL;
 267
 268	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 269	if (!page) {
 270		kfree_skb(skb);
 271		return NULL;
 272	}
 273	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 274
  275	/* Align IP header to a 16-byte boundary */
 276	skb_reserve(skb, NET_IP_ALIGN);
 277	skb->dev = queue->info->netdev;
 278
 279	return skb;
 280}
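/*
 * Buffer layout sketch (illustrative): each rx slot is backed by an skb
 * whose linear area only reserves room for headers (RX_COPY_THRESHOLD plus
 * NET_IP_ALIGN headroom) and whose frag[0] is a full page; that page is
 * what gets granted to the backend in xennet_alloc_rx_buffers().  On
 * receive, up to RX_COPY_THRESHOLD bytes are later pulled into the linear
 * area so the protocol headers are contiguous.
 */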
 281
 
 
 
 282
 283static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 284{
 285	RING_IDX req_prod = queue->rx.req_prod_pvt;
 286	int notify;
 287	int err = 0;
 
 288
 289	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 290		return;
 291
 292	for (req_prod = queue->rx.req_prod_pvt;
 293	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
 294	     req_prod++) {
 295		struct sk_buff *skb;
 296		unsigned short id;
 297		grant_ref_t ref;
 298		struct page *page;
 299		struct xen_netif_rx_request *req;
 300
 301		skb = xennet_alloc_one_rx_buffer(queue);
 302		if (!skb) {
 303			err = -ENOMEM;
 304			break;
 305		}
 306
 307		id = xennet_rxidx(req_prod);
 
 308
 309		BUG_ON(queue->rx_skbs[id]);
 310		queue->rx_skbs[id] = skb;
 
 311
 312		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
 313		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 314		queue->grant_rx_ref[id] = ref;
 315
 316		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 317
 318		req = RING_GET_REQUEST(&queue->rx, req_prod);
 319		gnttab_page_grant_foreign_access_ref_one(ref,
 320							 queue->info->xbdev->otherend_id,
 321							 page,
 322							 0);
 323		req->id = id;
 324		req->gref = ref;
 325	}
 326
 327	queue->rx.req_prod_pvt = req_prod;
 328
 329	/* Try again later if there are not enough requests or skb allocation
 330	 * failed.
  331	 * "Enough requests" means the sum of newly created slots and the
  332	 * slots still unconsumed by the backend.
 333	 */
 334	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
 335	    unlikely(err)) {
 336		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 337		return;
 338	}
 339
 340	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 341	if (notify)
 342		notify_remote_via_irq(queue->rx_irq);
 343}
 344
 345static int xennet_open(struct net_device *dev)
 346{
 347	struct netfront_info *np = netdev_priv(dev);
 348	unsigned int num_queues = dev->real_num_tx_queues;
 349	unsigned int i = 0;
 350	struct netfront_queue *queue = NULL;
 351
 352	if (!np->queues)
 353		return -ENODEV;
 354
 355	for (i = 0; i < num_queues; ++i) {
 356		queue = &np->queues[i];
 357		napi_enable(&queue->napi);
 358
 359		spin_lock_bh(&queue->rx_lock);
 360		if (netif_carrier_ok(dev)) {
 361			xennet_alloc_rx_buffers(queue);
 362			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
 363			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
 364				napi_schedule(&queue->napi);
 365		}
 366		spin_unlock_bh(&queue->rx_lock);
 367	}
 
 368
 369	netif_tx_start_all_queues(dev);
 370
 371	return 0;
 372}
 373
 374static void xennet_tx_buf_gc(struct netfront_queue *queue)
 375{
 376	RING_IDX cons, prod;
 377	unsigned short id;
 
 378	struct sk_buff *skb;
 379	bool more_to_do;
 380
 381	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 382
 383	do {
 384		prod = queue->tx.sring->rsp_prod;
 385		rmb(); /* Ensure we see responses up to 'rp'. */
 386
 387		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 388			struct xen_netif_tx_response *txrsp;
 389
 390			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
 391			if (txrsp->status == XEN_NETIF_RSP_NULL)
 392				continue;
 393
 394			id  = txrsp->id;
 395			skb = queue->tx_skbs[id].skb;
 396			if (unlikely(gnttab_query_foreign_access(
 397				queue->grant_tx_ref[id]) != 0)) {
 398				pr_alert("%s: warning -- grant still in use by backend domain\n",
 399					 __func__);
 400				BUG();
 401			}
 402			gnttab_end_foreign_access_ref(
 403				queue->grant_tx_ref[id], GNTMAP_readonly);
 404			gnttab_release_grant_reference(
 405				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 406			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
 407			queue->grant_tx_page[id] = NULL;
 408			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
 409			dev_kfree_skb_irq(skb);
 410		}
 411
 412		queue->tx.rsp_cons = prod;
 413
 414		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
 415	} while (more_to_do);
 416
 417	xennet_maybe_wake_tx(queue);
 418}
 419
 420struct xennet_gnttab_make_txreq {
 421	struct netfront_queue *queue;
 422	struct sk_buff *skb;
 423	struct page *page;
 424	struct xen_netif_tx_request *tx; /* Last request */
 425	unsigned int size;
 426};
 427
 428static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 429				  unsigned int len, void *data)
 430{
 431	struct xennet_gnttab_make_txreq *info = data;
 432	unsigned int id;
 433	struct xen_netif_tx_request *tx;
 434	grant_ref_t ref;
 435	/* convenient aliases */
 436	struct page *page = info->page;
 437	struct netfront_queue *queue = info->queue;
 438	struct sk_buff *skb = info->skb;
 439
 440	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
 441	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 442	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 443	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 444
 445	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 446					gfn, GNTMAP_readonly);
 447
 448	queue->tx_skbs[id].skb = skb;
 449	queue->grant_tx_page[id] = page;
 450	queue->grant_tx_ref[id] = ref;
 451
 452	tx->id = id;
 453	tx->gref = ref;
 454	tx->offset = offset;
 455	tx->size = len;
 456	tx->flags = 0;
 
 
 
 457
 458	info->tx = tx;
 459	info->size += tx->size;
 460}
 461
 462static struct xen_netif_tx_request *xennet_make_first_txreq(
 463	struct netfront_queue *queue, struct sk_buff *skb,
 464	struct page *page, unsigned int offset, unsigned int len)
 465{
 466	struct xennet_gnttab_make_txreq info = {
 467		.queue = queue,
 468		.skb = skb,
 469		.page = page,
 470		.size = 0,
 471	};
 472
 473	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
 
 474
 475	return info.tx;
 476}
 477
 478static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 479				  unsigned int len, void *data)
 480{
 481	struct xennet_gnttab_make_txreq *info = data;
 482
 483	info->tx->flags |= XEN_NETTXF_more_data;
 484	skb_get(info->skb);
 485	xennet_tx_setup_grant(gfn, offset, len, data);
 486}
 487
 488static struct xen_netif_tx_request *xennet_make_txreqs(
 489	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
 490	struct sk_buff *skb, struct page *page,
 491	unsigned int offset, unsigned int len)
 492{
 493	struct xennet_gnttab_make_txreq info = {
 494		.queue = queue,
 495		.skb = skb,
 496		.tx = tx,
 497	};
 498
 499	/* Skip unused frames from start of page */
 500	page += offset >> PAGE_SHIFT;
 501	offset &= ~PAGE_MASK;
 502
 503	while (len) {
 504		info.page = page;
 505		info.size = 0;
 506
 507		gnttab_foreach_grant_in_range(page, offset, len,
 508					      xennet_make_one_txreq,
 509					      &info);
 510
 511		page++;
 512		offset = 0;
 513		len -= info.size;
 514	}
 515
 516	return info.tx;
 517}
 518
 519/*
 520 * Count how many ring slots are required to send this skb. Each frag
 521 * might be a compound page.
 522 */
 523static int xennet_count_skb_slots(struct sk_buff *skb)
 524{
 525	int i, frags = skb_shinfo(skb)->nr_frags;
 526	int slots;
 527
 528	slots = gnttab_count_grant(offset_in_page(skb->data),
 529				   skb_headlen(skb));
 530
 531	for (i = 0; i < frags; i++) {
 532		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 533		unsigned long size = skb_frag_size(frag);
 534		unsigned long offset = skb_frag_off(frag);
 535
 536		/* Skip unused frames from start of page */
 537		offset &= ~PAGE_MASK;
 538
 539		slots += gnttab_count_grant(offset, size);
 540	}
 541
 542	return slots;
 543}
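/*
 * Illustrative, standalone sketch of the slot math above.  It assumes that
 * gnttab_count_grant() simply counts how many XEN_PAGE_SIZE (4 KiB) grants
 * are needed to cover [offset, offset + len) of a buffer, which is how the
 * driver sizes per-skb ring usage.  All example_* names are hypothetical.
 */
#define EXAMPLE_XEN_PAGE_SIZE 4096u

static unsigned int example_count_grants(unsigned int offset, unsigned int len)
{
	offset &= EXAMPLE_XEN_PAGE_SIZE - 1;	/* offset within the first grant */
	return (offset + len + EXAMPLE_XEN_PAGE_SIZE - 1) / EXAMPLE_XEN_PAGE_SIZE;
}

/* e.g. a 1400-byte linear area at offset 64 needs 1 slot; a 64 KiB
 * compound-page frag starting at offset 2048 needs 17. */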
 544
 545static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 546			       struct net_device *sb_dev)
 547{
 548	unsigned int num_queues = dev->real_num_tx_queues;
 549	u32 hash;
 550	u16 queue_idx;
 551
 552	/* First, check if there is only one queue */
 553	if (num_queues == 1) {
 554		queue_idx = 0;
 555	} else {
 556		hash = skb_get_hash(skb);
 557		queue_idx = hash % num_queues;
 558	}
 559
 560	return queue_idx;
 561}
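/*
 * Illustrative: with 4 real queues, a flow whose skb_get_hash() value is
 * 0x9e3779b9 maps to queue 0x9e3779b9 % 4 == 1; because the hash is stable
 * per flow, all packets of that flow stay on the same tx queue.
 */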
 562
 563#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 564
 565static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 566{
 
 567	struct netfront_info *np = netdev_priv(dev);
 568	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 569	struct xen_netif_tx_request *tx, *first_tx;
 570	unsigned int i;
 
 
 
 571	int notify;
 572	int slots;
 573	struct page *page;
 574	unsigned int offset;
 575	unsigned int len;
 576	unsigned long flags;
 577	struct netfront_queue *queue = NULL;
 578	unsigned int num_queues = dev->real_num_tx_queues;
 579	u16 queue_index;
 580	struct sk_buff *nskb;
 581
 582	/* Drop the packet if no queues are set up */
 583	if (num_queues < 1)
 584		goto drop;
 585	/* Determine which queue to transmit this SKB on */
 586	queue_index = skb_get_queue_mapping(skb);
 587	queue = &np->queues[queue_index];
 588
 589	/* If skb->len is too big for wire format, drop skb and alert
 590	 * user about misconfiguration.
 591	 */
 592	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 593		net_alert_ratelimited(
 594			"xennet: skb->len = %u, too big for wire format\n",
 595			skb->len);
 596		goto drop;
 597	}
 598
 599	slots = xennet_count_skb_slots(skb);
 600	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
 601		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 602				    slots, skb->len);
 603		if (skb_linearize(skb))
 604			goto drop;
 605	}
 606
 607	page = virt_to_page(skb->data);
 608	offset = offset_in_page(skb->data);
 609
 610	/* The first req should be at least ETH_HLEN size or the packet will be
 611	 * dropped by netback.
 612	 */
 613	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
 614		nskb = skb_copy(skb, GFP_ATOMIC);
 615		if (!nskb)
 616			goto drop;
 617		dev_consume_skb_any(skb);
 618		skb = nskb;
 619		page = virt_to_page(skb->data);
 620		offset = offset_in_page(skb->data);
 621	}
 622
 623	len = skb_headlen(skb);
 624
 625	spin_lock_irqsave(&queue->tx_lock, flags);
 626
 627	if (unlikely(!netif_carrier_ok(dev) ||
 628		     (slots > 1 && !xennet_can_sg(dev)) ||
 629		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 630		spin_unlock_irqrestore(&queue->tx_lock, flags);
 631		goto drop;
 632	}
 633
 634	/* First request for the linear area. */
 635	first_tx = tx = xennet_make_first_txreq(queue, skb,
 636						page, offset, len);
 637	offset += tx->size;
 638	if (offset == PAGE_SIZE) {
 639		page++;
 640		offset = 0;
 641	}
 642	len -= tx->size;
 643
 
 644	if (skb->ip_summed == CHECKSUM_PARTIAL)
 645		/* local packet? */
 646		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
 647	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 648		/* remote but checksummed. */
 649		tx->flags |= XEN_NETTXF_data_validated;
 650
 651	/* Optional extra info after the first request. */
 652	if (skb_shinfo(skb)->gso_size) {
 653		struct xen_netif_extra_info *gso;
 654
 655		gso = (struct xen_netif_extra_info *)
 656			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 657
 658		tx->flags |= XEN_NETTXF_extra_info;
 659
 660		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 661		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 662			XEN_NETIF_GSO_TYPE_TCPV6 :
 663			XEN_NETIF_GSO_TYPE_TCPV4;
 664		gso->u.gso.pad = 0;
 665		gso->u.gso.features = 0;
 666
 667		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 668		gso->flags = 0;
 669	}
 670
 671	/* Requests for the rest of the linear area. */
 672	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
 673
 674	/* Requests for all the frags. */
 675	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 676		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 677		tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
 678					skb_frag_off(frag),
 679					skb_frag_size(frag));
 680	}
 681
 682	/* First request has the packet length. */
 683	first_tx->size = skb->len;
 684
 685	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 686	if (notify)
 687		notify_remote_via_irq(queue->tx_irq);
 688
 689	u64_stats_update_begin(&tx_stats->syncp);
 690	tx_stats->bytes += skb->len;
 691	tx_stats->packets++;
 692	u64_stats_update_end(&tx_stats->syncp);
 693
 694	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 695	xennet_tx_buf_gc(queue);
 696
 697	if (!netfront_tx_slot_available(queue))
 698		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
 699
 700	spin_unlock_irqrestore(&queue->tx_lock, flags);
 701
 702	return NETDEV_TX_OK;
 703
 704 drop:
 705	dev->stats.tx_dropped++;
 706	dev_kfree_skb_any(skb);
 707	return NETDEV_TX_OK;
 708}
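/*
 * Wire-format sketch (illustrative) for a packet that needs three tx slots:
 * every request except the last carries XEN_NETTXF_more_data, and the first
 * request's size field is rewritten to the total length
 * (first_tx->size = skb->len above), so the backend recovers the length of
 * the first chunk by subtracting the later ones:
 *
 *   slot 0: gref A, size = skb->len,   flags = more_data [| csum bits]
 *   slot 1: gref B, size = bytes in B, flags = more_data
 *   slot 2: gref C, size = bytes in C, flags = 0
 */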
 709
 710static int xennet_close(struct net_device *dev)
 711{
 712	struct netfront_info *np = netdev_priv(dev);
 713	unsigned int num_queues = dev->real_num_tx_queues;
 714	unsigned int i;
 715	struct netfront_queue *queue;
 716	netif_tx_stop_all_queues(np->netdev);
 717	for (i = 0; i < num_queues; ++i) {
 718		queue = &np->queues[i];
 719		napi_disable(&queue->napi);
 720	}
 721	return 0;
 722}
 723
 724static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 725				grant_ref_t ref)
 726{
 727	int new = xennet_rxidx(queue->rx.req_prod_pvt);
 728
 729	BUG_ON(queue->rx_skbs[new]);
 730	queue->rx_skbs[new] = skb;
 731	queue->grant_rx_ref[new] = ref;
 732	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
 733	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
 734	queue->rx.req_prod_pvt++;
 735}
 736
 737static int xennet_get_extras(struct netfront_queue *queue,
 738			     struct xen_netif_extra_info *extras,
 739			     RING_IDX rp)
 740
 741{
 742	struct xen_netif_extra_info *extra;
 743	struct device *dev = &queue->info->netdev->dev;
 744	RING_IDX cons = queue->rx.rsp_cons;
 745	int err = 0;
 746
 747	do {
 748		struct sk_buff *skb;
 749		grant_ref_t ref;
 750
 751		if (unlikely(cons + 1 == rp)) {
 752			if (net_ratelimit())
 753				dev_warn(dev, "Missing extra info\n");
 754			err = -EBADR;
 755			break;
 756		}
 757
 758		extra = (struct xen_netif_extra_info *)
 759			RING_GET_RESPONSE(&queue->rx, ++cons);
 760
 761		if (unlikely(!extra->type ||
 762			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 763			if (net_ratelimit())
 764				dev_warn(dev, "Invalid extra type: %d\n",
 765					extra->type);
 766			err = -EINVAL;
 767		} else {
 768			memcpy(&extras[extra->type - 1], extra,
 769			       sizeof(*extra));
 770		}
 771
 772		skb = xennet_get_rx_skb(queue, cons);
 773		ref = xennet_get_rx_ref(queue, cons);
 774		xennet_move_rx_slot(queue, skb, ref);
 775	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 776
 777	queue->rx.rsp_cons = cons;
 778	return err;
 779}
 780
 781static int xennet_get_responses(struct netfront_queue *queue,
 782				struct netfront_rx_info *rinfo, RING_IDX rp,
 783				struct sk_buff_head *list)
 784{
 785	struct xen_netif_rx_response *rx = &rinfo->rx;
 786	struct xen_netif_extra_info *extras = rinfo->extras;
 787	struct device *dev = &queue->info->netdev->dev;
 788	RING_IDX cons = queue->rx.rsp_cons;
 789	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
 790	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
 791	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
 792	int slots = 1;
 793	int err = 0;
 794	unsigned long ret;
 795
 796	if (rx->flags & XEN_NETRXF_extra_info) {
 797		err = xennet_get_extras(queue, extras, rp);
 798		cons = queue->rx.rsp_cons;
 799	}
 800
 801	for (;;) {
 802		if (unlikely(rx->status < 0 ||
 803			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
 804			if (net_ratelimit())
 805				dev_warn(dev, "rx->offset: %u, size: %d\n",
 806					 rx->offset, rx->status);
 807			xennet_move_rx_slot(queue, skb, ref);
 808			err = -EINVAL;
 809			goto next;
 810		}
 811
 812		/*
 813		 * This definitely indicates a bug, either in this driver or in
  814		 * the backend driver. In the future this should flag the bad
 815		 * situation to the system controller to reboot the backend.
 816		 */
 817		if (ref == GRANT_INVALID_REF) {
 818			if (net_ratelimit())
 819				dev_warn(dev, "Bad rx response id %d.\n",
 820					 rx->id);
 821			err = -EINVAL;
 822			goto next;
 823		}
 824
 825		ret = gnttab_end_foreign_access_ref(ref, 0);
 826		BUG_ON(!ret);
 827
 828		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 829
 830		__skb_queue_tail(list, skb);
 831
 832next:
 833		if (!(rx->flags & XEN_NETRXF_more_data))
 834			break;
 835
 836		if (cons + slots == rp) {
 837			if (net_ratelimit())
 838				dev_warn(dev, "Need more slots\n");
 839			err = -ENOENT;
 840			break;
 841		}
 842
 843		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
 844		skb = xennet_get_rx_skb(queue, cons + slots);
 845		ref = xennet_get_rx_ref(queue, cons + slots);
 846		slots++;
 847	}
 848
 849	if (unlikely(slots > max)) {
 850		if (net_ratelimit())
 851			dev_warn(dev, "Too many slots\n");
 852		err = -E2BIG;
 853	}
 854
 855	if (unlikely(err))
 856		queue->rx.rsp_cons = cons + slots;
 857
 858	return err;
 859}
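/*
 * Illustrative rx flow for a packet spanning three slots: the response for
 * the first slot stays attached to 'skb' (its data lives in frag 0 of that
 * skb), while the skbs backing the follow-on slots are collected on 'list'
 * here; xennet_fill_frags() below then turns each of them into an extra
 * page frag of the head skb before it is handed to the stack.
 */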
 860
 861static int xennet_set_skb_gso(struct sk_buff *skb,
 862			      struct xen_netif_extra_info *gso)
 863{
 864	if (!gso->u.gso.size) {
 865		if (net_ratelimit())
 866			pr_warn("GSO size must not be zero\n");
 867		return -EINVAL;
 868	}
 869
 870	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
 871	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
 872		if (net_ratelimit())
 873			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
 874		return -EINVAL;
 875	}
 876
 877	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 878	skb_shinfo(skb)->gso_type =
 879		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
 880		SKB_GSO_TCPV4 :
 881		SKB_GSO_TCPV6;
 882
 883	/* Header must be checked, and gso_segs computed. */
 884	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
 885	skb_shinfo(skb)->gso_segs = 0;
 886
 887	return 0;
 888}
 889
 890static int xennet_fill_frags(struct netfront_queue *queue,
 891			     struct sk_buff *skb,
 892			     struct sk_buff_head *list)
 893{
 894	RING_IDX cons = queue->rx.rsp_cons;
 
 895	struct sk_buff *nskb;
 896
 897	while ((nskb = __skb_dequeue(list))) {
 898		struct xen_netif_rx_response *rx =
 899			RING_GET_RESPONSE(&queue->rx, ++cons);
 900		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 901
 902		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 903			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 904
 905			BUG_ON(pull_to < skb_headlen(skb));
 906			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 907		}
 908		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
 909			queue->rx.rsp_cons = ++cons + skb_queue_len(list);
 910			kfree_skb(nskb);
 911			return -ENOENT;
 912		}
 913
 914		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 915				skb_frag_page(nfrag),
 916				rx->offset, rx->status, PAGE_SIZE);
 917
 918		skb_shinfo(nskb)->nr_frags = 0;
 919		kfree_skb(nskb);
 920	}
 921
 922	queue->rx.rsp_cons = cons;
 923
 924	return 0;
 925}
 926
 927static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 928{
 929	bool recalculate_partial_csum = false;
 930
 931	/*
 932	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 933	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 934	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 935	 * recalculate the partial checksum.
 936	 */
 937	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 938		struct netfront_info *np = netdev_priv(dev);
 939		atomic_inc(&np->rx_gso_checksum_fixup);
 940		skb->ip_summed = CHECKSUM_PARTIAL;
 941		recalculate_partial_csum = true;
 942	}
 943
 944	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 945	if (skb->ip_summed != CHECKSUM_PARTIAL)
 946		return 0;
 947
 948	return skb_checksum_setup(skb, recalculate_partial_csum);
 949}
 950
 951static int handle_incoming_queue(struct netfront_queue *queue,
 952				 struct sk_buff_head *rxq)
 953{
 954	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
 
 955	int packets_dropped = 0;
 956	struct sk_buff *skb;
 957
 958	while ((skb = __skb_dequeue(rxq)) != NULL) {
 959		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 960
 961		if (pull_to > skb_headlen(skb))
 962			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 963
 964		/* Ethernet work: Delayed to here as it peeks the header. */
 965		skb->protocol = eth_type_trans(skb, queue->info->netdev);
 966		skb_reset_network_header(skb);
 967
 968		if (checksum_setup(queue->info->netdev, skb)) {
 969			kfree_skb(skb);
 970			packets_dropped++;
 971			queue->info->netdev->stats.rx_errors++;
 972			continue;
 973		}
 974
 975		u64_stats_update_begin(&rx_stats->syncp);
 976		rx_stats->packets++;
 977		rx_stats->bytes += skb->len;
 978		u64_stats_update_end(&rx_stats->syncp);
 979
 980		/* Pass it up. */
 981		napi_gro_receive(&queue->napi, skb);
 982	}
 983
 984	return packets_dropped;
 985}
 986
 987static int xennet_poll(struct napi_struct *napi, int budget)
 988{
 989	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
 990	struct net_device *dev = queue->info->netdev;
 991	struct sk_buff *skb;
 992	struct netfront_rx_info rinfo;
 993	struct xen_netif_rx_response *rx = &rinfo.rx;
 994	struct xen_netif_extra_info *extras = rinfo.extras;
 995	RING_IDX i, rp;
 996	int work_done;
 997	struct sk_buff_head rxq;
 998	struct sk_buff_head errq;
 999	struct sk_buff_head tmpq;
 
1000	int err;
1001
1002	spin_lock(&queue->rx_lock);
1003
1004	skb_queue_head_init(&rxq);
1005	skb_queue_head_init(&errq);
1006	skb_queue_head_init(&tmpq);
1007
1008	rp = queue->rx.sring->rsp_prod;
1009	rmb(); /* Ensure we see queued responses up to 'rp'. */
1010
1011	i = queue->rx.rsp_cons;
1012	work_done = 0;
1013	while ((i != rp) && (work_done < budget)) {
1014		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
1015		memset(extras, 0, sizeof(rinfo.extras));
1016
1017		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
1018
1019		if (unlikely(err)) {
1020err:
1021			while ((skb = __skb_dequeue(&tmpq)))
1022				__skb_queue_tail(&errq, skb);
1023			dev->stats.rx_errors++;
1024			i = queue->rx.rsp_cons;
1025			continue;
1026		}
1027
1028		skb = __skb_dequeue(&tmpq);
1029
1030		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1031			struct xen_netif_extra_info *gso;
1032			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1033
1034			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1035				__skb_queue_head(&tmpq, skb);
1036				queue->rx.rsp_cons += skb_queue_len(&tmpq);
1037				goto err;
1038			}
1039		}
1040
1041		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1042		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1043			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1044
1045		skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1046		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1047		skb->data_len = rx->status;
1048		skb->len += rx->status;
1049
1050		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1051			goto err;
1052
1053		if (rx->flags & XEN_NETRXF_csum_blank)
1054			skb->ip_summed = CHECKSUM_PARTIAL;
1055		else if (rx->flags & XEN_NETRXF_data_validated)
1056			skb->ip_summed = CHECKSUM_UNNECESSARY;
1057
1058		__skb_queue_tail(&rxq, skb);
1059
1060		i = ++queue->rx.rsp_cons;
1061		work_done++;
1062	}
1063
1064	__skb_queue_purge(&errq);
1065
1066	work_done -= handle_incoming_queue(queue, &rxq);
1067
1068	xennet_alloc_rx_buffers(queue);
1069
1070	if (work_done < budget) {
1071		int more_to_do = 0;
1072
1073		napi_complete_done(napi, work_done);
1074
1075		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1076		if (more_to_do)
1077			napi_schedule(napi);
1078	}
1079
1080	spin_unlock(&queue->rx_lock);
1081
1082	return work_done;
1083}
1084
1085static int xennet_change_mtu(struct net_device *dev, int mtu)
1086{
1087	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
 
1088
1089	if (mtu > max)
1090		return -EINVAL;
1091	dev->mtu = mtu;
1092	return 0;
1093}
1094
1095static void xennet_get_stats64(struct net_device *dev,
1096			       struct rtnl_link_stats64 *tot)
1097{
1098	struct netfront_info *np = netdev_priv(dev);
1099	int cpu;
1100
1101	for_each_possible_cpu(cpu) {
1102		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1103		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1104		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1105		unsigned int start;
1106
1107		do {
1108			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1109			tx_packets = tx_stats->packets;
1110			tx_bytes = tx_stats->bytes;
1111		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1112
1113		do {
1114			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1115			rx_packets = rx_stats->packets;
1116			rx_bytes = rx_stats->bytes;
1117		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1118
1119		tot->rx_packets += rx_packets;
1120		tot->tx_packets += tx_packets;
1121		tot->rx_bytes   += rx_bytes;
1122		tot->tx_bytes   += tx_bytes;
1123	}
1124
1125	tot->rx_errors  = dev->stats.rx_errors;
1126	tot->tx_dropped = dev->stats.tx_dropped;
 
 
1127}
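/*
 * Illustrative, standalone sketch of the aggregation above: rx/tx counters
 * are kept per CPU and summed on demand; the u64_stats_fetch_* loop only
 * guards against torn 64-bit reads on 32-bit hosts.  The example_* names
 * are hypothetical.
 */
struct example_cpu_stats { unsigned long long packets, bytes; };

static void example_sum_stats(const struct example_cpu_stats *per_cpu,
			      unsigned int ncpus,
			      struct example_cpu_stats *tot)
{
	unsigned int cpu;

	tot->packets = 0;
	tot->bytes = 0;
	for (cpu = 0; cpu < ncpus; cpu++) {
		tot->packets += per_cpu[cpu].packets;
		tot->bytes   += per_cpu[cpu].bytes;
	}
}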
1128
1129static void xennet_release_tx_bufs(struct netfront_queue *queue)
1130{
1131	struct sk_buff *skb;
1132	int i;
1133
1134	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1135		/* Skip over entries which are actually freelist references */
1136		if (skb_entry_is_link(&queue->tx_skbs[i]))
1137			continue;
1138
1139		skb = queue->tx_skbs[i].skb;
1140		get_page(queue->grant_tx_page[i]);
1141		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1142					  GNTMAP_readonly,
1143					  (unsigned long)page_address(queue->grant_tx_page[i]));
1144		queue->grant_tx_page[i] = NULL;
1145		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1146		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1147		dev_kfree_skb_irq(skb);
1148	}
1149}
1150
1151static void xennet_release_rx_bufs(struct netfront_queue *queue)
1152{
1153	int id, ref;
1154
1155	spin_lock_bh(&queue->rx_lock);
1156
1157	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1158		struct sk_buff *skb;
1159		struct page *page;
1160
1161		skb = queue->rx_skbs[id];
1162		if (!skb)
1163			continue;
1164
1165		ref = queue->grant_rx_ref[id];
1166		if (ref == GRANT_INVALID_REF)
1167			continue;
1168
1169		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1170
1171		/* gnttab_end_foreign_access() needs a page ref until
1172		 * foreign access is ended (which may be deferred).
1173		 */
1174		get_page(page);
1175		gnttab_end_foreign_access(ref, 0,
1176					  (unsigned long)page_address(page));
1177		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1178
1179		kfree_skb(skb);
1180	}
1181
1182	spin_unlock_bh(&queue->rx_lock);
1183}
1184
1185static netdev_features_t xennet_fix_features(struct net_device *dev,
1186	netdev_features_t features)
1187{
1188	struct netfront_info *np = netdev_priv(dev);
1189
1190	if (features & NETIF_F_SG &&
1191	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1192		features &= ~NETIF_F_SG;
1193
1194	if (features & NETIF_F_IPV6_CSUM &&
1195	    !xenbus_read_unsigned(np->xbdev->otherend,
1196				  "feature-ipv6-csum-offload", 0))
1197		features &= ~NETIF_F_IPV6_CSUM;
1198
1199	if (features & NETIF_F_TSO &&
1200	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1201		features &= ~NETIF_F_TSO;
1202
1203	if (features & NETIF_F_TSO6 &&
1204	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1205		features &= ~NETIF_F_TSO6;
1206
1207	return features;
1208}
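/*
 * Illustrative mapping used above (keys are read from the backend's
 * xenstore directory; a missing key defaults to 0 and clears the bit):
 *
 *   feature-sg                 -> NETIF_F_SG
 *   feature-ipv6-csum-offload  -> NETIF_F_IPV6_CSUM
 *   feature-gso-tcpv4          -> NETIF_F_TSO
 *   feature-gso-tcpv6          -> NETIF_F_TSO6
 */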
1209
1210static int xennet_set_features(struct net_device *dev,
1211	netdev_features_t features)
1212{
1213	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
 1214		netdev_info(dev, "Reducing MTU because no SG offload\n");
1215		dev->mtu = ETH_DATA_LEN;
1216	}
1217
1218	return 0;
1219}
1220
1221static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1222{
1223	struct netfront_queue *queue = dev_id;
 
1224	unsigned long flags;
1225
1226	spin_lock_irqsave(&queue->tx_lock, flags);
1227	xennet_tx_buf_gc(queue);
1228	spin_unlock_irqrestore(&queue->tx_lock, flags);
1229
1230	return IRQ_HANDLED;
1231}
1232
1233static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1234{
1235	struct netfront_queue *queue = dev_id;
1236	struct net_device *dev = queue->info->netdev;
1237
1238	if (likely(netif_carrier_ok(dev) &&
1239		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1240		napi_schedule(&queue->napi);
1241
1242	return IRQ_HANDLED;
1243}
1244
1245static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1246{
1247	xennet_tx_interrupt(irq, dev_id);
1248	xennet_rx_interrupt(irq, dev_id);
1249	return IRQ_HANDLED;
1250}
1251
1252#ifdef CONFIG_NET_POLL_CONTROLLER
1253static void xennet_poll_controller(struct net_device *dev)
1254{
1255	/* Poll each queue */
1256	struct netfront_info *info = netdev_priv(dev);
1257	unsigned int num_queues = dev->real_num_tx_queues;
1258	unsigned int i;
1259	for (i = 0; i < num_queues; ++i)
1260		xennet_interrupt(0, &info->queues[i]);
1261}
1262#endif
1263
1264static const struct net_device_ops xennet_netdev_ops = {
1265	.ndo_open            = xennet_open,
 
1266	.ndo_stop            = xennet_close,
1267	.ndo_start_xmit      = xennet_start_xmit,
1268	.ndo_change_mtu	     = xennet_change_mtu,
1269	.ndo_get_stats64     = xennet_get_stats64,
1270	.ndo_set_mac_address = eth_mac_addr,
1271	.ndo_validate_addr   = eth_validate_addr,
1272	.ndo_fix_features    = xennet_fix_features,
1273	.ndo_set_features    = xennet_set_features,
1274	.ndo_select_queue    = xennet_select_queue,
1275#ifdef CONFIG_NET_POLL_CONTROLLER
1276	.ndo_poll_controller = xennet_poll_controller,
1277#endif
1278};
1279
1280static void xennet_free_netdev(struct net_device *netdev)
1281{
1282	struct netfront_info *np = netdev_priv(netdev);
1283
1284	free_percpu(np->rx_stats);
1285	free_percpu(np->tx_stats);
1286	free_netdev(netdev);
1287}
1288
1289static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1290{
1291	int err;
1292	struct net_device *netdev;
1293	struct netfront_info *np;
1294
1295	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1296	if (!netdev)
1297		return ERR_PTR(-ENOMEM);
1298
1299	np                   = netdev_priv(netdev);
1300	np->xbdev            = dev;
1301
1302	np->queues = NULL;
1303
1304	err = -ENOMEM;
1305	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1306	if (np->rx_stats == NULL)
1307		goto exit;
1308	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1309	if (np->tx_stats == NULL)
1310		goto exit;
1311
1312	netdev->netdev_ops	= &xennet_netdev_ops;
1313
 
1314	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1315				  NETIF_F_GSO_ROBUST;
1316	netdev->hw_features	= NETIF_F_SG |
1317				  NETIF_F_IPV6_CSUM |
1318				  NETIF_F_TSO | NETIF_F_TSO6;
1319
1320	/*
 1321	 * Assume that all hw features are available for now. This set
 1322	 * will be adjusted by the call to netdev_update_features() in
 1323	 * xennet_connect() which is the earliest point where we can
 1324	 * negotiate with the backend regarding supported features.
 1325	 */
1326	netdev->features |= netdev->hw_features;
1327
1328	netdev->ethtool_ops = &xennet_ethtool_ops;
1329	netdev->min_mtu = ETH_MIN_MTU;
1330	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1331	SET_NETDEV_DEV(netdev, &dev->dev);
1332
 
 
1333	np->netdev = netdev;
1334
1335	netif_carrier_off(netdev);
1336
1337	xenbus_switch_state(dev, XenbusStateInitialising);
1338	wait_event(module_wq,
1339		   xenbus_read_driver_state(dev->otherend) !=
1340		   XenbusStateClosed &&
1341		   xenbus_read_driver_state(dev->otherend) !=
1342		   XenbusStateUnknown);
1343	return netdev;
1344
1345 exit:
1346	xennet_free_netdev(netdev);
1347	return ERR_PTR(err);
1348}
1349
1350/**
1351 * Entry point to this code when a new device is created.  Allocate the basic
1352 * structures and the ring buffers for communication with the backend, and
1353 * inform the backend of the appropriate details for those.
1354 */
1355static int netfront_probe(struct xenbus_device *dev,
1356			  const struct xenbus_device_id *id)
1357{
1358	int err;
1359	struct net_device *netdev;
1360	struct netfront_info *info;
1361
1362	netdev = xennet_create_dev(dev);
1363	if (IS_ERR(netdev)) {
1364		err = PTR_ERR(netdev);
1365		xenbus_dev_fatal(dev, err, "creating netdev");
1366		return err;
1367	}
1368
1369	info = netdev_priv(netdev);
1370	dev_set_drvdata(&dev->dev, info);
1371#ifdef CONFIG_SYSFS
1372	info->netdev->sysfs_groups[0] = &xennet_dev_group;
1373#endif
1374
1375	return 0;
1376}
1377
1378static void xennet_end_access(int ref, void *page)
1379{
1380	/* This frees the page as a side-effect */
1381	if (ref != GRANT_INVALID_REF)
1382		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1383}
1384
1385static void xennet_disconnect_backend(struct netfront_info *info)
1386{
1387	unsigned int i = 0;
1388	unsigned int num_queues = info->netdev->real_num_tx_queues;
1389
1390	netif_carrier_off(info->netdev);
 
 
1391
1392	for (i = 0; i < num_queues && info->queues; ++i) {
1393		struct netfront_queue *queue = &info->queues[i];
1394
1395		del_timer_sync(&queue->rx_refill_timer);
1396
1397		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1398			unbind_from_irqhandler(queue->tx_irq, queue);
1399		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1400			unbind_from_irqhandler(queue->tx_irq, queue);
1401			unbind_from_irqhandler(queue->rx_irq, queue);
1402		}
1403		queue->tx_evtchn = queue->rx_evtchn = 0;
1404		queue->tx_irq = queue->rx_irq = 0;
1405
1406		if (netif_running(info->netdev))
1407			napi_synchronize(&queue->napi);
1408
1409		xennet_release_tx_bufs(queue);
1410		xennet_release_rx_bufs(queue);
1411		gnttab_free_grant_references(queue->gref_tx_head);
1412		gnttab_free_grant_references(queue->gref_rx_head);
1413
1414		/* End access and free the pages */
1415		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1416		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1417
1418		queue->tx_ring_ref = GRANT_INVALID_REF;
1419		queue->rx_ring_ref = GRANT_INVALID_REF;
1420		queue->tx.sring = NULL;
1421		queue->rx.sring = NULL;
1422	}
1423}
1424
1425/**
1426 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1427 * driver restart.  We tear down our netif structure and recreate it, but
1428 * leave the device-layer structures intact so that this is transparent to the
1429 * rest of the kernel.
1430 */
1431static int netfront_resume(struct xenbus_device *dev)
1432{
1433	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1434
1435	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1436
1437	xennet_disconnect_backend(info);
1438	return 0;
1439}
1440
1441static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1442{
1443	char *s, *e, *macstr;
1444	int i;
1445
1446	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1447	if (IS_ERR(macstr))
1448		return PTR_ERR(macstr);
1449
1450	for (i = 0; i < ETH_ALEN; i++) {
1451		mac[i] = simple_strtoul(s, &e, 16);
1452		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1453			kfree(macstr);
1454			return -ENOENT;
1455		}
1456		s = e+1;
1457	}
1458
1459	kfree(macstr);
1460	return 0;
1461}
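/*
 * Standalone sketch (illustrative, userspace-style) of the "mac" key
 * parsing above: the backend publishes a string such as
 * "00:16:3e:12:34:56" under the device's xenstore directory, and each
 * field must be a hex byte followed by ':' (or the terminating NUL).
 * This variant is slightly stricter than the driver (it requires two hex
 * digits per byte); the example_* names are hypothetical.
 */
static int example_hex_nibble(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

static int example_parse_mac(const char *s, unsigned char mac[6])
{
	int i;

	for (i = 0; i < 6; i++) {
		int hi = example_hex_nibble(s[0]);
		int lo = example_hex_nibble(s[1]);

		if (hi < 0 || lo < 0)
			return -1;
		mac[i] = (unsigned char)(hi << 4 | lo);
		if (s[2] != (i == 5 ? '\0' : ':'))
			return -1;
		s += 3;
	}
	return 0;
}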
1462
1463static int setup_netfront_single(struct netfront_queue *queue)
1464{
1465	int err;
1466
1467	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1468	if (err < 0)
1469		goto fail;
1470
1471	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1472					xennet_interrupt,
1473					0, queue->info->netdev->name, queue);
1474	if (err < 0)
1475		goto bind_fail;
1476	queue->rx_evtchn = queue->tx_evtchn;
1477	queue->rx_irq = queue->tx_irq = err;
1478
1479	return 0;
1480
1481bind_fail:
1482	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1483	queue->tx_evtchn = 0;
1484fail:
1485	return err;
1486}
1487
1488static int setup_netfront_split(struct netfront_queue *queue)
1489{
1490	int err;
1491
1492	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1493	if (err < 0)
1494		goto fail;
1495	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1496	if (err < 0)
1497		goto alloc_rx_evtchn_fail;
1498
1499	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1500		 "%s-tx", queue->name);
1501	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1502					xennet_tx_interrupt,
1503					0, queue->tx_irq_name, queue);
1504	if (err < 0)
1505		goto bind_tx_fail;
1506	queue->tx_irq = err;
1507
1508	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1509		 "%s-rx", queue->name);
1510	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1511					xennet_rx_interrupt,
1512					0, queue->rx_irq_name, queue);
1513	if (err < 0)
1514		goto bind_rx_fail;
1515	queue->rx_irq = err;
1516
1517	return 0;
1518
1519bind_rx_fail:
1520	unbind_from_irqhandler(queue->tx_irq, queue);
1521	queue->tx_irq = 0;
1522bind_tx_fail:
1523	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1524	queue->rx_evtchn = 0;
1525alloc_rx_evtchn_fail:
1526	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1527	queue->tx_evtchn = 0;
1528fail:
1529	return err;
1530}
1531
1532static int setup_netfront(struct xenbus_device *dev,
1533			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1534{
1535	struct xen_netif_tx_sring *txs;
1536	struct xen_netif_rx_sring *rxs;
1537	grant_ref_t gref;
1538	int err;
 
 
1539
1540	queue->tx_ring_ref = GRANT_INVALID_REF;
1541	queue->rx_ring_ref = GRANT_INVALID_REF;
1542	queue->rx.sring = NULL;
1543	queue->tx.sring = NULL;
1544
1545	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1546	if (!txs) {
1547		err = -ENOMEM;
1548		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1549		goto fail;
1550	}
1551	SHARED_RING_INIT(txs);
1552	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1553
1554	err = xenbus_grant_ring(dev, txs, 1, &gref);
1555	if (err < 0)
1556		goto grant_tx_ring_fail;
1557	queue->tx_ring_ref = gref;
1558
 
1559	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1560	if (!rxs) {
1561		err = -ENOMEM;
1562		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1563		goto alloc_rx_ring_fail;
1564	}
1565	SHARED_RING_INIT(rxs);
1566	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1567
1568	err = xenbus_grant_ring(dev, rxs, 1, &gref);
1569	if (err < 0)
1570		goto grant_rx_ring_fail;
1571	queue->rx_ring_ref = gref;
1572
1573	if (feature_split_evtchn)
1574		err = setup_netfront_split(queue);
 1575	/* Set up a single event channel if
 1576	 *  a) feature-split-event-channels == 0
 1577	 *  b) feature-split-event-channels == 1 but split setup failed
1578	 */
1579	if (!feature_split_evtchn || (feature_split_evtchn && err))
1580		err = setup_netfront_single(queue);
1581
1582	if (err)
1583		goto alloc_evtchn_fail;
1584
1585	return 0;
1586
 1587	/* If we fail to set up netfront, it is safe to just revoke access to
 1588	 * the granted pages because the backend is not accessing them at this point.
1589	 */
1590alloc_evtchn_fail:
1591	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1592grant_rx_ring_fail:
1593	free_page((unsigned long)rxs);
1594alloc_rx_ring_fail:
1595	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1596grant_tx_ring_fail:
1597	free_page((unsigned long)txs);
1598fail:
1599	return err;
1600}
1601
1602/* Queue-specific initialisation
1603 * This used to be done in xennet_create_dev() but must now
1604 * be run per-queue.
1605 */
1606static int xennet_init_queue(struct netfront_queue *queue)
1607{
1608	unsigned short i;
1609	int err = 0;
1610	char *devid;
1611
1612	spin_lock_init(&queue->tx_lock);
1613	spin_lock_init(&queue->rx_lock);
1614
1615	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
1616
1617	devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
1618	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
1619		 devid, queue->id);
1620
1621	/* Initialise tx_skbs as a free chain containing every entry. */
1622	queue->tx_skb_freelist = 0;
1623	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1624		skb_entry_set_link(&queue->tx_skbs[i], i+1);
1625		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1626		queue->grant_tx_page[i] = NULL;
1627	}
1628
1629	/* Clear out rx_skbs */
1630	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1631		queue->rx_skbs[i] = NULL;
1632		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1633	}
1634
1635	/* A grant for every tx ring slot */
1636	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1637					  &queue->gref_tx_head) < 0) {
1638		pr_alert("can't alloc tx grant refs\n");
1639		err = -ENOMEM;
1640		goto exit;
1641	}
1642
1643	/* A grant for every rx ring slot */
1644	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1645					  &queue->gref_rx_head) < 0) {
1646		pr_alert("can't alloc rx grant refs\n");
1647		err = -ENOMEM;
1648		goto exit_free_tx;
1649	}
1650
1651	return 0;
1652
1653 exit_free_tx:
1654	gnttab_free_grant_references(queue->gref_tx_head);
1655 exit:
1656	return err;
1657}
1658
1659static int write_queue_xenstore_keys(struct netfront_queue *queue,
1660			   struct xenbus_transaction *xbt, int write_hierarchical)
1661{
1662	/* Write the queue-specific keys into XenStore in the traditional
 1663	 * way for a single queue, or in per-queue subkeys for multiple
1664	 * queues.
1665	 */
1666	struct xenbus_device *dev = queue->info->xbdev;
1667	int err;
1668	const char *message;
1669	char *path;
1670	size_t pathsize;
1671
1672	/* Choose the correct place to write the keys */
1673	if (write_hierarchical) {
1674		pathsize = strlen(dev->nodename) + 10;
1675		path = kzalloc(pathsize, GFP_KERNEL);
1676		if (!path) {
1677			err = -ENOMEM;
1678			message = "out of memory while writing ring references";
1679			goto error;
1680		}
1681		snprintf(path, pathsize, "%s/queue-%u",
1682				dev->nodename, queue->id);
1683	} else {
1684		path = (char *)dev->nodename;
1685	}
1686
1687	/* Write ring references */
1688	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1689			queue->tx_ring_ref);
1690	if (err) {
1691		message = "writing tx-ring-ref";
1692		goto error;
1693	}
1694
1695	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1696			queue->rx_ring_ref);
1697	if (err) {
1698		message = "writing rx-ring-ref";
1699		goto error;
1700	}
1701
1702	/* Write event channels; taking into account both shared
1703	 * and split event channel scenarios.
1704	 */
1705	if (queue->tx_evtchn == queue->rx_evtchn) {
1706		/* Shared event channel */
1707		err = xenbus_printf(*xbt, path,
1708				"event-channel", "%u", queue->tx_evtchn);
1709		if (err) {
1710			message = "writing event-channel";
1711			goto error;
1712		}
1713	} else {
1714		/* Split event channels */
1715		err = xenbus_printf(*xbt, path,
1716				"event-channel-tx", "%u", queue->tx_evtchn);
1717		if (err) {
1718			message = "writing event-channel-tx";
1719			goto error;
1720		}
1721
1722		err = xenbus_printf(*xbt, path,
1723				"event-channel-rx", "%u", queue->rx_evtchn);
1724		if (err) {
1725			message = "writing event-channel-rx";
1726			goto error;
1727		}
1728	}
1729
1730	if (write_hierarchical)
1731		kfree(path);
1732	return 0;
1733
1734error:
1735	if (write_hierarchical)
1736		kfree(path);
1737	xenbus_dev_fatal(dev, err, "%s", message);
1738	return err;
1739}
1740
1741static void xennet_destroy_queues(struct netfront_info *info)
1742{
1743	unsigned int i;
1744
1745	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1746		struct netfront_queue *queue = &info->queues[i];
1747
1748		if (netif_running(info->netdev))
1749			napi_disable(&queue->napi);
1750		netif_napi_del(&queue->napi);
1751	}
1752
1753	kfree(info->queues);
1754	info->queues = NULL;
1755}
1756
1757static int xennet_create_queues(struct netfront_info *info,
1758				unsigned int *num_queues)
1759{
1760	unsigned int i;
1761	int ret;
1762
1763	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
1764			       GFP_KERNEL);
1765	if (!info->queues)
1766		return -ENOMEM;
1767
1768	for (i = 0; i < *num_queues; i++) {
1769		struct netfront_queue *queue = &info->queues[i];
1770
1771		queue->id = i;
1772		queue->info = info;
1773
1774		ret = xennet_init_queue(queue);
1775		if (ret < 0) {
1776			dev_warn(&info->xbdev->dev,
1777				 "only created %u queues\n", i);
1778			*num_queues = i;
1779			break;
1780		}
1781
1782		netif_napi_add(queue->info->netdev, &queue->napi,
1783			       xennet_poll, 64);
1784		if (netif_running(info->netdev))
1785			napi_enable(&queue->napi);
1786	}
1787
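	/* Tell the stack how many tx queues were actually brought up
	 * (may be fewer than requested if xennet_init_queue() failed).
	 */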
1788	netif_set_real_num_tx_queues(info->netdev, *num_queues);
1789
1790	if (*num_queues == 0) {
1791		dev_err(&info->xbdev->dev, "no queues\n");
1792		return -EINVAL;
1793	}
1794	return 0;
1795}
1796
1797/* Common code used when first setting up, and when resuming. */
1798static int talk_to_netback(struct xenbus_device *dev,
1799			   struct netfront_info *info)
1800{
1801	const char *message;
1802	struct xenbus_transaction xbt;
1803	int err;
1804	unsigned int feature_split_evtchn;
1805	unsigned int i = 0;
1806	unsigned int max_queues = 0;
1807	struct netfront_queue *queue = NULL;
1808	unsigned int num_queues = 1;
1809
1810	info->netdev->irq = 0;
1811
1812	/* Check if backend supports multiple queues */
1813	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1814					  "multi-queue-max-queues", 1);
1815	num_queues = min(max_queues, xennet_max_queues);
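	/* e.g. backend advertises 8 queues and xennet_max_queues is 4
	 * -> 4 queues are used.
	 */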
1816
1817	/* Check feature-split-event-channels */
1818	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
1819					"feature-split-event-channels", 0);
1820
1821	/* Read mac addr. */
1822	err = xen_net_read_mac(dev, info->netdev->dev_addr);
1823	if (err) {
1824		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1825		goto out_unlocked;
1826	}
1827
1828	rtnl_lock();
1829	if (info->queues)
1830		xennet_destroy_queues(info);
1831
1832	err = xennet_create_queues(info, &num_queues);
1833	if (err < 0) {
1834		xenbus_dev_fatal(dev, err, "creating queues");
1835		kfree(info->queues);
1836		info->queues = NULL;
1837		goto out;
1838	}
1839	rtnl_unlock();
1840
1841	/* Create shared ring, alloc event channel -- for each queue */
1842	for (i = 0; i < num_queues; ++i) {
1843		queue = &info->queues[i];
1844		err = setup_netfront(dev, queue, feature_split_evtchn);
1845		if (err)
1846			goto destroy_ring;
1847	}
1848
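	/* The keys below are written inside one XenStore transaction;
	 * on -EAGAIN from xenbus_transaction_end() we retry from here.
	 */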
1849again:
1850	err = xenbus_transaction_start(&xbt);
1851	if (err) {
1852		xenbus_dev_fatal(dev, err, "starting transaction");
1853		goto destroy_ring;
1854	}
1855
1856	if (xenbus_exists(XBT_NIL,
1857			  info->xbdev->otherend, "multi-queue-max-queues")) {
1858		/* Write the number of queues */
1859		err = xenbus_printf(xbt, dev->nodename,
1860				    "multi-queue-num-queues", "%u", num_queues);
1861		if (err) {
1862			message = "writing multi-queue-num-queues";
1863			goto abort_transaction_no_dev_fatal;
1864		}
1865	}
1866
1867	if (num_queues == 1) {
1868		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1869		if (err)
1870			goto abort_transaction_no_dev_fatal;
1871	} else {
1872		/* Write the keys for each queue */
1873		for (i = 0; i < num_queues; ++i) {
1874			queue = &info->queues[i];
1875			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1876			if (err)
1877				goto abort_transaction_no_dev_fatal;
1878		}
1879	}
1880
1881	/* The remaining keys are not queue-specific */
1882	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1883			    1);
1884	if (err) {
1885		message = "writing request-rx-copy";
1886		goto abort_transaction;
1887	}
1888
1889	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1890	if (err) {
1891		message = "writing feature-rx-notify";
1892		goto abort_transaction;
1893	}
1894
1895	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1896	if (err) {
1897		message = "writing feature-sg";
1898		goto abort_transaction;
1899	}
1900
1901	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1902	if (err) {
1903		message = "writing feature-gso-tcpv4";
1904		goto abort_transaction;
1905	}
1906
1907	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1908	if (err) {
1909		message = "writing feature-gso-tcpv6";
1910		goto abort_transaction;
1911	}
1912
1913	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1914			   "1");
1915	if (err) {
1916		message = "writing feature-ipv6-csum-offload";
1917		goto abort_transaction;
1918	}
1919
1920	err = xenbus_transaction_end(xbt, 0);
1921	if (err) {
1922		if (err == -EAGAIN)
1923			goto again;
1924		xenbus_dev_fatal(dev, err, "completing transaction");
1925		goto destroy_ring;
1926	}
1927
1928	return 0;
1929
1930 abort_transaction:
1931	xenbus_dev_fatal(dev, err, "%s", message);
1932abort_transaction_no_dev_fatal:
1933	xenbus_transaction_end(xbt, 1);
1934 destroy_ring:
1935	xennet_disconnect_backend(info);
1936	rtnl_lock();
1937	xennet_destroy_queues(info);
1938 out:
1939	rtnl_unlock();
1940out_unlocked:
1941	device_unregister(&dev->dev);
1942	return err;
1943}
1944
1945static int xennet_connect(struct net_device *dev)
1946{
1947	struct netfront_info *np = netdev_priv(dev);
1948	unsigned int num_queues = 0;
1949	int err;
1950	unsigned int j = 0;
1951	struct netfront_queue *queue = NULL;
1952
1953	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
1954		dev_info(&dev->dev,
1955			 "backend does not support copying receive path\n");
1956		return -ENODEV;
1957	}
1958
1959	err = talk_to_netback(np->xbdev, np);
1960	if (err)
1961		return err;
1962
1963	/* talk_to_netback() sets the correct number of queues */
1964	num_queues = dev->real_num_tx_queues;
1965
1966	if (dev->reg_state == NETREG_UNINITIALIZED) {
1967		err = register_netdev(dev);
1968		if (err) {
1969			pr_warn("%s: register_netdev err=%d\n", __func__, err);
1970			device_unregister(&np->xbdev->dev);
1971			return err;
1972		}
1973	}
1974
1975	rtnl_lock();
1976	netdev_update_features(dev);
1977	rtnl_unlock();
1978
1979	/*
1980	 * All public and private state should now be sane.  Get
1981	 * ready to start sending and receiving packets and give the driver
1982	 * domain a kick because we've probably just requeued some
1983	 * packets.
1984	 */
1985	netif_carrier_on(np->netdev);
1986	for (j = 0; j < num_queues; ++j) {
1987		queue = &np->queues[j];
1988
1989		notify_remote_via_irq(queue->tx_irq);
1990		if (queue->tx_irq != queue->rx_irq)
1991			notify_remote_via_irq(queue->rx_irq);
1992
1993		spin_lock_irq(&queue->tx_lock);
1994		xennet_tx_buf_gc(queue);
1995		spin_unlock_irq(&queue->tx_lock);
1996
1997		spin_lock_bh(&queue->rx_lock);
1998		xennet_alloc_rx_buffers(queue);
1999		spin_unlock_bh(&queue->rx_lock);
2000	}
2001
2002	return 0;
2003}
2004
2005/*
2006 * Callback received when the backend's state changes.
2007 */
2008static void netback_changed(struct xenbus_device *dev,
2009			    enum xenbus_state backend_state)
2010{
2011	struct netfront_info *np = dev_get_drvdata(&dev->dev);
2012	struct net_device *netdev = np->netdev;
2013
2014	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2015
2016	wake_up_all(&module_wq);
2017
2018	switch (backend_state) {
2019	case XenbusStateInitialising:
2020	case XenbusStateInitialised:
2021	case XenbusStateReconfiguring:
2022	case XenbusStateReconfigured:
2023	case XenbusStateUnknown:
2024		break;
2025
2026	case XenbusStateInitWait:
2027		if (dev->state != XenbusStateInitialising)
2028			break;
2029		if (xennet_connect(netdev) != 0)
2030			break;
2031		xenbus_switch_state(dev, XenbusStateConnected);
2032		break;
2033
2034	case XenbusStateConnected:
2035		netdev_notify_peers(netdev);
2036		break;
2037
2038	case XenbusStateClosed:
2039		if (dev->state == XenbusStateClosed)
2040			break;
2041		/* Fall through - Missed the backend's CLOSING state. */
2042	case XenbusStateClosing:
2043		xenbus_frontend_closed(dev);
2044		break;
2045	}
2046}
2047
2048static const struct xennet_stat {
2049	char name[ETH_GSTRING_LEN];
2050	u16 offset;
2051} xennet_stats[] = {
2052	{
2053		"rx_gso_checksum_fixup",
2054		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2055	},
2056};
2057
2058static int xennet_get_sset_count(struct net_device *dev, int string_set)
2059{
2060	switch (string_set) {
2061	case ETH_SS_STATS:
2062		return ARRAY_SIZE(xennet_stats);
2063	default:
2064		return -EINVAL;
2065	}
2066}
2067
2068static void xennet_get_ethtool_stats(struct net_device *dev,
2069				     struct ethtool_stats *stats, u64 *data)
2070{
2071	void *np = netdev_priv(dev);
2072	int i;
2073
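	/* Each stat is an atomic_t counter located 'offset' bytes into
	 * struct netfront_info.
	 */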
2074	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2075		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2076}
2077
2078static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2079{
2080	int i;
2081
2082	switch (stringset) {
2083	case ETH_SS_STATS:
2084		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2085			memcpy(data + i * ETH_GSTRING_LEN,
2086			       xennet_stats[i].name, ETH_GSTRING_LEN);
2087		break;
2088	}
2089}
2090
2091static const struct ethtool_ops xennet_ethtool_ops =
2092{
2093	.get_link = ethtool_op_get_link,
2094
2095	.get_sset_count = xennet_get_sset_count,
2096	.get_ethtool_stats = xennet_get_ethtool_stats,
2097	.get_strings = xennet_get_strings,
2098};
2099
2100#ifdef CONFIG_SYSFS
2101static ssize_t show_rxbuf(struct device *dev,
2102			  struct device_attribute *attr, char *buf)
2103{
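	/* rxbuf_min, rxbuf_max and rxbuf_cur all report the fixed ring size. */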
2104	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2105}
2106
2107static ssize_t store_rxbuf(struct device *dev,
2108			   struct device_attribute *attr,
2109			   const char *buf, size_t len)
2110{
2111	char *endp;
2112	unsigned long target;
2113
2114	if (!capable(CAP_NET_ADMIN))
2115		return -EPERM;
2116
2117	target = simple_strtoul(buf, &endp, 0);
2118	if (endp == buf)
2119		return -EBADMSG;
2120
2121	/* rxbuf_min and rxbuf_max are no longer configurable. */
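	/* The parsed value is otherwise ignored; the receive ring size is
	 * fixed at NET_RX_RING_SIZE.
	 */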
2122
2123	return len;
2124}
2125
2126static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2127static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2128static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2129
2130static struct attribute *xennet_dev_attrs[] = {
2131	&dev_attr_rxbuf_min.attr,
2132	&dev_attr_rxbuf_max.attr,
2133	&dev_attr_rxbuf_cur.attr,
2134	NULL
2135};
2136
2137static const struct attribute_group xennet_dev_group = {
2138	.attrs = xennet_dev_attrs
2139};
2140#endif /* CONFIG_SYSFS */
2141
2142static int xennet_remove(struct xenbus_device *dev)
2143{
2144	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2145
2146	dev_dbg(&dev->dev, "%s\n", dev->nodename);
2147
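	/* Coordinate an orderly shutdown with the backend: switch to
	 * Closing and wait, then to Closed and wait.
	 */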
2148	if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
2149		xenbus_switch_state(dev, XenbusStateClosing);
2150		wait_event(module_wq,
2151			   xenbus_read_driver_state(dev->otherend) ==
2152			   XenbusStateClosing ||
2153			   xenbus_read_driver_state(dev->otherend) ==
2154			   XenbusStateUnknown);
2155
2156		xenbus_switch_state(dev, XenbusStateClosed);
2157		wait_event(module_wq,
2158			   xenbus_read_driver_state(dev->otherend) ==
2159			   XenbusStateClosed ||
2160			   xenbus_read_driver_state(dev->otherend) ==
2161			   XenbusStateUnknown);
2162	}
2163
2164	xennet_disconnect_backend(info);
2165
2166	if (info->netdev->reg_state == NETREG_REGISTERED)
2167		unregister_netdev(info->netdev);
2168
2169	if (info->queues) {
2170		rtnl_lock();
2171		xennet_destroy_queues(info);
2172		rtnl_unlock();
2173	}
2174	xennet_free_netdev(info->netdev);
2175
2176	return 0;
2177}
2178
2179static const struct xenbus_device_id netfront_ids[] = {
2180	{ "vif" },
2181	{ "" }
2182};
2183
2184static struct xenbus_driver netfront_driver = {
2185	.ids = netfront_ids,
2186	.probe = netfront_probe,
2187	.remove = xennet_remove,
2188	.resume = netfront_resume,
2189	.otherend_changed = netback_changed,
2190};
2191
2192static int __init netif_init(void)
2193{
2194	if (!xen_domain())
2195		return -ENODEV;
2196
2197	if (!xen_has_pv_nic_devices())
2198		return -ENODEV;
2199
2200	pr_info("Initialising Xen virtual ethernet driver\n");
2201
2202	/* Allow as many queues as there are CPUs, but at most 8 (MAX_QUEUES_DEFAULT),
2203	 * if the user has not specified a value.
2204	 */
2205	if (xennet_max_queues == 0)
2206		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2207					  num_online_cpus());
2208
2209	return xenbus_register_frontend(&netfront_driver);
2210}
2211module_init(netif_init);
2212
2213
2214static void __exit netif_exit(void)
2215{
2216	xenbus_unregister_driver(&netfront_driver);
2217}
2218module_exit(netif_exit);
2219
2220MODULE_DESCRIPTION("Xen virtual network device frontend");
2221MODULE_LICENSE("GPL");
2222MODULE_ALIAS("xen:vif");
2223MODULE_ALIAS("xennet");