/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

/* Module parameters */
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

struct netfront_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

struct netfront_info;

struct netfront_queue {
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct netfront_info *info;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

	spinlock_t   tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	struct page *grant_tx_page[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
};

struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct xenbus_device *xbdev;

	/* Multi-queue support */
	struct netfront_queue *queues;

	/* Statistics */
	struct netfront_stats __percpu *rx_stats;
	struct netfront_stats __percpu *tx_stats;

	atomic_t rx_gso_checksum_fixup;
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return (unsigned long)list->skb < PAGE_OFFSET;
}

/*
 * Access macros for acquiring/freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}

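/*
 * Illustrative example (not part of the driver itself): starting from a
 * fresh ring where tx_skb_freelist == 0 and tx_skbs[i].link == i + 1,
 * popping with get_id_from_freelist() yields id 0 and advances the head
 * to 1; pushing id 0 back with add_id_to_freelist() stores the old head
 * in tx_skbs[0].link and makes 0 the head again. Because valid skb
 * pointers are >= PAGE_OFFSET while ring indices are small integers,
 * skb_entry_is_link() can always tell which role the union is playing.
 */
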
static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

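/*
 * Note: this mask relies on NET_RX_RING_SIZE being a power of two, which
 * __CONST_RING_SIZE() guarantees (256 slots with 4 KiB Xen pages), so
 * e.g. response index 260 maps onto slot 4.
 */
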
static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = queue->rx_skbs[i];
	queue->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = queue->grant_rx_ref[i];
	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static const struct attribute_group xennet_dev_group;
#endif

static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


static void rx_refill_timeout(unsigned long data)
{
	struct netfront_queue *queue = (struct netfront_queue *)data;
	napi_schedule(&queue->napi);
}

static int netfront_tx_slot_available(struct netfront_queue *queue)
{
	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
}

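/*
 * The slack of MAX_SKB_FRAGS + 2 above is sized for a worst-case packet:
 * one slot per possible frag plus extra slots for the linear area and the
 * optional GSO extra-info segment, so the queue is only reported available
 * while a maximal skb is still guaranteed to fit in the tx ring.
 */
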
static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
	struct net_device *dev = queue->info->netdev;
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
	    netfront_tx_slot_available(queue) &&
	    likely(netif_running(dev)))
		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}


static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	struct page *page;

	skb = __netdev_alloc_skb(queue->info->netdev,
				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!page) {
		kfree_skb(skb);
		return NULL;
	}
	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

	/* Align ip header to a 16-byte boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = queue->info->netdev;

	return skb;
}


static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;

	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
		return;

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb;
		unsigned short id;
		grant_ref_t ref;
		struct page *page;
		struct xen_netif_rx_request *req;

		skb = xennet_alloc_one_rx_buffer(queue);
		if (!skb)
			break;

		id = xennet_rxidx(req_prod);

		BUG_ON(queue->rx_skbs[id]);
		queue->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		queue->grant_rx_ref[id] = ref;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		req = RING_GET_REQUEST(&queue->rx, req_prod);
		gnttab_page_grant_foreign_access_ref_one(ref,
							 queue->info->xbdev->otherend_id,
							 page,
							 0);
		req->id = id;
		req->gref = ref;
	}

	queue->rx.req_prod_pvt = req_prod;

	/* Not enough requests? Try again later. */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
		return;
	}

	wmb();		/* barrier so backend sees requests */

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}

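/*
 * Refill strategy, for illustration: the loop above tops the rx ring up
 * towards its full NET_RX_RING_SIZE. If the atomic allocations left fewer
 * than NET_RX_SLOTS_MIN outstanding requests, the refill timer retries
 * roughly 100 ms later (HZ/10) instead of notifying the backend about a
 * ring it cannot usefully fill.
 */
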
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i = 0;
	struct netfront_queue *queue = NULL;

	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_enable(&queue->napi);

		spin_lock_bh(&queue->rx_lock);
		if (netif_carrier_ok(dev)) {
			xennet_alloc_rx_buffers(queue);
			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
				napi_schedule(&queue->napi);
		}
		spin_unlock_bh(&queue->rx_lock);
	}

	netif_tx_start_all_queues(dev);

	return 0;
}

static void xennet_tx_buf_gc(struct netfront_queue *queue)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct sk_buff *skb;
	bool more_to_do;

	BUG_ON(!netif_carrier_ok(queue->info->netdev));

	do {
		prod = queue->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = queue->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				queue->grant_tx_ref[id]) != 0)) {
				pr_alert("%s: warning -- grant still in use by backend domain\n",
					 __func__);
				BUG();
			}
			gnttab_end_foreign_access_ref(
				queue->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&queue->gref_tx_head, queue->grant_tx_ref[id]);
			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
			queue->grant_tx_page[id] = NULL;
			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		queue->tx.rsp_cons = prod;

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
	} while (more_to_do);

	xennet_maybe_wake_tx(queue);
}

struct xennet_gnttab_make_txreq {
	struct netfront_queue *queue;
	struct sk_buff *skb;
	struct page *page;
	struct xen_netif_tx_request *tx; /* Last request */
	unsigned int size;
};

static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;
	unsigned int id;
	struct xen_netif_tx_request *tx;
	grant_ref_t ref;
	/* convenient aliases */
	struct page *page = info->page;
	struct netfront_queue *queue = info->queue;
	struct sk_buff *skb = info->skb;

	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
	BUG_ON((signed short)ref < 0);

	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
					gfn, GNTMAP_readonly);

	queue->tx_skbs[id].skb = skb;
	queue->grant_tx_page[id] = page;
	queue->grant_tx_ref[id] = ref;

	tx->id = id;
	tx->gref = ref;
	tx->offset = offset;
	tx->size = len;
	tx->flags = 0;

	info->tx = tx;
	info->size += tx->size;
}

static struct xen_netif_tx_request *xennet_make_first_txreq(
	struct netfront_queue *queue, struct sk_buff *skb,
	struct page *page, unsigned int offset, unsigned int len)
{
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = skb,
		.page = page,
		.size = 0,
	};

	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);

	return info.tx;
}

static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;

	info->tx->flags |= XEN_NETTXF_more_data;
	skb_get(info->skb);
	xennet_tx_setup_grant(gfn, offset, len, data);
}

static struct xen_netif_tx_request *xennet_make_txreqs(
	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
	struct sk_buff *skb, struct page *page,
	unsigned int offset, unsigned int len)
{
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = skb,
		.tx = tx,
	};

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (len) {
		info.page = page;
		info.size = 0;

		gnttab_foreach_grant_in_range(page, offset, len,
					      xennet_make_one_txreq,
					      &info);

		page++;
		offset = 0;
		len -= info.size;
	}

	return info.tx;
}

/*
 * Count how many ring slots are required to send this skb. Each frag
 * might be a compound page.
 */
static int xennet_count_skb_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int slots;

	slots = gnttab_count_grant(offset_in_page(skb->data),
				   skb_headlen(skb));

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		slots += gnttab_count_grant(offset, size);
	}

	return slots;
}

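/*
 * Worked example: a 100-byte linear area that starts 60 bytes before a
 * page boundary spans two Xen pages and therefore counts as two grants,
 * and a compound-page frag is likewise charged one slot per XEN_PAGE_SIZE
 * chunk it touches, so the result can exceed nr_frags + 1.
 */
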
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_idx;

	/* First, check if there is only one queue */
	if (num_queues == 1) {
		queue_idx = 0;
	} else {
		hash = skb_get_hash(skb);
		queue_idx = hash % num_queues;
	}

	return queue_idx;
}

#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

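/*
 * 65536 here is the 64 KiB maximum wire-format packet size, so with a
 * 4 KiB XEN_PAGE_SIZE the define above evaluates to 65536 / 4096 + 1 = 17
 * slots: 16 for a fully packed payload plus one for data that straddles a
 * page boundary.
 */
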
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xen_netif_tx_request *tx, *first_tx;
	unsigned int i;
	int notify;
	int slots;
	struct page *page;
	unsigned int offset;
	unsigned int len;
	unsigned long flags;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 queue_index;

	/* Drop the packet if no queues are set up */
	if (num_queues < 1)
		goto drop;
	/* Determine which queue to transmit this SKB on */
	queue_index = skb_get_queue_mapping(skb);
	queue = &np->queues[queue_index];

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = xennet_count_skb_slots(skb);
	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
				    slots, skb->len);
		if (skb_linearize(skb))
			goto drop;
	}

	page = virt_to_page(skb->data);
	offset = offset_in_page(skb->data);
	len = skb_headlen(skb);

	spin_lock_irqsave(&queue->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		goto drop;
	}

	/* First request for the linear area. */
	first_tx = tx = xennet_make_first_txreq(queue, skb,
						page, offset, len);
	offset += tx->size;
	if (offset == PAGE_SIZE) {
		page++;
		offset = 0;
	}
	len -= tx->size;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	/* Optional extra info after the first request. */
	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

		tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	/* Requests for the rest of the linear area. */
	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);

	/* Requests for all the frags. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		tx = xennet_make_txreqs(queue, tx, skb,
					skb_frag_page(frag), frag->page_offset,
					skb_frag_size(frag));
	}

	/* First request has the packet length. */
	first_tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += skb->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(queue);

	if (!netfront_tx_slot_available(queue))
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;
	struct netfront_queue *queue;
	netif_tx_stop_all_queues(np->netdev);
	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_disable(&queue->napi);
	}
	return 0;
}

static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(queue->rx.req_prod_pvt);

	BUG_ON(queue->rx_skbs[new]);
	queue->rx_skbs[new] = skb;
	queue->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
	queue->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_queue *queue,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&queue->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(queue, cons);
		ref = xennet_get_rx_ref(queue, cons);
		xennet_move_rx_slot(queue, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	queue->rx.rsp_cons = cons;
	return err;
}

static int xennet_get_responses(struct netfront_queue *queue,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int slots = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(queue, extras, rp);
		cons = queue->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %u, size: %d\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(queue, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&queue->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
		skb = xennet_get_rx_skb(queue, cons + slots);
		ref = xennet_get_rx_ref(queue, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		queue->rx.rsp_cons = cons + slots;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			pr_warn("GSO size must not be zero\n");
		return -EINVAL;
	}

	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
		if (net_ratelimit())
			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type =
		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
		SKB_GSO_TCPV4 :
		SKB_GSO_TCPV6;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&queue->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to <= skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);

		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
				rx->offset, rx->status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	return cons;
}

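/*
 * Note: each skb left on 'list' by xennet_get_responses() carries one
 * backend-filled page in frags[0]. That page is stolen into the head skb
 * above; clearing nskb's nr_frags before kfree_skb() hands the page
 * reference over to the head skb instead of dropping it, turning a
 * multi-slot response into a single frag list.
 */
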
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		atomic_inc(&np->rx_gso_checksum_fixup);
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}

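/*
 * Example of the fixup above: a GSO frame arriving from a buggy backend
 * with ip_summed == CHECKSUM_NONE is forced to CHECKSUM_PARTIAL, and
 * skb_checksum_setup() then rebuilds the partial checksum state, with
 * rx_gso_checksum_fixup counting how often this path was taken.
 */
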
static int handle_incoming_queue(struct netfront_queue *queue,
				 struct sk_buff_head *rxq)
{
	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, queue->info->netdev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue->info->netdev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			queue->info->netdev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->packets++;
		rx_stats->bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);

		/* Pass it up. */
		napi_gro_receive(&queue->napi, skb);
	}

	return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
	struct net_device *dev = queue->info->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	int err;

	spin_lock(&queue->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = queue->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = queue->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = queue->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				queue->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		i = xennet_fill_frags(queue, skb, &tmpq);

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		queue->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(queue, &rxq);

	xennet_alloc_rx_buffers(queue);

	if (work_done < budget) {
		int more_to_do = 0;

		napi_complete(napi);

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
		if (more_to_do)
			napi_schedule(napi);
	}

	spin_unlock(&queue->rx_lock);

	return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

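/*
 * With scatter-gather negotiated, the MTU may be raised as high as
 * XEN_NETIF_MAX_TX_SIZE; without SG the device stays capped at the
 * standard 1500-byte ETH_DATA_LEN.
 */
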
static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->rx_errors  = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;

	return tot;
}

static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&queue->tx_skbs[i]))
			continue;

		skb = queue->tx_skbs[i].skb;
		get_page(queue->grant_tx_page[i]);
		gnttab_end_foreign_access(queue->grant_tx_ref[i],
					  GNTMAP_readonly,
					  (unsigned long)page_address(queue->grant_tx_page[i]));
		queue->grant_tx_page[i] = NULL;
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
	int id, ref;

	spin_lock_bh(&queue->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		struct sk_buff *skb;
		struct page *page;

		skb = queue->rx_skbs[id];
		if (!skb)
			continue;

		ref = queue->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF)
			continue;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		/* gnttab_end_foreign_access() needs a page ref until
		 * foreign access is ended (which may be deferred).
		 */
		get_page(page);
		gnttab_end_foreign_access(ref, 0,
					  (unsigned long)page_address(page));
		queue->grant_rx_ref[id] = GRANT_INVALID_REF;

		kfree_skb(skb);
	}

	spin_unlock_bh(&queue->rx_lock);
}

static netdev_features_t xennet_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);
	int val;

	if (features & NETIF_F_SG) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_SG;
	}

	if (features & NETIF_F_IPV6_CSUM) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-ipv6-csum-offload", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_IPV6_CSUM;
	}

	if (features & NETIF_F_TSO) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO;
	}

	if (features & NETIF_F_TSO6) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv6", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO6;
	}

	return features;
}

static int xennet_set_features(struct net_device *dev,
	netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload\n");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}

static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&queue->tx_lock, flags);
	xennet_tx_buf_gc(queue);
	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	struct net_device *dev = queue->info->netdev;

	if (likely(netif_carrier_ok(dev) &&
		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	xennet_tx_interrupt(irq, dev_id);
	xennet_rx_interrupt(irq, dev_id);
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	/* Poll each queue */
	struct netfront_info *info = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;
	for (i = 0; i < num_queues; ++i)
		xennet_interrupt(0, &info->queues[i]);
}
#endif

static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu	     = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
	.ndo_select_queue    = xennet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};

static void xennet_free_netdev(struct net_device *netdev)
{
	struct netfront_info *np = netdev_priv(netdev);

	free_percpu(np->rx_stats);
	free_percpu(np->tx_stats);
	free_netdev(netdev);
}

static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np                   = netdev_priv(netdev);
	np->xbdev            = dev;

	np->queues = NULL;

	err = -ENOMEM;
	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->rx_stats == NULL)
		goto exit;
	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->tx_stats == NULL)
		goto exit;

	netdev->netdev_ops	= &xennet_netdev_ops;

	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
				  NETIF_F_GSO_ROBUST;
	netdev->hw_features	= NETIF_F_SG |
				  NETIF_F_IPV6_CSUM |
				  NETIF_F_TSO | NETIF_F_TSO6;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	netdev->ethtool_ops = &xennet_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit:
	xennet_free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);
#ifdef CONFIG_SYSFS
	info->netdev->sysfs_groups[0] = &xennet_dev_group;
#endif
	err = register_netdev(info->netdev);
	if (err) {
		pr_warn("%s: register_netdev err=%d\n", __func__, err);
		goto fail;
	}

	return 0;

 fail:
	xennet_free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
	unsigned int i = 0;
	unsigned int num_queues = info->netdev->real_num_tx_queues;

	netif_carrier_off(info->netdev);

	for (i = 0; i < num_queues && info->queues; ++i) {
		struct netfront_queue *queue = &info->queues[i];

		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
			unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
			unbind_from_irqhandler(queue->tx_irq, queue);
			unbind_from_irqhandler(queue->rx_irq, queue);
		}
		queue->tx_evtchn = queue->rx_evtchn = 0;
		queue->tx_irq = queue->rx_irq = 0;

		if (netif_running(info->netdev))
			napi_synchronize(&queue->napi);

		xennet_release_tx_bufs(queue);
		xennet_release_rx_bufs(queue);
		gnttab_free_grant_references(queue->gref_tx_head);
		gnttab_free_grant_references(queue->gref_rx_head);

		/* End access and free the pages */
		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);

		queue->tx_ring_ref = GRANT_INVALID_REF;
		queue->rx_ring_ref = GRANT_INVALID_REF;
		queue->tx.sring = NULL;
		queue->rx.sring = NULL;
	}
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}

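/*
 * The "mac" node is expected to hold a colon-separated string such as
 * "00:16:3e:12:34:56" (00:16:3e being the Xen-assigned OUI); anything
 * that does not parse as six hex octets is rejected with -ENOENT.
 */
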
static int setup_netfront_single(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;

	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
					xennet_interrupt,
					0, queue->info->netdev->name, queue);
	if (err < 0)
		goto bind_fail;
	queue->rx_evtchn = queue->tx_evtchn;
	queue->rx_irq = queue->tx_irq = err;

	return 0;

bind_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}

static int setup_netfront_split(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;
	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
	if (err < 0)
		goto alloc_rx_evtchn_fail;

	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
		 "%s-tx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
					xennet_tx_interrupt,
					0, queue->tx_irq_name, queue);
	if (err < 0)
		goto bind_tx_fail;
	queue->tx_irq = err;

	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
		 "%s-rx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
					xennet_rx_interrupt,
					0, queue->rx_irq_name, queue);
	if (err < 0)
		goto bind_rx_fail;
	queue->rx_irq = err;

	return 0;

bind_rx_fail:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
bind_tx_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
	queue->rx_evtchn = 0;
alloc_rx_evtchn_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}

static int setup_netfront(struct xenbus_device *dev,
			  struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	grant_ref_t gref;
	int err;

	queue->tx_ring_ref = GRANT_INVALID_REF;
	queue->rx_ring_ref = GRANT_INVALID_REF;
	queue->rx.sring = NULL;
	queue->tx.sring = NULL;

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

	err = xenbus_grant_ring(dev, txs, 1, &gref);
	if (err < 0)
		goto grant_tx_ring_fail;
	queue->tx_ring_ref = gref;

	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto alloc_rx_ring_fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

	err = xenbus_grant_ring(dev, rxs, 1, &gref);
	if (err < 0)
		goto grant_rx_ring_fail;
	queue->rx_ring_ref = gref;

	if (feature_split_evtchn)
		err = setup_netfront_split(queue);
	/* setup single event channel if
	 *  a) feature-split-event-channels == 0
	 *  b) feature-split-event-channels == 1 but failed to setup
	 */
	if (!feature_split_evtchn || (feature_split_evtchn && err))
		err = setup_netfront_single(queue);

	if (err)
		goto alloc_evtchn_fail;

	return 0;

	/* If we fail to setup netfront, it is safe to just revoke access to
	 * granted pages because backend is not accessing it at this point.
	 */
alloc_evtchn_fail:
	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
grant_rx_ring_fail:
	free_page((unsigned long)rxs);
alloc_rx_ring_fail:
	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
grant_tx_ring_fail:
	free_page((unsigned long)txs);
fail:
	return err;
}

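/*
 * Note: each ring is a single shared page. SHARED_RING_INIT() resets the
 * producer/consumer indices in that page, FRONT_RING_INIT() points the
 * private xen_netif_*_front_ring at it, and xenbus_grant_ring() returns
 * the grant reference that is later advertised to the backend via the
 * tx-ring-ref/rx-ring-ref XenStore keys.
 */
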
/* Queue-specific initialisation
 * This used to be done in xennet_create_dev() but must now
 * be run per-queue.
 */
static int xennet_init_queue(struct netfront_queue *queue)
{
	unsigned short i;
	int err = 0;

	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->rx_lock);

	setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
		    (unsigned long)queue);

	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
		 queue->info->netdev->name, queue->id);

	/* Initialise tx_skbs as a free chain containing every entry. */
	queue->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&queue->tx_skbs[i], i+1);
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		queue->grant_tx_page[i] = NULL;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		queue->rx_skbs[i] = NULL;
		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
					  &queue->gref_tx_head) < 0) {
		pr_alert("can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}

	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
					  &queue->gref_rx_head) < 0) {
		pr_alert("can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	return 0;

 exit_free_tx:
	gnttab_free_grant_references(queue->gref_tx_head);
 exit:
	return err;
}

static int write_queue_xenstore_keys(struct netfront_queue *queue,
				     struct xenbus_transaction *xbt, int write_hierarchical)
{
	/* Write the queue-specific keys into XenStore in the traditional
	 * way for a single queue, or in queue subkeys for multiple
	 * queues.
	 */
	struct xenbus_device *dev = queue->info->xbdev;
	int err;
	const char *message;
	char *path;
	size_t pathsize;

	/* Choose the correct place to write the keys */
	if (write_hierarchical) {
		pathsize = strlen(dev->nodename) + 10;
		path = kzalloc(pathsize, GFP_KERNEL);
		if (!path) {
			err = -ENOMEM;
			message = "out of memory while writing ring references";
			goto error;
		}
		snprintf(path, pathsize, "%s/queue-%u",
			 dev->nodename, queue->id);
	} else {
		path = (char *)dev->nodename;
	}

	/* Write ring references */
	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
			    queue->tx_ring_ref);
	if (err) {
		message = "writing tx-ring-ref";
		goto error;
	}

	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
			    queue->rx_ring_ref);
	if (err) {
		message = "writing rx-ring-ref";
		goto error;
	}

	/* Write event channels; taking into account both shared
	 * and split event channel scenarios.
	 */
	if (queue->tx_evtchn == queue->rx_evtchn) {
		/* Shared event channel */
		err = xenbus_printf(*xbt, path,
				    "event-channel", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel";
			goto error;
		}
	} else {
		/* Split event channels */
		err = xenbus_printf(*xbt, path,
				    "event-channel-tx", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel-tx";
			goto error;
		}

		err = xenbus_printf(*xbt, path,
				    "event-channel-rx", "%u", queue->rx_evtchn);
		if (err) {
			message = "writing event-channel-rx";
			goto error;
		}
	}

	if (write_hierarchical)
		kfree(path);
	return 0;

error:
	if (write_hierarchical)
		kfree(path);
	xenbus_dev_fatal(dev, err, "%s", message);
	return err;
}

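/*
 * Resulting layout, for illustration with a hypothetical frontend node
 * device/vif/0: a single queue writes flat keys such as
 * device/vif/0/tx-ring-ref, while multiple queues write hierarchical
 * keys such as device/vif/0/queue-1/tx-ring-ref, matching what the
 * backend reads on the other end.
 */
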
static void xennet_destroy_queues(struct netfront_info *info)
{
	unsigned int i;

	rtnl_lock();

	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		if (netif_running(info->netdev))
			napi_disable(&queue->napi);
		del_timer_sync(&queue->rx_refill_timer);
		netif_napi_del(&queue->napi);
	}

	rtnl_unlock();

	kfree(info->queues);
	info->queues = NULL;
}

static int xennet_create_queues(struct netfront_info *info,
				unsigned int *num_queues)
{
	unsigned int i;
	int ret;

	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
			       GFP_KERNEL);
	if (!info->queues)
		return -ENOMEM;

	rtnl_lock();

	for (i = 0; i < *num_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		queue->id = i;
		queue->info = info;

		ret = xennet_init_queue(queue);
		if (ret < 0) {
			dev_warn(&info->netdev->dev,
				 "only created %d queues\n", i);
			*num_queues = i;
			break;
		}

		netif_napi_add(queue->info->netdev, &queue->napi,
			       xennet_poll, 64);
		if (netif_running(info->netdev))
			napi_enable(&queue->napi);
	}

	netif_set_real_num_tx_queues(info->netdev, *num_queues);

	rtnl_unlock();

	if (*num_queues == 0) {
		dev_err(&info->netdev->dev, "no queues\n");
		return -EINVAL;
	}
	return 0;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	unsigned int feature_split_evtchn;
	unsigned int i = 0;
	unsigned int max_queues = 0;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = 1;

	info->netdev->irq = 0;

	/* Check if backend supports multiple queues */
	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "multi-queue-max-queues", "%u", &max_queues);
	if (err < 0)
		max_queues = 1;
	num_queues = min(max_queues, xennet_max_queues);

	/* Check feature-split-event-channels */
	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-split-event-channels", "%u",
			   &feature_split_evtchn);
	if (err < 0)
		feature_split_evtchn = 0;

	/* Read mac addr. */
	err = xen_net_read_mac(dev, info->netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto out;
	}

	if (info->queues)
		xennet_destroy_queues(info);

	err = xennet_create_queues(info, &num_queues);
	if (err < 0)
		goto destroy_ring;

	/* Create shared ring, alloc event channel -- for each queue */
	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
		err = setup_netfront(dev, queue, feature_split_evtchn);
		if (err) {
			/* setup_netfront() will tidy up the current
			 * queue on error, but we need to clean up
			 * those already allocated.
			 */
			if (i > 0) {
				rtnl_lock();
				netif_set_real_num_tx_queues(info->netdev, i);
				rtnl_unlock();
				goto destroy_ring;
			} else {
				goto out;
			}
		}
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	if (xenbus_exists(XBT_NIL,
			  info->xbdev->otherend, "multi-queue-max-queues")) {
		/* Write the number of queues */
		err = xenbus_printf(xbt, dev->nodename,
				    "multi-queue-num-queues", "%u", num_queues);
		if (err) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction_no_dev_fatal;
		}
	}

	if (num_queues == 1) {
		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
		if (err)
			goto abort_transaction_no_dev_fatal;
	} else {
		/* Write the keys for each queue */
		for (i = 0; i < num_queues; ++i) {
			queue = &info->queues[i];
			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
			if (err)
				goto abort_transaction_no_dev_fatal;
		}
	}

	/* The remaining keys are not queue-specific */
	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
	if (err) {
		message = "writing feature-gso-tcpv6";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
			   "1");
	if (err) {
		message = "writing feature-ipv6-csum-offload";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_dev_fatal(dev, err, "%s", message);
abort_transaction_no_dev_fatal:
	xenbus_transaction_end(xbt, 1);
 destroy_ring:
	xennet_disconnect_backend(info);
	kfree(info->queues);
	info->queues = NULL;
 out:
	return err;
}
1948
1949static int xennet_connect(struct net_device *dev)
1950{
1951	struct netfront_info *np = netdev_priv(dev);
1952	unsigned int num_queues = 0;
1953	int err;
 
 
1954	unsigned int feature_rx_copy;
1955	unsigned int j = 0;
1956	struct netfront_queue *queue = NULL;
1957
1958	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1959			   "feature-rx-copy", "%u", &feature_rx_copy);
1960	if (err != 1)
1961		feature_rx_copy = 0;
1962
1963	if (!feature_rx_copy) {
1964		dev_info(&dev->dev,
1965			 "backend does not support copying receive path\n");
1966		return -ENODEV;
1967	}
1968
1969	err = talk_to_netback(np->xbdev, np);
1970	if (err)
1971		return err;
1972
1973	/* talk_to_netback() sets the correct number of queues */
1974	num_queues = dev->real_num_tx_queues;
1975
1976	rtnl_lock();
1977	netdev_update_features(dev);
1978	rtnl_unlock();
1979
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1980	/*
1981	 * All public and private state should now be sane.  Get
1982	 * ready to start sending and receiving packets and give the driver
1983	 * domain a kick because we've probably just requeued some
1984	 * packets.
1985	 */
1986	netif_carrier_on(np->netdev);
1987	for (j = 0; j < num_queues; ++j) {
1988		queue = &np->queues[j];
 
 
 
1989
1990		notify_remote_via_irq(queue->tx_irq);
1991		if (queue->tx_irq != queue->rx_irq)
1992			notify_remote_via_irq(queue->rx_irq);
1993
1994		spin_lock_irq(&queue->tx_lock);
1995		xennet_tx_buf_gc(queue);
1996		spin_unlock_irq(&queue->tx_lock);
1997
1998		spin_lock_bh(&queue->rx_lock);
1999		xennet_alloc_rx_buffers(queue);
2000		spin_unlock_bh(&queue->rx_lock);
2001	}
2002
2003	return 0;
2004}
2005
2006/**
2007 * Callback received when the backend's state changes.
2008 */
2009static void netback_changed(struct xenbus_device *dev,
2010			    enum xenbus_state backend_state)
2011{
2012	struct netfront_info *np = dev_get_drvdata(&dev->dev);
2013	struct net_device *netdev = np->netdev;
2014
2015	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2016
2017	switch (backend_state) {
2018	case XenbusStateInitialising:
2019	case XenbusStateInitialised:
2020	case XenbusStateReconfiguring:
2021	case XenbusStateReconfigured:
2022	case XenbusStateUnknown:
2023		break;
2024
2025	case XenbusStateInitWait:
2026		if (dev->state != XenbusStateInitialising)
2027			break;
2028		if (xennet_connect(netdev) != 0)
2029			break;
2030		xenbus_switch_state(dev, XenbusStateConnected);
2031		break;
2032
2033	case XenbusStateConnected:
2034		netdev_notify_peers(netdev);
2035		break;
2036
2037	case XenbusStateClosed:
2038		if (dev->state == XenbusStateClosed)
2039			break;
2040		/* Missed the backend's CLOSING state -- fallthrough */
2041	case XenbusStateClosing:
2042		xenbus_frontend_closed(dev);
2043		break;
2044	}
2045}
2046
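/*
 * Driver-private ethtool statistics: each entry names a counter and
 * gives its byte offset within struct netfront_info.
 */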
2047static const struct xennet_stat {
2048	char name[ETH_GSTRING_LEN];
2049	u16 offset;
2050} xennet_stats[] = {
2051	{
2052		"rx_gso_checksum_fixup",
2053		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2054	},
2055};
2056
2057static int xennet_get_sset_count(struct net_device *dev, int string_set)
2058{
2059	switch (string_set) {
2060	case ETH_SS_STATS:
2061		return ARRAY_SIZE(xennet_stats);
2062	default:
2063		return -EINVAL;
2064	}
2065}
2066
2067static void xennet_get_ethtool_stats(struct net_device *dev,
2068				     struct ethtool_stats *stats, u64 *data)
2069{
2070	void *np = netdev_priv(dev);
2071	int i;
2072
2073	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2074		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2075}
2076
2077static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2078{
2079	int i;
2080
2081	switch (stringset) {
2082	case ETH_SS_STATS:
2083		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2084			memcpy(data + i * ETH_GSTRING_LEN,
2085			       xennet_stats[i].name, ETH_GSTRING_LEN);
2086		break;
2087	}
2088}
2089
2090static const struct ethtool_ops xennet_ethtool_ops =
2091{
2092	.get_link = ethtool_op_get_link,
2093
2094	.get_sset_count = xennet_get_sset_count,
2095	.get_ethtool_stats = xennet_get_ethtool_stats,
2096	.get_strings = xennet_get_strings,
2097};
2098
2099#ifdef CONFIG_SYSFS
2100static ssize_t show_rxbuf(struct device *dev,
2101			  struct device_attribute *attr, char *buf)
2102{
2103	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
 
 
 
2104}
2105
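/*
 * Legacy sysfs store handler: the written value is parsed and validated
 * but otherwise ignored, since the receive ring is now a fixed size.
 * The attribute is kept so existing tooling does not break, e.g.
 * (illustrative path, depends on the interface name):
 *
 *	echo 256 > /sys/class/net/eth0/rxbuf_min	# accepted, no effect
 */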
2106static ssize_t store_rxbuf(struct device *dev,
2107			   struct device_attribute *attr,
2108			   const char *buf, size_t len)
2109{
 
 
2110	char *endp;
2111	unsigned long target;
2112
2113	if (!capable(CAP_NET_ADMIN))
2114		return -EPERM;
2115
2116	target = simple_strtoul(buf, &endp, 0);
2117	if (endp == buf)
2118		return -EBADMSG;
2119
2120	/* rxbuf_min and rxbuf_max are no longer configurable. */
 
 
 
 
 
 
 
 
 
 
 
 
2121
 
2122	return len;
2123}
2124
2125static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2126static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2127static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
2128
2129static struct attribute *xennet_dev_attrs[] = {
2130	&dev_attr_rxbuf_min.attr,
2131	&dev_attr_rxbuf_max.attr,
2132	&dev_attr_rxbuf_cur.attr,
2133	NULL
2134};
 
 
 
 
 
 
 
2135
2136static const struct attribute_group xennet_dev_group = {
2137	.attrs = xennet_dev_attrs
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2138};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2139#endif /* CONFIG_SYSFS */
2140
 
 
 
 
 
 
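/*
 * xenbus removal callback: disconnect from the backend, then unregister
 * and free the net device and its queues.
 */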
2141static int xennet_remove(struct xenbus_device *dev)
2142{
2143	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2144
2145	dev_dbg(&dev->dev, "%s\n", dev->nodename);
2146
2147	xennet_disconnect_backend(info);
2148
 
 
2149	unregister_netdev(info->netdev);
2150
2151	if (info->queues)
2152		xennet_destroy_queues(info);
2153	xennet_free_netdev(info->netdev);
 
 
2154
2155	return 0;
2156}
2157
2158static const struct xenbus_device_id netfront_ids[] = {
2159	{ "vif" },
2160	{ "" }
2161};
2162
2163static struct xenbus_driver netfront_driver = {
2164	.ids = netfront_ids,
2165	.probe = netfront_probe,
2166	.remove = xennet_remove,
2167	.resume = netfront_resume,
2168	.otherend_changed = netback_changed,
2169};
2170
2171static int __init netif_init(void)
2172{
2173	if (!xen_domain())
2174		return -ENODEV;
2175
2176	if (!xen_has_pv_nic_devices())
2177		return -ENODEV;
2178
2179	pr_info("Initialising Xen virtual ethernet driver\n");
2180
2181	/* Allow as many queues as there are CPUs if user has not
2182	 * specified a value.
2183	 */
2184	if (xennet_max_queues == 0)
2185		xennet_max_queues = num_online_cpus();
2186
2187	return xenbus_register_frontend(&netfront_driver);
2188}
2189module_init(netif_init);
2190
2191
2192static void __exit netif_exit(void)
2193{
2194	xenbus_unregister_driver(&netfront_driver);
2195}
2196module_exit(netif_exit);
2197
2198MODULE_DESCRIPTION("Xen virtual network device frontend");
2199MODULE_LICENSE("GPL");
2200MODULE_ALIAS("xen:vif");
2201MODULE_ALIAS("xennet");
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47
  48#include <asm/xen/page.h>
  49#include <xen/xen.h>
  50#include <xen/xenbus.h>
  51#include <xen/events.h>
  52#include <xen/page.h>
  53#include <xen/platform_pci.h>
  54#include <xen/grant_table.h>
  55
  56#include <xen/interface/io/netif.h>
  57#include <xen/interface/memory.h>
  58#include <xen/interface/grant_table.h>
  59
 
 
 
 
 
 
  60static const struct ethtool_ops xennet_ethtool_ops;
  61
  62struct netfront_cb {
  63	int pull_to;
  64};
  65
  66#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  67
  68#define RX_COPY_THRESHOLD 256
  69
  70#define GRANT_INVALID_REF	0
  71
  72#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
  73#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
  74#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
 
 
 
 
 
 
 
 
  75
  76struct netfront_stats {
  77	u64			rx_packets;
  78	u64			tx_packets;
  79	u64			rx_bytes;
  80	u64			tx_bytes;
  81	struct u64_stats_sync	syncp;
  82};
  83
  84struct netfront_info {
  85	struct list_head list;
  86	struct net_device *netdev;
 
 
 
  87
  88	struct napi_struct napi;
  89
  90	/* Split event channels support, tx_* == rx_* when using
  91	 * single event channel.
  92	 */
  93	unsigned int tx_evtchn, rx_evtchn;
  94	unsigned int tx_irq, rx_irq;
  95	/* Only used when split event channels support is enabled */
  96	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
  97	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
  98
  99	struct xenbus_device *xbdev;
 100
 101	spinlock_t   tx_lock;
 102	struct xen_netif_tx_front_ring tx;
 103	int tx_ring_ref;
 104
 105	/*
 106	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 107	 * are linked from tx_skb_freelist through skb_entry.link.
 108	 *
 109	 *  NB. Freelist index entries are always going to be less than
 110	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
 111	 *  greater than PAGE_OFFSET: we use this property to distinguish
 112	 *  them.
 113	 */
 114	union skb_entry {
 115		struct sk_buff *skb;
 116		unsigned long link;
 117	} tx_skbs[NET_TX_RING_SIZE];
 118	grant_ref_t gref_tx_head;
 119	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 120	struct page *grant_tx_page[NET_TX_RING_SIZE];
 121	unsigned tx_skb_freelist;
 122
 123	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 124	struct xen_netif_rx_front_ring rx;
 125	int rx_ring_ref;
 126
 127	/* Receive-ring batched refills. */
 128#define RX_MIN_TARGET 8
 129#define RX_DFL_MIN_TARGET 64
 130#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
 131	unsigned rx_min_target, rx_max_target, rx_target;
 132	struct sk_buff_head rx_batch;
 133
 134	struct timer_list rx_refill_timer;
 135
 136	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 137	grant_ref_t gref_rx_head;
 138	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 
 139
 140	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 141	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 142	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
 
 
 
 
 
 143
 144	/* Statistics */
 145	struct netfront_stats __percpu *stats;
 
 146
 147	unsigned long rx_gso_checksum_fixup;
 148};
 149
 150struct netfront_rx_info {
 151	struct xen_netif_rx_response rx;
 152	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 153};
 154
 155static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 156{
 157	list->link = id;
 158}
 159
 160static int skb_entry_is_link(const union skb_entry *list)
 161{
 162	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
 163	return (unsigned long)list->skb < PAGE_OFFSET;
 164}
 165
 166/*
167 * Access helpers for acquiring and freeing slots in tx_skbs[].
 168 */
 169
 170static void add_id_to_freelist(unsigned *head, union skb_entry *list,
 171			       unsigned short id)
 172{
 173	skb_entry_set_link(&list[id], *head);
 174	*head = id;
 175}
 176
 177static unsigned short get_id_from_freelist(unsigned *head,
 178					   union skb_entry *list)
 179{
 180	unsigned int id = *head;
 181	*head = list[id].link;
 182	return id;
 183}
 184
 185static int xennet_rxidx(RING_IDX idx)
 186{
 187	return idx & (NET_RX_RING_SIZE - 1);
 188}
 189
 190static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
 191					 RING_IDX ri)
 192{
 193	int i = xennet_rxidx(ri);
 194	struct sk_buff *skb = np->rx_skbs[i];
 195	np->rx_skbs[i] = NULL;
 196	return skb;
 197}
 198
 199static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
 200					    RING_IDX ri)
 201{
 202	int i = xennet_rxidx(ri);
 203	grant_ref_t ref = np->grant_rx_ref[i];
 204	np->grant_rx_ref[i] = GRANT_INVALID_REF;
 205	return ref;
 206}
 207
 208#ifdef CONFIG_SYSFS
 209static int xennet_sysfs_addif(struct net_device *netdev);
 210static void xennet_sysfs_delif(struct net_device *netdev);
 211#else /* !CONFIG_SYSFS */
 212#define xennet_sysfs_addif(dev) (0)
 213#define xennet_sysfs_delif(dev) do { } while (0)
 214#endif
 215
 216static bool xennet_can_sg(struct net_device *dev)
 217{
 218	return dev->features & NETIF_F_SG;
 219}
 220
 221
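/*
 * Timer callback armed when RX buffer allocation fails: schedule NAPI so
 * that xennet_alloc_rx_buffers() is retried from the poll loop.
 */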
 222static void rx_refill_timeout(unsigned long data)
 223{
 224	struct net_device *dev = (struct net_device *)data;
 225	struct netfront_info *np = netdev_priv(dev);
 226	napi_schedule(&np->napi);
 227}
 228
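/*
 * True while enough free TX ring slots remain for one maximally
 * fragmented skb (MAX_SKB_FRAGS + 2 slots of headroom).
 */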
 229static int netfront_tx_slot_available(struct netfront_info *np)
 230{
 231	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
 232		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
 233}
 234
 235static void xennet_maybe_wake_tx(struct net_device *dev)
 236{
 237	struct netfront_info *np = netdev_priv(dev);
 
 238
 239	if (unlikely(netif_queue_stopped(dev)) &&
 240	    netfront_tx_slot_available(np) &&
 241	    likely(netif_running(dev)))
 242		netif_wake_queue(dev);
 243}
 244
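/*
 * Refill the RX ring: allocate skbs carrying one page fragment each,
 * grant the pages to the backend and push the new requests, notifying
 * the backend if it is waiting for them.
 */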
 245static void xennet_alloc_rx_buffers(struct net_device *dev)
 
 246{
 247	unsigned short id;
 248	struct netfront_info *np = netdev_priv(dev);
 249	struct sk_buff *skb;
 250	struct page *page;
 251	int i, batch_target, notify;
 252	RING_IDX req_prod = np->rx.req_prod_pvt;
 253	grant_ref_t ref;
 254	unsigned long pfn;
 255	void *vaddr;
 256	struct xen_netif_rx_request *req;
 257
 258	if (unlikely(!netif_carrier_ok(dev)))
 259		return;
 
 
 
 
 
 
 
 
 
 
 260
 261	/*
 262	 * Allocate skbuffs greedily, even though we batch updates to the
 263	 * receive ring. This creates a less bursty demand on the memory
 264	 * allocator, so should reduce the chance of failed allocation requests
 265	 * both for ourselves and for other kernel subsystems.
 266	 */
 267	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
 268	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
 269		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
 270					 GFP_ATOMIC | __GFP_NOWARN);
 271		if (unlikely(!skb))
 272			goto no_skb;
 273
274		/* Align the IP header to a 16-byte boundary */
 275		skb_reserve(skb, NET_IP_ALIGN);
 276
 277		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 278		if (!page) {
 279			kfree_skb(skb);
 280no_skb:
 281			/* Could not allocate any skbuffs. Try again later. */
 282			mod_timer(&np->rx_refill_timer,
 283				  jiffies + (HZ/10));
 284
 285			/* Any skbuffs queued for refill? Force them out. */
 286			if (i != 0)
 287				goto refill;
 288			break;
 289		}
 290
 291		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 292		__skb_queue_tail(&np->rx_batch, skb);
 293	}
 
 294
 295	/* Is the batch large enough to be worthwhile? */
 296	if (i < (np->rx_target/2)) {
 297		if (req_prod > np->rx.sring->req_prod)
 298			goto push;
 299		return;
 300	}
 301
 302	/* Adjust our fill target if we risked running out of buffers. */
 303	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
 304	    ((np->rx_target *= 2) > np->rx_max_target))
 305		np->rx_target = np->rx_max_target;
 306
 307 refill:
 308	for (i = 0; ; i++) {
 309		skb = __skb_dequeue(&np->rx_batch);
 310		if (skb == NULL)
 
 
 311			break;
 312
 313		skb->dev = dev;
 314
 315		id = xennet_rxidx(req_prod + i);
 
 316
 317		BUG_ON(np->rx_skbs[id]);
 318		np->rx_skbs[id] = skb;
 319
 320		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
 321		BUG_ON((signed short)ref < 0);
 322		np->grant_rx_ref[id] = ref;
 323
 324		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 325		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 326
 327		req = RING_GET_REQUEST(&np->rx, req_prod + i);
 328		gnttab_grant_foreign_access_ref(ref,
 329						np->xbdev->otherend_id,
 330						pfn_to_mfn(pfn),
 331						0);
 332
 
 
 
 
 
 333		req->id = id;
 334		req->gref = ref;
 335	}
 336
 
 
 
 
 
 
 
 
337	wmb();		/* barrier so backend sees requests */
 338
 339	/* Above is a suitable barrier to ensure backend will see requests. */
 340	np->rx.req_prod_pvt = req_prod + i;
 341 push:
 342	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
 343	if (notify)
 344		notify_remote_via_irq(np->rx_irq);
 345}
 346
 347static int xennet_open(struct net_device *dev)
 348{
 349	struct netfront_info *np = netdev_priv(dev);
 350
 351	napi_enable(&np->napi);
 352
 353	spin_lock_bh(&np->rx_lock);
 354	if (netif_carrier_ok(dev)) {
 355		xennet_alloc_rx_buffers(dev);
 356		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
 357		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
 358			napi_schedule(&np->napi);
 
 
 
 
 
 
 
 359	}
 360	spin_unlock_bh(&np->rx_lock);
 361
 362	netif_start_queue(dev);
 363
 364	return 0;
 365}
 366
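/*
 * Reclaim completed TX slots: end foreign access on each grant, return
 * the grant reference and slot id to their freelists and free the skb.
 * Called with tx_lock held.
 */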
 367static void xennet_tx_buf_gc(struct net_device *dev)
 368{
 369	RING_IDX cons, prod;
 370	unsigned short id;
 371	struct netfront_info *np = netdev_priv(dev);
 372	struct sk_buff *skb;
 
 373
 374	BUG_ON(!netif_carrier_ok(dev));
 375
 376	do {
 377		prod = np->tx.sring->rsp_prod;
378		rmb(); /* Ensure we see responses up to 'prod'. */
 379
 380		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
 381			struct xen_netif_tx_response *txrsp;
 382
 383			txrsp = RING_GET_RESPONSE(&np->tx, cons);
 384			if (txrsp->status == XEN_NETIF_RSP_NULL)
 385				continue;
 386
 387			id  = txrsp->id;
 388			skb = np->tx_skbs[id].skb;
 389			if (unlikely(gnttab_query_foreign_access(
 390				np->grant_tx_ref[id]) != 0)) {
 391				pr_alert("%s: warning -- grant still in use by backend domain\n",
 392					 __func__);
 393				BUG();
 394			}
 395			gnttab_end_foreign_access_ref(
 396				np->grant_tx_ref[id], GNTMAP_readonly);
 397			gnttab_release_grant_reference(
 398				&np->gref_tx_head, np->grant_tx_ref[id]);
 399			np->grant_tx_ref[id] = GRANT_INVALID_REF;
 400			np->grant_tx_page[id] = NULL;
 401			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 402			dev_kfree_skb_irq(skb);
 403		}
 404
 405		np->tx.rsp_cons = prod;
 406
 407		/*
 408		 * Set a new event, then check for race with update of tx_cons.
 409		 * Note that it is essential to schedule a callback, no matter
 410		 * how few buffers are pending. Even if there is space in the
 411		 * transmit ring, higher layers may be blocked because too much
 412		 * data is outstanding: in such cases notification from Xen is
 413		 * likely to be the only kick that we'll get.
 414		 */
 415		np->tx.sring->rsp_event =
 416			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
 417		mb();		/* update shared area */
 418	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
 419
 420	xennet_maybe_wake_tx(dev);
 421}
 422
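/*
 * Grant the backend access to the rest of the packet: any header bytes
 * beyond the first chunk plus every frag page, one TX request per
 * page-sized piece.  The caller has already verified that enough ring
 * slots are free.
 */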
 423static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 424			      struct xen_netif_tx_request *tx)
 
 
 
 
 
 
 
 
 425{
 426	struct netfront_info *np = netdev_priv(dev);
 427	char *data = skb->data;
 428	unsigned long mfn;
 429	RING_IDX prod = np->tx.req_prod_pvt;
 430	int frags = skb_shinfo(skb)->nr_frags;
 431	unsigned int offset = offset_in_page(data);
 432	unsigned int len = skb_headlen(skb);
 433	unsigned int id;
 
 434	grant_ref_t ref;
 435	int i;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 436
 437	/* While the header overlaps a page boundary (including being
438	   larger than a page), split it into page-sized chunks. */
 439	while (len > PAGE_SIZE - offset) {
 440		tx->size = PAGE_SIZE - offset;
 441		tx->flags |= XEN_NETTXF_more_data;
 442		len -= tx->size;
 443		data += tx->size;
 444		offset = 0;
 
 
 445
 446		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 447		np->tx_skbs[id].skb = skb_get(skb);
 448		tx = RING_GET_REQUEST(&np->tx, prod++);
 449		tx->id = id;
 450		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 451		BUG_ON((signed short)ref < 0);
 452
 453		mfn = virt_to_mfn(data);
 454		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 455						mfn, GNTMAP_readonly);
 456
 457		np->grant_tx_page[id] = virt_to_page(data);
 458		tx->gref = np->grant_tx_ref[id] = ref;
 459		tx->offset = offset;
 460		tx->size = len;
 461		tx->flags = 0;
 462	}
 463
 464	/* Grant backend access to each skb fragment page. */
 465	for (i = 0; i < frags; i++) {
 466		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 467		struct page *page = skb_frag_page(frag);
 468
 469		len = skb_frag_size(frag);
 470		offset = frag->page_offset;
 
 
 471
 472		/* Data must not cross a page boundary. */
473		BUG_ON(len + offset > PAGE_SIZE << compound_order(page));
 
 
 
 
 
 
 
 
 474
 475		/* Skip unused frames from start of page */
 476		page += offset >> PAGE_SHIFT;
 477		offset &= ~PAGE_MASK;
 478
 479		while (len > 0) {
 480			unsigned long bytes;
 
 481
 482			BUG_ON(offset >= PAGE_SIZE);
 
 
 483
 484			bytes = PAGE_SIZE - offset;
 485			if (bytes > len)
 486				bytes = len;
 487
 488			tx->flags |= XEN_NETTXF_more_data;
 489
 490			id = get_id_from_freelist(&np->tx_skb_freelist,
 491						  np->tx_skbs);
 492			np->tx_skbs[id].skb = skb_get(skb);
 493			tx = RING_GET_REQUEST(&np->tx, prod++);
 494			tx->id = id;
 495			ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 496			BUG_ON((signed short)ref < 0);
 497
 498			mfn = pfn_to_mfn(page_to_pfn(page));
 499			gnttab_grant_foreign_access_ref(ref,
 500							np->xbdev->otherend_id,
 501							mfn, GNTMAP_readonly);
 502
 503			np->grant_tx_page[id] = page;
 504			tx->gref = np->grant_tx_ref[id] = ref;
 505			tx->offset = offset;
 506			tx->size = bytes;
 507			tx->flags = 0;
 508
 509			offset += bytes;
 510			len -= bytes;
 511
 512			/* Next frame */
 513			if (offset == PAGE_SIZE && len) {
 514				BUG_ON(!PageCompound(page));
 515				page++;
 516				offset = 0;
 517			}
 518		}
 519	}
 520
 521	np->tx.req_prod_pvt = prod;
 522}
 523
 524/*
 525 * Count how many ring slots are required to send the frags of this
 526 * skb. Each frag might be a compound page.
 527 */
 528static int xennet_count_skb_frag_slots(struct sk_buff *skb)
 529{
 530	int i, frags = skb_shinfo(skb)->nr_frags;
 531	int pages = 0;
 
 
 
 532
 533	for (i = 0; i < frags; i++) {
 534		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 535		unsigned long size = skb_frag_size(frag);
 536		unsigned long offset = frag->page_offset;
 537
 538		/* Skip unused frames from start of page */
 539		offset &= ~PAGE_MASK;
 540
 541		pages += PFN_UP(offset + size);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 542	}
 543
 544	return pages;
 545}
 546
 
 
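/*
 * Transmit path: map the linear header and the fragments into grant-
 * backed TX requests, push them to the backend and reclaim completions.
 * Packets that cannot be expressed in wire format (too long, or needing
 * too many slots) are dropped.
 */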
 547static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 548{
 549	unsigned short id;
 550	struct netfront_info *np = netdev_priv(dev);
 551	struct netfront_stats *stats = this_cpu_ptr(np->stats);
 552	struct xen_netif_tx_request *tx;
 553	char *data = skb->data;
 554	RING_IDX i;
 555	grant_ref_t ref;
 556	unsigned long mfn;
 557	int notify;
 558	int slots;
 559	unsigned int offset = offset_in_page(data);
 560	unsigned int len = skb_headlen(skb);
 
 561	unsigned long flags;
 
 
 
 
 
 
 
 
 
 
 562
 563	/* If skb->len is too big for wire format, drop skb and alert
 564	 * user about misconfiguration.
 565	 */
 566	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 567		net_alert_ratelimited(
 568			"xennet: skb->len = %u, too big for wire format\n",
 569			skb->len);
 570		goto drop;
 571	}
 572
 573	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
 574		xennet_count_skb_frag_slots(skb);
 575	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
 576		net_alert_ratelimited(
 577			"xennet: skb rides the rocket: %d slots\n", slots);
 578		goto drop;
 579	}
 580
 581	spin_lock_irqsave(&np->tx_lock, flags);
 
 
 
 
 582
 583	if (unlikely(!netif_carrier_ok(dev) ||
 584		     (slots > 1 && !xennet_can_sg(dev)) ||
 585		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 586		spin_unlock_irqrestore(&np->tx_lock, flags);
 587		goto drop;
 588	}
 589
 590	i = np->tx.req_prod_pvt;
 591
 592	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 593	np->tx_skbs[id].skb = skb;
 594
 595	tx = RING_GET_REQUEST(&np->tx, i);
 596
 597	tx->id   = id;
 598	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 599	BUG_ON((signed short)ref < 0);
 600	mfn = virt_to_mfn(data);
 601	gnttab_grant_foreign_access_ref(
 602		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
 603	np->grant_tx_page[id] = virt_to_page(data);
 604	tx->gref = np->grant_tx_ref[id] = ref;
 605	tx->offset = offset;
 606	tx->size = len;
 607
 608	tx->flags = 0;
 609	if (skb->ip_summed == CHECKSUM_PARTIAL)
 610		/* local packet? */
 611		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
 612	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 613		/* remote but checksummed. */
 614		tx->flags |= XEN_NETTXF_data_validated;
 615
 
 616	if (skb_shinfo(skb)->gso_size) {
 617		struct xen_netif_extra_info *gso;
 618
 619		gso = (struct xen_netif_extra_info *)
 620			RING_GET_REQUEST(&np->tx, ++i);
 621
 622		tx->flags |= XEN_NETTXF_extra_info;
 623
 624		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 625		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 626			XEN_NETIF_GSO_TYPE_TCPV6 :
 627			XEN_NETIF_GSO_TYPE_TCPV4;
 628		gso->u.gso.pad = 0;
 629		gso->u.gso.features = 0;
 630
 631		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 632		gso->flags = 0;
 633	}
 634
 635	np->tx.req_prod_pvt = i + 1;
 
 
 
 
 
 
 
 
 
 636
 637	xennet_make_frags(skb, dev, tx);
 638	tx->size = skb->len;
 639
 640	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
 641	if (notify)
 642		notify_remote_via_irq(np->tx_irq);
 643
 644	u64_stats_update_begin(&stats->syncp);
 645	stats->tx_bytes += skb->len;
 646	stats->tx_packets++;
 647	u64_stats_update_end(&stats->syncp);
 648
 649	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 650	xennet_tx_buf_gc(dev);
 651
 652	if (!netfront_tx_slot_available(np))
 653		netif_stop_queue(dev);
 654
 655	spin_unlock_irqrestore(&np->tx_lock, flags);
 656
 657	return NETDEV_TX_OK;
 658
 659 drop:
 660	dev->stats.tx_dropped++;
 661	dev_kfree_skb_any(skb);
 662	return NETDEV_TX_OK;
 663}
 664
 665static int xennet_close(struct net_device *dev)
 666{
 667	struct netfront_info *np = netdev_priv(dev);
 668	netif_stop_queue(np->netdev);
 669	napi_disable(&np->napi);
 
 
 
 
 
 
 670	return 0;
 671}
 672
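/*
 * Re-post an unconsumed skb and its grant reference to the backend in
 * the next free RX request slot.
 */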
 673static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
 674				grant_ref_t ref)
 675{
 676	int new = xennet_rxidx(np->rx.req_prod_pvt);
 677
 678	BUG_ON(np->rx_skbs[new]);
 679	np->rx_skbs[new] = skb;
 680	np->grant_rx_ref[new] = ref;
 681	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
 682	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
 683	np->rx.req_prod_pvt++;
 684}
 685
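/*
 * Consume the chain of extra-info responses following an RX response
 * flagged XEN_NETRXF_extra_info, stashing each entry by type in
 * extras[]; the ring slots it occupied are recycled.
 */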
 686static int xennet_get_extras(struct netfront_info *np,
 687			     struct xen_netif_extra_info *extras,
 688			     RING_IDX rp)
 689
 690{
 691	struct xen_netif_extra_info *extra;
 692	struct device *dev = &np->netdev->dev;
 693	RING_IDX cons = np->rx.rsp_cons;
 694	int err = 0;
 695
 696	do {
 697		struct sk_buff *skb;
 698		grant_ref_t ref;
 699
 700		if (unlikely(cons + 1 == rp)) {
 701			if (net_ratelimit())
 702				dev_warn(dev, "Missing extra info\n");
 703			err = -EBADR;
 704			break;
 705		}
 706
 707		extra = (struct xen_netif_extra_info *)
 708			RING_GET_RESPONSE(&np->rx, ++cons);
 709
 710		if (unlikely(!extra->type ||
 711			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 712			if (net_ratelimit())
 713				dev_warn(dev, "Invalid extra type: %d\n",
 714					extra->type);
 715			err = -EINVAL;
 716		} else {
 717			memcpy(&extras[extra->type - 1], extra,
 718			       sizeof(*extra));
 719		}
 720
 721		skb = xennet_get_rx_skb(np, cons);
 722		ref = xennet_get_rx_ref(np, cons);
 723		xennet_move_rx_slot(np, skb, ref);
 724	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 725
 726	np->rx.rsp_cons = cons;
 727	return err;
 728}
 729
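/*
 * Gather every RX response belonging to one packet (the head response
 * plus any XEN_NETRXF_more_data continuations), ending foreign access
 * on each grant and queueing the partial skbs on @list.
 */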
 730static int xennet_get_responses(struct netfront_info *np,
 731				struct netfront_rx_info *rinfo, RING_IDX rp,
 732				struct sk_buff_head *list)
 733{
 734	struct xen_netif_rx_response *rx = &rinfo->rx;
 735	struct xen_netif_extra_info *extras = rinfo->extras;
 736	struct device *dev = &np->netdev->dev;
 737	RING_IDX cons = np->rx.rsp_cons;
 738	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
 739	grant_ref_t ref = xennet_get_rx_ref(np, cons);
 740	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
 741	int slots = 1;
 742	int err = 0;
 743	unsigned long ret;
 744
 745	if (rx->flags & XEN_NETRXF_extra_info) {
 746		err = xennet_get_extras(np, extras, rp);
 747		cons = np->rx.rsp_cons;
 748	}
 749
 750	for (;;) {
 751		if (unlikely(rx->status < 0 ||
 752			     rx->offset + rx->status > PAGE_SIZE)) {
 753			if (net_ratelimit())
 754				dev_warn(dev, "rx->offset: %x, size: %u\n",
 755					 rx->offset, rx->status);
 756			xennet_move_rx_slot(np, skb, ref);
 757			err = -EINVAL;
 758			goto next;
 759		}
 760
 761		/*
 762		 * This definitely indicates a bug, either in this driver or in
 763		 * the backend driver. In future this should flag the bad
 764		 * situation to the system controller to reboot the backend.
 765		 */
 766		if (ref == GRANT_INVALID_REF) {
 767			if (net_ratelimit())
 768				dev_warn(dev, "Bad rx response id %d.\n",
 769					 rx->id);
 770			err = -EINVAL;
 771			goto next;
 772		}
 773
 774		ret = gnttab_end_foreign_access_ref(ref, 0);
 775		BUG_ON(!ret);
 776
 777		gnttab_release_grant_reference(&np->gref_rx_head, ref);
 778
 779		__skb_queue_tail(list, skb);
 780
 781next:
 782		if (!(rx->flags & XEN_NETRXF_more_data))
 783			break;
 784
 785		if (cons + slots == rp) {
 786			if (net_ratelimit())
 787				dev_warn(dev, "Need more slots\n");
 788			err = -ENOENT;
 789			break;
 790		}
 791
 792		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
 793		skb = xennet_get_rx_skb(np, cons + slots);
 794		ref = xennet_get_rx_ref(np, cons + slots);
 795		slots++;
 796	}
 797
 798	if (unlikely(slots > max)) {
 799		if (net_ratelimit())
 800			dev_warn(dev, "Too many slots\n");
 801		err = -E2BIG;
 802	}
 803
 804	if (unlikely(err))
 805		np->rx.rsp_cons = cons + slots;
 806
 807	return err;
 808}
 809
 810static int xennet_set_skb_gso(struct sk_buff *skb,
 811			      struct xen_netif_extra_info *gso)
 812{
 813	if (!gso->u.gso.size) {
 814		if (net_ratelimit())
 815			pr_warn("GSO size must not be zero\n");
 816		return -EINVAL;
 817	}
 818
 819	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
 820	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
 821		if (net_ratelimit())
 822			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
 823		return -EINVAL;
 824	}
 825
 826	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 827	skb_shinfo(skb)->gso_type =
 828		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
 829		SKB_GSO_TCPV4 :
 830		SKB_GSO_TCPV6;
 831
 832	/* Header must be checked, and gso_segs computed. */
 833	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
 834	skb_shinfo(skb)->gso_segs = 0;
 835
 836	return 0;
 837}
 838
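/*
 * Attach the queued continuation skbs to @skb as page fragments and
 * return the updated response-consumer index.
 */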
 839static RING_IDX xennet_fill_frags(struct netfront_info *np,
 840				  struct sk_buff *skb,
 841				  struct sk_buff_head *list)
 842{
 843	struct skb_shared_info *shinfo = skb_shinfo(skb);
 844	RING_IDX cons = np->rx.rsp_cons;
 845	struct sk_buff *nskb;
 846
 847	while ((nskb = __skb_dequeue(list))) {
 848		struct xen_netif_rx_response *rx =
 849			RING_GET_RESPONSE(&np->rx, ++cons);
 850		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 851
 852		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
 853			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 854
 855			BUG_ON(pull_to <= skb_headlen(skb));
 856			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 857		}
 858		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
 859
 860		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
 861				rx->offset, rx->status, PAGE_SIZE);
 862
 863		skb_shinfo(nskb)->nr_frags = 0;
 864		kfree_skb(nskb);
 865	}
 866
 867	return cons;
 868}
 869
 870static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 871{
 872	bool recalculate_partial_csum = false;
 873
 874	/*
 875	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 876	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 877	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 878	 * recalculate the partial checksum.
 879	 */
 880	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 881		struct netfront_info *np = netdev_priv(dev);
 882		np->rx_gso_checksum_fixup++;
 883		skb->ip_summed = CHECKSUM_PARTIAL;
 884		recalculate_partial_csum = true;
 885	}
 886
 887	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 888	if (skb->ip_summed != CHECKSUM_PARTIAL)
 889		return 0;
 890
 891	return skb_checksum_setup(skb, recalculate_partial_csum);
 892}
 893
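/*
 * Deliver completed skbs to the stack: pull the headers into the linear
 * area, fix up checksum state, update statistics and hand each skb to
 * GRO.  Returns how many packets had to be dropped.
 */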
 894static int handle_incoming_queue(struct net_device *dev,
 895				 struct sk_buff_head *rxq)
 896{
 897	struct netfront_info *np = netdev_priv(dev);
 898	struct netfront_stats *stats = this_cpu_ptr(np->stats);
 899	int packets_dropped = 0;
 900	struct sk_buff *skb;
 901
 902	while ((skb = __skb_dequeue(rxq)) != NULL) {
 903		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 904
 905		if (pull_to > skb_headlen(skb))
 906			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 907
 908		/* Ethernet work: Delayed to here as it peeks the header. */
 909		skb->protocol = eth_type_trans(skb, dev);
 910		skb_reset_network_header(skb);
 911
 912		if (checksum_setup(dev, skb)) {
 913			kfree_skb(skb);
 914			packets_dropped++;
 915			dev->stats.rx_errors++;
 916			continue;
 917		}
 918
 919		u64_stats_update_begin(&stats->syncp);
 920		stats->rx_packets++;
 921		stats->rx_bytes += skb->len;
 922		u64_stats_update_end(&stats->syncp);
 923
 924		/* Pass it up. */
 925		napi_gro_receive(&np->napi, skb);
 926	}
 927
 928	return packets_dropped;
 929}
 930
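/*
 * NAPI poll handler: drain up to @budget RX responses, reassemble the
 * packets, replenish the ring, and complete NAPI once no responses are
 * pending.
 */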
 931static int xennet_poll(struct napi_struct *napi, int budget)
 932{
 933	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
 934	struct net_device *dev = np->netdev;
 935	struct sk_buff *skb;
 936	struct netfront_rx_info rinfo;
 937	struct xen_netif_rx_response *rx = &rinfo.rx;
 938	struct xen_netif_extra_info *extras = rinfo.extras;
 939	RING_IDX i, rp;
 940	int work_done;
 941	struct sk_buff_head rxq;
 942	struct sk_buff_head errq;
 943	struct sk_buff_head tmpq;
 944	unsigned long flags;
 945	int err;
 946
 947	spin_lock(&np->rx_lock);
 948
 949	skb_queue_head_init(&rxq);
 950	skb_queue_head_init(&errq);
 951	skb_queue_head_init(&tmpq);
 952
 953	rp = np->rx.sring->rsp_prod;
 954	rmb(); /* Ensure we see queued responses up to 'rp'. */
 955
 956	i = np->rx.rsp_cons;
 957	work_done = 0;
 958	while ((i != rp) && (work_done < budget)) {
 959		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
 960		memset(extras, 0, sizeof(rinfo.extras));
 961
 962		err = xennet_get_responses(np, &rinfo, rp, &tmpq);
 963
 964		if (unlikely(err)) {
 965err:
 966			while ((skb = __skb_dequeue(&tmpq)))
 967				__skb_queue_tail(&errq, skb);
 968			dev->stats.rx_errors++;
 969			i = np->rx.rsp_cons;
 970			continue;
 971		}
 972
 973		skb = __skb_dequeue(&tmpq);
 974
 975		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
 976			struct xen_netif_extra_info *gso;
 977			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 978
 979			if (unlikely(xennet_set_skb_gso(skb, gso))) {
 980				__skb_queue_head(&tmpq, skb);
 981				np->rx.rsp_cons += skb_queue_len(&tmpq);
 982				goto err;
 983			}
 984		}
 985
 986		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
 987		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
 988			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
 989
 990		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
 991		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
 992		skb->data_len = rx->status;
 993		skb->len += rx->status;
 994
 995		i = xennet_fill_frags(np, skb, &tmpq);
 996
 997		if (rx->flags & XEN_NETRXF_csum_blank)
 998			skb->ip_summed = CHECKSUM_PARTIAL;
 999		else if (rx->flags & XEN_NETRXF_data_validated)
1000			skb->ip_summed = CHECKSUM_UNNECESSARY;
1001
1002		__skb_queue_tail(&rxq, skb);
1003
1004		np->rx.rsp_cons = ++i;
1005		work_done++;
1006	}
1007
1008	__skb_queue_purge(&errq);
1009
1010	work_done -= handle_incoming_queue(dev, &rxq);
1011
1012	/* If we get a callback with very few responses, reduce fill target. */
1013	/* NB. Note exponential increase, linear decrease. */
1014	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1015	     ((3*np->rx_target) / 4)) &&
1016	    (--np->rx_target < np->rx_min_target))
1017		np->rx_target = np->rx_min_target;
1018
1019	xennet_alloc_rx_buffers(dev);
1020
1021	if (work_done < budget) {
1022		int more_to_do = 0;
1023
1024		napi_gro_flush(napi, false);
1025
1026		local_irq_save(flags);
1027
1028		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1029		if (!more_to_do)
1030			__napi_complete(napi);
1031
1032		local_irq_restore(flags);
1033	}
1034
1035	spin_unlock(&np->rx_lock);
1036
1037	return work_done;
1038}
1039
1040static int xennet_change_mtu(struct net_device *dev, int mtu)
1041{
1042	int max = xennet_can_sg(dev) ?
1043		XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
1044
1045	if (mtu > max)
1046		return -EINVAL;
1047	dev->mtu = mtu;
1048	return 0;
1049}
1050
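/*
 * Aggregate the per-CPU counters; the u64_stats fetch/retry loop yields
 * a consistent 64-bit snapshot even on 32-bit hosts.
 */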
1051static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1052						    struct rtnl_link_stats64 *tot)
1053{
1054	struct netfront_info *np = netdev_priv(dev);
1055	int cpu;
1056
1057	for_each_possible_cpu(cpu) {
1058		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
 
1059		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1060		unsigned int start;
1061
1062		do {
1063			start = u64_stats_fetch_begin_irq(&stats->syncp);
 
 
 
1064
1065			rx_packets = stats->rx_packets;
1066			tx_packets = stats->tx_packets;
1067			rx_bytes = stats->rx_bytes;
1068			tx_bytes = stats->tx_bytes;
1069		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1070
1071		tot->rx_packets += rx_packets;
1072		tot->tx_packets += tx_packets;
1073		tot->rx_bytes   += rx_bytes;
1074		tot->tx_bytes   += tx_bytes;
1075	}
1076
1077	tot->rx_errors  = dev->stats.rx_errors;
1078	tot->tx_dropped = dev->stats.tx_dropped;
1079
1080	return tot;
1081}
1082
1083static void xennet_release_tx_bufs(struct netfront_info *np)
1084{
1085	struct sk_buff *skb;
1086	int i;
1087
1088	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1089		/* Skip over entries which are actually freelist references */
1090		if (skb_entry_is_link(&np->tx_skbs[i]))
1091			continue;
1092
1093		skb = np->tx_skbs[i].skb;
1094		get_page(np->grant_tx_page[i]);
1095		gnttab_end_foreign_access(np->grant_tx_ref[i],
1096					  GNTMAP_readonly,
1097					  (unsigned long)page_address(np->grant_tx_page[i]));
1098		np->grant_tx_page[i] = NULL;
1099		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1100		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
1101		dev_kfree_skb_irq(skb);
1102	}
1103}
1104
1105static void xennet_release_rx_bufs(struct netfront_info *np)
1106{
1107	int id, ref;
1108
1109	spin_lock_bh(&np->rx_lock);
1110
1111	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1112		struct sk_buff *skb;
1113		struct page *page;
1114
1115		skb = np->rx_skbs[id];
1116		if (!skb)
1117			continue;
1118
1119		ref = np->grant_rx_ref[id];
1120		if (ref == GRANT_INVALID_REF)
1121			continue;
1122
1123		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1124
1125		/* gnttab_end_foreign_access() needs a page ref until
1126		 * foreign access is ended (which may be deferred).
1127		 */
1128		get_page(page);
1129		gnttab_end_foreign_access(ref, 0,
1130					  (unsigned long)page_address(page));
1131		np->grant_rx_ref[id] = GRANT_INVALID_REF;
1132
1133		kfree_skb(skb);
1134	}
1135
1136	spin_unlock_bh(&np->rx_lock);
1137}
1138
1139static void xennet_uninit(struct net_device *dev)
1140{
1141	struct netfront_info *np = netdev_priv(dev);
1142	xennet_release_tx_bufs(np);
1143	xennet_release_rx_bufs(np);
1144	gnttab_free_grant_references(np->gref_tx_head);
1145	gnttab_free_grant_references(np->gref_rx_head);
1146}
1147
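/*
 * Mask the requested features against what the backend advertises in
 * xenstore, e.g. (illustrative path)
 * /local/domain/0/backend/vif/<domid>/<handle>/feature-sg = "1".
 */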
1148static netdev_features_t xennet_fix_features(struct net_device *dev,
1149	netdev_features_t features)
1150{
1151	struct netfront_info *np = netdev_priv(dev);
1152	int val;
1153
1154	if (features & NETIF_F_SG) {
1155		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1156				 "%d", &val) < 0)
1157			val = 0;
1158
1159		if (!val)
1160			features &= ~NETIF_F_SG;
1161	}
1162
1163	if (features & NETIF_F_IPV6_CSUM) {
1164		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1165				 "feature-ipv6-csum-offload", "%d", &val) < 0)
1166			val = 0;
1167
1168		if (!val)
1169			features &= ~NETIF_F_IPV6_CSUM;
1170	}
1171
1172	if (features & NETIF_F_TSO) {
1173		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1174				 "feature-gso-tcpv4", "%d", &val) < 0)
1175			val = 0;
1176
1177		if (!val)
1178			features &= ~NETIF_F_TSO;
1179	}
1180
1181	if (features & NETIF_F_TSO6) {
1182		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1183				 "feature-gso-tcpv6", "%d", &val) < 0)
1184			val = 0;
1185
1186		if (!val)
1187			features &= ~NETIF_F_TSO6;
1188	}
1189
1190	return features;
1191}
1192
1193static int xennet_set_features(struct net_device *dev,
1194	netdev_features_t features)
1195{
1196	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1197		netdev_info(dev, "Reducing MTU because no SG offload\n");
1198		dev->mtu = ETH_DATA_LEN;
1199	}
1200
1201	return 0;
1202}
1203
1204static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1205{
1206	struct netfront_info *np = dev_id;
1207	struct net_device *dev = np->netdev;
1208	unsigned long flags;
1209
1210	spin_lock_irqsave(&np->tx_lock, flags);
1211	xennet_tx_buf_gc(dev);
1212	spin_unlock_irqrestore(&np->tx_lock, flags);
1213
1214	return IRQ_HANDLED;
1215}
1216
1217static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1218{
1219	struct netfront_info *np = dev_id;
1220	struct net_device *dev = np->netdev;
1221
1222	if (likely(netif_carrier_ok(dev) &&
1223		   RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
1224		napi_schedule(&np->napi);
1225
1226	return IRQ_HANDLED;
1227}
1228
1229static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1230{
1231	xennet_tx_interrupt(irq, dev_id);
1232	xennet_rx_interrupt(irq, dev_id);
1233	return IRQ_HANDLED;
1234}
1235
1236#ifdef CONFIG_NET_POLL_CONTROLLER
1237static void xennet_poll_controller(struct net_device *dev)
1238{
1239	xennet_interrupt(0, dev);
 
 
 
 
 
1240}
1241#endif
1242
1243static const struct net_device_ops xennet_netdev_ops = {
1244	.ndo_open            = xennet_open,
1245	.ndo_uninit          = xennet_uninit,
1246	.ndo_stop            = xennet_close,
1247	.ndo_start_xmit      = xennet_start_xmit,
1248	.ndo_change_mtu	     = xennet_change_mtu,
1249	.ndo_get_stats64     = xennet_get_stats64,
1250	.ndo_set_mac_address = eth_mac_addr,
1251	.ndo_validate_addr   = eth_validate_addr,
1252	.ndo_fix_features    = xennet_fix_features,
1253	.ndo_set_features    = xennet_set_features,
 
1254#ifdef CONFIG_NET_POLL_CONTROLLER
1255	.ndo_poll_controller = xennet_poll_controller,
1256#endif
1257};
1258
 
 
 
 
 
 
 
 
 
1259static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1260{
1261	int i, err;
1262	struct net_device *netdev;
1263	struct netfront_info *np;
1264
1265	netdev = alloc_etherdev(sizeof(struct netfront_info));
1266	if (!netdev)
1267		return ERR_PTR(-ENOMEM);
1268
1269	np                   = netdev_priv(netdev);
1270	np->xbdev            = dev;
1271
1272	spin_lock_init(&np->tx_lock);
1273	spin_lock_init(&np->rx_lock);
1274
1275	skb_queue_head_init(&np->rx_batch);
1276	np->rx_target     = RX_DFL_MIN_TARGET;
1277	np->rx_min_target = RX_DFL_MIN_TARGET;
1278	np->rx_max_target = RX_MAX_TARGET;
1279
1280	init_timer(&np->rx_refill_timer);
1281	np->rx_refill_timer.data = (unsigned long)netdev;
1282	np->rx_refill_timer.function = rx_refill_timeout;
1283
1284	err = -ENOMEM;
1285	np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1286	if (np->stats == NULL)
 
 
 
1287		goto exit;
1288
1289	/* Initialise tx_skbs as a free chain containing every entry. */
1290	np->tx_skb_freelist = 0;
1291	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1292		skb_entry_set_link(&np->tx_skbs[i], i+1);
1293		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1294		np->grant_tx_page[i] = NULL;
1295	}
1296
1297	/* Clear out rx_skbs */
1298	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1299		np->rx_skbs[i] = NULL;
1300		np->grant_rx_ref[i] = GRANT_INVALID_REF;
1301	}
1302
1303	/* A grant for every tx ring slot */
1304	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1305					  &np->gref_tx_head) < 0) {
1306		pr_alert("can't alloc tx grant refs\n");
1307		err = -ENOMEM;
1308		goto exit_free_stats;
1309	}
1310	/* A grant for every rx ring slot */
1311	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1312					  &np->gref_rx_head) < 0) {
1313		pr_alert("can't alloc rx grant refs\n");
1314		err = -ENOMEM;
1315		goto exit_free_tx;
1316	}
1317
1318	netdev->netdev_ops	= &xennet_netdev_ops;
1319
1320	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1321	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1322				  NETIF_F_GSO_ROBUST;
1323	netdev->hw_features	= NETIF_F_SG |
1324				  NETIF_F_IPV6_CSUM |
1325				  NETIF_F_TSO | NETIF_F_TSO6;
1326
1327	/*
1328	 * Assume that all hw features are available for now. This set
1329	 * will be adjusted by the call to netdev_update_features() in
1330	 * xennet_connect() which is the earliest point where we can
1331	 * negotiate with the backend regarding supported features.
1332	 */
1333	netdev->features |= netdev->hw_features;
1334
1335	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
1336	SET_NETDEV_DEV(netdev, &dev->dev);
1337
1338	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
1339
1340	np->netdev = netdev;
1341
1342	netif_carrier_off(netdev);
1343
1344	return netdev;
1345
1346 exit_free_tx:
1347	gnttab_free_grant_references(np->gref_tx_head);
1348 exit_free_stats:
1349	free_percpu(np->stats);
1350 exit:
1351	free_netdev(netdev);
1352	return ERR_PTR(err);
1353}
1354
1355/**
1356 * Entry point to this code when a new device is created.  Allocate the basic
1357 * structures and the ring buffers for communication with the backend, and
1358 * inform the backend of the appropriate details for those.
1359 */
1360static int netfront_probe(struct xenbus_device *dev,
1361			  const struct xenbus_device_id *id)
1362{
1363	int err;
1364	struct net_device *netdev;
1365	struct netfront_info *info;
1366
1367	netdev = xennet_create_dev(dev);
1368	if (IS_ERR(netdev)) {
1369		err = PTR_ERR(netdev);
1370		xenbus_dev_fatal(dev, err, "creating netdev");
1371		return err;
1372	}
1373
1374	info = netdev_priv(netdev);
1375	dev_set_drvdata(&dev->dev, info);
1376
 
 
1377	err = register_netdev(info->netdev);
1378	if (err) {
1379		pr_warn("%s: register_netdev err=%d\n", __func__, err);
1380		goto fail;
1381	}
1382
1383	err = xennet_sysfs_addif(info->netdev);
1384	if (err) {
1385		unregister_netdev(info->netdev);
1386		pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
1387		goto fail;
1388	}
1389
1390	return 0;
1391
 1392 fail:
	free_percpu(info->stats);	/* percpu stats are allocated in xennet_create_dev() */
1393	free_netdev(netdev);
1394	dev_set_drvdata(&dev->dev, NULL);
1395	return err;
1396}
1397
1398static void xennet_end_access(int ref, void *page)
1399{
1400	/* This frees the page as a side-effect */
1401	if (ref != GRANT_INVALID_REF)
1402		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1403}
1404
1405static void xennet_disconnect_backend(struct netfront_info *info)
1406{
1407	/* Stop old i/f to prevent errors whilst we rebuild the state. */
1408	spin_lock_bh(&info->rx_lock);
1409	spin_lock_irq(&info->tx_lock);
1410	netif_carrier_off(info->netdev);
1411	spin_unlock_irq(&info->tx_lock);
1412	spin_unlock_bh(&info->rx_lock);
1413
1414	if (info->tx_irq && (info->tx_irq == info->rx_irq))
1415		unbind_from_irqhandler(info->tx_irq, info);
1416	if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
1417		unbind_from_irqhandler(info->tx_irq, info);
1418		unbind_from_irqhandler(info->rx_irq, info);
1419	}
1420	info->tx_evtchn = info->rx_evtchn = 0;
1421	info->tx_irq = info->rx_irq = 0;
1422
1423	/* End access and free the pages */
1424	xennet_end_access(info->tx_ring_ref, info->tx.sring);
1425	xennet_end_access(info->rx_ring_ref, info->rx.sring);
1426
1427	info->tx_ring_ref = GRANT_INVALID_REF;
1428	info->rx_ring_ref = GRANT_INVALID_REF;
1429	info->tx.sring = NULL;
1430	info->rx.sring = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
1431}
1432
1433/**
1434 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1435 * driver restart.  We tear down our netif structure and recreate it, but
1436 * leave the device-layer structures intact so that this is transparent to the
1437 * rest of the kernel.
1438 */
1439static int netfront_resume(struct xenbus_device *dev)
1440{
1441	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1442
1443	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1444
1445	xennet_disconnect_backend(info);
1446	return 0;
1447}
1448
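/*
 * Parse the device's "mac" xenstore node, expected to hold a string of
 * the form "xx:xx:xx:xx:xx:xx" (Xen toolstacks conventionally allocate
 * addresses from the 00:16:3e OUI).
 */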
1449static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1450{
1451	char *s, *e, *macstr;
1452	int i;
1453
1454	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1455	if (IS_ERR(macstr))
1456		return PTR_ERR(macstr);
1457
1458	for (i = 0; i < ETH_ALEN; i++) {
1459		mac[i] = simple_strtoul(s, &e, 16);
1460		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1461			kfree(macstr);
1462			return -ENOENT;
1463		}
1464		s = e+1;
1465	}
1466
1467	kfree(macstr);
1468	return 0;
1469}
1470
1471static int setup_netfront_single(struct netfront_info *info)
1472{
1473	int err;
1474
1475	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1476	if (err < 0)
1477		goto fail;
1478
1479	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1480					xennet_interrupt,
1481					0, info->netdev->name, info);
1482	if (err < 0)
1483		goto bind_fail;
1484	info->rx_evtchn = info->tx_evtchn;
1485	info->rx_irq = info->tx_irq = err;
1486
1487	return 0;
1488
1489bind_fail:
1490	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1491	info->tx_evtchn = 0;
1492fail:
1493	return err;
1494}
1495
1496static int setup_netfront_split(struct netfront_info *info)
1497{
1498	int err;
1499
1500	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1501	if (err < 0)
1502		goto fail;
1503	err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
1504	if (err < 0)
1505		goto alloc_rx_evtchn_fail;
1506
1507	snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
1508		 "%s-tx", info->netdev->name);
1509	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1510					xennet_tx_interrupt,
1511					0, info->tx_irq_name, info);
1512	if (err < 0)
1513		goto bind_tx_fail;
1514	info->tx_irq = err;
1515
1516	snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
1517		 "%s-rx", info->netdev->name);
1518	err = bind_evtchn_to_irqhandler(info->rx_evtchn,
1519					xennet_rx_interrupt,
1520					0, info->rx_irq_name, info);
1521	if (err < 0)
1522		goto bind_rx_fail;
1523	info->rx_irq = err;
1524
1525	return 0;
1526
1527bind_rx_fail:
1528	unbind_from_irqhandler(info->tx_irq, info);
1529	info->tx_irq = 0;
1530bind_tx_fail:
1531	xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
1532	info->rx_evtchn = 0;
1533alloc_rx_evtchn_fail:
1534	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1535	info->tx_evtchn = 0;
1536fail:
1537	return err;
1538}
1539
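/*
 * Allocate and grant the shared TX/RX rings and bind the event
 * channel(s), preferring split TX/RX event channels when the backend
 * advertises feature-split-event-channels.
 */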
1540static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 
1541{
1542	struct xen_netif_tx_sring *txs;
1543	struct xen_netif_rx_sring *rxs;
 
1544	int err;
1545	struct net_device *netdev = info->netdev;
1546	unsigned int feature_split_evtchn;
1547
1548	info->tx_ring_ref = GRANT_INVALID_REF;
1549	info->rx_ring_ref = GRANT_INVALID_REF;
1550	info->rx.sring = NULL;
1551	info->tx.sring = NULL;
1552	netdev->irq = 0;
1553
1554	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1555			   "feature-split-event-channels", "%u",
1556			   &feature_split_evtchn);
1557	if (err < 0)
1558		feature_split_evtchn = 0;
1559
1560	err = xen_net_read_mac(dev, netdev->dev_addr);
1561	if (err) {
1562		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1563		goto fail;
1564	}
1565
1566	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1567	if (!txs) {
1568		err = -ENOMEM;
1569		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1570		goto fail;
1571	}
1572	SHARED_RING_INIT(txs);
1573	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1574
1575	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1576	if (err < 0)
1577		goto grant_tx_ring_fail;
 
1578
1579	info->tx_ring_ref = err;
1580	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1581	if (!rxs) {
1582		err = -ENOMEM;
1583		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1584		goto alloc_rx_ring_fail;
1585	}
1586	SHARED_RING_INIT(rxs);
1587	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1588
1589	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1590	if (err < 0)
1591		goto grant_rx_ring_fail;
1592	info->rx_ring_ref = err;
1593
1594	if (feature_split_evtchn)
1595		err = setup_netfront_split(info);
1596	/* Set up a single event channel if
1597	 *  a) feature-split-event-channels == 0, or
1598	 *  b) feature-split-event-channels == 1 but the split setup failed
1599	 */
1600	if (!feature_split_evtchn || err)
1601		err = setup_netfront_single(info);
1602
1603	if (err)
1604		goto alloc_evtchn_fail;
1605
1606	return 0;
1607
1608	/* If we fail to setup netfront, it is safe to just revoke access to
1609	 * granted pages because backend is not accessing it at this point.
1610	 */
1611alloc_evtchn_fail:
1612	gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
1613grant_rx_ring_fail:
1614	free_page((unsigned long)rxs);
1615alloc_rx_ring_fail:
1616	gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
1617grant_tx_ring_fail:
1618	free_page((unsigned long)txs);
1619fail:
1620	return err;
1621}
1622
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1623/* Common code used when first setting up, and when resuming. */
1624static int talk_to_netback(struct xenbus_device *dev,
1625			   struct netfront_info *info)
1626{
1627	const char *message;
1628	struct xenbus_transaction xbt;
1629	int err;
1630
1631	/* Create shared ring, alloc event channel. */
1632	err = setup_netfront(dev, info);
1633	if (err)
1634		goto out;
1635
1636again:
1637	err = xenbus_transaction_start(&xbt);
1638	if (err) {
1639		xenbus_dev_fatal(dev, err, "starting transaction");
1640		goto destroy_ring;
1641	}
1642
1643	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
1644			    info->tx_ring_ref);
1645	if (err) {
1646		message = "writing tx ring-ref";
1647		goto abort_transaction;
1648	}
1649	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1650			    info->rx_ring_ref);
1651	if (err) {
1652		message = "writing rx ring-ref";
1653		goto abort_transaction;
1654	}
1655
1656	if (info->tx_evtchn == info->rx_evtchn) {
1657		err = xenbus_printf(xbt, dev->nodename,
1658				    "event-channel", "%u", info->tx_evtchn);
1659		if (err) {
1660			message = "writing event-channel";
1661			goto abort_transaction;
1662		}
1663	} else {
1664		err = xenbus_printf(xbt, dev->nodename,
1665				    "event-channel-tx", "%u", info->tx_evtchn);
1666		if (err) {
1667			message = "writing event-channel-tx";
1668			goto abort_transaction;
1669		}
1670		err = xenbus_printf(xbt, dev->nodename,
1671				    "event-channel-rx", "%u", info->rx_evtchn);
1672		if (err) {
1673			message = "writing event-channel-rx";
1674			goto abort_transaction;
1675		}
1676	}
1677
1678	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1679			    1);
1680	if (err) {
1681		message = "writing request-rx-copy";
1682		goto abort_transaction;
1683	}
1684
1685	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1686	if (err) {
1687		message = "writing feature-rx-notify";
1688		goto abort_transaction;
1689	}
1690
1691	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1692	if (err) {
1693		message = "writing feature-sg";
1694		goto abort_transaction;
1695	}
1696
1697	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1698	if (err) {
1699		message = "writing feature-gso-tcpv4";
1700		goto abort_transaction;
1701	}
1702
1703	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1704	if (err) {
1705		message = "writing feature-gso-tcpv6";
1706		goto abort_transaction;
1707	}
1708
1709	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1710			   "1");
1711	if (err) {
1712		message = "writing feature-ipv6-csum-offload";
1713		goto abort_transaction;
1714	}
1715
1716	err = xenbus_transaction_end(xbt, 0);
1717	if (err) {
1718		if (err == -EAGAIN)
1719			goto again;
1720		xenbus_dev_fatal(dev, err, "completing transaction");
1721		goto destroy_ring;
1722	}
1723
1724	return 0;
1725
1726 abort_transaction:
1727	xenbus_transaction_end(xbt, 1);
1728	xenbus_dev_fatal(dev, err, "%s", message);
1729 destroy_ring:
1730	xennet_disconnect_backend(info);
1731 out:
1732	return err;
1733}
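/*
 * The xenbus transaction pattern used above, reduced to a sketch (the
 * "feature-foo" node name is hypothetical):
 *
 * again:
 *	err = xenbus_transaction_start(&xbt);
 *	if (err)
 *		return err;
 *	err = xenbus_printf(xbt, dev->nodename, "feature-foo", "%d", 1);
 *	if (err) {
 *		xenbus_transaction_end(xbt, 1);		abort, discard writes
 *		return err;
 *	}
 *	err = xenbus_transaction_end(xbt, 0);		commit
 *	if (err == -EAGAIN)
 *		goto again;				raced, replay all writes
 *
 * Everything written between start and a successful end commits
 * atomically, which is why every failed write above aborts the whole
 * transaction instead of cleaning up individual nodes.
 */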
1734
1735static int xennet_connect(struct net_device *dev)
1736{
1737	struct netfront_info *np = netdev_priv(dev);
1738	int i, requeue_idx, err;
1739	struct sk_buff *skb;
1740	grant_ref_t ref;
1741	struct xen_netif_rx_request *req;
1742	unsigned int feature_rx_copy;
1743
1744	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1745			   "feature-rx-copy", "%u", &feature_rx_copy);
1746	if (err != 1)
1747		feature_rx_copy = 0;
1748
1749	if (!feature_rx_copy) {
1750		dev_info(&dev->dev,
1751			 "backend does not support copying receive path\n");
1752		return -ENODEV;
1753	}
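	/*
	 * xenbus_scanf() returns the number of values it parsed, so the
	 * "err != 1" test above treats both a missing feature-rx-copy node
	 * and a malformed value as "feature absent".  rx-copy is the one
	 * backend feature this frontend cannot work without, hence the
	 * hard -ENODEV.
	 */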
1754
1755	err = talk_to_netback(np->xbdev, np);
1756	if (err)
1757		return err;
1758
1759	rtnl_lock();
1760	netdev_update_features(dev);
1761	rtnl_unlock();
1762
1763	spin_lock_bh(&np->rx_lock);
1764	spin_lock_irq(&np->tx_lock);
1765
1766	/* Step 1: Discard all pending TX packet fragments. */
1767	xennet_release_tx_bufs(np);
1768
1769	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1770	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1771		skb_frag_t *frag;
1772		const struct page *page;
1773		if (!np->rx_skbs[i])
1774			continue;
1775
1776		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1777		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1778		req = RING_GET_REQUEST(&np->rx, requeue_idx);
1779
1780		frag = &skb_shinfo(skb)->frags[0];
1781		page = skb_frag_page(frag);
1782		gnttab_grant_foreign_access_ref(
1783			ref, np->xbdev->otherend_id,
1784			pfn_to_mfn(page_to_pfn(page)),
1785			0);
1786		req->gref = ref;
1787		req->id   = requeue_idx;
1788
1789		requeue_idx++;
1790	}
1791
1792	np->rx.req_prod_pvt = requeue_idx;
1793
1794	/*
1795	 * Step 3: All public and private state should now be sane.  Get
1796	 * ready to start sending and receiving packets and give the driver
1797	 * domain a kick because we've probably just requeued some
1798	 * packets.
1799	 */
1800	netif_carrier_on(np->netdev);
1801	notify_remote_via_irq(np->tx_irq);
1802	if (np->tx_irq != np->rx_irq)
1803		notify_remote_via_irq(np->rx_irq);
1804	xennet_tx_buf_gc(dev);
1805	xennet_alloc_rx_buffers(dev);
1806
1807	spin_unlock_irq(&np->tx_lock);
1808	spin_unlock_bh(&np->rx_lock);
1809
1810	return 0;
1811}
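/*
 * Reconnect summary for xennet_connect(): surviving rx skbs and grant
 * references are compacted to the front of their arrays in Step 2,
 * re-granted to the (possibly new) backend and republished on the ring
 * from slot 0.  req_prod_pvt is only the private producer index; the
 * requests become visible when xennet_alloc_rx_buffers() pushes the ring
 * and the interrupt(s) kick the backend.
 */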
1812
1813/*
1814 * Callback received when the backend's state changes.
1815 */
1816static void netback_changed(struct xenbus_device *dev,
1817			    enum xenbus_state backend_state)
1818{
1819	struct netfront_info *np = dev_get_drvdata(&dev->dev);
1820	struct net_device *netdev = np->netdev;
1821
1822	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
1823
1824	switch (backend_state) {
1825	case XenbusStateInitialising:
1826	case XenbusStateInitialised:
1827	case XenbusStateReconfiguring:
1828	case XenbusStateReconfigured:
1829	case XenbusStateUnknown:
1830		break;
1831
1832	case XenbusStateInitWait:
1833		if (dev->state != XenbusStateInitialising)
1834			break;
1835		if (xennet_connect(netdev) != 0)
1836			break;
1837		xenbus_switch_state(dev, XenbusStateConnected);
1838		break;
1839
1840	case XenbusStateConnected:
1841		netdev_notify_peers(netdev);
1842		break;
1843
1844	case XenbusStateClosed:
1845		if (dev->state == XenbusStateClosed)
1846			break;
1847		/* Missed the backend's CLOSING state -- fallthrough */
1848	case XenbusStateClosing:
1849		xenbus_frontend_closed(dev);
1850		break;
1851	}
1852}
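/*
 * The usual handshake as seen by netback_changed(): the backend enters
 * InitWait once it has written its feature nodes; the frontend then sets
 * up rings and event channels via xennet_connect() and switches itself
 * to Connected; when the backend follows to Connected,
 * netdev_notify_peers() sends gratuitous ARPs so peers relearn the MAC,
 * e.g. after a live migration.
 */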
1853
1854static const struct xennet_stat {
1855	char name[ETH_GSTRING_LEN];
1856	u16 offset;
1857} xennet_stats[] = {
1858	{
1859		"rx_gso_checksum_fixup",
1860		offsetof(struct netfront_info, rx_gso_checksum_fixup)
1861	},
1862};
1863
1864static int xennet_get_sset_count(struct net_device *dev, int string_set)
1865{
1866	switch (string_set) {
1867	case ETH_SS_STATS:
1868		return ARRAY_SIZE(xennet_stats);
1869	default:
1870		return -EINVAL;
1871	}
1872}
1873
1874static void xennet_get_ethtool_stats(struct net_device *dev,
1875				     struct ethtool_stats *stats, u64 * data)
1876{
1877	void *np = netdev_priv(dev);
1878	int i;
1879
1880	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1881		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
1882}
1883
1884static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
1885{
1886	int i;
1887
1888	switch (stringset) {
1889	case ETH_SS_STATS:
1890		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1891			memcpy(data + i * ETH_GSTRING_LEN,
1892			       xennet_stats[i].name, ETH_GSTRING_LEN);
1893		break;
1894	}
1895}
1896
1897static const struct ethtool_ops xennet_ethtool_ops =
1898{
1899	.get_link = ethtool_op_get_link,
1900
1901	.get_sset_count = xennet_get_sset_count,
1902	.get_ethtool_stats = xennet_get_ethtool_stats,
1903	.get_strings = xennet_get_strings,
1904};
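/*
 * Userspace view of the stats hooks above: "ethtool -S <if>" first calls
 * get_sset_count(ETH_SS_STATS) to size its buffers, then get_strings()
 * for the names and get_ethtool_stats() for the values, so all three
 * must agree on ARRAY_SIZE(xennet_stats) entries in the same order.
 */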
1905
1906#ifdef CONFIG_SYSFS
1907static ssize_t show_rxbuf_min(struct device *dev,
1908			      struct device_attribute *attr, char *buf)
1909{
1910	struct net_device *netdev = to_net_dev(dev);
1911	struct netfront_info *info = netdev_priv(netdev);
1912
1913	return sprintf(buf, "%u\n", info->rx_min_target);
1914}
1915
1916static ssize_t store_rxbuf_min(struct device *dev,
1917			       struct device_attribute *attr,
1918			       const char *buf, size_t len)
1919{
1920	struct net_device *netdev = to_net_dev(dev);
1921	struct netfront_info *np = netdev_priv(netdev);
1922	char *endp;
1923	unsigned long target;
1924
1925	if (!capable(CAP_NET_ADMIN))
1926		return -EPERM;
1927
1928	target = simple_strtoul(buf, &endp, 0);
1929	if (endp == buf)
1930		return -EBADMSG;
1931
1932	if (target < RX_MIN_TARGET)
1933		target = RX_MIN_TARGET;
1934	if (target > RX_MAX_TARGET)
1935		target = RX_MAX_TARGET;
1936
1937	spin_lock_bh(&np->rx_lock);
1938	if (target > np->rx_max_target)
1939		np->rx_max_target = target;
1940	np->rx_min_target = target;
1941	if (target > np->rx_target)
1942		np->rx_target = target;
1943
1944	xennet_alloc_rx_buffers(netdev);
1945
1946	spin_unlock_bh(&np->rx_lock);
1947	return len;
1948}
1949
1950static ssize_t show_rxbuf_max(struct device *dev,
1951			      struct device_attribute *attr, char *buf)
1952{
1953	struct net_device *netdev = to_net_dev(dev);
1954	struct netfront_info *info = netdev_priv(netdev);
1955
1956	return sprintf(buf, "%u\n", info->rx_max_target);
1957}
1958
1959static ssize_t store_rxbuf_max(struct device *dev,
1960			       struct device_attribute *attr,
1961			       const char *buf, size_t len)
1962{
1963	struct net_device *netdev = to_net_dev(dev);
1964	struct netfront_info *np = netdev_priv(netdev);
1965	char *endp;
1966	unsigned long target;
1967
1968	if (!capable(CAP_NET_ADMIN))
1969		return -EPERM;
1970
1971	target = simple_strtoul(buf, &endp, 0);
1972	if (endp == buf)
1973		return -EBADMSG;
1974
1975	if (target < RX_MIN_TARGET)
1976		target = RX_MIN_TARGET;
1977	if (target > RX_MAX_TARGET)
1978		target = RX_MAX_TARGET;
1979
1980	spin_lock_bh(&np->rx_lock);
1981	if (target < np->rx_min_target)
1982		np->rx_min_target = target;
1983	np->rx_max_target = target;
1984	if (target < np->rx_target)
1985		np->rx_target = target;
1986
1987	xennet_alloc_rx_buffers(netdev);
1988
1989	spin_unlock_bh(&np->rx_lock);
1990	return len;
1991}
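/*
 * Together the two store hooks keep
 *	RX_MIN_TARGET <= rx_min_target <= rx_target <= rx_max_target
 * under rx_lock: raising rxbuf_min drags target/max up as needed,
 * lowering rxbuf_max drags target/min down, and either one refills the
 * rx ring toward the new target.  From userspace ("eth0" is just an
 * example interface name):
 *
 *	echo 128 > /sys/class/net/eth0/rxbuf_min
 *	cat /sys/class/net/eth0/rxbuf_cur
 */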
1992
1993static ssize_t show_rxbuf_cur(struct device *dev,
1994			      struct device_attribute *attr, char *buf)
1995{
1996	struct net_device *netdev = to_net_dev(dev);
1997	struct netfront_info *info = netdev_priv(netdev);
1998
1999	return sprintf(buf, "%u\n", info->rx_target);
2000}
2001
2002static struct device_attribute xennet_attrs[] = {
2003	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
2004	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
2005	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
2006};
2007
2008static int xennet_sysfs_addif(struct net_device *netdev)
2009{
2010	int i;
2011	int err;
2012
2013	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2014		err = device_create_file(&netdev->dev,
2015					   &xennet_attrs[i]);
2016		if (err)
2017			goto fail;
2018	}
2019	return 0;
2020
2021 fail:
2022	while (--i >= 0)
2023		device_remove_file(&netdev->dev, &xennet_attrs[i]);
2024	return err;
2025}
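/*
 * Standard partial-rollback idiom above: if the i-th
 * device_create_file() fails, "while (--i >= 0)" removes only the
 * attributes that were actually created, so a failed probe leaves no
 * stale sysfs entries behind.
 */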
2026
2027static void xennet_sysfs_delif(struct net_device *netdev)
2028{
2029	int i;
2030
2031	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
2032		device_remove_file(&netdev->dev, &xennet_attrs[i]);
2033}
2034
2035#endif /* CONFIG_SYSFS */
2036
2037static const struct xenbus_device_id netfront_ids[] = {
2038	{ "vif" },
2039	{ "" }
2040};
2041
2042
2043static int xennet_remove(struct xenbus_device *dev)
2044{
2045	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2046
2047	dev_dbg(&dev->dev, "%s\n", dev->nodename);
2048
2049	xennet_disconnect_backend(info);
2050
2051	xennet_sysfs_delif(info->netdev);
2052
2053	unregister_netdev(info->netdev);
2054
2055	del_timer_sync(&info->rx_refill_timer);
2056
2057	free_percpu(info->stats);
2058
2059	free_netdev(info->netdev);
2060
2061	return 0;
2062}
2063
2064static DEFINE_XENBUS_DRIVER(netfront, ,
2065	.probe = netfront_probe,
2066	.remove = xennet_remove,
2067	.resume = netfront_resume,
2068	.otherend_changed = netback_changed,
2069);
2070
2071static int __init netif_init(void)
2072{
2073	if (!xen_domain())
2074		return -ENODEV;
2075
2076	if (!xen_has_pv_nic_devices())
2077		return -ENODEV;
2078
2079	pr_info("Initialising Xen virtual ethernet driver\n");
2080
2081	return xenbus_register_frontend(&netfront_driver);
2082}
2083module_init(netif_init);
2084
2085
2086static void __exit netif_exit(void)
2087{
2088	xenbus_unregister_driver(&netfront_driver);
2089}
2090module_exit(netif_exit);
2091
2092MODULE_DESCRIPTION("Xen virtual network device frontend");
2093MODULE_LICENSE("GPL");
2094MODULE_ALIAS("xen:vif");
2095MODULE_ALIAS("xennet");
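/*
 * MODULE_ALIAS("xen:vif") matches the modalias string the xenbus core
 * generates for devices of type "vif", letting udev autoload this driver
 * when a PV network device appears.
 */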