v3.1
   1/*
   2 * Virtual network driver for conversing with remote driver backends.
   3 *
   4 * Copyright (c) 2002-2005, K A Fraser
   5 * Copyright (c) 2005, XenSource Ltd
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version 2
   9 * as published by the Free Software Foundation; or, when distributed
  10 * separately from the Linux kernel or incorporated into other
  11 * software packages, subject to the following license:
  12 *
  13 * Permission is hereby granted, free of charge, to any person obtaining a copy
  14 * of this source file (the "Software"), to deal in the Software without
  15 * restriction, including without limitation the rights to use, copy, modify,
  16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  17 * and to permit persons to whom the Software is furnished to do so, subject to
  18 * the following conditions:
  19 *
  20 * The above copyright notice and this permission notice shall be included in
  21 * all copies or substantial portions of the Software.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  29 * IN THE SOFTWARE.
  30 */
  31
 
 
  32#include <linux/module.h>
  33#include <linux/kernel.h>
  34#include <linux/netdevice.h>
  35#include <linux/etherdevice.h>
  36#include <linux/skbuff.h>
  37#include <linux/ethtool.h>
  38#include <linux/if_ether.h>
  39#include <linux/tcp.h>
  40#include <linux/udp.h>
  41#include <linux/moduleparam.h>
  42#include <linux/mm.h>
  43#include <linux/slab.h>
  44#include <net/ip.h>
  45
  46#include <xen/xen.h>
  47#include <xen/xenbus.h>
  48#include <xen/events.h>
  49#include <xen/page.h>
 
  50#include <xen/grant_table.h>
  51
  52#include <xen/interface/io/netif.h>
  53#include <xen/interface/memory.h>
  54#include <xen/interface/grant_table.h>
  55
  56static const struct ethtool_ops xennet_ethtool_ops;
  57
  58struct netfront_cb {
  59	struct page *page;
  60	unsigned offset;
  61};
  62
  63#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  64
  65#define RX_COPY_THRESHOLD 256
  66
  67#define GRANT_INVALID_REF	0
  68
  69#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
  70#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
  71#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
  72
  73struct netfront_stats {
  74	u64			rx_packets;
  75	u64			tx_packets;
  76	u64			rx_bytes;
  77	u64			tx_bytes;
  78	struct u64_stats_sync	syncp;
  79};
  80
  81struct netfront_info {
  82	struct list_head list;
  83	struct net_device *netdev;
  84
  85	struct napi_struct napi;
  86
  87	unsigned int evtchn;
  88	struct xenbus_device *xbdev;
  89
  90	spinlock_t   tx_lock;
  91	struct xen_netif_tx_front_ring tx;
  92	int tx_ring_ref;
  93
  94	/*
  95	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
  96	 * are linked from tx_skb_freelist through skb_entry.link.
  97	 *
  98	 *  NB. Freelist index entries are always going to be less than
  99	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
 100	 *  greater than PAGE_OFFSET: we use this property to distinguish
 101	 *  them.
 102	 */
 103	union skb_entry {
 104		struct sk_buff *skb;
 105		unsigned long link;
 106	} tx_skbs[NET_TX_RING_SIZE];
 107	grant_ref_t gref_tx_head;
 108	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 
 109	unsigned tx_skb_freelist;
 110
 111	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 112	struct xen_netif_rx_front_ring rx;
 113	int rx_ring_ref;
 114
 115	/* Receive-ring batched refills. */
 116#define RX_MIN_TARGET 8
 117#define RX_DFL_MIN_TARGET 64
 118#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
 119	unsigned rx_min_target, rx_max_target, rx_target;
 120	struct sk_buff_head rx_batch;
 121
 122	struct timer_list rx_refill_timer;
 123
 124	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 125	grant_ref_t gref_rx_head;
 126	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 
 127
 128	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 129	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 130	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
 131
 132	/* Statistics */
 133	struct netfront_stats __percpu *stats;
 
 134
 135	unsigned long rx_gso_checksum_fixup;
 136};
 137
 138struct netfront_rx_info {
 139	struct xen_netif_rx_response rx;
 140	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 141};
 142
 143static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 144{
 145	list->link = id;
 146}
 147
 148static int skb_entry_is_link(const union skb_entry *list)
 149{
 150	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
 151	return (unsigned long)list->skb < PAGE_OFFSET;
 152}
 153
 154/*
  155 * Access functions for acquiring and freeing slots in tx_skbs[].
 156 */
 157
 158static void add_id_to_freelist(unsigned *head, union skb_entry *list,
 159			       unsigned short id)
 160{
 161	skb_entry_set_link(&list[id], *head);
 162	*head = id;
 163}
 164
 165static unsigned short get_id_from_freelist(unsigned *head,
 166					   union skb_entry *list)
 167{
 168	unsigned int id = *head;
 169	*head = list[id].link;
 170	return id;
 171}
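
/*
 * Illustrative sketch, not part of the original file: the freelist links
 * unused tx_skbs[] slots through their integer 'link' field, so claiming
 * and releasing a slot is O(1).  The transmit and completion paths use it
 * roughly like this (once a slot holds an skb pointer it compares
 * >= PAGE_OFFSET, so skb_entry_is_link() no longer treats it as a link):
 *
 *	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 *	np->tx_skbs[id].skb = skb_get(skb);
 *	...
 *	add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 */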
 172
 173static int xennet_rxidx(RING_IDX idx)
 174{
 175	return idx & (NET_RX_RING_SIZE - 1);
 176}
 177
 178static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
 179					 RING_IDX ri)
 180{
 181	int i = xennet_rxidx(ri);
 182	struct sk_buff *skb = np->rx_skbs[i];
 183	np->rx_skbs[i] = NULL;
 184	return skb;
 185}
 186
 187static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
 188					    RING_IDX ri)
 189{
 190	int i = xennet_rxidx(ri);
 191	grant_ref_t ref = np->grant_rx_ref[i];
 192	np->grant_rx_ref[i] = GRANT_INVALID_REF;
 193	return ref;
 194}
 195
 196#ifdef CONFIG_SYSFS
 197static int xennet_sysfs_addif(struct net_device *netdev);
 198static void xennet_sysfs_delif(struct net_device *netdev);
 199#else /* !CONFIG_SYSFS */
 200#define xennet_sysfs_addif(dev) (0)
 201#define xennet_sysfs_delif(dev) do { } while (0)
 202#endif
 203
 204static int xennet_can_sg(struct net_device *dev)
 205{
 206	return dev->features & NETIF_F_SG;
 207}
 208
 209
 210static void rx_refill_timeout(unsigned long data)
 211{
 212	struct net_device *dev = (struct net_device *)data;
 213	struct netfront_info *np = netdev_priv(dev);
 214	napi_schedule(&np->napi);
 215}
 216
 217static int netfront_tx_slot_available(struct netfront_info *np)
 218{
 219	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
 220		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
 221}
 222
 223static void xennet_maybe_wake_tx(struct net_device *dev)
 224{
 225	struct netfront_info *np = netdev_priv(dev);
 
 226
 227	if (unlikely(netif_queue_stopped(dev)) &&
 228	    netfront_tx_slot_available(np) &&
 229	    likely(netif_running(dev)))
 230		netif_wake_queue(dev);
 231}
 232
 233static void xennet_alloc_rx_buffers(struct net_device *dev)
 
 234{
 235	unsigned short id;
 236	struct netfront_info *np = netdev_priv(dev);
 237	struct sk_buff *skb;
 238	struct page *page;
 239	int i, batch_target, notify;
 240	RING_IDX req_prod = np->rx.req_prod_pvt;
 241	grant_ref_t ref;
 242	unsigned long pfn;
 243	void *vaddr;
 244	struct xen_netif_rx_request *req;
 245
 246	if (unlikely(!netif_carrier_ok(dev)))
 247		return;
 248
 249	/*
 250	 * Allocate skbuffs greedily, even though we batch updates to the
 251	 * receive ring. This creates a less bursty demand on the memory
 252	 * allocator, so should reduce the chance of failed allocation requests
  253	 * both for ourselves and for other kernel subsystems.
 254	 */
 255	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
 256	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
 257		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
 258					 GFP_ATOMIC | __GFP_NOWARN);
 259		if (unlikely(!skb))
 260			goto no_skb;
 261
  262		/* Align the IP header to a 16-byte boundary */
 263		skb_reserve(skb, NET_IP_ALIGN);
 264
 265		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 266		if (!page) {
 267			kfree_skb(skb);
 268no_skb:
 269			/* Any skbuffs queued for refill? Force them out. */
 270			if (i != 0)
 271				goto refill;
 272			/* Could not allocate any skbuffs. Try again later. */
 273			mod_timer(&np->rx_refill_timer,
 274				  jiffies + (HZ/10));
 275			break;
 276		}
 277
 278		skb_shinfo(skb)->frags[0].page = page;
 279		skb_shinfo(skb)->nr_frags = 1;
 280		__skb_queue_tail(&np->rx_batch, skb);
 281	}
 282
 283	/* Is the batch large enough to be worthwhile? */
 284	if (i < (np->rx_target/2)) {
 285		if (req_prod > np->rx.sring->req_prod)
 286			goto push;
 287		return;
 288	}
 289
 290	/* Adjust our fill target if we risked running out of buffers. */
 291	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
 292	    ((np->rx_target *= 2) > np->rx_max_target))
 293		np->rx_target = np->rx_max_target;
 294
 295 refill:
 296	for (i = 0; ; i++) {
 297		skb = __skb_dequeue(&np->rx_batch);
 298		if (skb == NULL)
 299			break;
 300
 301		skb->dev = dev;
 302
 303		id = xennet_rxidx(req_prod + i);
 304
 305		BUG_ON(np->rx_skbs[id]);
 306		np->rx_skbs[id] = skb;
 307
 308		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
 309		BUG_ON((signed short)ref < 0);
 310		np->grant_rx_ref[id] = ref;
 
 
 311
 312		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
 313		vaddr = page_address(skb_shinfo(skb)->frags[0].page);
 314
 315		req = RING_GET_REQUEST(&np->rx, req_prod + i);
 316		gnttab_grant_foreign_access_ref(ref,
 317						np->xbdev->otherend_id,
 318						pfn_to_mfn(pfn),
 319						0);
 320
 321		req->id = id;
 322		req->gref = ref;
 323	}
 324
  325	wmb();		/* barrier so backend sees requests */
 326
 327	/* Above is a suitable barrier to ensure backend will see requests. */
 328	np->rx.req_prod_pvt = req_prod + i;
 329 push:
 330	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
 331	if (notify)
 332		notify_remote_via_irq(np->netdev->irq);
 333}
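
/*
 * Worked example (illustrative, not in the original source): with
 * rx_target == 64 and 40 requests still outstanding on the ring
 * (req_prod - rsp_cons == 40), batch_target == 24, so up to 24 fresh
 * skbs are allocated before the ring is refilled.  If fewer than
 * rx_target/4 of the posted buffers remain unconsumed by the backend,
 * rx_target doubles (capped at rx_max_target); xennet_poll() later
 * decays it one step at a time towards rx_min_target when responses
 * arrive with plenty of slack, i.e. exponential increase, linear
 * decrease.
 */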
 334
 335static int xennet_open(struct net_device *dev)
 336{
 337	struct netfront_info *np = netdev_priv(dev);
 338
 339	napi_enable(&np->napi);
 340
 341	spin_lock_bh(&np->rx_lock);
 342	if (netif_carrier_ok(dev)) {
 343		xennet_alloc_rx_buffers(dev);
 344		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
 345		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
 346			napi_schedule(&np->napi);
 347	}
 348	spin_unlock_bh(&np->rx_lock);
 349
 350	netif_start_queue(dev);
 351
 352	return 0;
 353}
 354
 355static void xennet_tx_buf_gc(struct net_device *dev)
 356{
 357	RING_IDX cons, prod;
 358	unsigned short id;
 359	struct netfront_info *np = netdev_priv(dev);
 360	struct sk_buff *skb;
 
 361
 362	BUG_ON(!netif_carrier_ok(dev));
 363
 364	do {
 365		prod = np->tx.sring->rsp_prod;
 366		rmb(); /* Ensure we see responses up to 'rp'. */
 367
 368		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
 369			struct xen_netif_tx_response *txrsp;
 370
 371			txrsp = RING_GET_RESPONSE(&np->tx, cons);
 372			if (txrsp->status == XEN_NETIF_RSP_NULL)
 373				continue;
 374
 375			id  = txrsp->id;
 376			skb = np->tx_skbs[id].skb;
 377			if (unlikely(gnttab_query_foreign_access(
 378				np->grant_tx_ref[id]) != 0)) {
 379				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
 380				       "-- grant still in use by backend "
 381				       "domain.\n");
 382				BUG();
 383			}
 384			gnttab_end_foreign_access_ref(
 385				np->grant_tx_ref[id], GNTMAP_readonly);
 386			gnttab_release_grant_reference(
 387				&np->gref_tx_head, np->grant_tx_ref[id]);
 388			np->grant_tx_ref[id] = GRANT_INVALID_REF;
 389			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 
 390			dev_kfree_skb_irq(skb);
 391		}
 392
 393		np->tx.rsp_cons = prod;
 394
 395		/*
 396		 * Set a new event, then check for race with update of tx_cons.
 397		 * Note that it is essential to schedule a callback, no matter
 398		 * how few buffers are pending. Even if there is space in the
 399		 * transmit ring, higher layers may be blocked because too much
 400		 * data is outstanding: in such cases notification from Xen is
 401		 * likely to be the only kick that we'll get.
 402		 */
 403		np->tx.sring->rsp_event =
 404			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
 405		mb();		/* update shared area */
 406	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
 407
 408	xennet_maybe_wake_tx(dev);
 409}
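
/*
 * Illustrative arithmetic, not part of the original file: the rsp_event
 * update above asks the backend to interrupt us again once roughly half
 * of the currently outstanding requests have completed.  For example,
 * with rsp_prod == 100 and req_prod == 140 there are 40 requests in
 * flight, so
 *
 *	rsp_event = 100 + ((140 - 100) >> 1) + 1 = 121
 *
 * and no further event is raised until response 121 has been produced,
 * batching about 20 completions per interrupt.
 */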
 410
 411static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 412			      struct xen_netif_tx_request *tx)
 413{
 414	struct netfront_info *np = netdev_priv(dev);
 415	char *data = skb->data;
 416	unsigned long mfn;
 417	RING_IDX prod = np->tx.req_prod_pvt;
 418	int frags = skb_shinfo(skb)->nr_frags;
 419	unsigned int offset = offset_in_page(data);
 420	unsigned int len = skb_headlen(skb);
 421	unsigned int id;
 
 422	grant_ref_t ref;
 423	int i;
 424
 425	/* While the header overlaps a page boundary (including being
  426	   larger than a page), split it into page-sized chunks. */
 427	while (len > PAGE_SIZE - offset) {
 428		tx->size = PAGE_SIZE - offset;
 429		tx->flags |= XEN_NETTXF_more_data;
 430		len -= tx->size;
 431		data += tx->size;
 432		offset = 0;
 433
 434		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 435		np->tx_skbs[id].skb = skb_get(skb);
 436		tx = RING_GET_REQUEST(&np->tx, prod++);
 437		tx->id = id;
 438		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 439		BUG_ON((signed short)ref < 0);
 440
 441		mfn = virt_to_mfn(data);
 442		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 443						mfn, GNTMAP_readonly);
 444
 445		tx->gref = np->grant_tx_ref[id] = ref;
 446		tx->offset = offset;
 447		tx->size = len;
 448		tx->flags = 0;
 449	}
 450
 451	/* Grant backend access to each skb fragment page. */
 452	for (i = 0; i < frags; i++) {
 453		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 454
 455		tx->flags |= XEN_NETTXF_more_data;
 456
 457		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 458		np->tx_skbs[id].skb = skb_get(skb);
 459		tx = RING_GET_REQUEST(&np->tx, prod++);
 460		tx->id = id;
 461		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 462		BUG_ON((signed short)ref < 0);
 463
 464		mfn = pfn_to_mfn(page_to_pfn(frag->page));
 465		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 466						mfn, GNTMAP_readonly);
 467
 468		tx->gref = np->grant_tx_ref[id] = ref;
 469		tx->offset = frag->page_offset;
 470		tx->size = frag->size;
 471		tx->flags = 0;
 472	}
 473
 474	np->tx.req_prod_pvt = prod;
 475}
 476
 
 
 477static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 478{
 479	unsigned short id;
 480	struct netfront_info *np = netdev_priv(dev);
 481	struct netfront_stats *stats = this_cpu_ptr(np->stats);
 482	struct xen_netif_tx_request *tx;
 483	struct xen_netif_extra_info *extra;
 484	char *data = skb->data;
 485	RING_IDX i;
 486	grant_ref_t ref;
 487	unsigned long mfn;
 488	int notify;
 489	int frags = skb_shinfo(skb)->nr_frags;
 490	unsigned int offset = offset_in_page(data);
 491	unsigned int len = skb_headlen(skb);
 492
 493	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
 494	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
 495		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
 496		       frags);
 497		dump_stack();
 498		goto drop;
 499	}
 500
 501	spin_lock_irq(&np->tx_lock);
 502
 503	if (unlikely(!netif_carrier_ok(dev) ||
 504		     (frags > 1 && !xennet_can_sg(dev)) ||
 505		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 506		spin_unlock_irq(&np->tx_lock);
 507		goto drop;
 508	}
 509
 510	i = np->tx.req_prod_pvt;
 511
 512	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
 513	np->tx_skbs[id].skb = skb;
 514
 515	tx = RING_GET_REQUEST(&np->tx, i);
 516
 517	tx->id   = id;
 518	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
 519	BUG_ON((signed short)ref < 0);
 520	mfn = virt_to_mfn(data);
 521	gnttab_grant_foreign_access_ref(
 522		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
 523	tx->gref = np->grant_tx_ref[id] = ref;
 524	tx->offset = offset;
 525	tx->size = len;
 526	extra = NULL;
 527
 528	tx->flags = 0;
 529	if (skb->ip_summed == CHECKSUM_PARTIAL)
 530		/* local packet? */
 531		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
 532	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 533		/* remote but checksummed. */
 534		tx->flags |= XEN_NETTXF_data_validated;
 535
 
 536	if (skb_shinfo(skb)->gso_size) {
 537		struct xen_netif_extra_info *gso;
 538
 539		gso = (struct xen_netif_extra_info *)
 540			RING_GET_REQUEST(&np->tx, ++i);
 541
 542		if (extra)
 543			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
 544		else
 545			tx->flags |= XEN_NETTXF_extra_info;
 546
 547		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 548		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
 
 
 549		gso->u.gso.pad = 0;
 550		gso->u.gso.features = 0;
 551
 552		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 553		gso->flags = 0;
 554		extra = gso;
 555	}
 556
 557	np->tx.req_prod_pvt = i + 1;
 
 558
 559	xennet_make_frags(skb, dev, tx);
 560	tx->size = skb->len;
 561
 562	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
 563	if (notify)
 564		notify_remote_via_irq(np->netdev->irq);
 565
 566	u64_stats_update_begin(&stats->syncp);
 567	stats->tx_bytes += skb->len;
 568	stats->tx_packets++;
 569	u64_stats_update_end(&stats->syncp);
 570
 571	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 572	xennet_tx_buf_gc(dev);
 573
 574	if (!netfront_tx_slot_available(np))
 575		netif_stop_queue(dev);
 576
 577	spin_unlock_irq(&np->tx_lock);
 578
 579	return NETDEV_TX_OK;
 580
 581 drop:
 582	dev->stats.tx_dropped++;
 583	dev_kfree_skb(skb);
 584	return NETDEV_TX_OK;
 585}
 586
 587static int xennet_close(struct net_device *dev)
 588{
 589	struct netfront_info *np = netdev_priv(dev);
 590	netif_stop_queue(np->netdev);
 591	napi_disable(&np->napi);
 592	return 0;
 593}
 594
 595static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
 596				grant_ref_t ref)
 597{
 598	int new = xennet_rxidx(np->rx.req_prod_pvt);
 599
 600	BUG_ON(np->rx_skbs[new]);
 601	np->rx_skbs[new] = skb;
 602	np->grant_rx_ref[new] = ref;
 603	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
 604	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
 605	np->rx.req_prod_pvt++;
 606}
 607
 608static int xennet_get_extras(struct netfront_info *np,
 609			     struct xen_netif_extra_info *extras,
 610			     RING_IDX rp)
 611
 612{
 613	struct xen_netif_extra_info *extra;
 614	struct device *dev = &np->netdev->dev;
 615	RING_IDX cons = np->rx.rsp_cons;
 616	int err = 0;
 617
 618	do {
 619		struct sk_buff *skb;
 620		grant_ref_t ref;
 621
 622		if (unlikely(cons + 1 == rp)) {
 623			if (net_ratelimit())
 624				dev_warn(dev, "Missing extra info\n");
 625			err = -EBADR;
 626			break;
 627		}
 628
 629		extra = (struct xen_netif_extra_info *)
 630			RING_GET_RESPONSE(&np->rx, ++cons);
 631
 632		if (unlikely(!extra->type ||
 633			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 634			if (net_ratelimit())
 635				dev_warn(dev, "Invalid extra type: %d\n",
 636					extra->type);
 637			err = -EINVAL;
 638		} else {
 639			memcpy(&extras[extra->type - 1], extra,
 640			       sizeof(*extra));
 641		}
 642
 643		skb = xennet_get_rx_skb(np, cons);
 644		ref = xennet_get_rx_ref(np, cons);
 645		xennet_move_rx_slot(np, skb, ref);
 646	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 647
 648	np->rx.rsp_cons = cons;
 649	return err;
 650}
 651
 652static int xennet_get_responses(struct netfront_info *np,
 653				struct netfront_rx_info *rinfo, RING_IDX rp,
 654				struct sk_buff_head *list)
 655{
 656	struct xen_netif_rx_response *rx = &rinfo->rx;
 657	struct xen_netif_extra_info *extras = rinfo->extras;
 658	struct device *dev = &np->netdev->dev;
 659	RING_IDX cons = np->rx.rsp_cons;
 660	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
 661	grant_ref_t ref = xennet_get_rx_ref(np, cons);
 662	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
 663	int frags = 1;
 664	int err = 0;
 665	unsigned long ret;
 666
 667	if (rx->flags & XEN_NETRXF_extra_info) {
 668		err = xennet_get_extras(np, extras, rp);
 669		cons = np->rx.rsp_cons;
 670	}
 671
 672	for (;;) {
 673		if (unlikely(rx->status < 0 ||
 674			     rx->offset + rx->status > PAGE_SIZE)) {
 675			if (net_ratelimit())
 676				dev_warn(dev, "rx->offset: %x, size: %u\n",
 677					 rx->offset, rx->status);
 678			xennet_move_rx_slot(np, skb, ref);
 679			err = -EINVAL;
 680			goto next;
 681		}
 682
 683		/*
 684		 * This definitely indicates a bug, either in this driver or in
 685		 * the backend driver. In future this should flag the bad
  686		 * situation to the system controller to reboot the backend.
 687		 */
 688		if (ref == GRANT_INVALID_REF) {
 689			if (net_ratelimit())
 690				dev_warn(dev, "Bad rx response id %d.\n",
 691					 rx->id);
 692			err = -EINVAL;
 693			goto next;
 694		}
 695
 696		ret = gnttab_end_foreign_access_ref(ref, 0);
 697		BUG_ON(!ret);
 698
 699		gnttab_release_grant_reference(&np->gref_rx_head, ref);
 700
 701		__skb_queue_tail(list, skb);
 702
 703next:
 704		if (!(rx->flags & XEN_NETRXF_more_data))
 705			break;
 706
 707		if (cons + frags == rp) {
 708			if (net_ratelimit())
 709				dev_warn(dev, "Need more frags\n");
 710			err = -ENOENT;
 711			break;
 712		}
 713
 714		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
 715		skb = xennet_get_rx_skb(np, cons + frags);
 716		ref = xennet_get_rx_ref(np, cons + frags);
 717		frags++;
 718	}
 719
 720	if (unlikely(frags > max)) {
 721		if (net_ratelimit())
 722			dev_warn(dev, "Too many frags\n");
 723		err = -E2BIG;
 724	}
 725
 726	if (unlikely(err))
 727		np->rx.rsp_cons = cons + frags;
 728
 729	return err;
 730}
 731
 732static int xennet_set_skb_gso(struct sk_buff *skb,
 733			      struct xen_netif_extra_info *gso)
 734{
 735	if (!gso->u.gso.size) {
 736		if (net_ratelimit())
 737			printk(KERN_WARNING "GSO size must not be zero.\n");
 738		return -EINVAL;
 739	}
 740
 741	/* Currently only TCPv4 S.O. is supported. */
 742	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
 743		if (net_ratelimit())
 744			printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
 745		return -EINVAL;
 746	}
 747
 748	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 749	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 750
 751	/* Header must be checked, and gso_segs computed. */
 752	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
 753	skb_shinfo(skb)->gso_segs = 0;
 754
 755	return 0;
 756}
 757
 758static RING_IDX xennet_fill_frags(struct netfront_info *np,
 759				  struct sk_buff *skb,
 760				  struct sk_buff_head *list)
 761{
 762	struct skb_shared_info *shinfo = skb_shinfo(skb);
 763	int nr_frags = shinfo->nr_frags;
 764	RING_IDX cons = np->rx.rsp_cons;
 765	skb_frag_t *frag = shinfo->frags + nr_frags;
 766	struct sk_buff *nskb;
 767
 768	while ((nskb = __skb_dequeue(list))) {
 769		struct xen_netif_rx_response *rx =
 770			RING_GET_RESPONSE(&np->rx, ++cons);
 771
 772		frag->page = skb_shinfo(nskb)->frags[0].page;
 773		frag->page_offset = rx->offset;
 774		frag->size = rx->status;
 
 775
 776		skb->data_len += rx->status;
 
 777
 778		skb_shinfo(nskb)->nr_frags = 0;
 779		kfree_skb(nskb);
 780
 781		frag++;
 782		nr_frags++;
 783	}
 784
 785	shinfo->nr_frags = nr_frags;
 786	return cons;
 787}
 788
 789static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 790{
 791	struct iphdr *iph;
 792	unsigned char *th;
 793	int err = -EPROTO;
 794	int recalculate_partial_csum = 0;
 795
 796	/*
 797	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 798	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 799	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 800	 * recalculate the partial checksum.
 801	 */
 802	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 803		struct netfront_info *np = netdev_priv(dev);
 804		np->rx_gso_checksum_fixup++;
 805		skb->ip_summed = CHECKSUM_PARTIAL;
 806		recalculate_partial_csum = 1;
 807	}
 808
 809	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 810	if (skb->ip_summed != CHECKSUM_PARTIAL)
 811		return 0;
 812
 813	if (skb->protocol != htons(ETH_P_IP))
 814		goto out;
 815
 816	iph = (void *)skb->data;
 817	th = skb->data + 4 * iph->ihl;
 818	if (th >= skb_tail_pointer(skb))
 819		goto out;
 820
 821	skb->csum_start = th - skb->head;
 822	switch (iph->protocol) {
 823	case IPPROTO_TCP:
 824		skb->csum_offset = offsetof(struct tcphdr, check);
 825
 826		if (recalculate_partial_csum) {
 827			struct tcphdr *tcph = (struct tcphdr *)th;
 828			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
 829							 skb->len - iph->ihl*4,
 830							 IPPROTO_TCP, 0);
 831		}
 832		break;
 833	case IPPROTO_UDP:
 834		skb->csum_offset = offsetof(struct udphdr, check);
 835
 836		if (recalculate_partial_csum) {
 837			struct udphdr *udph = (struct udphdr *)th;
 838			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
 839							 skb->len - iph->ihl*4,
 840							 IPPROTO_UDP, 0);
 841		}
 842		break;
 843	default:
 844		if (net_ratelimit())
 845			printk(KERN_ERR "Attempting to checksum a non-"
 846			       "TCP/UDP packet, dropping a protocol"
 847			       " %d packet", iph->protocol);
 848		goto out;
 849	}
 850
 851	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
 852		goto out;
 853
 854	err = 0;
 855
 856out:
 857	return err;
 858}
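
/*
 * Illustrative sketch, not part of the original driver: for a TCP/IPv4
 * frame left as CHECKSUM_PARTIAL the function above ends up with
 *
 *	skb->csum_start  = transport header offset from skb->head
 *	skb->csum_offset = offsetof(struct tcphdr, check)
 *
 * so whoever completes the checksum later (software fallback or offload
 * hardware) writes the result at csum_start + csum_offset, while the
 * GSO fixup path has already seeded the pseudo-header sum via
 * csum_tcpudp_magic().
 */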
 859
 860static int handle_incoming_queue(struct net_device *dev,
 861				 struct sk_buff_head *rxq)
 862{
 863	struct netfront_info *np = netdev_priv(dev);
 864	struct netfront_stats *stats = this_cpu_ptr(np->stats);
 865	int packets_dropped = 0;
 866	struct sk_buff *skb;
 867
 868	while ((skb = __skb_dequeue(rxq)) != NULL) {
 869		struct page *page = NETFRONT_SKB_CB(skb)->page;
 870		void *vaddr = page_address(page);
 871		unsigned offset = NETFRONT_SKB_CB(skb)->offset;
 872
 873		memcpy(skb->data, vaddr + offset,
 874		       skb_headlen(skb));
 875
 876		if (page != skb_shinfo(skb)->frags[0].page)
 877			__free_page(page);
 878
 879		/* Ethernet work: Delayed to here as it peeks the header. */
 880		skb->protocol = eth_type_trans(skb, dev);
 
 881
 882		if (checksum_setup(dev, skb)) {
 883			kfree_skb(skb);
 884			packets_dropped++;
 885			dev->stats.rx_errors++;
 886			continue;
 887		}
 888
 889		u64_stats_update_begin(&stats->syncp);
 890		stats->rx_packets++;
 891		stats->rx_bytes += skb->len;
 892		u64_stats_update_end(&stats->syncp);
 893
 894		/* Pass it up. */
 895		netif_receive_skb(skb);
 896	}
 897
 898	return packets_dropped;
 899}
 900
 901static int xennet_poll(struct napi_struct *napi, int budget)
 902{
 903	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
 904	struct net_device *dev = np->netdev;
 905	struct sk_buff *skb;
 906	struct netfront_rx_info rinfo;
 907	struct xen_netif_rx_response *rx = &rinfo.rx;
 908	struct xen_netif_extra_info *extras = rinfo.extras;
 909	RING_IDX i, rp;
 910	int work_done;
 911	struct sk_buff_head rxq;
 912	struct sk_buff_head errq;
 913	struct sk_buff_head tmpq;
 914	unsigned long flags;
 915	unsigned int len;
 916	int err;
 917
 918	spin_lock(&np->rx_lock);
 919
 920	skb_queue_head_init(&rxq);
 921	skb_queue_head_init(&errq);
 922	skb_queue_head_init(&tmpq);
 923
 924	rp = np->rx.sring->rsp_prod;
 925	rmb(); /* Ensure we see queued responses up to 'rp'. */
 926
 927	i = np->rx.rsp_cons;
 928	work_done = 0;
 929	while ((i != rp) && (work_done < budget)) {
 930		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
 931		memset(extras, 0, sizeof(rinfo.extras));
 932
 933		err = xennet_get_responses(np, &rinfo, rp, &tmpq);
 934
 935		if (unlikely(err)) {
 936err:
 937			while ((skb = __skb_dequeue(&tmpq)))
 938				__skb_queue_tail(&errq, skb);
 939			dev->stats.rx_errors++;
 940			i = np->rx.rsp_cons;
 941			continue;
 942		}
 943
 944		skb = __skb_dequeue(&tmpq);
 945
 946		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
 947			struct xen_netif_extra_info *gso;
 948			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 949
 950			if (unlikely(xennet_set_skb_gso(skb, gso))) {
 951				__skb_queue_head(&tmpq, skb);
 952				np->rx.rsp_cons += skb_queue_len(&tmpq);
 953				goto err;
 954			}
 955		}
 956
 957		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
 958		NETFRONT_SKB_CB(skb)->offset = rx->offset;
 959
 960		len = rx->status;
 961		if (len > RX_COPY_THRESHOLD)
 962			len = RX_COPY_THRESHOLD;
 963		skb_put(skb, len);
 964
 965		if (rx->status > len) {
 966			skb_shinfo(skb)->frags[0].page_offset =
 967				rx->offset + len;
 968			skb_shinfo(skb)->frags[0].size = rx->status - len;
 969			skb->data_len = rx->status - len;
 970		} else {
 971			skb_shinfo(skb)->frags[0].page = NULL;
 972			skb_shinfo(skb)->nr_frags = 0;
 973		}
 974
 975		i = xennet_fill_frags(np, skb, &tmpq);
 976
 977		/*
 978		 * Truesize approximates the size of true data plus
 979		 * any supervisor overheads. Adding hypervisor
 980		 * overheads has been shown to significantly reduce
 981		 * achievable bandwidth with the default receive
 982		 * buffer size. It is therefore not wise to account
 983		 * for it here.
 984		 *
 985		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
 986		 * to RX_COPY_THRESHOLD + the supervisor
 987		 * overheads. Here, we add the size of the data pulled
 988		 * in xennet_fill_frags().
 989		 *
 990		 * We also adjust for any unused space in the main
 991		 * data area by subtracting (RX_COPY_THRESHOLD -
 992		 * len). This is especially important with drivers
 993		 * which split incoming packets into header and data,
 994		 * using only 66 bytes of the main data area (see the
 995		 * e1000 driver for example.)  On such systems,
  996		 * without this last adjustment, our achievable
  997		 * receive throughput using the standard receive
 998		 * buffer size was cut by 25%(!!!).
 999		 */
1000		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
1001		skb->len += skb->data_len;
1002
1003		if (rx->flags & XEN_NETRXF_csum_blank)
1004			skb->ip_summed = CHECKSUM_PARTIAL;
1005		else if (rx->flags & XEN_NETRXF_data_validated)
1006			skb->ip_summed = CHECKSUM_UNNECESSARY;
1007
1008		__skb_queue_tail(&rxq, skb);
1009
1010		np->rx.rsp_cons = ++i;
1011		work_done++;
1012	}
1013
1014	__skb_queue_purge(&errq);
1015
1016	work_done -= handle_incoming_queue(dev, &rxq);
1017
1018	/* If we get a callback with very few responses, reduce fill target. */
1019	/* NB. Note exponential increase, linear decrease. */
1020	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1021	     ((3*np->rx_target) / 4)) &&
1022	    (--np->rx_target < np->rx_min_target))
1023		np->rx_target = np->rx_min_target;
1024
1025	xennet_alloc_rx_buffers(dev);
1026
1027	if (work_done < budget) {
1028		int more_to_do = 0;
1029
1030		local_irq_save(flags);
1031
1032		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1033		if (!more_to_do)
1034			__napi_complete(napi);
1035
1036		local_irq_restore(flags);
1037	}
1038
1039	spin_unlock(&np->rx_lock);
1040
1041	return work_done;
1042}
1043
1044static int xennet_change_mtu(struct net_device *dev, int mtu)
1045{
1046	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
1047
1048	if (mtu > max)
1049		return -EINVAL;
1050	dev->mtu = mtu;
1051	return 0;
1052}
1053
1054static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1055						    struct rtnl_link_stats64 *tot)
1056{
1057	struct netfront_info *np = netdev_priv(dev);
1058	int cpu;
1059
1060	for_each_possible_cpu(cpu) {
1061		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
 
1062		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1063		unsigned int start;
1064
1065		do {
1066			start = u64_stats_fetch_begin_bh(&stats->syncp);
1067
1068			rx_packets = stats->rx_packets;
1069			tx_packets = stats->tx_packets;
1070			rx_bytes = stats->rx_bytes;
1071			tx_bytes = stats->tx_bytes;
1072		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
1073
1074		tot->rx_packets += rx_packets;
1075		tot->tx_packets += tx_packets;
1076		tot->rx_bytes   += rx_bytes;
1077		tot->tx_bytes   += tx_bytes;
1078	}
1079
1080	tot->rx_errors  = dev->stats.rx_errors;
1081	tot->tx_dropped = dev->stats.tx_dropped;
1082
1083	return tot;
1084}
1085
1086static void xennet_release_tx_bufs(struct netfront_info *np)
1087{
1088	struct sk_buff *skb;
1089	int i;
1090
1091	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1092		/* Skip over entries which are actually freelist references */
1093		if (skb_entry_is_link(&np->tx_skbs[i]))
1094			continue;
1095
1096		skb = np->tx_skbs[i].skb;
1097		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
1098					      GNTMAP_readonly);
1099		gnttab_release_grant_reference(&np->gref_tx_head,
1100					       np->grant_tx_ref[i]);
1101		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1102		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
 
1103		dev_kfree_skb_irq(skb);
1104	}
1105}
1106
1107static void xennet_release_rx_bufs(struct netfront_info *np)
1108{
1109	struct mmu_update      *mmu = np->rx_mmu;
1110	struct multicall_entry *mcl = np->rx_mcl;
1111	struct sk_buff_head free_list;
1112	struct sk_buff *skb;
1113	unsigned long mfn;
1114	int xfer = 0, noxfer = 0, unused = 0;
1115	int id, ref;
1116
1117	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
1118			 __func__);
1119	return;
1120
1121	skb_queue_head_init(&free_list);
1122
1123	spin_lock_bh(&np->rx_lock);
1124
1125	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1126		ref = np->grant_rx_ref[id];
1127		if (ref == GRANT_INVALID_REF) {
1128			unused++;
 
 
1129			continue;
1130		}
1131
1132		skb = np->rx_skbs[id];
1133		mfn = gnttab_end_foreign_transfer_ref(ref);
1134		gnttab_release_grant_reference(&np->gref_rx_head, ref);
1135		np->grant_rx_ref[id] = GRANT_INVALID_REF;
1136
1137		if (0 == mfn) {
1138			skb_shinfo(skb)->nr_frags = 0;
1139			dev_kfree_skb(skb);
1140			noxfer++;
1141			continue;
1142		}
1143
1144		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1145			/* Remap the page. */
1146			struct page *page = skb_shinfo(skb)->frags[0].page;
1147			unsigned long pfn = page_to_pfn(page);
1148			void *vaddr = page_address(page);
1149
1150			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
1151						mfn_pte(mfn, PAGE_KERNEL),
1152						0);
1153			mcl++;
1154			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
1155				| MMU_MACHPHYS_UPDATE;
1156			mmu->val = pfn;
1157			mmu++;
1158
1159			set_phys_to_machine(pfn, mfn);
1160		}
1161		__skb_queue_tail(&free_list, skb);
1162		xfer++;
1163	}
1164
1165	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
1166		 __func__, xfer, noxfer, unused);
1167
1168	if (xfer) {
1169		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1170			/* Do all the remapping work and M2P updates. */
1171			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
1172					 NULL, DOMID_SELF);
1173			mcl++;
1174			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
1175		}
1176	}
1177
1178	__skb_queue_purge(&free_list);
1179
1180	spin_unlock_bh(&np->rx_lock);
1181}
1182
1183static void xennet_uninit(struct net_device *dev)
 
1184{
1185	struct netfront_info *np = netdev_priv(dev);
1186	xennet_release_tx_bufs(np);
1187	xennet_release_rx_bufs(np);
1188	gnttab_free_grant_references(np->gref_tx_head);
1189	gnttab_free_grant_references(np->gref_rx_head);
 
1190}
1191
1192static u32 xennet_fix_features(struct net_device *dev, u32 features)
1193{
1194	struct netfront_info *np = netdev_priv(dev);
1195	int val;
1196
1197	if (features & NETIF_F_SG) {
1198		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1199				 "%d", &val) < 0)
1200			val = 0;
1201
1202		if (!val)
1203			features &= ~NETIF_F_SG;
1204	}
1205
1206	if (features & NETIF_F_TSO) {
1207		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1208				 "feature-gso-tcpv4", "%d", &val) < 0)
1209			val = 0;
1210
1211		if (!val)
1212			features &= ~NETIF_F_TSO;
1213	}
1214
1215	return features;
1216}
1217
1218static int xennet_set_features(struct net_device *dev, u32 features)
1219{
1220	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1221		netdev_info(dev, "Reducing MTU because no SG offload");
1222		dev->mtu = ETH_DATA_LEN;
1223	}
1224
1225	return 0;
1226}
 
1227
1228static const struct net_device_ops xennet_netdev_ops = {
1229	.ndo_open            = xennet_open,
1230	.ndo_uninit          = xennet_uninit,
1231	.ndo_stop            = xennet_close,
1232	.ndo_start_xmit      = xennet_start_xmit,
1233	.ndo_change_mtu	     = xennet_change_mtu,
1234	.ndo_get_stats64     = xennet_get_stats64,
1235	.ndo_set_mac_address = eth_mac_addr,
1236	.ndo_validate_addr   = eth_validate_addr,
1237	.ndo_fix_features    = xennet_fix_features,
1238	.ndo_set_features    = xennet_set_features,
1239};
1240
1241static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
1242{
1243	int i, err;
1244	struct net_device *netdev;
1245	struct netfront_info *np;
1246
1247	netdev = alloc_etherdev(sizeof(struct netfront_info));
1248	if (!netdev) {
1249		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
1250		       __func__);
1251		return ERR_PTR(-ENOMEM);
1252	}
1253
1254	np                   = netdev_priv(netdev);
1255	np->xbdev            = dev;
1256
1257	spin_lock_init(&np->tx_lock);
1258	spin_lock_init(&np->rx_lock);
1259
1260	skb_queue_head_init(&np->rx_batch);
1261	np->rx_target     = RX_DFL_MIN_TARGET;
1262	np->rx_min_target = RX_DFL_MIN_TARGET;
1263	np->rx_max_target = RX_MAX_TARGET;
1264
1265	init_timer(&np->rx_refill_timer);
1266	np->rx_refill_timer.data = (unsigned long)netdev;
1267	np->rx_refill_timer.function = rx_refill_timeout;
1268
1269	err = -ENOMEM;
1270	np->stats = alloc_percpu(struct netfront_stats);
1271	if (np->stats == NULL)
1272		goto exit;
1273
1274	/* Initialise tx_skbs as a free chain containing every entry. */
1275	np->tx_skb_freelist = 0;
1276	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1277		skb_entry_set_link(&np->tx_skbs[i], i+1);
1278		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1279	}
1280
1281	/* Clear out rx_skbs */
1282	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1283		np->rx_skbs[i] = NULL;
1284		np->grant_rx_ref[i] = GRANT_INVALID_REF;
1285	}
1286
1287	/* A grant for every tx ring slot */
1288	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1289					  &np->gref_tx_head) < 0) {
1290		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
1291		err = -ENOMEM;
1292		goto exit_free_stats;
1293	}
1294	/* A grant for every rx ring slot */
1295	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1296					  &np->gref_rx_head) < 0) {
1297		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
1298		err = -ENOMEM;
1299		goto exit_free_tx;
1300	}
1301
1302	netdev->netdev_ops	= &xennet_netdev_ops;
1303
1304	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1305	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1306				  NETIF_F_GSO_ROBUST;
1307	netdev->hw_features	= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
 
 
1308
1309	/*
1310         * Assume that all hw features are available for now. This set
1311         * will be adjusted by the call to netdev_update_features() in
1312         * xennet_connect() which is the earliest point where we can
1313         * negotiate with the backend regarding supported features.
1314         */
1315	netdev->features |= netdev->hw_features;
1316
1317	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
 
 
1318	SET_NETDEV_DEV(netdev, &dev->dev);
1319
1320	np->netdev = netdev;
1321
1322	netif_carrier_off(netdev);
1323
1324	return netdev;
1325
1326 exit_free_tx:
1327	gnttab_free_grant_references(np->gref_tx_head);
1328 exit_free_stats:
1329	free_percpu(np->stats);
1330 exit:
1331	free_netdev(netdev);
1332	return ERR_PTR(err);
1333}
1334
1335/**
1336 * Entry point to this code when a new device is created.  Allocate the basic
1337 * structures and the ring buffers for communication with the backend, and
1338 * inform the backend of the appropriate details for those.
1339 */
1340static int __devinit netfront_probe(struct xenbus_device *dev,
1341				    const struct xenbus_device_id *id)
1342{
1343	int err;
1344	struct net_device *netdev;
1345	struct netfront_info *info;
1346
1347	netdev = xennet_create_dev(dev);
1348	if (IS_ERR(netdev)) {
1349		err = PTR_ERR(netdev);
1350		xenbus_dev_fatal(dev, err, "creating netdev");
1351		return err;
1352	}
1353
1354	info = netdev_priv(netdev);
1355	dev_set_drvdata(&dev->dev, info);
1356
 
 
1357	err = register_netdev(info->netdev);
1358	if (err) {
1359		printk(KERN_WARNING "%s: register_netdev err=%d\n",
1360		       __func__, err);
1361		goto fail;
1362	}
1363
1364	err = xennet_sysfs_addif(info->netdev);
1365	if (err) {
1366		unregister_netdev(info->netdev);
1367		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
1368		       __func__, err);
1369		goto fail;
1370	}
1371
1372	return 0;
1373
1374 fail:
1375	free_netdev(netdev);
1376	dev_set_drvdata(&dev->dev, NULL);
1377	return err;
1378}
1379
1380static void xennet_end_access(int ref, void *page)
1381{
1382	/* This frees the page as a side-effect */
1383	if (ref != GRANT_INVALID_REF)
1384		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1385}
1386
1387static void xennet_disconnect_backend(struct netfront_info *info)
1388{
1389	/* Stop old i/f to prevent errors whilst we rebuild the state. */
1390	spin_lock_bh(&info->rx_lock);
1391	spin_lock_irq(&info->tx_lock);
1392	netif_carrier_off(info->netdev);
1393	spin_unlock_irq(&info->tx_lock);
1394	spin_unlock_bh(&info->rx_lock);
1395
1396	if (info->netdev->irq)
1397		unbind_from_irqhandler(info->netdev->irq, info->netdev);
1398	info->evtchn = info->netdev->irq = 0;
1399
1400	/* End access and free the pages */
1401	xennet_end_access(info->tx_ring_ref, info->tx.sring);
1402	xennet_end_access(info->rx_ring_ref, info->rx.sring);
1403
1404	info->tx_ring_ref = GRANT_INVALID_REF;
1405	info->rx_ring_ref = GRANT_INVALID_REF;
1406	info->tx.sring = NULL;
1407	info->rx.sring = NULL;
1408}
1409
1410/**
1411 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1412 * driver restart.  We tear down our netif structure and recreate it, but
1413 * leave the device-layer structures intact so that this is transparent to the
1414 * rest of the kernel.
1415 */
1416static int netfront_resume(struct xenbus_device *dev)
1417{
1418	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1419
1420	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1421
1422	xennet_disconnect_backend(info);
1423	return 0;
1424}
1425
1426static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1427{
1428	char *s, *e, *macstr;
1429	int i;
1430
1431	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1432	if (IS_ERR(macstr))
1433		return PTR_ERR(macstr);
1434
1435	for (i = 0; i < ETH_ALEN; i++) {
1436		mac[i] = simple_strtoul(s, &e, 16);
1437		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1438			kfree(macstr);
1439			return -ENOENT;
1440		}
1441		s = e+1;
1442	}
1443
1444	kfree(macstr);
1445	return 0;
1446}
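
/*
 * Illustrative example, not part of the original file: the "mac" node
 * parsed above is the colon-separated string the toolstack writes for
 * the vif, e.g. a guest configured with
 *
 *	vif = [ 'mac=00:16:3e:5a:32:da,bridge=xenbr0' ]
 *
 * ends up with something like device/vif/0/mac = "00:16:3e:5a:32:da"
 * in xenstore (00:16:3e being the OUI conventionally used for Xen
 * guests); the sample address itself is made up.
 */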
1447
1448static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1449{
1450	struct net_device *dev = dev_id;
1451	struct netfront_info *np = netdev_priv(dev);
1452	unsigned long flags;
1453
1454	spin_lock_irqsave(&np->tx_lock, flags);
 
 
1455
1456	if (likely(netif_carrier_ok(dev))) {
1457		xennet_tx_buf_gc(dev);
1458		/* Under tx_lock: protects access to rx shared-ring indexes. */
1459		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
1460			napi_schedule(&np->napi);
1461	}
 
1462
1463	spin_unlock_irqrestore(&np->tx_lock, flags);
1464
1465	return IRQ_HANDLED;
 
1466}
1467
1468static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
 
1469{
1470	struct xen_netif_tx_sring *txs;
1471	struct xen_netif_rx_sring *rxs;
 
1472	int err;
1473	struct net_device *netdev = info->netdev;
1474
1475	info->tx_ring_ref = GRANT_INVALID_REF;
1476	info->rx_ring_ref = GRANT_INVALID_REF;
1477	info->rx.sring = NULL;
1478	info->tx.sring = NULL;
1479	netdev->irq = 0;
1480
1481	err = xen_net_read_mac(dev, netdev->dev_addr);
1482	if (err) {
1483		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1484		goto fail;
1485	}
1486
1487	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1488	if (!txs) {
1489		err = -ENOMEM;
1490		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1491		goto fail;
1492	}
1493	SHARED_RING_INIT(txs);
1494	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1495
1496	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1497	if (err < 0) {
1498		free_page((unsigned long)txs);
1499		goto fail;
1500	}
1501
1502	info->tx_ring_ref = err;
1503	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1504	if (!rxs) {
1505		err = -ENOMEM;
1506		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1507		goto fail;
1508	}
1509	SHARED_RING_INIT(rxs);
1510	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1511
1512	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1513	if (err < 0) {
1514		free_page((unsigned long)rxs);
1515		goto fail;
1516	}
1517	info->rx_ring_ref = err;
1518
1519	err = xenbus_alloc_evtchn(dev, &info->evtchn);
1520	if (err)
1521		goto fail;
1522
1523	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
1524					0, netdev->name, netdev);
1525	if (err < 0)
1526		goto fail;
1527	netdev->irq = err;
1528	return 0;
1529
1530 fail:
1531	return err;
1532}
1533
1534/* Common code used when first setting up, and when resuming. */
1535static int talk_to_netback(struct xenbus_device *dev,
1536			   struct netfront_info *info)
1537{
1538	const char *message;
1539	struct xenbus_transaction xbt;
1540	int err;
1541
1542	/* Create shared ring, alloc event channel. */
1543	err = setup_netfront(dev, info);
1544	if (err)
1545		goto out;
1546
1547again:
1548	err = xenbus_transaction_start(&xbt);
1549	if (err) {
1550		xenbus_dev_fatal(dev, err, "starting transaction");
1551		goto destroy_ring;
1552	}
1553
1554	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
1555			    info->tx_ring_ref);
1556	if (err) {
1557		message = "writing tx ring-ref";
1558		goto abort_transaction;
1559	}
1560	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1561			    info->rx_ring_ref);
1562	if (err) {
1563		message = "writing rx ring-ref";
1564		goto abort_transaction;
1565	}
1566	err = xenbus_printf(xbt, dev->nodename,
1567			    "event-channel", "%u", info->evtchn);
1568	if (err) {
1569		message = "writing event-channel";
1570		goto abort_transaction;
1571	}
1572
 
1573	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1574			    1);
1575	if (err) {
1576		message = "writing request-rx-copy";
1577		goto abort_transaction;
1578	}
1579
1580	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1581	if (err) {
1582		message = "writing feature-rx-notify";
1583		goto abort_transaction;
1584	}
1585
1586	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1587	if (err) {
1588		message = "writing feature-sg";
1589		goto abort_transaction;
1590	}
1591
1592	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1593	if (err) {
1594		message = "writing feature-gso-tcpv4";
1595		goto abort_transaction;
1596	}
1597
1598	err = xenbus_transaction_end(xbt, 0);
1599	if (err) {
1600		if (err == -EAGAIN)
1601			goto again;
1602		xenbus_dev_fatal(dev, err, "completing transaction");
1603		goto destroy_ring;
1604	}
1605
1606	return 0;
1607
1608 abort_transaction:
1609	xenbus_transaction_end(xbt, 1);
1610	xenbus_dev_fatal(dev, err, "%s", message);
 
 
1611 destroy_ring:
1612	xennet_disconnect_backend(info);
 
1613 out:
 
 
1614	return err;
1615}
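
/*
 * Illustrative sketch, not part of the original source: after a
 * successful transaction the frontend's xenstore directory
 * (dev->nodename, e.g. device/vif/0) contains roughly
 *
 *	tx-ring-ref       = "<grant ref of the tx shared ring>"
 *	rx-ring-ref       = "<grant ref of the rx shared ring>"
 *	event-channel     = "<event channel port>"
 *	request-rx-copy   = "1"
 *	feature-rx-notify = "1"
 *	feature-sg        = "1"
 *	feature-gso-tcpv4 = "1"
 *
 * which is what the backend reads in order to map the rings and bind
 * its end of the event channel.
 */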
1616
1617static int xennet_connect(struct net_device *dev)
1618{
1619	struct netfront_info *np = netdev_priv(dev);
1620	int i, requeue_idx, err;
1621	struct sk_buff *skb;
1622	grant_ref_t ref;
1623	struct xen_netif_rx_request *req;
1624	unsigned int feature_rx_copy;
1625
1626	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1627			   "feature-rx-copy", "%u", &feature_rx_copy);
1628	if (err != 1)
1629		feature_rx_copy = 0;
1630
1631	if (!feature_rx_copy) {
1632		dev_info(&dev->dev,
1633			 "backend does not support copying receive path\n");
1634		return -ENODEV;
1635	}
1636
1637	err = talk_to_netback(np->xbdev, np);
1638	if (err)
1639		return err;
1640
1641	rtnl_lock();
1642	netdev_update_features(dev);
1643	rtnl_unlock();
1644
1645	spin_lock_bh(&np->rx_lock);
1646	spin_lock_irq(&np->tx_lock);
1647
1648	/* Step 1: Discard all pending TX packet fragments. */
1649	xennet_release_tx_bufs(np);
1650
1651	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1652	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1653		if (!np->rx_skbs[i])
1654			continue;
1655
1656		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1657		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1658		req = RING_GET_REQUEST(&np->rx, requeue_idx);
1659
1660		gnttab_grant_foreign_access_ref(
1661			ref, np->xbdev->otherend_id,
1662			pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
1663					       frags->page)),
1664			0);
1665		req->gref = ref;
1666		req->id   = requeue_idx;
1667
1668		requeue_idx++;
1669	}
1670
1671	np->rx.req_prod_pvt = requeue_idx;
1672
1673	/*
1674	 * Step 3: All public and private state should now be sane.  Get
1675	 * ready to start sending and receiving packets and give the driver
1676	 * domain a kick because we've probably just requeued some
1677	 * packets.
1678	 */
1679	netif_carrier_on(np->netdev);
1680	notify_remote_via_irq(np->netdev->irq);
1681	xennet_tx_buf_gc(dev);
1682	xennet_alloc_rx_buffers(dev);
1683
1684	spin_unlock_irq(&np->tx_lock);
1685	spin_unlock_bh(&np->rx_lock);
1686
1687	return 0;
1688}
1689
1690/**
1691 * Callback received when the backend's state changes.
1692 */
1693static void netback_changed(struct xenbus_device *dev,
1694			    enum xenbus_state backend_state)
1695{
1696	struct netfront_info *np = dev_get_drvdata(&dev->dev);
1697	struct net_device *netdev = np->netdev;
1698
1699	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
1700
1701	switch (backend_state) {
1702	case XenbusStateInitialising:
1703	case XenbusStateInitialised:
1704	case XenbusStateReconfiguring:
1705	case XenbusStateReconfigured:
1706	case XenbusStateConnected:
1707	case XenbusStateUnknown:
1708	case XenbusStateClosed:
1709		break;
1710
1711	case XenbusStateInitWait:
1712		if (dev->state != XenbusStateInitialising)
1713			break;
1714		if (xennet_connect(netdev) != 0)
1715			break;
1716		xenbus_switch_state(dev, XenbusStateConnected);
1717		netif_notify_peers(netdev);
1718		break;
1719
1720	case XenbusStateClosing:
1721		xenbus_frontend_closed(dev);
1722		break;
1723	}
1724}
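
/*
 * Illustrative note, not part of the original file: the usual handshake
 * observed through this callback is
 *
 *	backend -> XenbusStateInitWait    (frontend still Initialising)
 *	frontend: xennet_connect(), then
 *		  xenbus_switch_state(dev, XenbusStateConnected)
 *	backend -> XenbusStateConnected   (ignored above; carrier is
 *					   already on)
 *	backend -> XenbusStateClosing     (frontend answers with
 *					   xenbus_frontend_closed())
 */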
1725
1726static const struct xennet_stat {
1727	char name[ETH_GSTRING_LEN];
1728	u16 offset;
1729} xennet_stats[] = {
1730	{
1731		"rx_gso_checksum_fixup",
1732		offsetof(struct netfront_info, rx_gso_checksum_fixup)
1733	},
1734};
1735
1736static int xennet_get_sset_count(struct net_device *dev, int string_set)
1737{
1738	switch (string_set) {
1739	case ETH_SS_STATS:
1740		return ARRAY_SIZE(xennet_stats);
1741	default:
1742		return -EINVAL;
1743	}
1744}
1745
1746static void xennet_get_ethtool_stats(struct net_device *dev,
1747				     struct ethtool_stats *stats, u64 * data)
1748{
1749	void *np = netdev_priv(dev);
1750	int i;
1751
1752	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1753		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
1754}
1755
1756static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
1757{
1758	int i;
1759
1760	switch (stringset) {
1761	case ETH_SS_STATS:
1762		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1763			memcpy(data + i * ETH_GSTRING_LEN,
1764			       xennet_stats[i].name, ETH_GSTRING_LEN);
1765		break;
1766	}
1767}
1768
1769static const struct ethtool_ops xennet_ethtool_ops =
1770{
1771	.get_link = ethtool_op_get_link,
1772
1773	.get_sset_count = xennet_get_sset_count,
1774	.get_ethtool_stats = xennet_get_ethtool_stats,
1775	.get_strings = xennet_get_strings,
1776};
1777
1778#ifdef CONFIG_SYSFS
1779static ssize_t show_rxbuf_min(struct device *dev,
1780			      struct device_attribute *attr, char *buf)
1781{
1782	struct net_device *netdev = to_net_dev(dev);
1783	struct netfront_info *info = netdev_priv(netdev);
1784
1785	return sprintf(buf, "%u\n", info->rx_min_target);
1786}
1787
1788static ssize_t store_rxbuf_min(struct device *dev,
1789			       struct device_attribute *attr,
1790			       const char *buf, size_t len)
1791{
1792	struct net_device *netdev = to_net_dev(dev);
1793	struct netfront_info *np = netdev_priv(netdev);
1794	char *endp;
1795	unsigned long target;
1796
1797	if (!capable(CAP_NET_ADMIN))
1798		return -EPERM;
1799
1800	target = simple_strtoul(buf, &endp, 0);
1801	if (endp == buf)
1802		return -EBADMSG;
1803
1804	if (target < RX_MIN_TARGET)
1805		target = RX_MIN_TARGET;
1806	if (target > RX_MAX_TARGET)
1807		target = RX_MAX_TARGET;
1808
1809	spin_lock_bh(&np->rx_lock);
1810	if (target > np->rx_max_target)
1811		np->rx_max_target = target;
1812	np->rx_min_target = target;
1813	if (target > np->rx_target)
1814		np->rx_target = target;
1815
1816	xennet_alloc_rx_buffers(netdev);
1817
1818	spin_unlock_bh(&np->rx_lock);
1819	return len;
1820}
1821
1822static ssize_t show_rxbuf_max(struct device *dev,
1823			      struct device_attribute *attr, char *buf)
1824{
1825	struct net_device *netdev = to_net_dev(dev);
1826	struct netfront_info *info = netdev_priv(netdev);
1827
1828	return sprintf(buf, "%u\n", info->rx_max_target);
1829}
1830
1831static ssize_t store_rxbuf_max(struct device *dev,
1832			       struct device_attribute *attr,
1833			       const char *buf, size_t len)
1834{
1835	struct net_device *netdev = to_net_dev(dev);
1836	struct netfront_info *np = netdev_priv(netdev);
1837	char *endp;
1838	unsigned long target;
1839
1840	if (!capable(CAP_NET_ADMIN))
1841		return -EPERM;
1842
1843	target = simple_strtoul(buf, &endp, 0);
1844	if (endp == buf)
1845		return -EBADMSG;
1846
1847	if (target < RX_MIN_TARGET)
1848		target = RX_MIN_TARGET;
1849	if (target > RX_MAX_TARGET)
1850		target = RX_MAX_TARGET;
1851
1852	spin_lock_bh(&np->rx_lock);
1853	if (target < np->rx_min_target)
1854		np->rx_min_target = target;
1855	np->rx_max_target = target;
1856	if (target < np->rx_target)
1857		np->rx_target = target;
1858
1859	xennet_alloc_rx_buffers(netdev);
1860
1861	spin_unlock_bh(&np->rx_lock);
1862	return len;
1863}
1864
1865static ssize_t show_rxbuf_cur(struct device *dev,
1866			      struct device_attribute *attr, char *buf)
1867{
1868	struct net_device *netdev = to_net_dev(dev);
1869	struct netfront_info *info = netdev_priv(netdev);
1870
1871	return sprintf(buf, "%u\n", info->rx_target);
1872}
1873
1874static struct device_attribute xennet_attrs[] = {
1875	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
1876	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
1877	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
1878};
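
/*
 * Illustrative usage, not part of the original file: the attributes
 * above are created on the netdevice's device node, so for a guest
 * interface named eth0 the receive-buffer targets can be read and
 * (with CAP_NET_ADMIN) tuned from the shell, e.g.
 *
 *	cat /sys/class/net/eth0/rxbuf_cur
 *	echo 128 > /sys/class/net/eth0/rxbuf_min
 *	echo 256 > /sys/class/net/eth0/rxbuf_max
 *
 * Values are clamped to the [RX_MIN_TARGET, RX_MAX_TARGET] range by the
 * store functions above.
 */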
1879
1880static int xennet_sysfs_addif(struct net_device *netdev)
1881{
1882	int i;
1883	int err;
1884
1885	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
1886		err = device_create_file(&netdev->dev,
1887					   &xennet_attrs[i]);
1888		if (err)
1889			goto fail;
1890	}
1891	return 0;
1892
1893 fail:
1894	while (--i >= 0)
1895		device_remove_file(&netdev->dev, &xennet_attrs[i]);
1896	return err;
1897}
1898
1899static void xennet_sysfs_delif(struct net_device *netdev)
1900{
1901	int i;
1902
1903	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
1904		device_remove_file(&netdev->dev, &xennet_attrs[i]);
1905}
1906
1907#endif /* CONFIG_SYSFS */
1908
1909static struct xenbus_device_id netfront_ids[] = {
1910	{ "vif" },
1911	{ "" }
1912};
 
1913
1914
1915static int __devexit xennet_remove(struct xenbus_device *dev)
1916{
1917	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1918
1919	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1920
1921	unregister_netdev(info->netdev);
1922
1923	xennet_disconnect_backend(info);
1924
1925	del_timer_sync(&info->rx_refill_timer);
1926
1927	xennet_sysfs_delif(info->netdev);
1928
1929	free_percpu(info->stats);
1930
1931	free_netdev(info->netdev);
 
 
1932
1933	return 0;
1934}
1935
 
 
 
 
 
1936static struct xenbus_driver netfront_driver = {
1937	.name = "vif",
1938	.owner = THIS_MODULE,
1939	.ids = netfront_ids,
1940	.probe = netfront_probe,
1941	.remove = __devexit_p(xennet_remove),
1942	.resume = netfront_resume,
1943	.otherend_changed = netback_changed,
1944};
1945
1946static int __init netif_init(void)
1947{
1948	if (!xen_domain())
1949		return -ENODEV;
1950
1951	if (xen_initial_domain())
1952		return 0;
1953
1954	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
 
 
 
 
 
 
1955
1956	return xenbus_register_frontend(&netfront_driver);
1957}
1958module_init(netif_init);
1959
1960
1961static void __exit netif_exit(void)
1962{
1963	if (xen_initial_domain())
1964		return;
1965
1966	xenbus_unregister_driver(&netfront_driver);
1967}
1968module_exit(netif_exit);
1969
1970MODULE_DESCRIPTION("Xen virtual network device frontend");
1971MODULE_LICENSE("GPL");
1972MODULE_ALIAS("xen:vif");
1973MODULE_ALIAS("xennet");
v4.10.11
   1/*
   2 * Virtual network driver for conversing with remote driver backends.
   3 *
   4 * Copyright (c) 2002-2005, K A Fraser
   5 * Copyright (c) 2005, XenSource Ltd
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version 2
   9 * as published by the Free Software Foundation; or, when distributed
  10 * separately from the Linux kernel or incorporated into other
  11 * software packages, subject to the following license:
  12 *
  13 * Permission is hereby granted, free of charge, to any person obtaining a copy
  14 * of this source file (the "Software"), to deal in the Software without
  15 * restriction, including without limitation the rights to use, copy, modify,
  16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  17 * and to permit persons to whom the Software is furnished to do so, subject to
  18 * the following conditions:
  19 *
  20 * The above copyright notice and this permission notice shall be included in
  21 * all copies or substantial portions of the Software.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  29 * IN THE SOFTWARE.
  30 */
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47
  48#include <xen/xen.h>
  49#include <xen/xenbus.h>
  50#include <xen/events.h>
  51#include <xen/page.h>
  52#include <xen/platform_pci.h>
  53#include <xen/grant_table.h>
  54
  55#include <xen/interface/io/netif.h>
  56#include <xen/interface/memory.h>
  57#include <xen/interface/grant_table.h>
  58
  59/* Module parameters */
  60static unsigned int xennet_max_queues;
  61module_param_named(max_queues, xennet_max_queues, uint, 0644);
  62MODULE_PARM_DESC(max_queues,
  63		 "Maximum number of queues per virtual interface");
  64
  65static const struct ethtool_ops xennet_ethtool_ops;
  66
  67struct netfront_cb {
  68	int pull_to;
 
  69};
  70
  71#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  72
  73#define RX_COPY_THRESHOLD 256
  74
  75#define GRANT_INVALID_REF	0
  76
  77#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
  78#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
  79
  80/* Minimum number of Rx slots (includes slot for GSO metadata). */
  81#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
  82
  83/* Queue name is interface name with "-qNNN" appended */
  84#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
  85
  86/* IRQ name is queue name with "-tx" or "-rx" appended */
  87#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
  88
  89struct netfront_stats {
  90	u64			packets;
  91	u64			bytes;
 
 
  92	struct u64_stats_sync	syncp;
  93};
  94
  95struct netfront_info;
  96
  97struct netfront_queue {
  98	unsigned int id; /* Queue ID, 0-based */
  99	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
 100	struct netfront_info *info;
 101
 102	struct napi_struct napi;
 103
 104	/* Split event channels support, tx_* == rx_* when using
 105	 * single event channel.
 106	 */
 107	unsigned int tx_evtchn, rx_evtchn;
 108	unsigned int tx_irq, rx_irq;
 109	/* Only used when split event channels support is enabled */
 110	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 111	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 112
 113	spinlock_t   tx_lock;
 114	struct xen_netif_tx_front_ring tx;
 115	int tx_ring_ref;
 116
 117	/*
 118	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 119	 * are linked from tx_skb_freelist through skb_entry.link.
 120	 *
 121	 *  NB. Freelist index entries are always going to be less than
 122	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
 123	 *  greater than PAGE_OFFSET: we use this property to distinguish
 124	 *  them.
 125	 */
 126	union skb_entry {
 127		struct sk_buff *skb;
 128		unsigned long link;
 129	} tx_skbs[NET_TX_RING_SIZE];
 130	grant_ref_t gref_tx_head;
 131	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 132	struct page *grant_tx_page[NET_TX_RING_SIZE];
 133	unsigned tx_skb_freelist;
 134
 135	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 136	struct xen_netif_rx_front_ring rx;
 137	int rx_ring_ref;
 138
 
 
 
 
 
 
 
 139	struct timer_list rx_refill_timer;
 140
 141	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 142	grant_ref_t gref_rx_head;
 143	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 144};
 145
 146struct netfront_info {
 147	struct list_head list;
 148	struct net_device *netdev;
 149
 150	struct xenbus_device *xbdev;
 151
 152	/* Multi-queue support */
 153	struct netfront_queue *queues;
 154
 155	/* Statistics */
 156	struct netfront_stats __percpu *rx_stats;
 157	struct netfront_stats __percpu *tx_stats;
 158
 159	atomic_t rx_gso_checksum_fixup;
 160};
 161
 162struct netfront_rx_info {
 163	struct xen_netif_rx_response rx;
 164	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 165};
 166
 167static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 168{
 169	list->link = id;
 170}
 171
 172static int skb_entry_is_link(const union skb_entry *list)
 173{
 174	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
 175	return (unsigned long)list->skb < PAGE_OFFSET;
 176}
 177
 178/*
  179 * Helpers for acquiring/freeing slots in tx_skbs[].
 180 */
 181
 182static void add_id_to_freelist(unsigned *head, union skb_entry *list,
 183			       unsigned short id)
 184{
 185	skb_entry_set_link(&list[id], *head);
 186	*head = id;
 187}
 188
 189static unsigned short get_id_from_freelist(unsigned *head,
 190					   union skb_entry *list)
 191{
 192	unsigned int id = *head;
 193	*head = list[id].link;
 194	return id;
 195}
 196
 197static int xennet_rxidx(RING_IDX idx)
 198{
 199	return idx & (NET_RX_RING_SIZE - 1);
 200}
 201
 202static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
 203					 RING_IDX ri)
 204{
 205	int i = xennet_rxidx(ri);
 206	struct sk_buff *skb = queue->rx_skbs[i];
 207	queue->rx_skbs[i] = NULL;
 208	return skb;
 209}
 210
 211static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 212					    RING_IDX ri)
 213{
 214	int i = xennet_rxidx(ri);
 215	grant_ref_t ref = queue->grant_rx_ref[i];
 216	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
 217	return ref;
 218}
 219
 220#ifdef CONFIG_SYSFS
 221static const struct attribute_group xennet_dev_group;
 
 
 
 
 222#endif
 223
 224static bool xennet_can_sg(struct net_device *dev)
 225{
 226	return dev->features & NETIF_F_SG;
 227}
 228
 229
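/*
 * rx_refill_timer handler: a previous attempt to refill the rx ring ran out
 * of memory or free slots, so kick NAPI to retry xennet_alloc_rx_buffers()
 * from the poll loop.
 */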
 230static void rx_refill_timeout(unsigned long data)
 231{
 232	struct netfront_queue *queue = (struct netfront_queue *)data;
 233	napi_schedule(&queue->napi);
 
 234}
 235
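/*
 * Check that the tx ring keeps headroom for roughly one more worst-case
 * packet: MAX_SKB_FRAGS frags plus a couple of slots for the linear area
 * and an optional extra-info request.
 */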
 236static int netfront_tx_slot_available(struct netfront_queue *queue)
 237{
 238	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
 239		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
 240}
 241
 242static void xennet_maybe_wake_tx(struct netfront_queue *queue)
 243{
 244	struct net_device *dev = queue->info->netdev;
 245	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
 246
 247	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
 248	    netfront_tx_slot_available(queue) &&
 249	    likely(netif_running(dev)))
 250		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 251}
 252
 253
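/*
 * Allocate one rx buffer: a small skb for the linear area plus a full page
 * attached as frag 0.  The page is what gets granted to the backend; header
 * data is pulled back into the linear area later, in the receive path.
 */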
 254static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 255{
 
 
 256	struct sk_buff *skb;
 257	struct page *page;
 
 
 
 
 
 
 258
 259	skb = __netdev_alloc_skb(queue->info->netdev,
 260				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
 261				 GFP_ATOMIC | __GFP_NOWARN);
 262	if (unlikely(!skb))
 263		return NULL;
 264
 265	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 266	if (!page) {
 267		kfree_skb(skb);
 268		return NULL;
 269	}
 270	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 271
  272	/* Align the IP header to a 16-byte boundary */
 273	skb_reserve(skb, NET_IP_ALIGN);
 274	skb->dev = queue->info->netdev;
 275
 276	return skb;
 277}
 278
 
 
 279
 280static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 281{
 282	RING_IDX req_prod = queue->rx.req_prod_pvt;
 283	int notify;
 284	int err = 0;
 285
 286	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 
 
 
 287		return;
 
 288
 289	for (req_prod = queue->rx.req_prod_pvt;
 290	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
 291	     req_prod++) {
 292		struct sk_buff *skb;
 293		unsigned short id;
 294		grant_ref_t ref;
 295		struct page *page;
 296		struct xen_netif_rx_request *req;
 297
 298		skb = xennet_alloc_one_rx_buffer(queue);
 299		if (!skb) {
 300			err = -ENOMEM;
 301			break;
 302		}
 303
 304		id = xennet_rxidx(req_prod);
 
 305
 306		BUG_ON(queue->rx_skbs[id]);
 307		queue->rx_skbs[id] = skb;
 
 
 
 308
 309		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
 310		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 311		queue->grant_rx_ref[id] = ref;
 312
 313		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 314
 315		req = RING_GET_REQUEST(&queue->rx, req_prod);
 316		gnttab_page_grant_foreign_access_ref_one(ref,
 317							 queue->info->xbdev->otherend_id,
 318							 page,
 319							 0);
 320		req->id = id;
 321		req->gref = ref;
 322	}
 323
 324	queue->rx.req_prod_pvt = req_prod;
 325
 326	/* Try again later if there are not enough requests or skb allocation
 327	 * failed.
 328	 * Enough requests is quantified as the sum of newly created slots and
 329	 * the unconsumed slots at the backend.
 330	 */
 331	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
 332	    unlikely(err)) {
 333		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 334		return;
 335	}
 336
  337	wmb();		/* barrier so backend sees requests */
 338
 339	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 
 
 
 340	if (notify)
 341		notify_remote_via_irq(queue->rx_irq);
 342}
 343
 344static int xennet_open(struct net_device *dev)
 345{
 346	struct netfront_info *np = netdev_priv(dev);
 347	unsigned int num_queues = dev->real_num_tx_queues;
 348	unsigned int i = 0;
 349	struct netfront_queue *queue = NULL;
 350
 351	for (i = 0; i < num_queues; ++i) {
 352		queue = &np->queues[i];
 353		napi_enable(&queue->napi);
 354
 355		spin_lock_bh(&queue->rx_lock);
 356		if (netif_carrier_ok(dev)) {
 357			xennet_alloc_rx_buffers(queue);
 358			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
 359			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
 360				napi_schedule(&queue->napi);
 361		}
 362		spin_unlock_bh(&queue->rx_lock);
 363	}
 
 364
 365	netif_tx_start_all_queues(dev);
 366
 367	return 0;
 368}
 369
 370static void xennet_tx_buf_gc(struct netfront_queue *queue)
 371{
 372	RING_IDX cons, prod;
 373	unsigned short id;
 
 374	struct sk_buff *skb;
 375	bool more_to_do;
 376
 377	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 378
 379	do {
 380		prod = queue->tx.sring->rsp_prod;
 381		rmb(); /* Ensure we see responses up to 'rp'. */
 382
 383		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 384			struct xen_netif_tx_response *txrsp;
 385
 386			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
 387			if (txrsp->status == XEN_NETIF_RSP_NULL)
 388				continue;
 389
 390			id  = txrsp->id;
 391			skb = queue->tx_skbs[id].skb;
 392			if (unlikely(gnttab_query_foreign_access(
 393				queue->grant_tx_ref[id]) != 0)) {
 394				pr_alert("%s: warning -- grant still in use by backend domain\n",
 395					 __func__);
 
 396				BUG();
 397			}
 398			gnttab_end_foreign_access_ref(
 399				queue->grant_tx_ref[id], GNTMAP_readonly);
 400			gnttab_release_grant_reference(
 401				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 402			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
 403			queue->grant_tx_page[id] = NULL;
 404			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
 405			dev_kfree_skb_irq(skb);
 406		}
 407
 408		queue->tx.rsp_cons = prod;
 409
 410		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
 411	} while (more_to_do);
 412
 413	xennet_maybe_wake_tx(queue);
 414}
 415
 416struct xennet_gnttab_make_txreq {
 417	struct netfront_queue *queue;
 418	struct sk_buff *skb;
 419	struct page *page;
 420	struct xen_netif_tx_request *tx; /* Last request */
 421	unsigned int size;
 422};
 423
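/*
 * Fill in one tx request slot: claim a free tx_skbs entry and a grant
 * reference, grant the backend read-only access to the frame holding this
 * chunk, and record enough state to tear it all down on completion.
 */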
 424static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 425				  unsigned int len, void *data)
 426{
 427	struct xennet_gnttab_make_txreq *info = data;
 
 
 
 
 
 
 428	unsigned int id;
 429	struct xen_netif_tx_request *tx;
 430	grant_ref_t ref;
 431	/* convenient aliases */
 432	struct page *page = info->page;
 433	struct netfront_queue *queue = info->queue;
 434	struct sk_buff *skb = info->skb;
 435
 436	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
 437	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 438	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 439	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 440
 441	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 442					gfn, GNTMAP_readonly);
 443
 444	queue->tx_skbs[id].skb = skb;
 445	queue->grant_tx_page[id] = page;
 446	queue->grant_tx_ref[id] = ref;
 447
 448	tx->id = id;
 449	tx->gref = ref;
 450	tx->offset = offset;
 451	tx->size = len;
 452	tx->flags = 0;
 453
 454	info->tx = tx;
 455	info->size += tx->size;
 456}
 457
 458static struct xen_netif_tx_request *xennet_make_first_txreq(
 459	struct netfront_queue *queue, struct sk_buff *skb,
 460	struct page *page, unsigned int offset, unsigned int len)
 461{
 462	struct xennet_gnttab_make_txreq info = {
 463		.queue = queue,
 464		.skb = skb,
 465		.page = page,
 466		.size = 0,
 467	};
 468
 469	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
 470
 471	return info.tx;
 472}
 473
 474static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 475				  unsigned int len, void *data)
 476{
 477	struct xennet_gnttab_make_txreq *info = data;
 478
 479	info->tx->flags |= XEN_NETTXF_more_data;
 480	skb_get(info->skb);
 481	xennet_tx_setup_grant(gfn, offset, len, data);
 482}
 483
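/*
 * Emit follow-on tx requests for an arbitrary (page, offset, len) range,
 * splitting it into one request per grant-sized chunk.
 */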
 484static struct xen_netif_tx_request *xennet_make_txreqs(
 485	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
 486	struct sk_buff *skb, struct page *page,
 487	unsigned int offset, unsigned int len)
 488{
 489	struct xennet_gnttab_make_txreq info = {
 490		.queue = queue,
 491		.skb = skb,
 492		.tx = tx,
 493	};
 494
 495	/* Skip unused frames from start of page */
 496	page += offset >> PAGE_SHIFT;
 497	offset &= ~PAGE_MASK;
 498
 499	while (len) {
 500		info.page = page;
 501		info.size = 0;
 502
 503		gnttab_foreach_grant_in_range(page, offset, len,
 504					      xennet_make_one_txreq,
 505					      &info);
 506
 507		page++;
 508		offset = 0;
 509		len -= info.size;
 
 
 
 
 510	}
 511
 512	return info.tx;
 513}
 514
 515/*
 516 * Count how many ring slots are required to send this skb. Each frag
 517 * might be a compound page.
 518 */
 519static int xennet_count_skb_slots(struct sk_buff *skb)
 520{
 521	int i, frags = skb_shinfo(skb)->nr_frags;
 522	int slots;
 523
 524	slots = gnttab_count_grant(offset_in_page(skb->data),
 525				   skb_headlen(skb));
 526
 527	for (i = 0; i < frags; i++) {
 528		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 529		unsigned long size = skb_frag_size(frag);
 530		unsigned long offset = frag->page_offset;
 531
 532		/* Skip unused frames from start of page */
 533		offset &= ~PAGE_MASK;
 534
 535		slots += gnttab_count_grant(offset, size);
 536	}
 537
 538	return slots;
 539}
 540
 541static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 542			       void *accel_priv, select_queue_fallback_t fallback)
 543{
 544	unsigned int num_queues = dev->real_num_tx_queues;
 545	u32 hash;
 546	u16 queue_idx;
 547
 548	/* First, check if there is only one queue */
 549	if (num_queues == 1) {
 550		queue_idx = 0;
 551	} else {
 552		hash = skb_get_hash(skb);
 553		queue_idx = hash % num_queues;
 554	}
 555
 556	return queue_idx;
 557}
 558
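/*
 * Upper bound on the number of grant-sized slots a maximal (64KiB) GSO
 * frame can span, plus one in case the data does not start on a page
 * boundary.
 */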
 559#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 560
 561static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 562{
 
 563	struct netfront_info *np = netdev_priv(dev);
 564	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 565	struct xen_netif_tx_request *tx, *first_tx;
 566	unsigned int i;
 
 
 
 
 567	int notify;
 568	int slots;
 569	struct page *page;
 570	unsigned int offset;
 571	unsigned int len;
 572	unsigned long flags;
 573	struct netfront_queue *queue = NULL;
 574	unsigned int num_queues = dev->real_num_tx_queues;
 575	u16 queue_index;
 576	struct sk_buff *nskb;
 577
 578	/* Drop the packet if no queues are set up */
 579	if (num_queues < 1)
 580		goto drop;
 581	/* Determine which queue to transmit this SKB on */
 582	queue_index = skb_get_queue_mapping(skb);
 583	queue = &np->queues[queue_index];
 584
 585	/* If skb->len is too big for wire format, drop skb and alert
 586	 * user about misconfiguration.
 587	 */
 588	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 589		net_alert_ratelimited(
 590			"xennet: skb->len = %u, too big for wire format\n",
 591			skb->len);
 592		goto drop;
 593	}
 594
 595	slots = xennet_count_skb_slots(skb);
 596	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
 597		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 598				    slots, skb->len);
 599		if (skb_linearize(skb))
 600			goto drop;
 601	}
 602
 603	page = virt_to_page(skb->data);
 604	offset = offset_in_page(skb->data);
 605
 606	/* The first req should be at least ETH_HLEN size or the packet will be
 607	 * dropped by netback.
 608	 */
 609	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
 610		nskb = skb_copy(skb, GFP_ATOMIC);
 611		if (!nskb)
 612			goto drop;
 613		dev_kfree_skb_any(skb);
 614		skb = nskb;
 615		page = virt_to_page(skb->data);
 616		offset = offset_in_page(skb->data);
 617	}
 618
 619	len = skb_headlen(skb);
 620
 621	spin_lock_irqsave(&queue->tx_lock, flags);
 
 622
 623	if (unlikely(!netif_carrier_ok(dev) ||
 624		     (slots > 1 && !xennet_can_sg(dev)) ||
 625		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 626		spin_unlock_irqrestore(&queue->tx_lock, flags);
 627		goto drop;
 628	}
 629
 630	/* First request for the linear area. */
 631	first_tx = tx = xennet_make_first_txreq(queue, skb,
 632						page, offset, len);
 633	offset += tx->size;
 634	if (offset == PAGE_SIZE) {
 635		page++;
 636		offset = 0;
 637	}
 638	len -= tx->size;
 
 639
 
 640	if (skb->ip_summed == CHECKSUM_PARTIAL)
 641		/* local packet? */
 642		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
 643	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 644		/* remote but checksummed. */
 645		tx->flags |= XEN_NETTXF_data_validated;
 646
 647	/* Optional extra info after the first request. */
 648	if (skb_shinfo(skb)->gso_size) {
 649		struct xen_netif_extra_info *gso;
 650
 651		gso = (struct xen_netif_extra_info *)
 652			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 653
 654		tx->flags |= XEN_NETTXF_extra_info;
 
 
 
 655
 656		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 657		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 658			XEN_NETIF_GSO_TYPE_TCPV6 :
 659			XEN_NETIF_GSO_TYPE_TCPV4;
 660		gso->u.gso.pad = 0;
 661		gso->u.gso.features = 0;
 662
 663		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 664		gso->flags = 0;
 
 665	}
 666
 667	/* Requests for the rest of the linear area. */
 668	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
 669
 670	/* Requests for all the frags. */
 671	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 672		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 673		tx = xennet_make_txreqs(queue, tx, skb,
 674					skb_frag_page(frag), frag->page_offset,
 675					skb_frag_size(frag));
 676	}
 677
 678	/* First request has the packet length. */
 679	first_tx->size = skb->len;
 680
 681	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 682	if (notify)
 683		notify_remote_via_irq(queue->tx_irq);
 684
 685	u64_stats_update_begin(&tx_stats->syncp);
 686	tx_stats->bytes += skb->len;
 687	tx_stats->packets++;
 688	u64_stats_update_end(&tx_stats->syncp);
 689
 690	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 691	xennet_tx_buf_gc(queue);
 692
 693	if (!netfront_tx_slot_available(queue))
 694		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
 695
 696	spin_unlock_irqrestore(&queue->tx_lock, flags);
 697
 698	return NETDEV_TX_OK;
 699
 700 drop:
 701	dev->stats.tx_dropped++;
 702	dev_kfree_skb_any(skb);
 703	return NETDEV_TX_OK;
 704}
 705
 706static int xennet_close(struct net_device *dev)
 707{
 708	struct netfront_info *np = netdev_priv(dev);
 709	unsigned int num_queues = dev->real_num_tx_queues;
 710	unsigned int i;
 711	struct netfront_queue *queue;
 712	netif_tx_stop_all_queues(np->netdev);
 713	for (i = 0; i < num_queues; ++i) {
 714		queue = &np->queues[i];
 715		napi_disable(&queue->napi);
 716	}
 717	return 0;
 718}
 719
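/*
 * Re-post an unconsumed rx buffer (skb plus grant) as a fresh request so
 * the backend can reuse the slot.
 */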
 720static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 721				grant_ref_t ref)
 722{
 723	int new = xennet_rxidx(queue->rx.req_prod_pvt);
 724
 725	BUG_ON(queue->rx_skbs[new]);
 726	queue->rx_skbs[new] = skb;
 727	queue->grant_rx_ref[new] = ref;
 728	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
 729	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
 730	queue->rx.req_prod_pvt++;
 731}
 732
 733static int xennet_get_extras(struct netfront_queue *queue,
 734			     struct xen_netif_extra_info *extras,
 735			     RING_IDX rp)
 737{
 738	struct xen_netif_extra_info *extra;
 739	struct device *dev = &queue->info->netdev->dev;
 740	RING_IDX cons = queue->rx.rsp_cons;
 741	int err = 0;
 742
 743	do {
 744		struct sk_buff *skb;
 745		grant_ref_t ref;
 746
 747		if (unlikely(cons + 1 == rp)) {
 748			if (net_ratelimit())
 749				dev_warn(dev, "Missing extra info\n");
 750			err = -EBADR;
 751			break;
 752		}
 753
 754		extra = (struct xen_netif_extra_info *)
 755			RING_GET_RESPONSE(&queue->rx, ++cons);
 756
 757		if (unlikely(!extra->type ||
 758			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 759			if (net_ratelimit())
 760				dev_warn(dev, "Invalid extra type: %d\n",
 761					extra->type);
 762			err = -EINVAL;
 763		} else {
 764			memcpy(&extras[extra->type - 1], extra,
 765			       sizeof(*extra));
 766		}
 767
 768		skb = xennet_get_rx_skb(queue, cons);
 769		ref = xennet_get_rx_ref(queue, cons);
 770		xennet_move_rx_slot(queue, skb, ref);
 771	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 772
 773	queue->rx.rsp_cons = cons;
 774	return err;
 775}
 776
 777static int xennet_get_responses(struct netfront_queue *queue,
 778				struct netfront_rx_info *rinfo, RING_IDX rp,
 779				struct sk_buff_head *list)
 780{
 781	struct xen_netif_rx_response *rx = &rinfo->rx;
 782	struct xen_netif_extra_info *extras = rinfo->extras;
 783	struct device *dev = &queue->info->netdev->dev;
 784	RING_IDX cons = queue->rx.rsp_cons;
 785	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
 786	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
 787	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
 788	int slots = 1;
 789	int err = 0;
 790	unsigned long ret;
 791
 792	if (rx->flags & XEN_NETRXF_extra_info) {
 793		err = xennet_get_extras(queue, extras, rp);
 794		cons = queue->rx.rsp_cons;
 795	}
 796
 797	for (;;) {
 798		if (unlikely(rx->status < 0 ||
 799			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
 800			if (net_ratelimit())
 801				dev_warn(dev, "rx->offset: %u, size: %d\n",
 802					 rx->offset, rx->status);
 803			xennet_move_rx_slot(queue, skb, ref);
 804			err = -EINVAL;
 805			goto next;
 806		}
 807
 808		/*
 809		 * This definitely indicates a bug, either in this driver or in
 810		 * the backend driver. In future this should flag the bad
 811		 * situation to the system controller to reboot the backend.
 812		 */
 813		if (ref == GRANT_INVALID_REF) {
 814			if (net_ratelimit())
 815				dev_warn(dev, "Bad rx response id %d.\n",
 816					 rx->id);
 817			err = -EINVAL;
 818			goto next;
 819		}
 820
 821		ret = gnttab_end_foreign_access_ref(ref, 0);
 822		BUG_ON(!ret);
 823
 824		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 825
 826		__skb_queue_tail(list, skb);
 827
 828next:
 829		if (!(rx->flags & XEN_NETRXF_more_data))
 830			break;
 831
 832		if (cons + slots == rp) {
 833			if (net_ratelimit())
 834				dev_warn(dev, "Need more slots\n");
 835			err = -ENOENT;
 836			break;
 837		}
 838
 839		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
 840		skb = xennet_get_rx_skb(queue, cons + slots);
 841		ref = xennet_get_rx_ref(queue, cons + slots);
 842		slots++;
 843	}
 844
 845	if (unlikely(slots > max)) {
 846		if (net_ratelimit())
 847			dev_warn(dev, "Too many slots\n");
 848		err = -E2BIG;
 849	}
 850
 851	if (unlikely(err))
 852		queue->rx.rsp_cons = cons + slots;
 853
 854	return err;
 855}
 856
 857static int xennet_set_skb_gso(struct sk_buff *skb,
 858			      struct xen_netif_extra_info *gso)
 859{
 860	if (!gso->u.gso.size) {
 861		if (net_ratelimit())
 862			pr_warn("GSO size must not be zero\n");
 863		return -EINVAL;
 864	}
 865
 866	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
 867	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
 868		if (net_ratelimit())
 869			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
 870		return -EINVAL;
 871	}
 872
 873	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 874	skb_shinfo(skb)->gso_type =
 875		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
 876		SKB_GSO_TCPV4 :
 877		SKB_GSO_TCPV6;
 878
 879	/* Header must be checked, and gso_segs computed. */
 880	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
 881	skb_shinfo(skb)->gso_segs = 0;
 882
 883	return 0;
 884}
 885
 886static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 887				  struct sk_buff *skb,
 888				  struct sk_buff_head *list)
 889{
 890	struct skb_shared_info *shinfo = skb_shinfo(skb);
 891	RING_IDX cons = queue->rx.rsp_cons;
 
 
 892	struct sk_buff *nskb;
 893
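	/* Each skb on the list carries one response's worth of data in its
	 * first frag; steal that page into the head skb's frag array.
	 */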
 894	while ((nskb = __skb_dequeue(list))) {
 895		struct xen_netif_rx_response *rx =
 896			RING_GET_RESPONSE(&queue->rx, ++cons);
 897		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 898
 899		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
 900			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 901
 902			BUG_ON(pull_to <= skb_headlen(skb));
 903			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 904		}
 905		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
 906
 907		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
 908				rx->offset, rx->status, PAGE_SIZE);
 909
 910		skb_shinfo(nskb)->nr_frags = 0;
 911		kfree_skb(nskb);
 
 
 
 912	}
 913
 
 914	return cons;
 915}
 916
 917static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 918{
 919	bool recalculate_partial_csum = false;
 
 
 
 920
 921	/*
 922	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 923	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 924	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 925	 * recalculate the partial checksum.
 926	 */
 927	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 928		struct netfront_info *np = netdev_priv(dev);
 929		atomic_inc(&np->rx_gso_checksum_fixup);
 930		skb->ip_summed = CHECKSUM_PARTIAL;
 931		recalculate_partial_csum = true;
 932	}
 933
 934	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 935	if (skb->ip_summed != CHECKSUM_PARTIAL)
 936		return 0;
 937
 938	return skb_checksum_setup(skb, recalculate_partial_csum);
 939}
 940
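/*
 * Push a batch of completed rx skbs up the stack, fixing up checksum state
 * first; returns the number of packets that had to be dropped.
 */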
 941static int handle_incoming_queue(struct netfront_queue *queue,
 942				 struct sk_buff_head *rxq)
 943{
 944	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
 
 945	int packets_dropped = 0;
 946	struct sk_buff *skb;
 947
 948	while ((skb = __skb_dequeue(rxq)) != NULL) {
 949		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
 
 
 
 
 950
 951		if (pull_to > skb_headlen(skb))
 952			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 953
 954		/* Ethernet work: Delayed to here as it peeks the header. */
 955		skb->protocol = eth_type_trans(skb, queue->info->netdev);
 956		skb_reset_network_header(skb);
 957
 958		if (checksum_setup(queue->info->netdev, skb)) {
 959			kfree_skb(skb);
 960			packets_dropped++;
 961			queue->info->netdev->stats.rx_errors++;
 962			continue;
 963		}
 964
 965		u64_stats_update_begin(&rx_stats->syncp);
 966		rx_stats->packets++;
 967		rx_stats->bytes += skb->len;
 968		u64_stats_update_end(&rx_stats->syncp);
 969
 970		/* Pass it up. */
 971		napi_gro_receive(&queue->napi, skb);
 972	}
 973
 974	return packets_dropped;
 975}
 976
 977static int xennet_poll(struct napi_struct *napi, int budget)
 978{
 979	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
 980	struct net_device *dev = queue->info->netdev;
 981	struct sk_buff *skb;
 982	struct netfront_rx_info rinfo;
 983	struct xen_netif_rx_response *rx = &rinfo.rx;
 984	struct xen_netif_extra_info *extras = rinfo.extras;
 985	RING_IDX i, rp;
 986	int work_done;
 987	struct sk_buff_head rxq;
 988	struct sk_buff_head errq;
 989	struct sk_buff_head tmpq;
 
 
 990	int err;
 991
 992	spin_lock(&queue->rx_lock);
 993
 994	skb_queue_head_init(&rxq);
 995	skb_queue_head_init(&errq);
 996	skb_queue_head_init(&tmpq);
 997
 998	rp = queue->rx.sring->rsp_prod;
 999	rmb(); /* Ensure we see queued responses up to 'rp'. */
1000
1001	i = queue->rx.rsp_cons;
1002	work_done = 0;
1003	while ((i != rp) && (work_done < budget)) {
1004		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
1005		memset(extras, 0, sizeof(rinfo.extras));
1006
1007		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
1008
1009		if (unlikely(err)) {
1010err:
1011			while ((skb = __skb_dequeue(&tmpq)))
1012				__skb_queue_tail(&errq, skb);
1013			dev->stats.rx_errors++;
1014			i = queue->rx.rsp_cons;
1015			continue;
1016		}
1017
1018		skb = __skb_dequeue(&tmpq);
1019
1020		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1021			struct xen_netif_extra_info *gso;
1022			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1023
1024			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1025				__skb_queue_head(&tmpq, skb);
1026				queue->rx.rsp_cons += skb_queue_len(&tmpq);
1027				goto err;
1028			}
1029		}
1030
1031		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1032		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1033			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1034
1035		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1036		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1037		skb->data_len = rx->status;
1038		skb->len += rx->status;
1039
1040		i = xennet_fill_frags(queue, skb, &tmpq);
1041
1042		if (rx->flags & XEN_NETRXF_csum_blank)
1043			skb->ip_summed = CHECKSUM_PARTIAL;
1044		else if (rx->flags & XEN_NETRXF_data_validated)
1045			skb->ip_summed = CHECKSUM_UNNECESSARY;
1046
1047		__skb_queue_tail(&rxq, skb);
1048
1049		queue->rx.rsp_cons = ++i;
1050		work_done++;
1051	}
1052
1053	__skb_queue_purge(&errq);
1054
1055	work_done -= handle_incoming_queue(queue, &rxq);
 
 
 
 
 
 
 
1056
1057	xennet_alloc_rx_buffers(queue);
1058
1059	if (work_done < budget) {
1060		int more_to_do = 0;
1061
1062		napi_complete(napi);
1063
1064		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1065		if (more_to_do)
1066			napi_schedule(napi);
 
 
1067	}
1068
1069	spin_unlock(&queue->rx_lock);
1070
1071	return work_done;
1072}
1073
1074static int xennet_change_mtu(struct net_device *dev, int mtu)
1075{
1076	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1077
1078	if (mtu > max)
1079		return -EINVAL;
1080	dev->mtu = mtu;
1081	return 0;
1082}
1083
1084static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1085						    struct rtnl_link_stats64 *tot)
1086{
1087	struct netfront_info *np = netdev_priv(dev);
1088	int cpu;
1089
1090	for_each_possible_cpu(cpu) {
1091		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1092		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1093		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1094		unsigned int start;
1095
1096		do {
1097			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1098			tx_packets = tx_stats->packets;
1099			tx_bytes = tx_stats->bytes;
1100		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1101
1102		do {
1103			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1104			rx_packets = rx_stats->packets;
1105			rx_bytes = rx_stats->bytes;
1106		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1107
1108		tot->rx_packets += rx_packets;
1109		tot->tx_packets += tx_packets;
1110		tot->rx_bytes   += rx_bytes;
1111		tot->tx_bytes   += tx_bytes;
1112	}
1113
1114	tot->rx_errors  = dev->stats.rx_errors;
1115	tot->tx_dropped = dev->stats.tx_dropped;
1116
1117	return tot;
1118}
1119
1120static void xennet_release_tx_bufs(struct netfront_queue *queue)
1121{
1122	struct sk_buff *skb;
1123	int i;
1124
1125	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1126		/* Skip over entries which are actually freelist references */
1127		if (skb_entry_is_link(&queue->tx_skbs[i]))
1128			continue;
1129
1130		skb = queue->tx_skbs[i].skb;
1131		get_page(queue->grant_tx_page[i]);
1132		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1133					  GNTMAP_readonly,
1134					  (unsigned long)page_address(queue->grant_tx_page[i]));
1135		queue->grant_tx_page[i] = NULL;
1136		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1137		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1138		dev_kfree_skb_irq(skb);
1139	}
1140}
1141
1142static void xennet_release_rx_bufs(struct netfront_queue *queue)
1143{
 
 
 
 
 
 
1144	int id, ref;
1145
1146	spin_lock_bh(&queue->rx_lock);
 
 
 
 
 
 
1147
1148	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1149		struct sk_buff *skb;
1150		struct page *page;
1151
1152		skb = queue->rx_skbs[id];
1153		if (!skb)
1154			continue;
 
1155
1156		ref = queue->grant_rx_ref[id];
1157		if (ref == GRANT_INVALID_REF)
 
 
 
 
 
 
 
1158			continue;
 
1159
1160		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 
 
 
 
1161
1162		/* gnttab_end_foreign_access() needs a page ref until
1163		 * foreign access is ended (which may be deferred).
1164		 */
1165		get_page(page);
1166		gnttab_end_foreign_access(ref, 0,
1167					  (unsigned long)page_address(page));
1168		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
 
1169
1170		kfree_skb(skb);
 
 
 
1171	}
1172
1173	spin_unlock_bh(&queue->rx_lock);
1174}
1175
1176static netdev_features_t xennet_fix_features(struct net_device *dev,
1177	netdev_features_t features)
1178{
1179	struct netfront_info *np = netdev_priv(dev);
 
 
 
 
 
1180
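	/* Only keep offloads that the backend advertises via xenstore. */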
1181	if (features & NETIF_F_SG &&
1182	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1183		features &= ~NETIF_F_SG;
1184
1185	if (features & NETIF_F_IPV6_CSUM &&
1186	    !xenbus_read_unsigned(np->xbdev->otherend,
1187				  "feature-ipv6-csum-offload", 0))
1188		features &= ~NETIF_F_IPV6_CSUM;
1189
1190	if (features & NETIF_F_TSO &&
1191	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1192		features &= ~NETIF_F_TSO;
1193
1194	if (features & NETIF_F_TSO6 &&
1195	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1196		features &= ~NETIF_F_TSO6;
1197
1198	return features;
1199}
1200
1201static int xennet_set_features(struct net_device *dev,
1202	netdev_features_t features)
1203{
1204	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1205		netdev_info(dev, "Reducing MTU because no SG offload");
1206		dev->mtu = ETH_DATA_LEN;
1207	}
1208
1209	return 0;
1210}
1211
1212static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1213{
1214	struct netfront_queue *queue = dev_id;
1215	unsigned long flags;
1216
1217	spin_lock_irqsave(&queue->tx_lock, flags);
1218	xennet_tx_buf_gc(queue);
1219	spin_unlock_irqrestore(&queue->tx_lock, flags);
 
1220
1221	return IRQ_HANDLED;
1222}
 
1223
1224static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1225{
1226	struct netfront_queue *queue = dev_id;
1227	struct net_device *dev = queue->info->netdev;
1228
1229	if (likely(netif_carrier_ok(dev) &&
1230		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1231		napi_schedule(&queue->napi);
1232
1233	return IRQ_HANDLED;
1234}
1235
1236static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1237{
1238	xennet_tx_interrupt(irq, dev_id);
1239	xennet_rx_interrupt(irq, dev_id);
1240	return IRQ_HANDLED;
1241}
1242
1243#ifdef CONFIG_NET_POLL_CONTROLLER
1244static void xennet_poll_controller(struct net_device *dev)
1245{
1246	/* Poll each queue */
1247	struct netfront_info *info = netdev_priv(dev);
1248	unsigned int num_queues = dev->real_num_tx_queues;
1249	unsigned int i;
1250	for (i = 0; i < num_queues; ++i)
1251		xennet_interrupt(0, &info->queues[i]);
1252}
1253#endif
1254
1255static const struct net_device_ops xennet_netdev_ops = {
1256	.ndo_open            = xennet_open,
 
1257	.ndo_stop            = xennet_close,
1258	.ndo_start_xmit      = xennet_start_xmit,
1259	.ndo_change_mtu	     = xennet_change_mtu,
1260	.ndo_get_stats64     = xennet_get_stats64,
1261	.ndo_set_mac_address = eth_mac_addr,
1262	.ndo_validate_addr   = eth_validate_addr,
1263	.ndo_fix_features    = xennet_fix_features,
1264	.ndo_set_features    = xennet_set_features,
1265	.ndo_select_queue    = xennet_select_queue,
1266#ifdef CONFIG_NET_POLL_CONTROLLER
1267	.ndo_poll_controller = xennet_poll_controller,
1268#endif
1269};
1270
1271static void xennet_free_netdev(struct net_device *netdev)
1272{
1273	struct netfront_info *np = netdev_priv(netdev);
1274
1275	free_percpu(np->rx_stats);
1276	free_percpu(np->tx_stats);
1277	free_netdev(netdev);
1278}
1279
1280static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1281{
1282	int err;
1283	struct net_device *netdev;
1284	struct netfront_info *np;
1285
1286	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1287	if (!netdev)
 
 
1288		return ERR_PTR(-ENOMEM);
 
1289
1290	np                   = netdev_priv(netdev);
1291	np->xbdev            = dev;
1292
1293	np->queues = NULL;
1294
1295	err = -ENOMEM;
1296	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1297	if (np->rx_stats == NULL)
1298		goto exit;
1299	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1300	if (np->tx_stats == NULL)
1301		goto exit;
1302
1303	netdev->netdev_ops	= &xennet_netdev_ops;
1304
 
1305	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1306				  NETIF_F_GSO_ROBUST;
1307	netdev->hw_features	= NETIF_F_SG |
1308				  NETIF_F_IPV6_CSUM |
1309				  NETIF_F_TSO | NETIF_F_TSO6;
1310
1311	/*
 1312	 * Assume that all hw features are available for now. This set
 1313	 * will be adjusted by the call to netdev_update_features() in
 1314	 * xennet_connect() which is the earliest point where we can
 1315	 * negotiate with the backend regarding supported features.
 1316	 */
1317	netdev->features |= netdev->hw_features;
1318
1319	netdev->ethtool_ops = &xennet_ethtool_ops;
1320	netdev->min_mtu = 0;
1321	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1322	SET_NETDEV_DEV(netdev, &dev->dev);
1323
1324	np->netdev = netdev;
1325
1326	netif_carrier_off(netdev);
1327
1328	return netdev;
1329
 
 
 
 
1330 exit:
1331	xennet_free_netdev(netdev);
1332	return ERR_PTR(err);
1333}
1334
1335/**
1336 * Entry point to this code when a new device is created.  Allocate the basic
1337 * structures and the ring buffers for communication with the backend, and
1338 * inform the backend of the appropriate details for those.
1339 */
1340static int netfront_probe(struct xenbus_device *dev,
1341			  const struct xenbus_device_id *id)
1342{
1343	int err;
1344	struct net_device *netdev;
1345	struct netfront_info *info;
1346
1347	netdev = xennet_create_dev(dev);
1348	if (IS_ERR(netdev)) {
1349		err = PTR_ERR(netdev);
1350		xenbus_dev_fatal(dev, err, "creating netdev");
1351		return err;
1352	}
1353
1354	info = netdev_priv(netdev);
1355	dev_set_drvdata(&dev->dev, info);
1356#ifdef CONFIG_SYSFS
1357	info->netdev->sysfs_groups[0] = &xennet_dev_group;
1358#endif
1359	err = register_netdev(info->netdev);
1360	if (err) {
1361		pr_warn("%s: register_netdev err=%d\n", __func__, err);
1362		goto fail;
1363	}
1364
1365	return 0;
1366
1367 fail:
1368	xennet_free_netdev(netdev);
1369	dev_set_drvdata(&dev->dev, NULL);
1370	return err;
1371}
1372
1373static void xennet_end_access(int ref, void *page)
1374{
1375	/* This frees the page as a side-effect */
1376	if (ref != GRANT_INVALID_REF)
1377		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1378}
1379
1380static void xennet_disconnect_backend(struct netfront_info *info)
1381{
1382	unsigned int i = 0;
1383	unsigned int num_queues = info->netdev->real_num_tx_queues;
1384
1385	netif_carrier_off(info->netdev);
 
 
1386
1387	for (i = 0; i < num_queues && info->queues; ++i) {
1388		struct netfront_queue *queue = &info->queues[i];
1389
1390		del_timer_sync(&queue->rx_refill_timer);
1391
1392		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1393			unbind_from_irqhandler(queue->tx_irq, queue);
1394		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1395			unbind_from_irqhandler(queue->tx_irq, queue);
1396			unbind_from_irqhandler(queue->rx_irq, queue);
1397		}
1398		queue->tx_evtchn = queue->rx_evtchn = 0;
1399		queue->tx_irq = queue->rx_irq = 0;
1400
1401		if (netif_running(info->netdev))
1402			napi_synchronize(&queue->napi);
1403
1404		xennet_release_tx_bufs(queue);
1405		xennet_release_rx_bufs(queue);
1406		gnttab_free_grant_references(queue->gref_tx_head);
1407		gnttab_free_grant_references(queue->gref_rx_head);
1408
1409		/* End access and free the pages */
1410		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1411		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1412
1413		queue->tx_ring_ref = GRANT_INVALID_REF;
1414		queue->rx_ring_ref = GRANT_INVALID_REF;
1415		queue->tx.sring = NULL;
1416		queue->rx.sring = NULL;
1417	}
1418}
1419
1420/**
1421 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1422 * driver restart.  We tear down our netif structure and recreate it, but
1423 * leave the device-layer structures intact so that this is transparent to the
1424 * rest of the kernel.
1425 */
1426static int netfront_resume(struct xenbus_device *dev)
1427{
1428	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1429
1430	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1431
1432	xennet_disconnect_backend(info);
1433	return 0;
1434}
1435
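/* Parse the colon-separated MAC address the toolstack writes to the
 * frontend's "mac" node (e.g. 00:16:3e:xx:xx:xx).
 */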
1436static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1437{
1438	char *s, *e, *macstr;
1439	int i;
1440
1441	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1442	if (IS_ERR(macstr))
1443		return PTR_ERR(macstr);
1444
1445	for (i = 0; i < ETH_ALEN; i++) {
1446		mac[i] = simple_strtoul(s, &e, 16);
1447		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1448			kfree(macstr);
1449			return -ENOENT;
1450		}
1451		s = e+1;
1452	}
1453
1454	kfree(macstr);
1455	return 0;
1456}
1457
1458static int setup_netfront_single(struct netfront_queue *queue)
1459{
1460	int err;
 
 
1461
1462	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1463	if (err < 0)
1464		goto fail;
1465
1466	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1467					xennet_interrupt,
1468					0, queue->info->netdev->name, queue);
1469	if (err < 0)
1470		goto bind_fail;
1471	queue->rx_evtchn = queue->tx_evtchn;
1472	queue->rx_irq = queue->tx_irq = err;
1473
1474	return 0;
1475
1476bind_fail:
1477	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1478	queue->tx_evtchn = 0;
1479fail:
1480	return err;
1481}
1482
1483static int setup_netfront_split(struct netfront_queue *queue)
1484{
1485	int err;
1486
1487	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1488	if (err < 0)
1489		goto fail;
1490	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1491	if (err < 0)
1492		goto alloc_rx_evtchn_fail;
1493
1494	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1495		 "%s-tx", queue->name);
1496	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1497					xennet_tx_interrupt,
1498					0, queue->tx_irq_name, queue);
1499	if (err < 0)
1500		goto bind_tx_fail;
1501	queue->tx_irq = err;
1502
1503	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1504		 "%s-rx", queue->name);
1505	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1506					xennet_rx_interrupt,
1507					0, queue->rx_irq_name, queue);
1508	if (err < 0)
1509		goto bind_rx_fail;
1510	queue->rx_irq = err;
1511
1512	return 0;
1513
1514bind_rx_fail:
1515	unbind_from_irqhandler(queue->tx_irq, queue);
1516	queue->tx_irq = 0;
1517bind_tx_fail:
1518	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1519	queue->rx_evtchn = 0;
1520alloc_rx_evtchn_fail:
1521	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1522	queue->tx_evtchn = 0;
1523fail:
1524	return err;
1525}
1526
1527static int setup_netfront(struct xenbus_device *dev,
1528			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1529{
1530	struct xen_netif_tx_sring *txs;
1531	struct xen_netif_rx_sring *rxs;
1532	grant_ref_t gref;
1533	int err;
 
 
 
 
 
 
 
1534
1535	queue->tx_ring_ref = GRANT_INVALID_REF;
1536	queue->rx_ring_ref = GRANT_INVALID_REF;
1537	queue->rx.sring = NULL;
1538	queue->tx.sring = NULL;
 
1539
1540	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1541	if (!txs) {
1542		err = -ENOMEM;
1543		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1544		goto fail;
1545	}
1546	SHARED_RING_INIT(txs);
1547	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1548
1549	err = xenbus_grant_ring(dev, txs, 1, &gref);
1550	if (err < 0)
1551		goto grant_tx_ring_fail;
1552	queue->tx_ring_ref = gref;
 
1553
 
1554	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1555	if (!rxs) {
1556		err = -ENOMEM;
1557		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1558		goto alloc_rx_ring_fail;
1559	}
1560	SHARED_RING_INIT(rxs);
1561	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1562
1563	err = xenbus_grant_ring(dev, rxs, 1, &gref);
1564	if (err < 0)
1565		goto grant_rx_ring_fail;
1566	queue->rx_ring_ref = gref;
1567
1568	if (feature_split_evtchn)
1569		err = setup_netfront_split(queue);
 1570	/* Set up a single event channel if
 1571	 *  a) feature-split-event-channels == 0, or
 1572	 *  b) feature-split-event-channels == 1 but split setup failed.
1573	 */
1574	if (!feature_split_evtchn || (feature_split_evtchn && err))
1575		err = setup_netfront_single(queue);
1576
 
1577	if (err)
1578		goto alloc_evtchn_fail;
1579
 
 
 
 
 
1580	return 0;
1581
1582	/* If we fail to setup netfront, it is safe to just revoke access to
1583	 * granted pages because backend is not accessing it at this point.
1584	 */
1585alloc_evtchn_fail:
1586	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1587grant_rx_ring_fail:
1588	free_page((unsigned long)rxs);
1589alloc_rx_ring_fail:
1590	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1591grant_tx_ring_fail:
1592	free_page((unsigned long)txs);
1593fail:
1594	return err;
1595}
1596
1597/* Queue-specific initialisation
1598 * This used to be done in xennet_create_dev() but must now
1599 * be run per-queue.
1600 */
1601static int xennet_init_queue(struct netfront_queue *queue)
1602{
1603	unsigned short i;
1604	int err = 0;
1605
1606	spin_lock_init(&queue->tx_lock);
1607	spin_lock_init(&queue->rx_lock);
1608
1609	setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
1610		    (unsigned long)queue);
1611
1612	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
1613		 queue->info->netdev->name, queue->id);
1614
1615	/* Initialise tx_skbs as a free chain containing every entry. */
1616	queue->tx_skb_freelist = 0;
1617	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1618		skb_entry_set_link(&queue->tx_skbs[i], i+1);
1619		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1620		queue->grant_tx_page[i] = NULL;
1621	}
1622
1623	/* Clear out rx_skbs */
1624	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1625		queue->rx_skbs[i] = NULL;
1626		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1627	}
1628
1629	/* A grant for every tx ring slot */
1630	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1631					  &queue->gref_tx_head) < 0) {
1632		pr_alert("can't alloc tx grant refs\n");
1633		err = -ENOMEM;
1634		goto exit;
1635	}
1636
1637	/* A grant for every rx ring slot */
1638	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1639					  &queue->gref_rx_head) < 0) {
1640		pr_alert("can't alloc rx grant refs\n");
1641		err = -ENOMEM;
1642		goto exit_free_tx;
1643	}
1644
1645	return 0;
1646
1647 exit_free_tx:
1648	gnttab_free_grant_references(queue->gref_tx_head);
1649 exit:
1650	return err;
1651}
1652
1653static int write_queue_xenstore_keys(struct netfront_queue *queue,
1654			   struct xenbus_transaction *xbt, int write_hierarchical)
1655{
1656	/* Write the queue-specific keys into XenStore in the traditional
 1657	 * way for a single queue, or in per-queue subkeys for multiple
 1658	 * queues.
1659	 */
1660	struct xenbus_device *dev = queue->info->xbdev;
1661	int err;
1662	const char *message;
1663	char *path;
1664	size_t pathsize;
1665
1666	/* Choose the correct place to write the keys */
1667	if (write_hierarchical) {
1668		pathsize = strlen(dev->nodename) + 10;
1669		path = kzalloc(pathsize, GFP_KERNEL);
1670		if (!path) {
1671			err = -ENOMEM;
1672			message = "out of memory while writing ring references";
1673			goto error;
1674		}
1675		snprintf(path, pathsize, "%s/queue-%u",
1676				dev->nodename, queue->id);
1677	} else {
1678		path = (char *)dev->nodename;
1679	}
1680
1681	/* Write ring references */
1682	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1683			queue->tx_ring_ref);
1684	if (err) {
1685		message = "writing tx-ring-ref";
1686		goto error;
1687	}
1688
1689	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1690			queue->rx_ring_ref);
1691	if (err) {
1692		message = "writing rx-ring-ref";
1693		goto error;
1694	}
1695
1696	/* Write event channels; taking into account both shared
1697	 * and split event channel scenarios.
1698	 */
1699	if (queue->tx_evtchn == queue->rx_evtchn) {
1700		/* Shared event channel */
1701		err = xenbus_printf(*xbt, path,
1702				"event-channel", "%u", queue->tx_evtchn);
1703		if (err) {
1704			message = "writing event-channel";
1705			goto error;
1706		}
1707	} else {
1708		/* Split event channels */
1709		err = xenbus_printf(*xbt, path,
1710				"event-channel-tx", "%u", queue->tx_evtchn);
1711		if (err) {
1712			message = "writing event-channel-tx";
1713			goto error;
1714		}
1715
1716		err = xenbus_printf(*xbt, path,
1717				"event-channel-rx", "%u", queue->rx_evtchn);
1718		if (err) {
1719			message = "writing event-channel-rx";
1720			goto error;
1721		}
1722	}
1723
1724	if (write_hierarchical)
1725		kfree(path);
1726	return 0;
1727
1728error:
1729	if (write_hierarchical)
1730		kfree(path);
1731	xenbus_dev_fatal(dev, err, "%s", message);
1732	return err;
1733}
1734
1735static void xennet_destroy_queues(struct netfront_info *info)
1736{
1737	unsigned int i;
1738
1739	rtnl_lock();
1740
1741	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1742		struct netfront_queue *queue = &info->queues[i];
1743
1744		if (netif_running(info->netdev))
1745			napi_disable(&queue->napi);
1746		netif_napi_del(&queue->napi);
1747	}
1748
1749	rtnl_unlock();
1750
1751	kfree(info->queues);
1752	info->queues = NULL;
1753}
1754
1755static int xennet_create_queues(struct netfront_info *info,
1756				unsigned int *num_queues)
1757{
1758	unsigned int i;
1759	int ret;
1760
1761	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
1762			       GFP_KERNEL);
1763	if (!info->queues)
1764		return -ENOMEM;
1765
1766	rtnl_lock();
1767
1768	for (i = 0; i < *num_queues; i++) {
1769		struct netfront_queue *queue = &info->queues[i];
1770
1771		queue->id = i;
1772		queue->info = info;
1773
1774		ret = xennet_init_queue(queue);
1775		if (ret < 0) {
1776			dev_warn(&info->netdev->dev,
1777				 "only created %d queues\n", i);
1778			*num_queues = i;
1779			break;
1780		}
1781
1782		netif_napi_add(queue->info->netdev, &queue->napi,
1783			       xennet_poll, 64);
1784		if (netif_running(info->netdev))
1785			napi_enable(&queue->napi);
1786	}
1787
1788	netif_set_real_num_tx_queues(info->netdev, *num_queues);
1789
1790	rtnl_unlock();
1791
1792	if (*num_queues == 0) {
1793		dev_err(&info->netdev->dev, "no queues\n");
1794		return -EINVAL;
1795	}
1796	return 0;
1797}
1798
1799/* Common code used when first setting up, and when resuming. */
1800static int talk_to_netback(struct xenbus_device *dev,
1801			   struct netfront_info *info)
1802{
1803	const char *message;
1804	struct xenbus_transaction xbt;
1805	int err;
1806	unsigned int feature_split_evtchn;
1807	unsigned int i = 0;
1808	unsigned int max_queues = 0;
1809	struct netfront_queue *queue = NULL;
1810	unsigned int num_queues = 1;
1811
1812	info->netdev->irq = 0;
1813
1814	/* Check if backend supports multiple queues */
1815	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1816					  "multi-queue-max-queues", 1);
1817	num_queues = min(max_queues, xennet_max_queues);
1818
1819	/* Check feature-split-event-channels */
1820	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
1821					"feature-split-event-channels", 0);
1822
1823	/* Read mac addr. */
1824	err = xen_net_read_mac(dev, info->netdev->dev_addr);
1825	if (err) {
1826		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1827		goto out;
1828	}
1829
1830	if (info->queues)
1831		xennet_destroy_queues(info);
1832
1833	err = xennet_create_queues(info, &num_queues);
1834	if (err < 0) {
1835		xenbus_dev_fatal(dev, err, "creating queues");
1836		kfree(info->queues);
1837		info->queues = NULL;
1838		goto out;
1839	}
1840
1841	/* Create shared ring, alloc event channel -- for each queue */
1842	for (i = 0; i < num_queues; ++i) {
1843		queue = &info->queues[i];
1844		err = setup_netfront(dev, queue, feature_split_evtchn);
1845		if (err)
1846			goto destroy_ring;
1847	}
1848
1849again:
1850	err = xenbus_transaction_start(&xbt);
1851	if (err) {
1852		xenbus_dev_fatal(dev, err, "starting transaction");
1853		goto destroy_ring;
1854	}
1855
1856	if (xenbus_exists(XBT_NIL,
1857			  info->xbdev->otherend, "multi-queue-max-queues")) {
1858		/* Write the number of queues */
1859		err = xenbus_printf(xbt, dev->nodename,
1860				    "multi-queue-num-queues", "%u", num_queues);
1861		if (err) {
1862			message = "writing multi-queue-num-queues";
1863			goto abort_transaction_no_dev_fatal;
1864		}
1865	}
1866
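	/*
	 * With a single queue the ring references and event channels are
	 * written directly under the device node ("flat" layout); with
	 * several queues each queue's keys go under its own sub-node
	 * (queue-0, queue-1, ... in the multi-queue netif protocol), as
	 * built by write_queue_xenstore_keys().
	 */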
1867	if (num_queues == 1) {
1868		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1869		if (err)
1870			goto abort_transaction_no_dev_fatal;
1871	} else {
1872		/* Write the keys for each queue */
1873		for (i = 0; i < num_queues; ++i) {
1874			queue = &info->queues[i];
1875			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1876			if (err)
1877				goto abort_transaction_no_dev_fatal;
1878		}
1879	}
1880
1881	/* The remaining keys are not queue-specific */
1882	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1883			    1);
1884	if (err) {
1885		message = "writing request-rx-copy";
1886		goto abort_transaction;
1887	}
1888
1889	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1890	if (err) {
1891		message = "writing feature-rx-notify";
1892		goto abort_transaction;
1893	}
1894
1895	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1896	if (err) {
1897		message = "writing feature-sg";
1898		goto abort_transaction;
1899	}
1900
1901	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1902	if (err) {
1903		message = "writing feature-gso-tcpv4";
1904		goto abort_transaction;
1905	}
1906
1907	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1908	if (err) {
1909		message = "writing feature-gso-tcpv6";
1910		goto abort_transaction;
1911	}
1912
1913	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1914			   "1");
1915	if (err) {
1916		message = "writing feature-ipv6-csum-offload";
1917		goto abort_transaction;
1918	}
1919
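	/* Commit the transaction.  -EAGAIN means it collided with another
	 * xenstore update, in which case the whole set of writes is
	 * replayed from the "again" label above.
	 */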
1920	err = xenbus_transaction_end(xbt, 0);
1921	if (err) {
1922		if (err == -EAGAIN)
1923			goto again;
1924		xenbus_dev_fatal(dev, err, "completing transaction");
1925		goto destroy_ring;
1926	}
1927
1928	return 0;
1929
1930 abort_transaction:
1931	xenbus_dev_fatal(dev, err, "%s", message);
1932abort_transaction_no_dev_fatal:
1933	xenbus_transaction_end(xbt, 1);
1934 destroy_ring:
1935	xennet_disconnect_backend(info);
1936	xennet_destroy_queues(info);
1937 out:
1938	unregister_netdev(info->netdev);
1939	xennet_free_netdev(info->netdev);
1940	return err;
1941}
1942
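/*
 * Bring the device into the connected state: check that the backend
 * supports the RX copy receive path, (re)negotiate rings and event
 * channels via talk_to_netback(), then kick each queue so that any
 * requeued TX packets are collected and the RX rings are refilled.
 */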
1943static int xennet_connect(struct net_device *dev)
1944{
1945	struct netfront_info *np = netdev_priv(dev);
1946	unsigned int num_queues = 0;
1947	int err;
1948	unsigned int j = 0;
1949	struct netfront_queue *queue = NULL;
1950
1951	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
1952		dev_info(&dev->dev,
1953			 "backend does not support copying receive path\n");
1954		return -ENODEV;
1955	}
1956
1957	err = talk_to_netback(np->xbdev, np);
1958	if (err)
1959		return err;
1960
1961	/* talk_to_netback() sets the correct number of queues */
1962	num_queues = dev->real_num_tx_queues;
1963
1964	rtnl_lock();
1965	netdev_update_features(dev);
1966	rtnl_unlock();
1967
1968	/*
1969	 * All public and private state should now be sane.  Get
1970	 * ready to start sending and receiving packets and give the driver
1971	 * domain a kick because we've probably just requeued some
1972	 * packets.
1973	 */
1974	netif_carrier_on(np->netdev);
1975	for (j = 0; j < num_queues; ++j) {
1976		queue = &np->queues[j];
1977
1978		notify_remote_via_irq(queue->tx_irq);
1979		if (queue->tx_irq != queue->rx_irq)
1980			notify_remote_via_irq(queue->rx_irq);
1981
1982		spin_lock_irq(&queue->tx_lock);
1983		xennet_tx_buf_gc(queue);
1984		spin_unlock_irq(&queue->tx_lock);
1985
1986		spin_lock_bh(&queue->rx_lock);
1987		xennet_alloc_rx_buffers(queue);
1988		spin_unlock_bh(&queue->rx_lock);
1989	}
1990
1991	return 0;
1992}
1993
1994/**
1995 * Callback received when the backend's state changes.
1996 */
1997static void netback_changed(struct xenbus_device *dev,
1998			    enum xenbus_state backend_state)
1999{
2000	struct netfront_info *np = dev_get_drvdata(&dev->dev);
2001	struct net_device *netdev = np->netdev;
2002
2003	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2004
2005	switch (backend_state) {
2006	case XenbusStateInitialising:
2007	case XenbusStateInitialised:
2008	case XenbusStateReconfiguring:
2009	case XenbusStateReconfigured:
2010	case XenbusStateUnknown:
2011		break;
2012
2013	case XenbusStateInitWait:
2014		if (dev->state != XenbusStateInitialising)
2015			break;
2016		if (xennet_connect(netdev) != 0)
2017			break;
2018		xenbus_switch_state(dev, XenbusStateConnected);
2019		break;
2020
2021	case XenbusStateConnected:
2022		netdev_notify_peers(netdev);
2023		break;
2024
2025	case XenbusStateClosed:
2026		if (dev->state == XenbusStateClosed)
2027			break;
2028		/* Missed the backend's CLOSING state -- fallthrough */
2029	case XenbusStateClosing:
2030		xenbus_frontend_closed(dev);
2031		break;
2032	}
2033}
2034
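/* Extra ethtool statistics; currently only the number of receive-path
 * GSO checksum fixups (rx_gso_checksum_fixup).
 */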
2035static const struct xennet_stat {
2036	char name[ETH_GSTRING_LEN];
2037	u16 offset;
2038} xennet_stats[] = {
2039	{
2040		"rx_gso_checksum_fixup",
2041		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2042	},
2043};
2044
2045static int xennet_get_sset_count(struct net_device *dev, int string_set)
2046{
2047	switch (string_set) {
2048	case ETH_SS_STATS:
2049		return ARRAY_SIZE(xennet_stats);
2050	default:
2051		return -EINVAL;
2052	}
2053}
2054
2055static void xennet_get_ethtool_stats(struct net_device *dev,
2056				     struct ethtool_stats *stats, u64 * data)
2057{
2058	void *np = netdev_priv(dev);
2059	int i;
2060
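	/* Each entry's offset points at an atomic_t counter inside
	 * struct netfront_info, read directly via that offset.
	 */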
2061	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2062		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2063}
2064
2065static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2066{
2067	int i;
2068
2069	switch (stringset) {
2070	case ETH_SS_STATS:
2071		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2072			memcpy(data + i * ETH_GSTRING_LEN,
2073			       xennet_stats[i].name, ETH_GSTRING_LEN);
2074		break;
2075	}
2076}
2077
2078static const struct ethtool_ops xennet_ethtool_ops =
2079{
2080	.get_link = ethtool_op_get_link,
2081
2082	.get_sset_count = xennet_get_sset_count,
2083	.get_ethtool_stats = xennet_get_ethtool_stats,
2084	.get_strings = xennet_get_strings,
2085};
2086
2087#ifdef CONFIG_SYSFS
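/* Legacy rxbuf_{min,max,cur} attributes.  The RX ring now has a fixed
 * size, so reads simply report NET_RX_RING_SIZE and writes are parsed
 * for validity but otherwise ignored, apparently kept so that existing
 * tooling does not break.
 */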
2088static ssize_t show_rxbuf(struct device *dev,
2089			  struct device_attribute *attr, char *buf)
2090{
2091	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2092}
2093
2094static ssize_t store_rxbuf(struct device *dev,
2095			   struct device_attribute *attr,
2096			   const char *buf, size_t len)
2097{
 
 
2098	char *endp;
2099	unsigned long target;
2100
2101	if (!capable(CAP_NET_ADMIN))
2102		return -EPERM;
2103
2104	target = simple_strtoul(buf, &endp, 0);
2105	if (endp == buf)
2106		return -EBADMSG;
2107
2108	/* rxbuf_min and rxbuf_max are no longer configurable. */
2109
2110	return len;
2111}
2112
2113static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2114static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2115static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
2116
2117static struct attribute *xennet_dev_attrs[] = {
2118	&dev_attr_rxbuf_min.attr,
2119	&dev_attr_rxbuf_max.attr,
2120	&dev_attr_rxbuf_cur.attr,
2121	NULL
2122};
2123
2124static const struct attribute_group xennet_dev_group = {
2125	.attrs = xennet_dev_attrs
2126};
2127#endif /* CONFIG_SYSFS */
2128
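/* Device teardown: disconnect from the backend, unregister the
 * net_device, then release the per-queue state and the device itself.
 */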
2129	static int xennet_remove(struct xenbus_device *dev)
2130{
2131	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2132
2133	dev_dbg(&dev->dev, "%s\n", dev->nodename);
2134
2135	xennet_disconnect_backend(info);
2136
2137	unregister_netdev(info->netdev);
2138
2139	if (info->queues)
2140		xennet_destroy_queues(info);
2141	xennet_free_netdev(info->netdev);
2142
2143	return 0;
2144}
2145
2146static const struct xenbus_device_id netfront_ids[] = {
2147	{ "vif" },
2148	{ "" }
2149};
2150
2151	static struct xenbus_driver netfront_driver = {
2152	.ids = netfront_ids,
2153	.probe = netfront_probe,
2154	.remove = xennet_remove,
2155	.resume = netfront_resume,
2156	.otherend_changed = netback_changed,
2157};
2158
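/* Module entry point: register the frontend only when running in a Xen
 * domain that exposes paravirtualised network devices.
 */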
2159static int __init netif_init(void)
2160{
2161	if (!xen_domain())
2162		return -ENODEV;
2163
2164	if (!xen_has_pv_nic_devices())
2165		return -ENODEV;
2166
2167	pr_info("Initialising Xen virtual ethernet driver\n");
2168
2169	/* Allow as many queues as there are CPUs if user has not
2170	 * specified a value.
2171	 */
2172	if (xennet_max_queues == 0)
2173		xennet_max_queues = num_online_cpus();
2174
2175	return xenbus_register_frontend(&netfront_driver);
2176}
2177module_init(netif_init);
2178
2179
2180static void __exit netif_exit(void)
2181{
 
 
 
2182	xenbus_unregister_driver(&netfront_driver);
2183}
2184module_exit(netif_exit);
2185
2186MODULE_DESCRIPTION("Xen virtual network device frontend");
2187MODULE_LICENSE("GPL");
2188MODULE_ALIAS("xen:vif");
2189MODULE_ALIAS("xennet");