v4.17
   1/*
   2 * Virtual network driver for conversing with remote driver backends.
   3 *
   4 * Copyright (c) 2002-2005, K A Fraser
   5 * Copyright (c) 2005, XenSource Ltd
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version 2
   9 * as published by the Free Software Foundation; or, when distributed
  10 * separately from the Linux kernel or incorporated into other
  11 * software packages, subject to the following license:
  12 *
  13 * Permission is hereby granted, free of charge, to any person obtaining a copy
  14 * of this source file (the "Software"), to deal in the Software without
  15 * restriction, including without limitation the rights to use, copy, modify,
  16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  17 * and to permit persons to whom the Software is furnished to do so, subject to
  18 * the following conditions:
  19 *
  20 * The above copyright notice and this permission notice shall be included in
  21 * all copies or substantial portions of the Software.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  29 * IN THE SOFTWARE.
  30 */
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47
  48#include <xen/xen.h>
  49#include <xen/xenbus.h>
  50#include <xen/events.h>
  51#include <xen/page.h>
  52#include <xen/platform_pci.h>
  53#include <xen/grant_table.h>
  54
  55#include <xen/interface/io/netif.h>
  56#include <xen/interface/memory.h>
  57#include <xen/interface/grant_table.h>
  58
  59/* Module parameters */
  60#define MAX_QUEUES_DEFAULT 8
  61static unsigned int xennet_max_queues;
  62module_param_named(max_queues, xennet_max_queues, uint, 0644);
  63MODULE_PARM_DESC(max_queues,
  64		 "Maximum number of queues per virtual interface");
  65
  66static const struct ethtool_ops xennet_ethtool_ops;
  67
  68struct netfront_cb {
  69	int pull_to;
  70};
  71
  72#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  73
  74#define RX_COPY_THRESHOLD 256
  75
  76#define GRANT_INVALID_REF	0
  77
  78#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
  79#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
  80
  81/* Minimum number of Rx slots (includes slot for GSO metadata). */
  82#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
  83
  84/* Queue name is interface name with "-qNNN" appended */
  85#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
  86
  87/* IRQ name is queue name with "-tx" or "-rx" appended */
  88#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
  89
  90static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
  91
  92struct netfront_stats {
  93	u64			packets;
  94	u64			bytes;
  95	struct u64_stats_sync	syncp;
  96};
  97
  98struct netfront_info;
  99
 100struct netfront_queue {
 101	unsigned int id; /* Queue ID, 0-based */
 102	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
 103	struct netfront_info *info;
 104
 105	struct napi_struct napi;
 106
 107	/* Split event channels support, tx_* == rx_* when using
 108	 * single event channel.
 109	 */
 110	unsigned int tx_evtchn, rx_evtchn;
 111	unsigned int tx_irq, rx_irq;
 112	/* Only used when split event channels support is enabled */
 113	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 114	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 115
 116	spinlock_t   tx_lock;
 117	struct xen_netif_tx_front_ring tx;
 118	int tx_ring_ref;
 119
 120	/*
 121	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 122	 * are linked from tx_skb_freelist through skb_entry.link.
 123	 *
 124	 *  NB. Freelist index entries are always going to be less than
 125	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
 126	 *  greater than PAGE_OFFSET: we use this property to distinguish
 127	 *  them.
 128	 */
 129	union skb_entry {
 130		struct sk_buff *skb;
 131		unsigned long link;
 132	} tx_skbs[NET_TX_RING_SIZE];
 133	grant_ref_t gref_tx_head;
 134	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 135	struct page *grant_tx_page[NET_TX_RING_SIZE];
 136	unsigned tx_skb_freelist;
 137
 138	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 139	struct xen_netif_rx_front_ring rx;
 140	int rx_ring_ref;
 141
 142	struct timer_list rx_refill_timer;
 143
 144	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 145	grant_ref_t gref_rx_head;
 146	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 147};
 148
 149struct netfront_info {
 150	struct list_head list;
 151	struct net_device *netdev;
 152
 153	struct xenbus_device *xbdev;
 154
 155	/* Multi-queue support */
 156	struct netfront_queue *queues;
 157
 158	/* Statistics */
 159	struct netfront_stats __percpu *rx_stats;
 160	struct netfront_stats __percpu *tx_stats;
 161
 162	atomic_t rx_gso_checksum_fixup;
 163};
 164
 165struct netfront_rx_info {
 166	struct xen_netif_rx_response rx;
 167	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 168};
 169
 170static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 171{
 172	list->link = id;
 173}
 174
 175static int skb_entry_is_link(const union skb_entry *list)
 176{
 177	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
 178	return (unsigned long)list->skb < PAGE_OFFSET;
 179}
 180
 181/*
  182 * Access macros for acquiring and freeing slots in tx_skbs[].
 183 */
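/*
 * Example: with tx_skb_freelist == 3 and tx_skbs[3].link == 7,
 * get_id_from_freelist() hands out id 3 and leaves the head at 7;
 * add_id_to_freelist(&head, list, 3) pushes 3 back by storing the old
 * head in tx_skbs[3].link.  Freelist ids are small integers (always
 * below PAGE_OFFSET), so they can never be confused with a valid skb
 * pointer -- this is what skb_entry_is_link() relies on.
 */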
 184
 185static void add_id_to_freelist(unsigned *head, union skb_entry *list,
 186			       unsigned short id)
 187{
 188	skb_entry_set_link(&list[id], *head);
 189	*head = id;
 190}
 191
 192static unsigned short get_id_from_freelist(unsigned *head,
 193					   union skb_entry *list)
 194{
 195	unsigned int id = *head;
 196	*head = list[id].link;
 197	return id;
 198}
 199
 200static int xennet_rxidx(RING_IDX idx)
 201{
 202	return idx & (NET_RX_RING_SIZE - 1);
 203}
 204
 205static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
 206					 RING_IDX ri)
 207{
 208	int i = xennet_rxidx(ri);
 209	struct sk_buff *skb = queue->rx_skbs[i];
 210	queue->rx_skbs[i] = NULL;
 211	return skb;
 212}
 213
 214static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 215					    RING_IDX ri)
 216{
 217	int i = xennet_rxidx(ri);
 218	grant_ref_t ref = queue->grant_rx_ref[i];
 219	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
 220	return ref;
 221}
 222
 223#ifdef CONFIG_SYSFS
 224static const struct attribute_group xennet_dev_group;
 225#endif
 226
 227static bool xennet_can_sg(struct net_device *dev)
 228{
 229	return dev->features & NETIF_F_SG;
 230}
 231
 232
 233static void rx_refill_timeout(struct timer_list *t)
 234{
 235	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
 236	napi_schedule(&queue->napi);
 237}
 238
 239static int netfront_tx_slot_available(struct netfront_queue *queue)
 240{
 241	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
 242		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
 243}
 244
 245static void xennet_maybe_wake_tx(struct netfront_queue *queue)
 246{
 247	struct net_device *dev = queue->info->netdev;
 248	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
 249
 250	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
 251	    netfront_tx_slot_available(queue) &&
 252	    likely(netif_running(dev)))
 253		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 254}
 255
 256
 257static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 258{
 259	struct sk_buff *skb;
 260	struct page *page;
 261
 262	skb = __netdev_alloc_skb(queue->info->netdev,
 263				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
 264				 GFP_ATOMIC | __GFP_NOWARN);
 265	if (unlikely(!skb))
 266		return NULL;
 267
 268	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 269	if (!page) {
 270		kfree_skb(skb);
 271		return NULL;
 272	}
 273	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 274
 275	/* Align ip header to a 16 bytes boundary */
 276	skb_reserve(skb, NET_IP_ALIGN);
 277	skb->dev = queue->info->netdev;
 278
 279	return skb;
 280}
 281
 282
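/*
 * Refill the rx ring: for each free slot allocate an skb plus a backing
 * page, grant the page to the backend and post an rx request for it.
 * If allocation fails, or fewer than NET_RX_SLOTS_MIN slots are
 * outstanding, the rx_refill_timer is armed to retry later; otherwise
 * the new requests are pushed and the backend is notified.
 */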
 283static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 284{
 285	RING_IDX req_prod = queue->rx.req_prod_pvt;
 286	int notify;
 287	int err = 0;
 288
 289	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 290		return;
 291
 292	for (req_prod = queue->rx.req_prod_pvt;
 293	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
 294	     req_prod++) {
 295		struct sk_buff *skb;
 296		unsigned short id;
 297		grant_ref_t ref;
 298		struct page *page;
 299		struct xen_netif_rx_request *req;
 300
 301		skb = xennet_alloc_one_rx_buffer(queue);
 302		if (!skb) {
 303			err = -ENOMEM;
 304			break;
 305		}
 306
 307		id = xennet_rxidx(req_prod);
 308
 309		BUG_ON(queue->rx_skbs[id]);
 310		queue->rx_skbs[id] = skb;
 311
 312		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
 313		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 314		queue->grant_rx_ref[id] = ref;
 315
 316		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 317
 318		req = RING_GET_REQUEST(&queue->rx, req_prod);
 319		gnttab_page_grant_foreign_access_ref_one(ref,
 320							 queue->info->xbdev->otherend_id,
 321							 page,
 322							 0);
 323		req->id = id;
 324		req->gref = ref;
 325	}
 326
 327	queue->rx.req_prod_pvt = req_prod;
 328
 329	/* Try again later if there are not enough requests or skb allocation
 330	 * failed.
 331	 * Enough requests is quantified as the sum of newly created slots and
 332	 * the unconsumed slots at the backend.
 333	 */
 334	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
 335	    unlikely(err)) {
 336		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 337		return;
 338	}
 339
  340	wmb();		/* barrier so backend sees requests */
 341
 342	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 343	if (notify)
 344		notify_remote_via_irq(queue->rx_irq);
 345}
 346
 347static int xennet_open(struct net_device *dev)
 348{
 349	struct netfront_info *np = netdev_priv(dev);
 350	unsigned int num_queues = dev->real_num_tx_queues;
 351	unsigned int i = 0;
 352	struct netfront_queue *queue = NULL;
 353
 354	if (!np->queues)
 355		return -ENODEV;
 356
 357	for (i = 0; i < num_queues; ++i) {
 358		queue = &np->queues[i];
 359		napi_enable(&queue->napi);
 360
 361		spin_lock_bh(&queue->rx_lock);
 362		if (netif_carrier_ok(dev)) {
 363			xennet_alloc_rx_buffers(queue);
 364			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
 365			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
 366				napi_schedule(&queue->napi);
 367		}
 368		spin_unlock_bh(&queue->rx_lock);
 369	}
 370
 371	netif_tx_start_all_queues(dev);
 372
 373	return 0;
 374}
 375
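/*
 * Reclaim tx ring slots for which the backend has produced a response:
 * end foreign access on each grant, return the grant reference and the
 * freelist id, and free the skb.  Finally wake the tx queue if enough
 * slots have become available again.
 */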
 376static void xennet_tx_buf_gc(struct netfront_queue *queue)
 377{
 378	RING_IDX cons, prod;
 379	unsigned short id;
 380	struct sk_buff *skb;
 381	bool more_to_do;
 382
 383	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 384
 385	do {
 386		prod = queue->tx.sring->rsp_prod;
  387		rmb(); /* Ensure we see responses up to 'prod'. */
 388
 389		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 390			struct xen_netif_tx_response *txrsp;
 391
 392			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
 393			if (txrsp->status == XEN_NETIF_RSP_NULL)
 394				continue;
 395
 396			id  = txrsp->id;
 397			skb = queue->tx_skbs[id].skb;
 398			if (unlikely(gnttab_query_foreign_access(
 399				queue->grant_tx_ref[id]) != 0)) {
 400				pr_alert("%s: warning -- grant still in use by backend domain\n",
 401					 __func__);
 402				BUG();
 403			}
 404			gnttab_end_foreign_access_ref(
 405				queue->grant_tx_ref[id], GNTMAP_readonly);
 406			gnttab_release_grant_reference(
 407				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 408			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
 409			queue->grant_tx_page[id] = NULL;
 410			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
 411			dev_kfree_skb_irq(skb);
 412		}
 413
 414		queue->tx.rsp_cons = prod;
 415
 416		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
 417	} while (more_to_do);
 418
 419	xennet_maybe_wake_tx(queue);
 420}
 421
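/*
 * Helpers for turning an skb into a chain of tx requests.
 * xennet_tx_setup_grant() is invoked once per grant-sized chunk: it
 * takes a free id, claims a grant reference for the page and fills in
 * the next tx request slot.  xennet_make_first_txreq() covers the start
 * of the linear area; xennet_make_txreqs() covers the remainder of the
 * linear area and each frag, setting XEN_NETTXF_more_data on the
 * previous request for every additional chunk.
 */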
 422struct xennet_gnttab_make_txreq {
 423	struct netfront_queue *queue;
 424	struct sk_buff *skb;
 425	struct page *page;
 426	struct xen_netif_tx_request *tx; /* Last request */
 427	unsigned int size;
 428};
 429
 430static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 431				  unsigned int len, void *data)
 432{
 433	struct xennet_gnttab_make_txreq *info = data;
 434	unsigned int id;
 435	struct xen_netif_tx_request *tx;
 436	grant_ref_t ref;
 437	/* convenient aliases */
 438	struct page *page = info->page;
 439	struct netfront_queue *queue = info->queue;
 440	struct sk_buff *skb = info->skb;
 441
 442	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
 443	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 444	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 445	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 446
 447	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 448					gfn, GNTMAP_readonly);
 449
 450	queue->tx_skbs[id].skb = skb;
 451	queue->grant_tx_page[id] = page;
 452	queue->grant_tx_ref[id] = ref;
 453
 454	tx->id = id;
 455	tx->gref = ref;
 456	tx->offset = offset;
 457	tx->size = len;
 458	tx->flags = 0;
 459
 460	info->tx = tx;
 461	info->size += tx->size;
 462}
 463
 464static struct xen_netif_tx_request *xennet_make_first_txreq(
 465	struct netfront_queue *queue, struct sk_buff *skb,
 466	struct page *page, unsigned int offset, unsigned int len)
 467{
 468	struct xennet_gnttab_make_txreq info = {
 469		.queue = queue,
 470		.skb = skb,
 471		.page = page,
 472		.size = 0,
 473	};
 474
 475	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
 476
 477	return info.tx;
 478}
 479
 480static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 481				  unsigned int len, void *data)
 482{
 483	struct xennet_gnttab_make_txreq *info = data;
 484
 485	info->tx->flags |= XEN_NETTXF_more_data;
 486	skb_get(info->skb);
 487	xennet_tx_setup_grant(gfn, offset, len, data);
 488}
 489
 490static struct xen_netif_tx_request *xennet_make_txreqs(
 491	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
 492	struct sk_buff *skb, struct page *page,
 493	unsigned int offset, unsigned int len)
 494{
 495	struct xennet_gnttab_make_txreq info = {
 496		.queue = queue,
 497		.skb = skb,
 498		.tx = tx,
 499	};
 500
 501	/* Skip unused frames from start of page */
 502	page += offset >> PAGE_SHIFT;
 503	offset &= ~PAGE_MASK;
 504
 505	while (len) {
 506		info.page = page;
 507		info.size = 0;
 508
 509		gnttab_foreach_grant_in_range(page, offset, len,
 510					      xennet_make_one_txreq,
 511					      &info);
 512
 513		page++;
 514		offset = 0;
 515		len -= info.size;
 516	}
 517
 518	return info.tx;
 519}
 520
 521/*
 522 * Count how many ring slots are required to send this skb. Each frag
 523 * might be a compound page.
 524 */
 525static int xennet_count_skb_slots(struct sk_buff *skb)
 526{
 527	int i, frags = skb_shinfo(skb)->nr_frags;
 528	int slots;
 529
 530	slots = gnttab_count_grant(offset_in_page(skb->data),
 531				   skb_headlen(skb));
 532
 533	for (i = 0; i < frags; i++) {
 534		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 535		unsigned long size = skb_frag_size(frag);
 536		unsigned long offset = frag->page_offset;
 537
 538		/* Skip unused frames from start of page */
 539		offset &= ~PAGE_MASK;
 540
 541		slots += gnttab_count_grant(offset, size);
 542	}
 543
 544	return slots;
 545}
 546
 547static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 548			       void *accel_priv, select_queue_fallback_t fallback)
 549{
 550	unsigned int num_queues = dev->real_num_tx_queues;
 551	u32 hash;
 552	u16 queue_idx;
 553
 554	/* First, check if there is only one queue */
 555	if (num_queues == 1) {
 556		queue_idx = 0;
 557	} else {
 558		hash = skb_get_hash(skb);
 559		queue_idx = hash % num_queues;
 560	}
 561
 562	return queue_idx;
 563}
 564
 565#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 566
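/*
 * Transmit path: count the slots the skb needs (linearizing it if that
 * exceeds MAX_XEN_SKB_FRAGS + 1), emit the first request for the linear
 * area plus an optional GSO extra segment, then further requests for
 * the rest of the linear area and each frag.  The first request carries
 * the total packet length.  After the requests are pushed and the
 * backend notified, completed slots are garbage-collected and the queue
 * is stopped if no room is left.
 */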
 567static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 568{
 569	struct netfront_info *np = netdev_priv(dev);
 570	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 571	struct xen_netif_tx_request *tx, *first_tx;
 572	unsigned int i;
 573	int notify;
 574	int slots;
 575	struct page *page;
 576	unsigned int offset;
 577	unsigned int len;
 578	unsigned long flags;
 579	struct netfront_queue *queue = NULL;
 580	unsigned int num_queues = dev->real_num_tx_queues;
 581	u16 queue_index;
 582	struct sk_buff *nskb;
 583
 584	/* Drop the packet if no queues are set up */
 585	if (num_queues < 1)
 586		goto drop;
 587	/* Determine which queue to transmit this SKB on */
 588	queue_index = skb_get_queue_mapping(skb);
 589	queue = &np->queues[queue_index];
 590
 591	/* If skb->len is too big for wire format, drop skb and alert
 592	 * user about misconfiguration.
 593	 */
 594	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 595		net_alert_ratelimited(
 596			"xennet: skb->len = %u, too big for wire format\n",
 597			skb->len);
 598		goto drop;
 599	}
 600
 601	slots = xennet_count_skb_slots(skb);
 602	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
 603		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 604				    slots, skb->len);
 605		if (skb_linearize(skb))
 606			goto drop;
 607	}
 608
 609	page = virt_to_page(skb->data);
 610	offset = offset_in_page(skb->data);
 611
 612	/* The first req should be at least ETH_HLEN size or the packet will be
 613	 * dropped by netback.
 614	 */
 615	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
 616		nskb = skb_copy(skb, GFP_ATOMIC);
 617		if (!nskb)
 618			goto drop;
 619		dev_consume_skb_any(skb);
 620		skb = nskb;
 621		page = virt_to_page(skb->data);
 622		offset = offset_in_page(skb->data);
 623	}
 624
 625	len = skb_headlen(skb);
 626
 627	spin_lock_irqsave(&queue->tx_lock, flags);
 628
 629	if (unlikely(!netif_carrier_ok(dev) ||
 630		     (slots > 1 && !xennet_can_sg(dev)) ||
 631		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 632		spin_unlock_irqrestore(&queue->tx_lock, flags);
 633		goto drop;
 634	}
 635
 636	/* First request for the linear area. */
 637	first_tx = tx = xennet_make_first_txreq(queue, skb,
 638						page, offset, len);
 639	offset += tx->size;
 640	if (offset == PAGE_SIZE) {
 641		page++;
 642		offset = 0;
 643	}
 644	len -= tx->size;
 645
 646	if (skb->ip_summed == CHECKSUM_PARTIAL)
 647		/* local packet? */
 648		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
 649	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 650		/* remote but checksummed. */
 651		tx->flags |= XEN_NETTXF_data_validated;
 652
 653	/* Optional extra info after the first request. */
 654	if (skb_shinfo(skb)->gso_size) {
 655		struct xen_netif_extra_info *gso;
 656
 657		gso = (struct xen_netif_extra_info *)
 658			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 659
 660		tx->flags |= XEN_NETTXF_extra_info;
 661
 662		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 663		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 664			XEN_NETIF_GSO_TYPE_TCPV6 :
 665			XEN_NETIF_GSO_TYPE_TCPV4;
 666		gso->u.gso.pad = 0;
 667		gso->u.gso.features = 0;
 668
 669		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 670		gso->flags = 0;
 671	}
 672
 673	/* Requests for the rest of the linear area. */
 674	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
 675
 676	/* Requests for all the frags. */
 677	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 678		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 679		tx = xennet_make_txreqs(queue, tx, skb,
 680					skb_frag_page(frag), frag->page_offset,
 681					skb_frag_size(frag));
 682	}
 683
 684	/* First request has the packet length. */
 685	first_tx->size = skb->len;
 686
 687	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 688	if (notify)
 689		notify_remote_via_irq(queue->tx_irq);
 690
 691	u64_stats_update_begin(&tx_stats->syncp);
 692	tx_stats->bytes += skb->len;
 693	tx_stats->packets++;
 694	u64_stats_update_end(&tx_stats->syncp);
 695
 696	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 697	xennet_tx_buf_gc(queue);
 698
 699	if (!netfront_tx_slot_available(queue))
 700		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
 701
 702	spin_unlock_irqrestore(&queue->tx_lock, flags);
 703
 704	return NETDEV_TX_OK;
 705
 706 drop:
 707	dev->stats.tx_dropped++;
 708	dev_kfree_skb_any(skb);
 709	return NETDEV_TX_OK;
 710}
 711
 712static int xennet_close(struct net_device *dev)
 713{
 714	struct netfront_info *np = netdev_priv(dev);
 715	unsigned int num_queues = dev->real_num_tx_queues;
 716	unsigned int i;
 717	struct netfront_queue *queue;
 718	netif_tx_stop_all_queues(np->netdev);
 719	for (i = 0; i < num_queues; ++i) {
 720		queue = &np->queues[i];
 721		napi_disable(&queue->napi);
 722	}
 723	return 0;
 724}
 725
 726static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 727				grant_ref_t ref)
 728{
 729	int new = xennet_rxidx(queue->rx.req_prod_pvt);
 730
 731	BUG_ON(queue->rx_skbs[new]);
 732	queue->rx_skbs[new] = skb;
 733	queue->grant_rx_ref[new] = ref;
 734	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
 735	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
 736	queue->rx.req_prod_pvt++;
 737}
 738
 739static int xennet_get_extras(struct netfront_queue *queue,
 740			     struct xen_netif_extra_info *extras,
 741			     RING_IDX rp)
 742
 743{
 744	struct xen_netif_extra_info *extra;
 745	struct device *dev = &queue->info->netdev->dev;
 746	RING_IDX cons = queue->rx.rsp_cons;
 747	int err = 0;
 748
 749	do {
 750		struct sk_buff *skb;
 751		grant_ref_t ref;
 752
 753		if (unlikely(cons + 1 == rp)) {
 754			if (net_ratelimit())
 755				dev_warn(dev, "Missing extra info\n");
 756			err = -EBADR;
 757			break;
 758		}
 759
 760		extra = (struct xen_netif_extra_info *)
 761			RING_GET_RESPONSE(&queue->rx, ++cons);
 762
 763		if (unlikely(!extra->type ||
 764			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 765			if (net_ratelimit())
 766				dev_warn(dev, "Invalid extra type: %d\n",
 767					extra->type);
 768			err = -EINVAL;
 769		} else {
 770			memcpy(&extras[extra->type - 1], extra,
 771			       sizeof(*extra));
 772		}
 773
 774		skb = xennet_get_rx_skb(queue, cons);
 775		ref = xennet_get_rx_ref(queue, cons);
 776		xennet_move_rx_slot(queue, skb, ref);
 777	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 778
 779	queue->rx.rsp_cons = cons;
 780	return err;
 781}
 782
 783static int xennet_get_responses(struct netfront_queue *queue,
 784				struct netfront_rx_info *rinfo, RING_IDX rp,
 785				struct sk_buff_head *list)
 786{
 787	struct xen_netif_rx_response *rx = &rinfo->rx;
 788	struct xen_netif_extra_info *extras = rinfo->extras;
 789	struct device *dev = &queue->info->netdev->dev;
 790	RING_IDX cons = queue->rx.rsp_cons;
 791	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
 792	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
 793	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
 794	int slots = 1;
 795	int err = 0;
 796	unsigned long ret;
 797
 798	if (rx->flags & XEN_NETRXF_extra_info) {
 799		err = xennet_get_extras(queue, extras, rp);
 800		cons = queue->rx.rsp_cons;
 801	}
 802
 803	for (;;) {
 804		if (unlikely(rx->status < 0 ||
 805			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
 806			if (net_ratelimit())
 807				dev_warn(dev, "rx->offset: %u, size: %d\n",
 808					 rx->offset, rx->status);
 809			xennet_move_rx_slot(queue, skb, ref);
 810			err = -EINVAL;
 811			goto next;
 812		}
 813
 814		/*
 815		 * This definitely indicates a bug, either in this driver or in
 816		 * the backend driver. In future this should flag the bad
 817		 * situation to the system controller to reboot the backend.
 818		 */
 819		if (ref == GRANT_INVALID_REF) {
 820			if (net_ratelimit())
 821				dev_warn(dev, "Bad rx response id %d.\n",
 822					 rx->id);
 823			err = -EINVAL;
 824			goto next;
 825		}
 826
 827		ret = gnttab_end_foreign_access_ref(ref, 0);
 828		BUG_ON(!ret);
 829
 830		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 831
 832		__skb_queue_tail(list, skb);
 833
 834next:
 835		if (!(rx->flags & XEN_NETRXF_more_data))
 836			break;
 837
 838		if (cons + slots == rp) {
 839			if (net_ratelimit())
 840				dev_warn(dev, "Need more slots\n");
 841			err = -ENOENT;
 842			break;
 843		}
 844
 845		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
 846		skb = xennet_get_rx_skb(queue, cons + slots);
 847		ref = xennet_get_rx_ref(queue, cons + slots);
 848		slots++;
 849	}
 850
 851	if (unlikely(slots > max)) {
 852		if (net_ratelimit())
 853			dev_warn(dev, "Too many slots\n");
 854		err = -E2BIG;
 855	}
 856
 857	if (unlikely(err))
 858		queue->rx.rsp_cons = cons + slots;
 859
 860	return err;
 861}
 862
 863static int xennet_set_skb_gso(struct sk_buff *skb,
 864			      struct xen_netif_extra_info *gso)
 865{
 866	if (!gso->u.gso.size) {
 867		if (net_ratelimit())
 868			pr_warn("GSO size must not be zero\n");
 869		return -EINVAL;
 870	}
 871
 872	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
 873	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
 874		if (net_ratelimit())
 875			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
 876		return -EINVAL;
 877	}
 878
 879	skb_shinfo(skb)->gso_size = gso->u.gso.size;
 880	skb_shinfo(skb)->gso_type =
 881		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
 882		SKB_GSO_TCPV4 :
 883		SKB_GSO_TCPV6;
 884
 885	/* Header must be checked, and gso_segs computed. */
 886	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
 887	skb_shinfo(skb)->gso_segs = 0;
 888
 889	return 0;
 890}
 891
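/*
 * Attach the pages collected by xennet_get_responses() as frags of the
 * head skb, one rx response per page.  If the frag array is already
 * full, the linear area is first pulled up to 'pull_to' to make room.
 * Returns the ring index of the last consumed response.
 */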
 892static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 893				  struct sk_buff *skb,
 894				  struct sk_buff_head *list)
 895{
 896	struct skb_shared_info *shinfo = skb_shinfo(skb);
 897	RING_IDX cons = queue->rx.rsp_cons;
 898	struct sk_buff *nskb;
 899
 900	while ((nskb = __skb_dequeue(list))) {
 901		struct xen_netif_rx_response *rx =
 902			RING_GET_RESPONSE(&queue->rx, ++cons);
 903		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 904
 905		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
 906			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 907
 908			BUG_ON(pull_to <= skb_headlen(skb));
 909			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 910		}
 911		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
 912
 913		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
 914				rx->offset, rx->status, PAGE_SIZE);
 915
 916		skb_shinfo(nskb)->nr_frags = 0;
 917		kfree_skb(nskb);
 918	}
 919
 920	return cons;
 921}
 922
 923static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 924{
 925	bool recalculate_partial_csum = false;
 926
 927	/*
 928	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
 929	 * peers can fail to set NETRXF_csum_blank when sending a GSO
 930	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
 931	 * recalculate the partial checksum.
 932	 */
 933	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
 934		struct netfront_info *np = netdev_priv(dev);
 935		atomic_inc(&np->rx_gso_checksum_fixup);
 936		skb->ip_summed = CHECKSUM_PARTIAL;
 937		recalculate_partial_csum = true;
 938	}
 939
 940	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
 941	if (skb->ip_summed != CHECKSUM_PARTIAL)
 942		return 0;
 943
 944	return skb_checksum_setup(skb, recalculate_partial_csum);
 945}
 946
 947static int handle_incoming_queue(struct netfront_queue *queue,
 948				 struct sk_buff_head *rxq)
 949{
 950	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
 951	int packets_dropped = 0;
 952	struct sk_buff *skb;
 953
 954	while ((skb = __skb_dequeue(rxq)) != NULL) {
 955		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 956
 957		if (pull_to > skb_headlen(skb))
 958			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 959
 960		/* Ethernet work: Delayed to here as it peeks the header. */
 961		skb->protocol = eth_type_trans(skb, queue->info->netdev);
 962		skb_reset_network_header(skb);
 963
 964		if (checksum_setup(queue->info->netdev, skb)) {
 965			kfree_skb(skb);
 966			packets_dropped++;
 967			queue->info->netdev->stats.rx_errors++;
 968			continue;
 969		}
 970
 971		u64_stats_update_begin(&rx_stats->syncp);
 972		rx_stats->packets++;
 973		rx_stats->bytes += skb->len;
 974		u64_stats_update_end(&rx_stats->syncp);
 975
 976		/* Pass it up. */
 977		napi_gro_receive(&queue->napi, skb);
 978	}
 979
 980	return packets_dropped;
 981}
 982
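/*
 * NAPI poll handler: consume up to 'budget' packets worth of rx
 * responses.  For each packet the head skb and any extra frag pages are
 * assembled, the checksum state is derived from the response flags, and
 * the result is passed to handle_incoming_queue() for delivery via
 * napi_gro_receive().  The rx ring is then refilled and polling is
 * rescheduled if further responses arrived in the meantime.
 */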
 983static int xennet_poll(struct napi_struct *napi, int budget)
 984{
 985	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
 986	struct net_device *dev = queue->info->netdev;
 987	struct sk_buff *skb;
 988	struct netfront_rx_info rinfo;
 989	struct xen_netif_rx_response *rx = &rinfo.rx;
 990	struct xen_netif_extra_info *extras = rinfo.extras;
 991	RING_IDX i, rp;
 992	int work_done;
 993	struct sk_buff_head rxq;
 994	struct sk_buff_head errq;
 995	struct sk_buff_head tmpq;
 996	int err;
 997
 998	spin_lock(&queue->rx_lock);
 999
1000	skb_queue_head_init(&rxq);
1001	skb_queue_head_init(&errq);
1002	skb_queue_head_init(&tmpq);
1003
1004	rp = queue->rx.sring->rsp_prod;
1005	rmb(); /* Ensure we see queued responses up to 'rp'. */
1006
1007	i = queue->rx.rsp_cons;
1008	work_done = 0;
1009	while ((i != rp) && (work_done < budget)) {
1010		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
1011		memset(extras, 0, sizeof(rinfo.extras));
1012
1013		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
1014
1015		if (unlikely(err)) {
1016err:
1017			while ((skb = __skb_dequeue(&tmpq)))
1018				__skb_queue_tail(&errq, skb);
1019			dev->stats.rx_errors++;
1020			i = queue->rx.rsp_cons;
1021			continue;
1022		}
1023
1024		skb = __skb_dequeue(&tmpq);
1025
1026		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1027			struct xen_netif_extra_info *gso;
1028			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1029
1030			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1031				__skb_queue_head(&tmpq, skb);
1032				queue->rx.rsp_cons += skb_queue_len(&tmpq);
1033				goto err;
1034			}
1035		}
1036
1037		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1038		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1039			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1040
1041		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1042		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1043		skb->data_len = rx->status;
1044		skb->len += rx->status;
1045
1046		i = xennet_fill_frags(queue, skb, &tmpq);
1047
1048		if (rx->flags & XEN_NETRXF_csum_blank)
1049			skb->ip_summed = CHECKSUM_PARTIAL;
1050		else if (rx->flags & XEN_NETRXF_data_validated)
1051			skb->ip_summed = CHECKSUM_UNNECESSARY;
1052
1053		__skb_queue_tail(&rxq, skb);
1054
1055		queue->rx.rsp_cons = ++i;
1056		work_done++;
1057	}
1058
1059	__skb_queue_purge(&errq);
1060
1061	work_done -= handle_incoming_queue(queue, &rxq);
1062
1063	xennet_alloc_rx_buffers(queue);
1064
1065	if (work_done < budget) {
1066		int more_to_do = 0;
1067
1068		napi_complete_done(napi, work_done);
1069
1070		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1071		if (more_to_do)
1072			napi_schedule(napi);
1073	}
1074
1075	spin_unlock(&queue->rx_lock);
1076
1077	return work_done;
1078}
1079
1080static int xennet_change_mtu(struct net_device *dev, int mtu)
1081{
1082	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1083
1084	if (mtu > max)
1085		return -EINVAL;
1086	dev->mtu = mtu;
1087	return 0;
1088}
1089
1090static void xennet_get_stats64(struct net_device *dev,
1091			       struct rtnl_link_stats64 *tot)
1092{
1093	struct netfront_info *np = netdev_priv(dev);
1094	int cpu;
1095
1096	for_each_possible_cpu(cpu) {
1097		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1098		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1099		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1100		unsigned int start;
1101
1102		do {
1103			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1104			tx_packets = tx_stats->packets;
1105			tx_bytes = tx_stats->bytes;
1106		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1107
1108		do {
1109			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1110			rx_packets = rx_stats->packets;
1111			rx_bytes = rx_stats->bytes;
1112		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1113
1114		tot->rx_packets += rx_packets;
1115		tot->tx_packets += tx_packets;
1116		tot->rx_bytes   += rx_bytes;
1117		tot->tx_bytes   += tx_bytes;
1118	}
1119
1120	tot->rx_errors  = dev->stats.rx_errors;
1121	tot->tx_dropped = dev->stats.tx_dropped;
1122}
1123
1124static void xennet_release_tx_bufs(struct netfront_queue *queue)
1125{
1126	struct sk_buff *skb;
1127	int i;
1128
1129	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1130		/* Skip over entries which are actually freelist references */
1131		if (skb_entry_is_link(&queue->tx_skbs[i]))
1132			continue;
1133
1134		skb = queue->tx_skbs[i].skb;
1135		get_page(queue->grant_tx_page[i]);
1136		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1137					  GNTMAP_readonly,
1138					  (unsigned long)page_address(queue->grant_tx_page[i]));
1139		queue->grant_tx_page[i] = NULL;
1140		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1141		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1142		dev_kfree_skb_irq(skb);
1143	}
1144}
1145
1146static void xennet_release_rx_bufs(struct netfront_queue *queue)
1147{
1148	int id, ref;
1149
1150	spin_lock_bh(&queue->rx_lock);
1151
1152	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1153		struct sk_buff *skb;
1154		struct page *page;
1155
1156		skb = queue->rx_skbs[id];
1157		if (!skb)
1158			continue;
1159
1160		ref = queue->grant_rx_ref[id];
1161		if (ref == GRANT_INVALID_REF)
1162			continue;
1163
1164		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1165
1166		/* gnttab_end_foreign_access() needs a page ref until
1167		 * foreign access is ended (which may be deferred).
1168		 */
1169		get_page(page);
1170		gnttab_end_foreign_access(ref, 0,
1171					  (unsigned long)page_address(page));
1172		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1173
1174		kfree_skb(skb);
1175	}
1176
1177	spin_unlock_bh(&queue->rx_lock);
1178}
1179
1180static netdev_features_t xennet_fix_features(struct net_device *dev,
1181	netdev_features_t features)
1182{
1183	struct netfront_info *np = netdev_priv(dev);
1184
1185	if (features & NETIF_F_SG &&
1186	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1187		features &= ~NETIF_F_SG;
1188
1189	if (features & NETIF_F_IPV6_CSUM &&
1190	    !xenbus_read_unsigned(np->xbdev->otherend,
1191				  "feature-ipv6-csum-offload", 0))
1192		features &= ~NETIF_F_IPV6_CSUM;
1193
1194	if (features & NETIF_F_TSO &&
1195	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1196		features &= ~NETIF_F_TSO;
1197
1198	if (features & NETIF_F_TSO6 &&
1199	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1200		features &= ~NETIF_F_TSO6;
1201
1202	return features;
1203}
1204
1205static int xennet_set_features(struct net_device *dev,
1206	netdev_features_t features)
1207{
1208	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
 1209		netdev_info(dev, "Reducing MTU because no SG offload\n");
1210		dev->mtu = ETH_DATA_LEN;
1211	}
1212
1213	return 0;
1214}
1215
1216static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1217{
1218	struct netfront_queue *queue = dev_id;
1219	unsigned long flags;
1220
1221	spin_lock_irqsave(&queue->tx_lock, flags);
1222	xennet_tx_buf_gc(queue);
1223	spin_unlock_irqrestore(&queue->tx_lock, flags);
1224
1225	return IRQ_HANDLED;
1226}
1227
1228static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1229{
1230	struct netfront_queue *queue = dev_id;
1231	struct net_device *dev = queue->info->netdev;
1232
1233	if (likely(netif_carrier_ok(dev) &&
1234		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1235		napi_schedule(&queue->napi);
1236
1237	return IRQ_HANDLED;
1238}
1239
1240static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1241{
1242	xennet_tx_interrupt(irq, dev_id);
1243	xennet_rx_interrupt(irq, dev_id);
1244	return IRQ_HANDLED;
1245}
1246
1247#ifdef CONFIG_NET_POLL_CONTROLLER
1248static void xennet_poll_controller(struct net_device *dev)
1249{
1250	/* Poll each queue */
1251	struct netfront_info *info = netdev_priv(dev);
1252	unsigned int num_queues = dev->real_num_tx_queues;
1253	unsigned int i;
1254	for (i = 0; i < num_queues; ++i)
1255		xennet_interrupt(0, &info->queues[i]);
1256}
1257#endif
1258
1259static const struct net_device_ops xennet_netdev_ops = {
1260	.ndo_open            = xennet_open,
1261	.ndo_stop            = xennet_close,
1262	.ndo_start_xmit      = xennet_start_xmit,
1263	.ndo_change_mtu	     = xennet_change_mtu,
1264	.ndo_get_stats64     = xennet_get_stats64,
1265	.ndo_set_mac_address = eth_mac_addr,
1266	.ndo_validate_addr   = eth_validate_addr,
1267	.ndo_fix_features    = xennet_fix_features,
1268	.ndo_set_features    = xennet_set_features,
1269	.ndo_select_queue    = xennet_select_queue,
1270#ifdef CONFIG_NET_POLL_CONTROLLER
1271	.ndo_poll_controller = xennet_poll_controller,
1272#endif
1273};
1274
1275static void xennet_free_netdev(struct net_device *netdev)
1276{
1277	struct netfront_info *np = netdev_priv(netdev);
1278
1279	free_percpu(np->rx_stats);
1280	free_percpu(np->tx_stats);
1281	free_netdev(netdev);
1282}
1283
1284static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1285{
1286	int err;
1287	struct net_device *netdev;
1288	struct netfront_info *np;
1289
1290	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1291	if (!netdev)
1292		return ERR_PTR(-ENOMEM);
1293
1294	np                   = netdev_priv(netdev);
1295	np->xbdev            = dev;
1296
1297	np->queues = NULL;
1298
1299	err = -ENOMEM;
1300	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1301	if (np->rx_stats == NULL)
1302		goto exit;
1303	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1304	if (np->tx_stats == NULL)
1305		goto exit;
1306
1307	netdev->netdev_ops	= &xennet_netdev_ops;
1308
1309	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1310				  NETIF_F_GSO_ROBUST;
1311	netdev->hw_features	= NETIF_F_SG |
1312				  NETIF_F_IPV6_CSUM |
1313				  NETIF_F_TSO | NETIF_F_TSO6;
1314
1315	/*
1316         * Assume that all hw features are available for now. This set
1317         * will be adjusted by the call to netdev_update_features() in
1318         * xennet_connect() which is the earliest point where we can
1319         * negotiate with the backend regarding supported features.
1320         */
1321	netdev->features |= netdev->hw_features;
1322
1323	netdev->ethtool_ops = &xennet_ethtool_ops;
1324	netdev->min_mtu = ETH_MIN_MTU;
1325	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1326	SET_NETDEV_DEV(netdev, &dev->dev);
1327
1328	np->netdev = netdev;
1329
1330	netif_carrier_off(netdev);
1331
1332	xenbus_switch_state(dev, XenbusStateInitialising);
1333	return netdev;
1334
1335 exit:
1336	xennet_free_netdev(netdev);
1337	return ERR_PTR(err);
1338}
1339
1340/**
1341 * Entry point to this code when a new device is created.  Allocate the basic
1342 * structures and the ring buffers for communication with the backend, and
1343 * inform the backend of the appropriate details for those.
1344 */
1345static int netfront_probe(struct xenbus_device *dev,
1346			  const struct xenbus_device_id *id)
1347{
1348	int err;
1349	struct net_device *netdev;
1350	struct netfront_info *info;
1351
1352	netdev = xennet_create_dev(dev);
1353	if (IS_ERR(netdev)) {
1354		err = PTR_ERR(netdev);
1355		xenbus_dev_fatal(dev, err, "creating netdev");
1356		return err;
1357	}
1358
1359	info = netdev_priv(netdev);
1360	dev_set_drvdata(&dev->dev, info);
1361#ifdef CONFIG_SYSFS
1362	info->netdev->sysfs_groups[0] = &xennet_dev_group;
1363#endif
1364
1365	return 0;
1366}
1367
1368static void xennet_end_access(int ref, void *page)
1369{
1370	/* This frees the page as a side-effect */
1371	if (ref != GRANT_INVALID_REF)
1372		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1373}
1374
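/*
 * Tear down the connection to the backend for every queue: unbind the
 * irq handlers and event channels, release outstanding tx/rx buffers
 * and grant references, and end foreign access to (and free) the shared
 * ring pages.
 */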
1375static void xennet_disconnect_backend(struct netfront_info *info)
1376{
1377	unsigned int i = 0;
1378	unsigned int num_queues = info->netdev->real_num_tx_queues;
1379
1380	netif_carrier_off(info->netdev);
1381
1382	for (i = 0; i < num_queues && info->queues; ++i) {
1383		struct netfront_queue *queue = &info->queues[i];
1384
1385		del_timer_sync(&queue->rx_refill_timer);
1386
1387		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1388			unbind_from_irqhandler(queue->tx_irq, queue);
1389		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1390			unbind_from_irqhandler(queue->tx_irq, queue);
1391			unbind_from_irqhandler(queue->rx_irq, queue);
1392		}
1393		queue->tx_evtchn = queue->rx_evtchn = 0;
1394		queue->tx_irq = queue->rx_irq = 0;
1395
1396		if (netif_running(info->netdev))
1397			napi_synchronize(&queue->napi);
1398
1399		xennet_release_tx_bufs(queue);
1400		xennet_release_rx_bufs(queue);
1401		gnttab_free_grant_references(queue->gref_tx_head);
1402		gnttab_free_grant_references(queue->gref_rx_head);
1403
1404		/* End access and free the pages */
1405		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1406		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1407
1408		queue->tx_ring_ref = GRANT_INVALID_REF;
1409		queue->rx_ring_ref = GRANT_INVALID_REF;
1410		queue->tx.sring = NULL;
1411		queue->rx.sring = NULL;
1412	}
1413}
1414
1415/**
1416 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1417 * driver restart.  We tear down our netif structure and recreate it, but
1418 * leave the device-layer structures intact so that this is transparent to the
1419 * rest of the kernel.
1420 */
1421static int netfront_resume(struct xenbus_device *dev)
1422{
1423	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1424
1425	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1426
1427	xennet_disconnect_backend(info);
1428	return 0;
1429}
1430
1431static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1432{
1433	char *s, *e, *macstr;
1434	int i;
1435
1436	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1437	if (IS_ERR(macstr))
1438		return PTR_ERR(macstr);
1439
1440	for (i = 0; i < ETH_ALEN; i++) {
1441		mac[i] = simple_strtoul(s, &e, 16);
1442		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1443			kfree(macstr);
1444			return -ENOENT;
1445		}
1446		s = e+1;
1447	}
1448
1449	kfree(macstr);
1450	return 0;
1451}
1452
1453static int setup_netfront_single(struct netfront_queue *queue)
1454{
1455	int err;
1456
1457	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1458	if (err < 0)
1459		goto fail;
1460
1461	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1462					xennet_interrupt,
1463					0, queue->info->netdev->name, queue);
1464	if (err < 0)
1465		goto bind_fail;
1466	queue->rx_evtchn = queue->tx_evtchn;
1467	queue->rx_irq = queue->tx_irq = err;
1468
1469	return 0;
1470
1471bind_fail:
1472	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1473	queue->tx_evtchn = 0;
1474fail:
1475	return err;
1476}
1477
1478static int setup_netfront_split(struct netfront_queue *queue)
1479{
1480	int err;
1481
1482	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1483	if (err < 0)
1484		goto fail;
1485	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1486	if (err < 0)
1487		goto alloc_rx_evtchn_fail;
1488
1489	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1490		 "%s-tx", queue->name);
1491	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1492					xennet_tx_interrupt,
1493					0, queue->tx_irq_name, queue);
1494	if (err < 0)
1495		goto bind_tx_fail;
1496	queue->tx_irq = err;
1497
1498	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1499		 "%s-rx", queue->name);
1500	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1501					xennet_rx_interrupt,
1502					0, queue->rx_irq_name, queue);
1503	if (err < 0)
1504		goto bind_rx_fail;
1505	queue->rx_irq = err;
1506
1507	return 0;
1508
1509bind_rx_fail:
1510	unbind_from_irqhandler(queue->tx_irq, queue);
1511	queue->tx_irq = 0;
1512bind_tx_fail:
1513	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1514	queue->rx_evtchn = 0;
1515alloc_rx_evtchn_fail:
1516	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1517	queue->tx_evtchn = 0;
1518fail:
1519	return err;
1520}
1521
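/*
 * Allocate the shared tx/rx ring pages, grant them to the backend and
 * set up the event channel(s): split tx/rx channels when the caller
 * passes feature_split_evtchn, falling back to a single shared channel
 * otherwise or when split setup fails.
 */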
1522static int setup_netfront(struct xenbus_device *dev,
1523			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1524{
1525	struct xen_netif_tx_sring *txs;
1526	struct xen_netif_rx_sring *rxs;
1527	grant_ref_t gref;
1528	int err;
1529
1530	queue->tx_ring_ref = GRANT_INVALID_REF;
1531	queue->rx_ring_ref = GRANT_INVALID_REF;
1532	queue->rx.sring = NULL;
1533	queue->tx.sring = NULL;
1534
1535	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1536	if (!txs) {
1537		err = -ENOMEM;
1538		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1539		goto fail;
1540	}
1541	SHARED_RING_INIT(txs);
1542	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1543
1544	err = xenbus_grant_ring(dev, txs, 1, &gref);
1545	if (err < 0)
1546		goto grant_tx_ring_fail;
1547	queue->tx_ring_ref = gref;
1548
1549	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1550	if (!rxs) {
1551		err = -ENOMEM;
1552		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1553		goto alloc_rx_ring_fail;
1554	}
1555	SHARED_RING_INIT(rxs);
1556	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1557
1558	err = xenbus_grant_ring(dev, rxs, 1, &gref);
1559	if (err < 0)
1560		goto grant_rx_ring_fail;
1561	queue->rx_ring_ref = gref;
1562
1563	if (feature_split_evtchn)
1564		err = setup_netfront_split(queue);
1565	/* setup single event channel if
1566	 *  a) feature-split-event-channels == 0
 1567	 *  b) feature-split-event-channels == 1 but setup failed
1568	 */
1569	if (!feature_split_evtchn || (feature_split_evtchn && err))
1570		err = setup_netfront_single(queue);
1571
1572	if (err)
1573		goto alloc_evtchn_fail;
1574
1575	return 0;
1576
1577	/* If we fail to setup netfront, it is safe to just revoke access to
1578	 * granted pages because backend is not accessing it at this point.
1579	 */
1580alloc_evtchn_fail:
1581	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1582grant_rx_ring_fail:
1583	free_page((unsigned long)rxs);
1584alloc_rx_ring_fail:
1585	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1586grant_tx_ring_fail:
1587	free_page((unsigned long)txs);
1588fail:
1589	return err;
1590}
1591
1592/* Queue-specific initialisation
1593 * This used to be done in xennet_create_dev() but must now
1594 * be run per-queue.
1595 */
1596static int xennet_init_queue(struct netfront_queue *queue)
1597{
1598	unsigned short i;
1599	int err = 0;
1600
1601	spin_lock_init(&queue->tx_lock);
1602	spin_lock_init(&queue->rx_lock);
1603
1604	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
1605
1606	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
1607		 queue->info->netdev->name, queue->id);
1608
1609	/* Initialise tx_skbs as a free chain containing every entry. */
1610	queue->tx_skb_freelist = 0;
1611	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1612		skb_entry_set_link(&queue->tx_skbs[i], i+1);
1613		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1614		queue->grant_tx_page[i] = NULL;
1615	}
1616
1617	/* Clear out rx_skbs */
1618	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1619		queue->rx_skbs[i] = NULL;
1620		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1621	}
1622
1623	/* A grant for every tx ring slot */
1624	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1625					  &queue->gref_tx_head) < 0) {
1626		pr_alert("can't alloc tx grant refs\n");
1627		err = -ENOMEM;
1628		goto exit;
1629	}
1630
1631	/* A grant for every rx ring slot */
1632	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1633					  &queue->gref_rx_head) < 0) {
1634		pr_alert("can't alloc rx grant refs\n");
1635		err = -ENOMEM;
1636		goto exit_free_tx;
1637	}
1638
1639	return 0;
1640
1641 exit_free_tx:
1642	gnttab_free_grant_references(queue->gref_tx_head);
1643 exit:
1644	return err;
1645}
1646
1647static int write_queue_xenstore_keys(struct netfront_queue *queue,
1648			   struct xenbus_transaction *xbt, int write_hierarchical)
1649{
1650	/* Write the queue-specific keys into XenStore in the traditional
1651	 * way for a single queue, or in a queue subkeys for multiple
1652	 * queues.
1653	 */
1654	struct xenbus_device *dev = queue->info->xbdev;
1655	int err;
1656	const char *message;
1657	char *path;
1658	size_t pathsize;
1659
1660	/* Choose the correct place to write the keys */
1661	if (write_hierarchical) {
1662		pathsize = strlen(dev->nodename) + 10;
1663		path = kzalloc(pathsize, GFP_KERNEL);
1664		if (!path) {
1665			err = -ENOMEM;
1666			message = "out of memory while writing ring references";
1667			goto error;
1668		}
1669		snprintf(path, pathsize, "%s/queue-%u",
1670				dev->nodename, queue->id);
1671	} else {
1672		path = (char *)dev->nodename;
1673	}
1674
1675	/* Write ring references */
1676	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1677			queue->tx_ring_ref);
1678	if (err) {
1679		message = "writing tx-ring-ref";
1680		goto error;
1681	}
1682
1683	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1684			queue->rx_ring_ref);
1685	if (err) {
1686		message = "writing rx-ring-ref";
1687		goto error;
1688	}
1689
1690	/* Write event channels; taking into account both shared
1691	 * and split event channel scenarios.
1692	 */
1693	if (queue->tx_evtchn == queue->rx_evtchn) {
1694		/* Shared event channel */
1695		err = xenbus_printf(*xbt, path,
1696				"event-channel", "%u", queue->tx_evtchn);
1697		if (err) {
1698			message = "writing event-channel";
1699			goto error;
1700		}
1701	} else {
1702		/* Split event channels */
1703		err = xenbus_printf(*xbt, path,
1704				"event-channel-tx", "%u", queue->tx_evtchn);
1705		if (err) {
1706			message = "writing event-channel-tx";
1707			goto error;
1708		}
1709
1710		err = xenbus_printf(*xbt, path,
1711				"event-channel-rx", "%u", queue->rx_evtchn);
1712		if (err) {
1713			message = "writing event-channel-rx";
1714			goto error;
1715		}
1716	}
1717
1718	if (write_hierarchical)
1719		kfree(path);
1720	return 0;
1721
1722error:
1723	if (write_hierarchical)
1724		kfree(path);
1725	xenbus_dev_fatal(dev, err, "%s", message);
1726	return err;
1727}
1728
1729static void xennet_destroy_queues(struct netfront_info *info)
1730{
1731	unsigned int i;
1732
1733	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1734		struct netfront_queue *queue = &info->queues[i];
1735
1736		if (netif_running(info->netdev))
1737			napi_disable(&queue->napi);
1738		netif_napi_del(&queue->napi);
1739	}
1740
1741	kfree(info->queues);
1742	info->queues = NULL;
1743}
1744
1745static int xennet_create_queues(struct netfront_info *info,
1746				unsigned int *num_queues)
1747{
1748	unsigned int i;
1749	int ret;
1750
1751	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
1752			       GFP_KERNEL);
1753	if (!info->queues)
1754		return -ENOMEM;
1755
1756	for (i = 0; i < *num_queues; i++) {
1757		struct netfront_queue *queue = &info->queues[i];
1758
1759		queue->id = i;
1760		queue->info = info;
1761
1762		ret = xennet_init_queue(queue);
1763		if (ret < 0) {
1764			dev_warn(&info->xbdev->dev,
1765				 "only created %d queues\n", i);
1766			*num_queues = i;
1767			break;
1768		}
1769
1770		netif_napi_add(queue->info->netdev, &queue->napi,
1771			       xennet_poll, 64);
1772		if (netif_running(info->netdev))
1773			napi_enable(&queue->napi);
1774	}
1775
1776	netif_set_real_num_tx_queues(info->netdev, *num_queues);
1777
1778	if (*num_queues == 0) {
1779		dev_err(&info->xbdev->dev, "no queues\n");
1780		return -EINVAL;
1781	}
1782	return 0;
1783}
1784
1785/* Common code used when first setting up, and when resuming. */
1786static int talk_to_netback(struct xenbus_device *dev,
1787			   struct netfront_info *info)
1788{
1789	const char *message;
1790	struct xenbus_transaction xbt;
1791	int err;
1792	unsigned int feature_split_evtchn;
1793	unsigned int i = 0;
1794	unsigned int max_queues = 0;
1795	struct netfront_queue *queue = NULL;
1796	unsigned int num_queues = 1;
1797
1798	info->netdev->irq = 0;
1799
1800	/* Check if backend supports multiple queues */
1801	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1802					  "multi-queue-max-queues", 1);
1803	num_queues = min(max_queues, xennet_max_queues);
1804
1805	/* Check feature-split-event-channels */
1806	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
1807					"feature-split-event-channels", 0);
1808
1809	/* Read mac addr. */
1810	err = xen_net_read_mac(dev, info->netdev->dev_addr);
1811	if (err) {
1812		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1813		goto out;
1814	}
1815
1816	rtnl_lock();
1817	if (info->queues)
1818		xennet_destroy_queues(info);
1819
1820	err = xennet_create_queues(info, &num_queues);
1821	if (err < 0) {
1822		xenbus_dev_fatal(dev, err, "creating queues");
1823		kfree(info->queues);
1824		info->queues = NULL;
1825		goto out;
1826	}
1827	rtnl_unlock();
1828
1829	/* Create shared ring, alloc event channel -- for each queue */
1830	for (i = 0; i < num_queues; ++i) {
1831		queue = &info->queues[i];
1832		err = setup_netfront(dev, queue, feature_split_evtchn);
1833		if (err)
1834			goto destroy_ring;
1835	}
1836
1837again:
1838	err = xenbus_transaction_start(&xbt);
1839	if (err) {
1840		xenbus_dev_fatal(dev, err, "starting transaction");
1841		goto destroy_ring;
1842	}
1843
1844	if (xenbus_exists(XBT_NIL,
1845			  info->xbdev->otherend, "multi-queue-max-queues")) {
1846		/* Write the number of queues */
1847		err = xenbus_printf(xbt, dev->nodename,
1848				    "multi-queue-num-queues", "%u", num_queues);
1849		if (err) {
1850			message = "writing multi-queue-num-queues";
1851			goto abort_transaction_no_dev_fatal;
1852		}
1853	}
1854
1855	if (num_queues == 1) {
1856		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1857		if (err)
1858			goto abort_transaction_no_dev_fatal;
1859	} else {
1860		/* Write the keys for each queue */
1861		for (i = 0; i < num_queues; ++i) {
1862			queue = &info->queues[i];
1863			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1864			if (err)
1865				goto abort_transaction_no_dev_fatal;
1866		}
1867	}
1868
1869	/* The remaining keys are not queue-specific */
1870	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1871			    1);
1872	if (err) {
1873		message = "writing request-rx-copy";
1874		goto abort_transaction;
1875	}
1876
1877	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1878	if (err) {
1879		message = "writing feature-rx-notify";
1880		goto abort_transaction;
1881	}
1882
1883	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1884	if (err) {
1885		message = "writing feature-sg";
1886		goto abort_transaction;
1887	}
1888
1889	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1890	if (err) {
1891		message = "writing feature-gso-tcpv4";
1892		goto abort_transaction;
1893	}
1894
1895	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
1896	if (err) {
1897		message = "writing feature-gso-tcpv6";
1898		goto abort_transaction;
1899	}
1900
1901	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
1902			   "1");
1903	if (err) {
1904		message = "writing feature-ipv6-csum-offload";
1905		goto abort_transaction;
1906	}
1907
1908	err = xenbus_transaction_end(xbt, 0);
1909	if (err) {
1910		if (err == -EAGAIN)
1911			goto again;
1912		xenbus_dev_fatal(dev, err, "completing transaction");
1913		goto destroy_ring;
1914	}
1915
1916	return 0;
1917
1918 abort_transaction:
1919	xenbus_dev_fatal(dev, err, "%s", message);
1920abort_transaction_no_dev_fatal:
1921	xenbus_transaction_end(xbt, 1);
1922 destroy_ring:
1923	xennet_disconnect_backend(info);
1924	rtnl_lock();
1925	xennet_destroy_queues(info);
1926 out:
1927	rtnl_unlock();
 
1928	device_unregister(&dev->dev);
1929	return err;
1930}
1931
1932static int xennet_connect(struct net_device *dev)
1933{
1934	struct netfront_info *np = netdev_priv(dev);
1935	unsigned int num_queues = 0;
1936	int err;
1937	unsigned int j = 0;
1938	struct netfront_queue *queue = NULL;
1939
1940	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
1941		dev_info(&dev->dev,
1942			 "backend does not support copying receive path\n");
1943		return -ENODEV;
1944	}
1945
1946	err = talk_to_netback(np->xbdev, np);
1947	if (err)
1948		return err;
1949
1950	/* talk_to_netback() sets the correct number of queues */
1951	num_queues = dev->real_num_tx_queues;
1952
1953	rtnl_lock();
1954	netdev_update_features(dev);
1955	rtnl_unlock();
1956
1957	if (dev->reg_state == NETREG_UNINITIALIZED) {
1958		err = register_netdev(dev);
1959		if (err) {
1960			pr_warn("%s: register_netdev err=%d\n", __func__, err);
1961			device_unregister(&np->xbdev->dev);
1962			return err;
1963		}
1964	}
1965
1966	/*
1967	 * All public and private state should now be sane.  Get
1968	 * ready to start sending and receiving packets and give the driver
1969	 * domain a kick because we've probably just requeued some
1970	 * packets.
1971	 */
1972	netif_carrier_on(np->netdev);
1973	for (j = 0; j < num_queues; ++j) {
1974		queue = &np->queues[j];
1975
1976		notify_remote_via_irq(queue->tx_irq);
1977		if (queue->tx_irq != queue->rx_irq)
1978			notify_remote_via_irq(queue->rx_irq);
1979
1980		spin_lock_irq(&queue->tx_lock);
1981		xennet_tx_buf_gc(queue);
1982		spin_unlock_irq(&queue->tx_lock);
1983
1984		spin_lock_bh(&queue->rx_lock);
1985		xennet_alloc_rx_buffers(queue);
1986		spin_unlock_bh(&queue->rx_lock);
1987	}
1988
1989	return 0;
1990}
1991
1992/**
1993 * Callback received when the backend's state changes.
1994 */
1995static void netback_changed(struct xenbus_device *dev,
1996			    enum xenbus_state backend_state)
1997{
1998	struct netfront_info *np = dev_get_drvdata(&dev->dev);
1999	struct net_device *netdev = np->netdev;
2000
2001	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2002
2003	switch (backend_state) {
2004	case XenbusStateInitialising:
2005	case XenbusStateInitialised:
2006	case XenbusStateReconfiguring:
2007	case XenbusStateReconfigured:
2008		break;
2009
2010	case XenbusStateUnknown:
2011		wake_up_all(&module_unload_q);
2012		break;
2013
2014	case XenbusStateInitWait:
2015		if (dev->state != XenbusStateInitialising)
2016			break;
2017		if (xennet_connect(netdev) != 0)
2018			break;
2019		xenbus_switch_state(dev, XenbusStateConnected);
2020		break;
2021
2022	case XenbusStateConnected:
2023		netdev_notify_peers(netdev);
2024		break;
2025
2026	case XenbusStateClosed:
2027		wake_up_all(&module_unload_q);
2028		if (dev->state == XenbusStateClosed)
2029			break;
2030		/* Missed the backend's CLOSING state -- fallthrough */
2031	case XenbusStateClosing:
2032		wake_up_all(&module_unload_q);
2033		xenbus_frontend_closed(dev);
2034		break;
2035	}
2036}
2037
2038static const struct xennet_stat {
2039	char name[ETH_GSTRING_LEN];
2040	u16 offset;
2041} xennet_stats[] = {
2042	{
2043		"rx_gso_checksum_fixup",
2044		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2045	},
2046};
2047
2048static int xennet_get_sset_count(struct net_device *dev, int string_set)
2049{
2050	switch (string_set) {
2051	case ETH_SS_STATS:
2052		return ARRAY_SIZE(xennet_stats);
2053	default:
2054		return -EINVAL;
2055	}
2056}
2057
2058static void xennet_get_ethtool_stats(struct net_device *dev,
2059				     struct ethtool_stats *stats, u64 * data)
2060{
2061	void *np = netdev_priv(dev);
2062	int i;
2063
2064	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2065		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2066}
2067
2068static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2069{
2070	int i;
2071
2072	switch (stringset) {
2073	case ETH_SS_STATS:
2074		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2075			memcpy(data + i * ETH_GSTRING_LEN,
2076			       xennet_stats[i].name, ETH_GSTRING_LEN);
2077		break;
2078	}
2079}
2080
2081static const struct ethtool_ops xennet_ethtool_ops =
2082{
2083	.get_link = ethtool_op_get_link,
2084
2085	.get_sset_count = xennet_get_sset_count,
2086	.get_ethtool_stats = xennet_get_ethtool_stats,
2087	.get_strings = xennet_get_strings,
 
2088};
2089
2090#ifdef CONFIG_SYSFS
2091static ssize_t show_rxbuf(struct device *dev,
2092			  struct device_attribute *attr, char *buf)
2093{
2094	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2095}
2096
2097static ssize_t store_rxbuf(struct device *dev,
2098			   struct device_attribute *attr,
2099			   const char *buf, size_t len)
2100{
2101	char *endp;
2102	unsigned long target;
2103
2104	if (!capable(CAP_NET_ADMIN))
2105		return -EPERM;
2106
2107	target = simple_strtoul(buf, &endp, 0);
2108	if (endp == buf)
2109		return -EBADMSG;
2110
2111	/* rxbuf_min and rxbuf_max are no longer configurable. */
2112
2113	return len;
2114}
2115
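/*
 * rxbuf_min and rxbuf_max stay writable for compatibility, but store_rxbuf()
 * above is a no-op and all three attributes report the fixed ring size.
 */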
2116static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2117static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2118static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2119
2120static struct attribute *xennet_dev_attrs[] = {
2121	&dev_attr_rxbuf_min.attr,
2122	&dev_attr_rxbuf_max.attr,
2123	&dev_attr_rxbuf_cur.attr,
2124	NULL
2125};
2126
2127static const struct attribute_group xennet_dev_group = {
2128	.attrs = xennet_dev_attrs
2129};
2130#endif /* CONFIG_SYSFS */
2131
2132static int xennet_remove(struct xenbus_device *dev)
2133{
2134	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2135
2136	dev_dbg(&dev->dev, "%s\n", dev->nodename);
2137
2138	if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
2139		xenbus_switch_state(dev, XenbusStateClosing);
2140		wait_event(module_unload_q,
2141			   xenbus_read_driver_state(dev->otherend) ==
2142			   XenbusStateClosing ||
2143			   xenbus_read_driver_state(dev->otherend) ==
2144			   XenbusStateUnknown);
2145
2146		xenbus_switch_state(dev, XenbusStateClosed);
2147		wait_event(module_unload_q,
2148			   xenbus_read_driver_state(dev->otherend) ==
2149			   XenbusStateClosed ||
2150			   xenbus_read_driver_state(dev->otherend) ==
2151			   XenbusStateUnknown);
2152	}
2153
2154	xennet_disconnect_backend(info);
2155
2156	if (info->netdev->reg_state == NETREG_REGISTERED)
2157		unregister_netdev(info->netdev);
2158
2159	if (info->queues) {
2160		rtnl_lock();
2161		xennet_destroy_queues(info);
2162		rtnl_unlock();
2163	}
2164	xennet_free_netdev(info->netdev);
2165
2166	return 0;
2167}
2168
2169static const struct xenbus_device_id netfront_ids[] = {
2170	{ "vif" },
2171	{ "" }
2172};
2173
2174static struct xenbus_driver netfront_driver = {
2175	.ids = netfront_ids,
2176	.probe = netfront_probe,
2177	.remove = xennet_remove,
2178	.resume = netfront_resume,
2179	.otherend_changed = netback_changed,
2180};
2181
2182static int __init netif_init(void)
2183{
2184	if (!xen_domain())
2185		return -ENODEV;
2186
2187	if (!xen_has_pv_nic_devices())
2188		return -ENODEV;
2189
2190	pr_info("Initialising Xen virtual ethernet driver\n");
2191
2192	/* Allow as many queues as there are CPUs, but at most 8
2193	 * (MAX_QUEUES_DEFAULT), if the user has not specified a value.
2194	 */
2195	if (xennet_max_queues == 0)
2196		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2197					  num_online_cpus());
2198
2199	return xenbus_register_frontend(&netfront_driver);
2200}
2201module_init(netif_init);
2202
2203
2204static void __exit netif_exit(void)
2205{
2206	xenbus_unregister_driver(&netfront_driver);
2207}
2208module_exit(netif_exit);
2209
2210MODULE_DESCRIPTION("Xen virtual network device frontend");
2211MODULE_LICENSE("GPL");
2212MODULE_ALIAS("xen:vif");
2213MODULE_ALIAS("xennet");
v6.9.4
   1/*
   2 * Virtual network driver for conversing with remote driver backends.
   3 *
   4 * Copyright (c) 2002-2005, K A Fraser
   5 * Copyright (c) 2005, XenSource Ltd
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version 2
   9 * as published by the Free Software Foundation; or, when distributed
  10 * separately from the Linux kernel or incorporated into other
  11 * software packages, subject to the following license:
  12 *
  13 * Permission is hereby granted, free of charge, to any person obtaining a copy
  14 * of this source file (the "Software"), to deal in the Software without
  15 * restriction, including without limitation the rights to use, copy, modify,
  16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  17 * and to permit persons to whom the Software is furnished to do so, subject to
  18 * the following conditions:
  19 *
  20 * The above copyright notice and this permission notice shall be included in
  21 * all copies or substantial portions of the Software.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  29 * IN THE SOFTWARE.
  30 */
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47#include <linux/bpf.h>
  48#include <net/page_pool/types.h>
  49#include <linux/bpf_trace.h>
  50
  51#include <xen/xen.h>
  52#include <xen/xenbus.h>
  53#include <xen/events.h>
  54#include <xen/page.h>
  55#include <xen/platform_pci.h>
  56#include <xen/grant_table.h>
  57
  58#include <xen/interface/io/netif.h>
  59#include <xen/interface/memory.h>
  60#include <xen/interface/grant_table.h>
  61
  62/* Module parameters */
  63#define MAX_QUEUES_DEFAULT 8
  64static unsigned int xennet_max_queues;
  65module_param_named(max_queues, xennet_max_queues, uint, 0644);
  66MODULE_PARM_DESC(max_queues,
  67		 "Maximum number of queues per virtual interface");
  68
  69static bool __read_mostly xennet_trusted = true;
  70module_param_named(trusted, xennet_trusted, bool, 0644);
  71MODULE_PARM_DESC(trusted, "Is the backend trusted");
  72
  73#define XENNET_TIMEOUT  (5 * HZ)
  74
  75static const struct ethtool_ops xennet_ethtool_ops;
  76
  77struct netfront_cb {
  78	int pull_to;
  79};
  80
  81#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  82
  83#define RX_COPY_THRESHOLD 256
  84
  85#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
  86#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
  87
  88/* Minimum number of Rx slots (includes slot for GSO metadata). */
  89#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
  90
  91/* Queue name is interface name with "-qNNN" appended */
  92#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
  93
  94/* IRQ name is queue name with "-tx" or "-rx" appended */
  95#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
  96
  97static DECLARE_WAIT_QUEUE_HEAD(module_wq);
  98
  99struct netfront_stats {
 100	u64			packets;
 101	u64			bytes;
 102	struct u64_stats_sync	syncp;
 103};
 104
 105struct netfront_info;
 106
 107struct netfront_queue {
 108	unsigned int id; /* Queue ID, 0-based */
 109	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
 110	struct netfront_info *info;
 111
 112	struct bpf_prog __rcu *xdp_prog;
 113
 114	struct napi_struct napi;
 115
 116	/* Split event channels support, tx_* == rx_* when using
 117	 * single event channel.
 118	 */
 119	unsigned int tx_evtchn, rx_evtchn;
 120	unsigned int tx_irq, rx_irq;
 121	/* Only used when split event channels support is enabled */
 122	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 123	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 124
 125	spinlock_t   tx_lock;
 126	struct xen_netif_tx_front_ring tx;
 127	int tx_ring_ref;
 128
 129	/*
 130	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 131	 * are linked from tx_skb_freelist through tx_link.
 132	 */
 133	struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
 134	unsigned short tx_link[NET_TX_RING_SIZE];
 135#define TX_LINK_NONE 0xffff
 136#define TX_PENDING   0xfffe
 137	grant_ref_t gref_tx_head;
 138	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 139	struct page *grant_tx_page[NET_TX_RING_SIZE];
 140	unsigned tx_skb_freelist;
 141	unsigned int tx_pend_queue;
 142
 143	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 144	struct xen_netif_rx_front_ring rx;
 145	int rx_ring_ref;
 146
 147	struct timer_list rx_refill_timer;
 148
 149	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 150	grant_ref_t gref_rx_head;
 151	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 152
 153	unsigned int rx_rsp_unconsumed;
 154	spinlock_t rx_cons_lock;
 155
 156	struct page_pool *page_pool;
 157	struct xdp_rxq_info xdp_rxq;
 158};
 159
 160struct netfront_info {
 161	struct list_head list;
 162	struct net_device *netdev;
 163
 164	struct xenbus_device *xbdev;
 165
 166	/* Multi-queue support */
 167	struct netfront_queue *queues;
 168
 169	/* Statistics */
 170	struct netfront_stats __percpu *rx_stats;
 171	struct netfront_stats __percpu *tx_stats;
 172
 173	/* XDP state */
 174	bool netback_has_xdp_headroom;
 175	bool netfront_xdp_enabled;
 176
 177	/* Is device behaving sane? */
 178	bool broken;
 179
 180	/* Should skbs be bounced into a zeroed buffer? */
 181	bool bounce;
 182
 183	atomic_t rx_gso_checksum_fixup;
 184};
 185
 186struct netfront_rx_info {
 187	struct xen_netif_rx_response rx;
 188	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 189};
 190
 191/*
  192 * Access helpers for acquiring and freeing slots in tx_skbs[].
 193 */
 194
 195static void add_id_to_list(unsigned *head, unsigned short *list,
 196			   unsigned short id)
 197{
 198	list[id] = *head;
 199	*head = id;
 200}
 201
 202static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
 203{
 204	unsigned int id = *head;
 205
 206	if (id != TX_LINK_NONE) {
 207		*head = list[id];
 208		list[id] = TX_LINK_NONE;
 209	}
 210	return id;
 211}
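
/*
 * Illustrative example: with *head == 3 and list[3] == 7, get_id_from_list()
 * returns 3 and leaves *head == 7; add_id_to_list() pushes an id back by
 * making it the new head.
 */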
 212
 213static int xennet_rxidx(RING_IDX idx)
 214{
 215	return idx & (NET_RX_RING_SIZE - 1);
 216}
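
/* NET_RX_RING_SIZE is a power of two, so the mask above is a cheap modulo. */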
 217
 218static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
 219					 RING_IDX ri)
 220{
 221	int i = xennet_rxidx(ri);
 222	struct sk_buff *skb = queue->rx_skbs[i];
 223	queue->rx_skbs[i] = NULL;
 224	return skb;
 225}
 226
 227static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 228					    RING_IDX ri)
 229{
 230	int i = xennet_rxidx(ri);
 231	grant_ref_t ref = queue->grant_rx_ref[i];
 232	queue->grant_rx_ref[i] = INVALID_GRANT_REF;
 233	return ref;
 234}
 235
 236#ifdef CONFIG_SYSFS
 237static const struct attribute_group xennet_dev_group;
 238#endif
 239
 240static bool xennet_can_sg(struct net_device *dev)
 241{
 242	return dev->features & NETIF_F_SG;
 243}
 244
 245
 246static void rx_refill_timeout(struct timer_list *t)
 247{
 248	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
 249	napi_schedule(&queue->napi);
 250}
 251
 252static int netfront_tx_slot_available(struct netfront_queue *queue)
 253{
 254	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
 255		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
 256}
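
/*
 * The headroom of XEN_NETIF_NR_SLOTS_MIN + 1 free slots ensures a worst-case
 * fragmented skb can still be queued once the tx queue is woken.
 */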
 257
 258static void xennet_maybe_wake_tx(struct netfront_queue *queue)
 259{
 260	struct net_device *dev = queue->info->netdev;
 261	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
 262
 263	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
 264	    netfront_tx_slot_available(queue) &&
 265	    likely(netif_running(dev)))
 266		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 267}
 268
 269
 270static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 271{
 272	struct sk_buff *skb;
 273	struct page *page;
 274
 275	skb = __netdev_alloc_skb(queue->info->netdev,
 276				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
 277				 GFP_ATOMIC | __GFP_NOWARN);
 278	if (unlikely(!skb))
 279		return NULL;
 280
 281	page = page_pool_alloc_pages(queue->page_pool,
 282				     GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
 283	if (unlikely(!page)) {
 284		kfree_skb(skb);
 285		return NULL;
 286	}
 287	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 288	skb_mark_for_recycle(skb);
 289
  290	/* Align the IP header to a 16-byte boundary */
 291	skb_reserve(skb, NET_IP_ALIGN);
 292	skb->dev = queue->info->netdev;
 293
 294	return skb;
 295}
 296
 297
 298static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 299{
 300	RING_IDX req_prod = queue->rx.req_prod_pvt;
 301	int notify;
 302	int err = 0;
 303
 304	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 305		return;
 306
 307	for (req_prod = queue->rx.req_prod_pvt;
 308	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
 309	     req_prod++) {
 310		struct sk_buff *skb;
 311		unsigned short id;
 312		grant_ref_t ref;
 313		struct page *page;
 314		struct xen_netif_rx_request *req;
 315
 316		skb = xennet_alloc_one_rx_buffer(queue);
 317		if (!skb) {
 318			err = -ENOMEM;
 319			break;
 320		}
 321
 322		id = xennet_rxidx(req_prod);
 323
 324		BUG_ON(queue->rx_skbs[id]);
 325		queue->rx_skbs[id] = skb;
 326
 327		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
 328		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 329		queue->grant_rx_ref[id] = ref;
 330
 331		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 332
 333		req = RING_GET_REQUEST(&queue->rx, req_prod);
 334		gnttab_page_grant_foreign_access_ref_one(ref,
 335							 queue->info->xbdev->otherend_id,
 336							 page,
 337							 0);
 338		req->id = id;
 339		req->gref = ref;
 340	}
 341
 342	queue->rx.req_prod_pvt = req_prod;
 343
 344	/* Try again later if there are not enough requests or skb allocation
 345	 * failed.
 346	 * Enough requests is quantified as the sum of newly created slots and
 347	 * the unconsumed slots at the backend.
 348	 */
 349	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
 350	    unlikely(err)) {
 351		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 352		return;
 353	}
 354
 355	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 356	if (notify)
 357		notify_remote_via_irq(queue->rx_irq);
 358}
 359
 360static int xennet_open(struct net_device *dev)
 361{
 362	struct netfront_info *np = netdev_priv(dev);
 363	unsigned int num_queues = dev->real_num_tx_queues;
 364	unsigned int i = 0;
 365	struct netfront_queue *queue = NULL;
 366
 367	if (!np->queues || np->broken)
 368		return -ENODEV;
 369
 370	for (i = 0; i < num_queues; ++i) {
 371		queue = &np->queues[i];
 372		napi_enable(&queue->napi);
 373
 374		spin_lock_bh(&queue->rx_lock);
 375		if (netif_carrier_ok(dev)) {
 376			xennet_alloc_rx_buffers(queue);
 377			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
 378			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
 379				napi_schedule(&queue->napi);
 380		}
 381		spin_unlock_bh(&queue->rx_lock);
 382	}
 383
 384	netif_tx_start_all_queues(dev);
 385
 386	return 0;
 387}
 388
 389static bool xennet_tx_buf_gc(struct netfront_queue *queue)
 390{
 391	RING_IDX cons, prod;
 392	unsigned short id;
 393	struct sk_buff *skb;
 394	bool more_to_do;
 395	bool work_done = false;
 396	const struct device *dev = &queue->info->netdev->dev;
 397
 398	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 399
 400	do {
 401		prod = queue->tx.sring->rsp_prod;
 402		if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
 403			dev_alert(dev, "Illegal number of responses %u\n",
 404				  prod - queue->tx.rsp_cons);
 405			goto err;
 406		}
 407		rmb(); /* Ensure we see responses up to 'rp'. */
 408
 409		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 410			struct xen_netif_tx_response txrsp;
 411
 412			work_done = true;
 413
 414			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
 415			if (txrsp.status == XEN_NETIF_RSP_NULL)
 416				continue;
 417
 418			id = txrsp.id;
 419			if (id >= RING_SIZE(&queue->tx)) {
 420				dev_alert(dev,
 421					  "Response has incorrect id (%u)\n",
 422					  id);
 423				goto err;
 424			}
 425			if (queue->tx_link[id] != TX_PENDING) {
 426				dev_alert(dev,
 427					  "Response for inactive request\n");
 428				goto err;
 429			}
 430
 431			queue->tx_link[id] = TX_LINK_NONE;
 432			skb = queue->tx_skbs[id];
 433			queue->tx_skbs[id] = NULL;
 434			if (unlikely(!gnttab_end_foreign_access_ref(
 435				queue->grant_tx_ref[id]))) {
 436				dev_alert(dev,
 437					  "Grant still in use by backend domain\n");
 438				goto err;
 439			}
 440			gnttab_release_grant_reference(
 441				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 442			queue->grant_tx_ref[id] = INVALID_GRANT_REF;
 443			queue->grant_tx_page[id] = NULL;
 444			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
 445			dev_kfree_skb_irq(skb);
 446		}
 447
 448		queue->tx.rsp_cons = prod;
 449
 450		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
 451	} while (more_to_do);
 452
 453	xennet_maybe_wake_tx(queue);
 454
 455	return work_done;
 456
 457 err:
 458	queue->info->broken = true;
 459	dev_alert(dev, "Disabled for further use\n");
 460
 461	return work_done;
 462}
 463
 464struct xennet_gnttab_make_txreq {
 465	struct netfront_queue *queue;
 466	struct sk_buff *skb;
 467	struct page *page;
 468	struct xen_netif_tx_request *tx;      /* Last request on ring page */
  469	struct xen_netif_tx_request tx_local; /* Last request local copy */
 470	unsigned int size;
 471};
 472
 473static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 474				  unsigned int len, void *data)
 475{
 476	struct xennet_gnttab_make_txreq *info = data;
 477	unsigned int id;
 478	struct xen_netif_tx_request *tx;
 479	grant_ref_t ref;
 480	/* convenient aliases */
 481	struct page *page = info->page;
 482	struct netfront_queue *queue = info->queue;
 483	struct sk_buff *skb = info->skb;
 484
 485	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
 486	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 487	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 488	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 489
 490	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 491					gfn, GNTMAP_readonly);
 492
 493	queue->tx_skbs[id] = skb;
 494	queue->grant_tx_page[id] = page;
 495	queue->grant_tx_ref[id] = ref;
 496
 497	info->tx_local.id = id;
 498	info->tx_local.gref = ref;
 499	info->tx_local.offset = offset;
 500	info->tx_local.size = len;
 501	info->tx_local.flags = 0;
 502
 503	*tx = info->tx_local;
 504
 505	/*
 506	 * Put the request in the pending queue, it will be set to be pending
 507	 * when the producer index is about to be raised.
 508	 */
 509	add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
 510
 511	info->tx = tx;
 512	info->size += info->tx_local.size;
 513}
 514
 515static struct xen_netif_tx_request *xennet_make_first_txreq(
 516	struct xennet_gnttab_make_txreq *info,
 517	unsigned int offset, unsigned int len)
 518{
 519	info->size = 0;
 520
 521	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
 522
 523	return info->tx;
 524}
 525
 526static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 527				  unsigned int len, void *data)
 528{
 529	struct xennet_gnttab_make_txreq *info = data;
 530
 531	info->tx->flags |= XEN_NETTXF_more_data;
 532	skb_get(info->skb);
 533	xennet_tx_setup_grant(gfn, offset, len, data);
 534}
 535
 536static void xennet_make_txreqs(
 537	struct xennet_gnttab_make_txreq *info,
 538	struct page *page,
 539	unsigned int offset, unsigned int len)
 540{
 541	/* Skip unused frames from start of page */
 542	page += offset >> PAGE_SHIFT;
 543	offset &= ~PAGE_MASK;
 544
 545	while (len) {
 546		info->page = page;
 547		info->size = 0;
 548
 549		gnttab_foreach_grant_in_range(page, offset, len,
 550					      xennet_make_one_txreq,
 551					      info);
 552
 553		page++;
 554		offset = 0;
 555		len -= info->size;
 556	}
 557}
 558
 559/*
 560 * Count how many ring slots are required to send this skb. Each frag
 561 * might be a compound page.
 562 */
 563static int xennet_count_skb_slots(struct sk_buff *skb)
 564{
 565	int i, frags = skb_shinfo(skb)->nr_frags;
 566	int slots;
 567
 568	slots = gnttab_count_grant(offset_in_page(skb->data),
 569				   skb_headlen(skb));
 570
 571	for (i = 0; i < frags; i++) {
 572		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 573		unsigned long size = skb_frag_size(frag);
 574		unsigned long offset = skb_frag_off(frag);
 575
 576		/* Skip unused frames from start of page */
 577		offset &= ~PAGE_MASK;
 578
 579		slots += gnttab_count_grant(offset, size);
 580	}
 581
 582	return slots;
 583}
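
/*
 * Illustrative example, assuming 4 KiB (XEN_PAGE_SIZE) grants: an 8000-byte
 * frag starting at page offset 0 needs two slots, while the same 8000 bytes
 * starting at offset 3000 straddle three pages and need three slots.
 */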
 584
 585static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 586			       struct net_device *sb_dev)
 587{
 588	unsigned int num_queues = dev->real_num_tx_queues;
 589	u32 hash;
 590	u16 queue_idx;
 591
 592	/* First, check if there is only one queue */
 593	if (num_queues == 1) {
 594		queue_idx = 0;
 595	} else {
 596		hash = skb_get_hash(skb);
 597		queue_idx = hash % num_queues;
 598	}
 599
 600	return queue_idx;
 601}
 602
 603static void xennet_mark_tx_pending(struct netfront_queue *queue)
 604{
 605	unsigned int i;
 606
 607	while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
 608	       TX_LINK_NONE)
 609		queue->tx_link[i] = TX_PENDING;
 610}
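
/*
 * Only ids marked TX_PENDING here are treated as valid by xennet_tx_buf_gc();
 * requests still sitting on tx_pend_queue have been written to the ring but
 * not yet made visible to the backend.
 */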
 611
 612static int xennet_xdp_xmit_one(struct net_device *dev,
 613			       struct netfront_queue *queue,
 614			       struct xdp_frame *xdpf)
 615{
 616	struct netfront_info *np = netdev_priv(dev);
 617	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 618	struct xennet_gnttab_make_txreq info = {
 619		.queue = queue,
 620		.skb = NULL,
 621		.page = virt_to_page(xdpf->data),
 622	};
 623	int notify;
 624
 625	xennet_make_first_txreq(&info,
 626				offset_in_page(xdpf->data),
 627				xdpf->len);
 628
 629	xennet_mark_tx_pending(queue);
 630
 631	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 632	if (notify)
 633		notify_remote_via_irq(queue->tx_irq);
 634
 635	u64_stats_update_begin(&tx_stats->syncp);
 636	tx_stats->bytes += xdpf->len;
 637	tx_stats->packets++;
 638	u64_stats_update_end(&tx_stats->syncp);
 639
 640	xennet_tx_buf_gc(queue);
 641
 642	return 0;
 643}
 644
 645static int xennet_xdp_xmit(struct net_device *dev, int n,
 646			   struct xdp_frame **frames, u32 flags)
 647{
 648	unsigned int num_queues = dev->real_num_tx_queues;
 649	struct netfront_info *np = netdev_priv(dev);
 650	struct netfront_queue *queue = NULL;
 651	unsigned long irq_flags;
 652	int nxmit = 0;
 653	int i;
 654
 655	if (unlikely(np->broken))
 656		return -ENODEV;
 657	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 658		return -EINVAL;
 659
 660	queue = &np->queues[smp_processor_id() % num_queues];
 661
 662	spin_lock_irqsave(&queue->tx_lock, irq_flags);
 663	for (i = 0; i < n; i++) {
 664		struct xdp_frame *xdpf = frames[i];
 665
 666		if (!xdpf)
 667			continue;
 668		if (xennet_xdp_xmit_one(dev, queue, xdpf))
 669			break;
 670		nxmit++;
 671	}
 672	spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
 673
 674	return nxmit;
 675}
 676
 677static struct sk_buff *bounce_skb(const struct sk_buff *skb)
 678{
 679	unsigned int headerlen = skb_headroom(skb);
 680	/* Align size to allocate full pages and avoid contiguous data leaks */
 681	unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
 682				  XEN_PAGE_SIZE);
 683	struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
 684
 685	if (!n)
 686		return NULL;
 687
 688	if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
 689		WARN_ONCE(1, "misaligned skb allocated\n");
 690		kfree_skb(n);
 691		return NULL;
 692	}
 693
 694	/* Set the data pointer */
 695	skb_reserve(n, headerlen);
 696	/* Set the tail pointer and length */
 697	skb_put(n, skb->len);
 698
 699	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
 700
 701	skb_copy_header(n, skb);
 702	return n;
 703}
 704
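/*
 * Slots needed for a maximum-size (64 KiB) GSO packet: one per backing page
 * plus one extra in case the payload does not start on a page boundary.
 */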
 705#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 706
 707static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 708{
 709	struct netfront_info *np = netdev_priv(dev);
 710	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 711	struct xen_netif_tx_request *first_tx;
 712	unsigned int i;
 713	int notify;
 714	int slots;
 715	struct page *page;
 716	unsigned int offset;
 717	unsigned int len;
 718	unsigned long flags;
 719	struct netfront_queue *queue = NULL;
 720	struct xennet_gnttab_make_txreq info = { };
 721	unsigned int num_queues = dev->real_num_tx_queues;
 722	u16 queue_index;
 723	struct sk_buff *nskb;
 724
 725	/* Drop the packet if no queues are set up */
 726	if (num_queues < 1)
 727		goto drop;
 728	if (unlikely(np->broken))
 729		goto drop;
 730	/* Determine which queue to transmit this SKB on */
 731	queue_index = skb_get_queue_mapping(skb);
 732	queue = &np->queues[queue_index];
 733
 734	/* If skb->len is too big for wire format, drop skb and alert
 735	 * user about misconfiguration.
 736	 */
 737	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 738		net_alert_ratelimited(
 739			"xennet: skb->len = %u, too big for wire format\n",
 740			skb->len);
 741		goto drop;
 742	}
 743
 744	slots = xennet_count_skb_slots(skb);
 745	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
 746		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 747				    slots, skb->len);
 748		if (skb_linearize(skb))
 749			goto drop;
 750	}
 751
 752	page = virt_to_page(skb->data);
 753	offset = offset_in_page(skb->data);
 754
 755	/* The first req should be at least ETH_HLEN size or the packet will be
 756	 * dropped by netback.
 757	 *
 758	 * If the backend is not trusted bounce all data to zeroed pages to
 759	 * avoid exposing contiguous data on the granted page not belonging to
 760	 * the skb.
 761	 */
 762	if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
 763		nskb = bounce_skb(skb);
 764		if (!nskb)
 765			goto drop;
 766		dev_consume_skb_any(skb);
 767		skb = nskb;
 768		page = virt_to_page(skb->data);
 769		offset = offset_in_page(skb->data);
 770	}
 771
 772	len = skb_headlen(skb);
 773
 774	spin_lock_irqsave(&queue->tx_lock, flags);
 775
 776	if (unlikely(!netif_carrier_ok(dev) ||
 777		     (slots > 1 && !xennet_can_sg(dev)) ||
 778		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 779		spin_unlock_irqrestore(&queue->tx_lock, flags);
 780		goto drop;
 781	}
 782
 783	/* First request for the linear area. */
 784	info.queue = queue;
 785	info.skb = skb;
 786	info.page = page;
 787	first_tx = xennet_make_first_txreq(&info, offset, len);
 788	offset += info.tx_local.size;
 789	if (offset == PAGE_SIZE) {
 790		page++;
 791		offset = 0;
 792	}
 793	len -= info.tx_local.size;
 794
 795	if (skb->ip_summed == CHECKSUM_PARTIAL)
 796		/* local packet? */
 797		first_tx->flags |= XEN_NETTXF_csum_blank |
 798				   XEN_NETTXF_data_validated;
 799	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 800		/* remote but checksummed. */
 801		first_tx->flags |= XEN_NETTXF_data_validated;
 802
 803	/* Optional extra info after the first request. */
 804	if (skb_shinfo(skb)->gso_size) {
 805		struct xen_netif_extra_info *gso;
 806
 807		gso = (struct xen_netif_extra_info *)
 808			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 809
 810		first_tx->flags |= XEN_NETTXF_extra_info;
 811
 812		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 813		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 814			XEN_NETIF_GSO_TYPE_TCPV6 :
 815			XEN_NETIF_GSO_TYPE_TCPV4;
 816		gso->u.gso.pad = 0;
 817		gso->u.gso.features = 0;
 818
 819		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 820		gso->flags = 0;
 821	}
 822
 823	/* Requests for the rest of the linear area. */
 824	xennet_make_txreqs(&info, page, offset, len);
 825
 826	/* Requests for all the frags. */
 827	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 828		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 829		xennet_make_txreqs(&info, skb_frag_page(frag),
 830					skb_frag_off(frag),
 831					skb_frag_size(frag));
 832	}
 833
 834	/* First request has the packet length. */
 835	first_tx->size = skb->len;
 836
 837	/* timestamp packet in software */
 838	skb_tx_timestamp(skb);
 839
 840	xennet_mark_tx_pending(queue);
 841
 842	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 843	if (notify)
 844		notify_remote_via_irq(queue->tx_irq);
 845
 846	u64_stats_update_begin(&tx_stats->syncp);
 847	tx_stats->bytes += skb->len;
 848	tx_stats->packets++;
 849	u64_stats_update_end(&tx_stats->syncp);
 850
 851	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 852	xennet_tx_buf_gc(queue);
 853
 854	if (!netfront_tx_slot_available(queue))
 855		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
 856
 857	spin_unlock_irqrestore(&queue->tx_lock, flags);
 858
 859	return NETDEV_TX_OK;
 860
 861 drop:
 862	dev->stats.tx_dropped++;
 863	dev_kfree_skb_any(skb);
 864	return NETDEV_TX_OK;
 865}
 866
 867static int xennet_close(struct net_device *dev)
 868{
 869	struct netfront_info *np = netdev_priv(dev);
 870	unsigned int num_queues = dev->real_num_tx_queues;
 871	unsigned int i;
 872	struct netfront_queue *queue;
 873	netif_tx_stop_all_queues(np->netdev);
 874	for (i = 0; i < num_queues; ++i) {
 875		queue = &np->queues[i];
 876		napi_disable(&queue->napi);
 877	}
 878	return 0;
 879}
 880
 881static void xennet_destroy_queues(struct netfront_info *info)
 882{
 883	unsigned int i;
 884
 885	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
 886		struct netfront_queue *queue = &info->queues[i];
 887
 888		if (netif_running(info->netdev))
 889			napi_disable(&queue->napi);
 890		netif_napi_del(&queue->napi);
 891	}
 892
 893	kfree(info->queues);
 894	info->queues = NULL;
 895}
 896
 897static void xennet_uninit(struct net_device *dev)
 898{
 899	struct netfront_info *np = netdev_priv(dev);
 900	xennet_destroy_queues(np);
 901}
 902
 903static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
 904{
 905	unsigned long flags;
 906
 907	spin_lock_irqsave(&queue->rx_cons_lock, flags);
 908	queue->rx.rsp_cons = val;
 909	queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
 910	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
 911}
 912
 913static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 914				grant_ref_t ref)
 915{
 916	int new = xennet_rxidx(queue->rx.req_prod_pvt);
 917
 918	BUG_ON(queue->rx_skbs[new]);
 919	queue->rx_skbs[new] = skb;
 920	queue->grant_rx_ref[new] = ref;
 921	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
 922	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
 923	queue->rx.req_prod_pvt++;
 924}
 925
 926static int xennet_get_extras(struct netfront_queue *queue,
 927			     struct xen_netif_extra_info *extras,
 928			     RING_IDX rp)
 929
 930{
 931	struct xen_netif_extra_info extra;
 932	struct device *dev = &queue->info->netdev->dev;
 933	RING_IDX cons = queue->rx.rsp_cons;
 934	int err = 0;
 935
 936	do {
 937		struct sk_buff *skb;
 938		grant_ref_t ref;
 939
 940		if (unlikely(cons + 1 == rp)) {
 941			if (net_ratelimit())
 942				dev_warn(dev, "Missing extra info\n");
 943			err = -EBADR;
 944			break;
 945		}
 946
 947		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
 
 948
 949		if (unlikely(!extra.type ||
 950			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 951			if (net_ratelimit())
 952				dev_warn(dev, "Invalid extra type: %d\n",
 953					 extra.type);
 954			err = -EINVAL;
 955		} else {
 956			extras[extra.type - 1] = extra;
 
 957		}
 958
 959		skb = xennet_get_rx_skb(queue, cons);
 960		ref = xennet_get_rx_ref(queue, cons);
 961		xennet_move_rx_slot(queue, skb, ref);
 962	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 963
 964	xennet_set_rx_rsp_cons(queue, cons);
 965	return err;
 966}
 967
 968static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
 969		   struct xen_netif_rx_response *rx, struct bpf_prog *prog,
 970		   struct xdp_buff *xdp, bool *need_xdp_flush)
 971{
 972	struct xdp_frame *xdpf;
 973	u32 len = rx->status;
 974	u32 act;
 975	int err;
 976
 977	xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
 978		      &queue->xdp_rxq);
 979	xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
 980			 len, false);
 981
 982	act = bpf_prog_run_xdp(prog, xdp);
 983	switch (act) {
 984	case XDP_TX:
 985		get_page(pdata);
 986		xdpf = xdp_convert_buff_to_frame(xdp);
 987		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
 988		if (unlikely(!err))
 989			xdp_return_frame_rx_napi(xdpf);
 990		else if (unlikely(err < 0))
 991			trace_xdp_exception(queue->info->netdev, prog, act);
 992		break;
 993	case XDP_REDIRECT:
 994		get_page(pdata);
 995		err = xdp_do_redirect(queue->info->netdev, xdp, prog);
 996		*need_xdp_flush = true;
 997		if (unlikely(err))
 998			trace_xdp_exception(queue->info->netdev, prog, act);
 999		break;
1000	case XDP_PASS:
1001	case XDP_DROP:
1002		break;
1003
1004	case XDP_ABORTED:
1005		trace_xdp_exception(queue->info->netdev, prog, act);
1006		break;
1007
1008	default:
1009		bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act);
1010	}
1011
1012	return act;
1013}
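
/*
 * Any verdict other than XDP_PASS causes the caller (xennet_get_responses())
 * to flag the response as an error, so the frame is dropped from the normal
 * receive path.
 */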
1014
1015static int xennet_get_responses(struct netfront_queue *queue,
1016				struct netfront_rx_info *rinfo, RING_IDX rp,
1017				struct sk_buff_head *list,
1018				bool *need_xdp_flush)
1019{
1020	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1021	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
 
1022	RING_IDX cons = queue->rx.rsp_cons;
1023	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
1024	struct xen_netif_extra_info *extras = rinfo->extras;
1025	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
1026	struct device *dev = &queue->info->netdev->dev;
1027	struct bpf_prog *xdp_prog;
1028	struct xdp_buff xdp;
1029	int slots = 1;
1030	int err = 0;
1031	u32 verdict;
1032
1033	if (rx->flags & XEN_NETRXF_extra_info) {
1034		err = xennet_get_extras(queue, extras, rp);
1035		if (!err) {
1036			if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
1037				struct xen_netif_extra_info *xdp;
1038
1039				xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
1040				rx->offset = xdp->u.xdp.headroom;
1041			}
1042		}
1043		cons = queue->rx.rsp_cons;
1044	}
1045
1046	for (;;) {
1047		/*
1048		 * This definitely indicates a bug, either in this driver or in
1049		 * the backend driver. In future this should flag the bad
1050		 * situation to the system controller to reboot the backend.
1051		 */
1052		if (ref == INVALID_GRANT_REF) {
1053			if (net_ratelimit())
1054				dev_warn(dev, "Bad rx response id %d.\n",
1055					 rx->id);
1056			err = -EINVAL;
1057			goto next;
1058		}
1059
1060		if (unlikely(rx->status < 0 ||
1061			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
1062			if (net_ratelimit())
1063				dev_warn(dev, "rx->offset: %u, size: %d\n",
1064					 rx->offset, rx->status);
1065			xennet_move_rx_slot(queue, skb, ref);
1066			err = -EINVAL;
1067			goto next;
1068		}
1069
1070		if (!gnttab_end_foreign_access_ref(ref)) {
1071			dev_alert(dev,
1072				  "Grant still in use by backend domain\n");
1073			queue->info->broken = true;
1074			dev_alert(dev, "Disabled for further use\n");
1075			return -EINVAL;
1076		}
1077
1078		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1079
1080		rcu_read_lock();
1081		xdp_prog = rcu_dereference(queue->xdp_prog);
1082		if (xdp_prog) {
1083			if (!(rx->flags & XEN_NETRXF_more_data)) {
1084				/* currently only a single page contains data */
1085				verdict = xennet_run_xdp(queue,
1086							 skb_frag_page(&skb_shinfo(skb)->frags[0]),
1087							 rx, xdp_prog, &xdp, need_xdp_flush);
1088				if (verdict != XDP_PASS)
1089					err = -EINVAL;
1090			} else {
1091				/* drop the frame */
1092				err = -EINVAL;
1093			}
1094		}
1095		rcu_read_unlock();
1096
1097		__skb_queue_tail(list, skb);
1098
1099next:
1100		if (!(rx->flags & XEN_NETRXF_more_data))
1101			break;
1102
1103		if (cons + slots == rp) {
1104			if (net_ratelimit())
1105				dev_warn(dev, "Need more slots\n");
1106			err = -ENOENT;
1107			break;
1108		}
1109
1110		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1111		rx = &rx_local;
1112		skb = xennet_get_rx_skb(queue, cons + slots);
1113		ref = xennet_get_rx_ref(queue, cons + slots);
1114		slots++;
1115	}
1116
1117	if (unlikely(slots > max)) {
1118		if (net_ratelimit())
1119			dev_warn(dev, "Too many slots\n");
1120		err = -E2BIG;
1121	}
1122
1123	if (unlikely(err))
1124		xennet_set_rx_rsp_cons(queue, cons + slots);
1125
1126	return err;
1127}
1128
1129static int xennet_set_skb_gso(struct sk_buff *skb,
1130			      struct xen_netif_extra_info *gso)
1131{
1132	if (!gso->u.gso.size) {
1133		if (net_ratelimit())
1134			pr_warn("GSO size must not be zero\n");
1135		return -EINVAL;
1136	}
1137
1138	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1139	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1140		if (net_ratelimit())
1141			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1142		return -EINVAL;
1143	}
1144
1145	skb_shinfo(skb)->gso_size = gso->u.gso.size;
1146	skb_shinfo(skb)->gso_type =
1147		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1148		SKB_GSO_TCPV4 :
1149		SKB_GSO_TCPV6;
1150
1151	/* Header must be checked, and gso_segs computed. */
1152	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1153	skb_shinfo(skb)->gso_segs = 0;
1154
1155	return 0;
1156}
1157
1158static int xennet_fill_frags(struct netfront_queue *queue,
1159			     struct sk_buff *skb,
1160			     struct sk_buff_head *list)
1161{
 
1162	RING_IDX cons = queue->rx.rsp_cons;
1163	struct sk_buff *nskb;
1164
1165	while ((nskb = __skb_dequeue(list))) {
1166		struct xen_netif_rx_response rx;
 
1167		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1168
1169		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1170
1171		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1172			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1173
1174			BUG_ON(pull_to < skb_headlen(skb));
1175			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1176		}
1177		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1178			xennet_set_rx_rsp_cons(queue,
1179					       ++cons + skb_queue_len(list));
1180			kfree_skb(nskb);
1181			return -ENOENT;
1182		}
1183
1184		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1185				skb_frag_page(nfrag),
1186				rx.offset, rx.status, PAGE_SIZE);
1187
1188		skb_shinfo(nskb)->nr_frags = 0;
1189		kfree_skb(nskb);
1190	}
1191
1192	xennet_set_rx_rsp_cons(queue, cons);
1193
1194	return 0;
1195}
1196
1197static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1198{
1199	bool recalculate_partial_csum = false;
1200
1201	/*
1202	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1203	 * peers can fail to set NETRXF_csum_blank when sending a GSO
1204	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1205	 * recalculate the partial checksum.
1206	 */
1207	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1208		struct netfront_info *np = netdev_priv(dev);
1209		atomic_inc(&np->rx_gso_checksum_fixup);
1210		skb->ip_summed = CHECKSUM_PARTIAL;
1211		recalculate_partial_csum = true;
1212	}
1213
1214	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1215	if (skb->ip_summed != CHECKSUM_PARTIAL)
1216		return 0;
1217
1218	return skb_checksum_setup(skb, recalculate_partial_csum);
1219}
1220
1221static int handle_incoming_queue(struct netfront_queue *queue,
1222				 struct sk_buff_head *rxq)
1223{
1224	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1225	int packets_dropped = 0;
1226	struct sk_buff *skb;
1227
1228	while ((skb = __skb_dequeue(rxq)) != NULL) {
1229		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1230
1231		if (pull_to > skb_headlen(skb))
1232			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1233
1234		/* Ethernet work: Delayed to here as it peeks the header. */
1235		skb->protocol = eth_type_trans(skb, queue->info->netdev);
1236		skb_reset_network_header(skb);
1237
1238		if (checksum_setup(queue->info->netdev, skb)) {
1239			kfree_skb(skb);
1240			packets_dropped++;
1241			queue->info->netdev->stats.rx_errors++;
1242			continue;
1243		}
1244
1245		u64_stats_update_begin(&rx_stats->syncp);
1246		rx_stats->packets++;
1247		rx_stats->bytes += skb->len;
1248		u64_stats_update_end(&rx_stats->syncp);
1249
1250		/* Pass it up. */
1251		napi_gro_receive(&queue->napi, skb);
1252	}
1253
1254	return packets_dropped;
1255}
1256
1257static int xennet_poll(struct napi_struct *napi, int budget)
1258{
1259	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1260	struct net_device *dev = queue->info->netdev;
1261	struct sk_buff *skb;
1262	struct netfront_rx_info rinfo;
1263	struct xen_netif_rx_response *rx = &rinfo.rx;
1264	struct xen_netif_extra_info *extras = rinfo.extras;
1265	RING_IDX i, rp;
1266	int work_done;
1267	struct sk_buff_head rxq;
1268	struct sk_buff_head errq;
1269	struct sk_buff_head tmpq;
1270	int err;
1271	bool need_xdp_flush = false;
1272
1273	spin_lock(&queue->rx_lock);
1274
1275	skb_queue_head_init(&rxq);
1276	skb_queue_head_init(&errq);
1277	skb_queue_head_init(&tmpq);
1278
1279	rp = queue->rx.sring->rsp_prod;
1280	if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1281		dev_alert(&dev->dev, "Illegal number of responses %u\n",
1282			  rp - queue->rx.rsp_cons);
1283		queue->info->broken = true;
1284		spin_unlock(&queue->rx_lock);
1285		return 0;
1286	}
1287	rmb(); /* Ensure we see queued responses up to 'rp'. */
1288
1289	i = queue->rx.rsp_cons;
1290	work_done = 0;
1291	while ((i != rp) && (work_done < budget)) {
1292		RING_COPY_RESPONSE(&queue->rx, i, rx);
1293		memset(extras, 0, sizeof(rinfo.extras));
1294
1295		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1296					   &need_xdp_flush);
1297
1298		if (unlikely(err)) {
1299			if (queue->info->broken) {
1300				spin_unlock(&queue->rx_lock);
1301				return 0;
1302			}
1303err:
1304			while ((skb = __skb_dequeue(&tmpq)))
1305				__skb_queue_tail(&errq, skb);
1306			dev->stats.rx_errors++;
1307			i = queue->rx.rsp_cons;
1308			continue;
1309		}
1310
1311		skb = __skb_dequeue(&tmpq);
1312
1313		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1314			struct xen_netif_extra_info *gso;
1315			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1316
1317			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1318				__skb_queue_head(&tmpq, skb);
1319				xennet_set_rx_rsp_cons(queue,
1320						       queue->rx.rsp_cons +
1321						       skb_queue_len(&tmpq));
1322				goto err;
1323			}
1324		}
1325
1326		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1327		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1328			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1329
1330		skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1331		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1332		skb->data_len = rx->status;
1333		skb->len += rx->status;
1334
1335		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1336			goto err;
1337
1338		if (rx->flags & XEN_NETRXF_csum_blank)
1339			skb->ip_summed = CHECKSUM_PARTIAL;
1340		else if (rx->flags & XEN_NETRXF_data_validated)
1341			skb->ip_summed = CHECKSUM_UNNECESSARY;
1342
1343		__skb_queue_tail(&rxq, skb);
1344
1345		i = queue->rx.rsp_cons + 1;
1346		xennet_set_rx_rsp_cons(queue, i);
1347		work_done++;
1348	}
1349	if (need_xdp_flush)
1350		xdp_do_flush();
1351
1352	__skb_queue_purge(&errq);
1353
1354	work_done -= handle_incoming_queue(queue, &rxq);
1355
1356	xennet_alloc_rx_buffers(queue);
1357
1358	if (work_done < budget) {
1359		int more_to_do = 0;
1360
1361		napi_complete_done(napi, work_done);
1362
1363		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1364		if (more_to_do)
1365			napi_schedule(napi);
1366	}
1367
1368	spin_unlock(&queue->rx_lock);
1369
1370	return work_done;
1371}
1372
1373static int xennet_change_mtu(struct net_device *dev, int mtu)
1374{
1375	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1376
1377	if (mtu > max)
1378		return -EINVAL;
1379	dev->mtu = mtu;
1380	return 0;
1381}
1382
1383static void xennet_get_stats64(struct net_device *dev,
1384			       struct rtnl_link_stats64 *tot)
1385{
1386	struct netfront_info *np = netdev_priv(dev);
1387	int cpu;
1388
1389	for_each_possible_cpu(cpu) {
1390		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1391		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1392		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1393		unsigned int start;
1394
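		/*
		 * u64_stats_fetch_begin/retry re-read the counters if a writer
		 * updated them concurrently, giving a consistent snapshot even
		 * on 32-bit hosts.
		 */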
1395		do {
1396			start = u64_stats_fetch_begin(&tx_stats->syncp);
1397			tx_packets = tx_stats->packets;
1398			tx_bytes = tx_stats->bytes;
1399		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
1400
1401		do {
1402			start = u64_stats_fetch_begin(&rx_stats->syncp);
1403			rx_packets = rx_stats->packets;
1404			rx_bytes = rx_stats->bytes;
1405		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
1406
1407		tot->rx_packets += rx_packets;
1408		tot->tx_packets += tx_packets;
1409		tot->rx_bytes   += rx_bytes;
1410		tot->tx_bytes   += tx_bytes;
1411	}
1412
1413	tot->rx_errors  = dev->stats.rx_errors;
1414	tot->tx_dropped = dev->stats.tx_dropped;
1415}
1416
1417static void xennet_release_tx_bufs(struct netfront_queue *queue)
1418{
1419	struct sk_buff *skb;
1420	int i;
1421
1422	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1423		/* Skip over entries which are actually freelist references */
1424		if (!queue->tx_skbs[i])
1425			continue;
1426
1427		skb = queue->tx_skbs[i];
1428		queue->tx_skbs[i] = NULL;
1429		get_page(queue->grant_tx_page[i]);
1430		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1431					  queue->grant_tx_page[i]);
 
1432		queue->grant_tx_page[i] = NULL;
1433		queue->grant_tx_ref[i] = INVALID_GRANT_REF;
1434		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1435		dev_kfree_skb_irq(skb);
1436	}
1437}
1438
1439static void xennet_release_rx_bufs(struct netfront_queue *queue)
1440{
1441	int id, ref;
1442
1443	spin_lock_bh(&queue->rx_lock);
1444
1445	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1446		struct sk_buff *skb;
1447		struct page *page;
1448
1449		skb = queue->rx_skbs[id];
1450		if (!skb)
1451			continue;
1452
1453		ref = queue->grant_rx_ref[id];
1454		if (ref == INVALID_GRANT_REF)
1455			continue;
1456
1457		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1458
1459		/* gnttab_end_foreign_access() needs a page ref until
1460		 * foreign access is ended (which may be deferred).
1461		 */
1462		get_page(page);
1463		gnttab_end_foreign_access(ref, page);
1464		queue->grant_rx_ref[id] = INVALID_GRANT_REF;
 
1465
1466		kfree_skb(skb);
1467	}
1468
1469	spin_unlock_bh(&queue->rx_lock);
1470}
1471
1472static netdev_features_t xennet_fix_features(struct net_device *dev,
1473	netdev_features_t features)
1474{
1475	struct netfront_info *np = netdev_priv(dev);
1476
1477	if (features & NETIF_F_SG &&
1478	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1479		features &= ~NETIF_F_SG;
1480
1481	if (features & NETIF_F_IPV6_CSUM &&
1482	    !xenbus_read_unsigned(np->xbdev->otherend,
1483				  "feature-ipv6-csum-offload", 0))
1484		features &= ~NETIF_F_IPV6_CSUM;
1485
1486	if (features & NETIF_F_TSO &&
1487	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1488		features &= ~NETIF_F_TSO;
1489
1490	if (features & NETIF_F_TSO6 &&
1491	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1492		features &= ~NETIF_F_TSO6;
1493
1494	return features;
1495}
1496
1497static int xennet_set_features(struct net_device *dev,
1498	netdev_features_t features)
1499{
1500	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1501		netdev_info(dev, "Reducing MTU because no SG offload");
1502		dev->mtu = ETH_DATA_LEN;
1503	}
1504
1505	return 0;
1506}
1507
1508static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1509{
 
1510	unsigned long flags;
1511
1512	if (unlikely(queue->info->broken))
1513		return false;
1514
1515	spin_lock_irqsave(&queue->tx_lock, flags);
1516	if (xennet_tx_buf_gc(queue))
1517		*eoi = 0;
1518	spin_unlock_irqrestore(&queue->tx_lock, flags);
1519
1520	return true;
1521}
1522
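/*
 * The eoi flag starts as XEN_EOI_FLAG_SPURIOUS and is cleared whenever real
 * work was found, so only interrupts that produced nothing are reported as
 * spurious when the late EOI is issued.
 */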
1523static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1524{
1525	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1526
1527	if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1528		xen_irq_lateeoi(irq, eoiflag);
1529
1530	return IRQ_HANDLED;
1531}
1532
1533static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1534{
1535	unsigned int work_queued;
1536	unsigned long flags;
1537
1538	if (unlikely(queue->info->broken))
1539		return false;
1540
1541	spin_lock_irqsave(&queue->rx_cons_lock, flags);
1542	work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
1543	if (work_queued > queue->rx_rsp_unconsumed) {
1544		queue->rx_rsp_unconsumed = work_queued;
1545		*eoi = 0;
1546	} else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1547		const struct device *dev = &queue->info->netdev->dev;
1548
1549		spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1550		dev_alert(dev, "RX producer index going backwards\n");
1551		dev_alert(dev, "Disabled for further use\n");
1552		queue->info->broken = true;
1553		return false;
1554	}
1555	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1556
1557	if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1558		napi_schedule(&queue->napi);
1559
1560	return true;
1561}
1562
1563static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1564{
1565	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1566
1567	if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1568		xen_irq_lateeoi(irq, eoiflag);
1569
1570	return IRQ_HANDLED;
1571}
1572
1573static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1574{
1575	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1576
1577	if (xennet_handle_tx(dev_id, &eoiflag) &&
1578	    xennet_handle_rx(dev_id, &eoiflag))
1579		xen_irq_lateeoi(irq, eoiflag);
1580
1581	return IRQ_HANDLED;
1582}
1583
1584#ifdef CONFIG_NET_POLL_CONTROLLER
1585static void xennet_poll_controller(struct net_device *dev)
1586{
1587	/* Poll each queue */
1588	struct netfront_info *info = netdev_priv(dev);
1589	unsigned int num_queues = dev->real_num_tx_queues;
1590	unsigned int i;
1591
1592	if (info->broken)
1593		return;
1594
1595	for (i = 0; i < num_queues; ++i)
1596		xennet_interrupt(0, &info->queues[i]);
1597}
1598#endif
1599
1600#define NETBACK_XDP_HEADROOM_DISABLE	0
1601#define NETBACK_XDP_HEADROOM_ENABLE	1
1602
1603static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1604{
1605	int err;
1606	unsigned short headroom;
1607
1608	headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1609	err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1610			    "xdp-headroom", "%hu",
1611			    headroom);
1612	if (err)
1613		pr_warn("Error writing xdp-headroom\n");
1614
1615	return err;
1616}
1617
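/*
 * Install or remove an XDP program.  The MTU must fit in one page minus
 * the XDP headroom.  If the backend supports xdp-headroom, switch to
 * Reconfiguring, renegotiate the headroom, wait for the backend to reach
 * Reconfigured, and only then swap the program pointer on every queue.
 */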
1618static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1619			  struct netlink_ext_ack *extack)
1620{
1621	unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1622	struct netfront_info *np = netdev_priv(dev);
1623	struct bpf_prog *old_prog;
1624	unsigned int i, err;
1625
1626	if (dev->mtu > max_mtu) {
1627		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
1628		return -EINVAL;
1629	}
1630
1631	if (!np->netback_has_xdp_headroom)
1632		return 0;
1633
1634	xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1635
1636	err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1637				  NETBACK_XDP_HEADROOM_DISABLE);
1638	if (err)
1639		return err;
1640
1641	/* avoid the race with XDP headroom adjustment */
1642	wait_event(module_wq,
1643		   xenbus_read_driver_state(np->xbdev->otherend) ==
1644		   XenbusStateReconfigured);
1645	np->netfront_xdp_enabled = true;
1646
1647	old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1648
1649	if (prog)
1650		bpf_prog_add(prog, dev->real_num_tx_queues);
1651
1652	for (i = 0; i < dev->real_num_tx_queues; ++i)
1653		rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1654
1655	if (old_prog)
1656		for (i = 0; i < dev->real_num_tx_queues; ++i)
1657			bpf_prog_put(old_prog);
1658
1659	xenbus_switch_state(np->xbdev, XenbusStateConnected);
1660
1661	return 0;
1662}
1663
1664static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1665{
1666	struct netfront_info *np = netdev_priv(dev);
1667
1668	if (np->broken)
1669		return -ENODEV;
1670
1671	switch (xdp->command) {
1672	case XDP_SETUP_PROG:
1673		return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1674	default:
1675		return -EINVAL;
1676	}
1677}
1678
1679static const struct net_device_ops xennet_netdev_ops = {
1680	.ndo_uninit          = xennet_uninit,
1681	.ndo_open            = xennet_open,
1682	.ndo_stop            = xennet_close,
1683	.ndo_start_xmit      = xennet_start_xmit,
1684	.ndo_change_mtu	     = xennet_change_mtu,
1685	.ndo_get_stats64     = xennet_get_stats64,
1686	.ndo_set_mac_address = eth_mac_addr,
1687	.ndo_validate_addr   = eth_validate_addr,
1688	.ndo_fix_features    = xennet_fix_features,
1689	.ndo_set_features    = xennet_set_features,
1690	.ndo_select_queue    = xennet_select_queue,
1691	.ndo_bpf            = xennet_xdp,
1692	.ndo_xdp_xmit	    = xennet_xdp_xmit,
1693#ifdef CONFIG_NET_POLL_CONTROLLER
1694	.ndo_poll_controller = xennet_poll_controller,
1695#endif
1696};
1697
1698static void xennet_free_netdev(struct net_device *netdev)
1699{
1700	struct netfront_info *np = netdev_priv(netdev);
1701
1702	free_percpu(np->rx_stats);
1703	free_percpu(np->tx_stats);
1704	free_netdev(netdev);
1705}
1706
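/*
 * Allocate the multi-queue net_device and its per-CPU statistics, set up
 * the netdev_ops and feature masks, and wait for the backend to leave the
 * Closed/Unknown states before returning the device to the probe path.
 */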
1707static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1708{
1709	int err;
1710	struct net_device *netdev;
1711	struct netfront_info *np;
1712
1713	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1714	if (!netdev)
1715		return ERR_PTR(-ENOMEM);
1716
1717	np                   = netdev_priv(netdev);
1718	np->xbdev            = dev;
1719
1720	np->queues = NULL;
1721
1722	err = -ENOMEM;
1723	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1724	if (np->rx_stats == NULL)
1725		goto exit;
1726	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1727	if (np->tx_stats == NULL)
1728		goto exit;
1729
1730	netdev->netdev_ops	= &xennet_netdev_ops;
1731
1732	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1733				  NETIF_F_GSO_ROBUST;
1734	netdev->hw_features	= NETIF_F_SG |
1735				  NETIF_F_IPV6_CSUM |
1736				  NETIF_F_TSO | NETIF_F_TSO6;
1737
1738	/*
1739	 * Assume that all hw features are available for now. This set
1740	 * will be adjusted by the call to netdev_update_features() in
1741	 * xennet_connect() which is the earliest point where we can
1742	 * negotiate with the backend regarding supported features.
1743	 */
1744	netdev->features |= netdev->hw_features;
1745	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
1746			       NETDEV_XDP_ACT_NDO_XMIT;
1747
1748	netdev->ethtool_ops = &xennet_ethtool_ops;
1749	netdev->min_mtu = ETH_MIN_MTU;
1750	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1751	SET_NETDEV_DEV(netdev, &dev->dev);
1752
1753	np->netdev = netdev;
1754	np->netfront_xdp_enabled = false;
1755
1756	netif_carrier_off(netdev);
1757
1758	do {
1759		xenbus_switch_state(dev, XenbusStateInitialising);
1760		err = wait_event_timeout(module_wq,
1761				 xenbus_read_driver_state(dev->otherend) !=
1762				 XenbusStateClosed &&
1763				 xenbus_read_driver_state(dev->otherend) !=
1764				 XenbusStateUnknown, XENNET_TIMEOUT);
1765	} while (!err);
1766
1767	return netdev;
1768
1769 exit:
1770	xennet_free_netdev(netdev);
1771	return ERR_PTR(err);
1772}
1773
1774/*
1775 * Entry point to this code when a new device is created.  Allocate the basic
1776 * structures and the ring buffers for communication with the backend, and
1777 * inform the backend of the appropriate details for those.
1778 */
1779static int netfront_probe(struct xenbus_device *dev,
1780			  const struct xenbus_device_id *id)
1781{
1782	int err;
1783	struct net_device *netdev;
1784	struct netfront_info *info;
1785
1786	netdev = xennet_create_dev(dev);
1787	if (IS_ERR(netdev)) {
1788		err = PTR_ERR(netdev);
1789		xenbus_dev_fatal(dev, err, "creating netdev");
1790		return err;
1791	}
1792
1793	info = netdev_priv(netdev);
1794	dev_set_drvdata(&dev->dev, info);
1795#ifdef CONFIG_SYSFS
1796	info->netdev->sysfs_groups[0] = &xennet_dev_group;
1797#endif
1798
1799	return 0;
1800}
1801
1802static void xennet_end_access(int ref, void *page)
1803{
1804	/* This frees the page as a side-effect */
1805	if (ref != INVALID_GRANT_REF)
1806		gnttab_end_foreign_access(ref, virt_to_page(page));
1807}
1808
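/*
 * Tear down everything shared with the backend: unbind the per-queue
 * interrupts and event channels, release outstanding tx/rx buffers and
 * grant references, revoke access to the shared rings and destroy the
 * per-queue page pools.
 */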
1809static void xennet_disconnect_backend(struct netfront_info *info)
1810{
1811	unsigned int i = 0;
1812	unsigned int num_queues = info->netdev->real_num_tx_queues;
1813
1814	netif_carrier_off(info->netdev);
1815
1816	for (i = 0; i < num_queues && info->queues; ++i) {
1817		struct netfront_queue *queue = &info->queues[i];
1818
1819		del_timer_sync(&queue->rx_refill_timer);
1820
1821		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1822			unbind_from_irqhandler(queue->tx_irq, queue);
1823		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1824			unbind_from_irqhandler(queue->tx_irq, queue);
1825			unbind_from_irqhandler(queue->rx_irq, queue);
1826		}
1827		queue->tx_evtchn = queue->rx_evtchn = 0;
1828		queue->tx_irq = queue->rx_irq = 0;
1829
1830		if (netif_running(info->netdev))
1831			napi_synchronize(&queue->napi);
1832
1833		xennet_release_tx_bufs(queue);
1834		xennet_release_rx_bufs(queue);
1835		gnttab_free_grant_references(queue->gref_tx_head);
1836		gnttab_free_grant_references(queue->gref_rx_head);
1837
1838		/* End access and free the pages */
1839		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1840		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1841
1842		queue->tx_ring_ref = INVALID_GRANT_REF;
1843		queue->rx_ring_ref = INVALID_GRANT_REF;
1844		queue->tx.sring = NULL;
1845		queue->rx.sring = NULL;
1846
1847		page_pool_destroy(queue->page_pool);
1848	}
1849}
1850
1851/*
1852 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1853 * driver restart.  We tear down our netif structure and recreate it, but
1854 * leave the device-layer structures intact so that this is transparent to the
1855 * rest of the kernel.
1856 */
1857static int netfront_resume(struct xenbus_device *dev)
1858{
1859	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1860
1861	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1862
1863	netif_tx_lock_bh(info->netdev);
1864	netif_device_detach(info->netdev);
1865	netif_tx_unlock_bh(info->netdev);
1866
1867	xennet_disconnect_backend(info);
1868
1869	rtnl_lock();
1870	if (info->queues)
1871		xennet_destroy_queues(info);
1872	rtnl_unlock();
1873
1874	return 0;
1875}
1876
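/* Parse the backend-provided "mac" node ("xx:xx:xx:xx:xx:xx") into mac[]. */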
1877static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1878{
1879	char *s, *e, *macstr;
1880	int i;
1881
1882	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1883	if (IS_ERR(macstr))
1884		return PTR_ERR(macstr);
1885
1886	for (i = 0; i < ETH_ALEN; i++) {
1887		mac[i] = simple_strtoul(s, &e, 16);
1888		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1889			kfree(macstr);
1890			return -ENOENT;
1891		}
1892		s = e+1;
1893	}
1894
1895	kfree(macstr);
1896	return 0;
1897}
1898
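/* Single shared event channel for tx and rx, bound to xennet_interrupt(). */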
1899static int setup_netfront_single(struct netfront_queue *queue)
1900{
1901	int err;
1902
1903	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1904	if (err < 0)
1905		goto fail;
1906
1907	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1908						xennet_interrupt, 0,
1909						queue->info->netdev->name,
1910						queue);
1911	if (err < 0)
1912		goto bind_fail;
1913	queue->rx_evtchn = queue->tx_evtchn;
1914	queue->rx_irq = queue->tx_irq = err;
1915
1916	return 0;
1917
1918bind_fail:
1919	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1920	queue->tx_evtchn = 0;
1921fail:
1922	return err;
1923}
1924
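/*
 * Separate tx and rx event channels, each bound to its own lateeoi
 * handler and named "<queue>-tx" / "<queue>-rx".
 */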
1925static int setup_netfront_split(struct netfront_queue *queue)
1926{
1927	int err;
1928
1929	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1930	if (err < 0)
1931		goto fail;
1932	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1933	if (err < 0)
1934		goto alloc_rx_evtchn_fail;
1935
1936	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1937		 "%s-tx", queue->name);
1938	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1939						xennet_tx_interrupt, 0,
1940						queue->tx_irq_name, queue);
1941	if (err < 0)
1942		goto bind_tx_fail;
1943	queue->tx_irq = err;
1944
1945	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1946		 "%s-rx", queue->name);
1947	err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1948						xennet_rx_interrupt, 0,
1949						queue->rx_irq_name, queue);
1950	if (err < 0)
1951		goto bind_rx_fail;
1952	queue->rx_irq = err;
1953
1954	return 0;
1955
1956bind_rx_fail:
1957	unbind_from_irqhandler(queue->tx_irq, queue);
1958	queue->tx_irq = 0;
1959bind_tx_fail:
1960	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1961	queue->rx_evtchn = 0;
1962alloc_rx_evtchn_fail:
1963	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1964	queue->tx_evtchn = 0;
1965fail:
1966	return err;
1967}
1968
1969static int setup_netfront(struct xenbus_device *dev,
1970			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1971{
1972	struct xen_netif_tx_sring *txs;
1973	struct xen_netif_rx_sring *rxs;
1974	int err;
1975
1976	queue->tx_ring_ref = INVALID_GRANT_REF;
1977	queue->rx_ring_ref = INVALID_GRANT_REF;
1978	queue->rx.sring = NULL;
1979	queue->tx.sring = NULL;
1980
1981	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
1982				1, &queue->tx_ring_ref);
1983	if (err)
1984		goto fail;
1985
1986	XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1987
1988	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
1989				1, &queue->rx_ring_ref);
1990	if (err)
1991		goto fail;
1992
1993	XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1994
1995	if (feature_split_evtchn)
1996		err = setup_netfront_split(queue);
1997	/* setup single event channel if
1998	 *  a) feature-split-event-channels == 0
1999	 *  b) feature-split-event-channels == 1 but setting them up failed
2000	 */
2001	if (!feature_split_evtchn || err)
2002		err = setup_netfront_single(queue);
2003
2004	if (err)
2005		goto fail;
2006
2007	return 0;
2008
2009 fail:
2010	xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
2011	xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
2012
2013	return err;
2014}
2015
2016/* Queue-specific initialisation
2017 * This used to be done in xennet_create_dev() but must now
2018 * be run per-queue.
2019 */
2020static int xennet_init_queue(struct netfront_queue *queue)
2021{
2022	unsigned short i;
2023	int err = 0;
2024	char *devid;
2025
2026	spin_lock_init(&queue->tx_lock);
2027	spin_lock_init(&queue->rx_lock);
2028	spin_lock_init(&queue->rx_cons_lock);
2029
2030	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
2031
2032	devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
2033	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2034		 devid, queue->id);
2035
2036	/* Initialise tx_skb_freelist as a free chain containing every entry. */
2037	queue->tx_skb_freelist = 0;
2038	queue->tx_pend_queue = TX_LINK_NONE;
2039	for (i = 0; i < NET_TX_RING_SIZE; i++) {
2040		queue->tx_link[i] = i + 1;
2041		queue->grant_tx_ref[i] = INVALID_GRANT_REF;
2042		queue->grant_tx_page[i] = NULL;
2043	}
2044	queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2045
2046	/* Clear out rx_skbs */
2047	for (i = 0; i < NET_RX_RING_SIZE; i++) {
2048		queue->rx_skbs[i] = NULL;
2049		queue->grant_rx_ref[i] = INVALID_GRANT_REF;
2050	}
2051
2052	/* A grant for every tx ring slot */
2053	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2054					  &queue->gref_tx_head) < 0) {
2055		pr_alert("can't alloc tx grant refs\n");
2056		err = -ENOMEM;
2057		goto exit;
2058	}
2059
2060	/* A grant for every rx ring slot */
2061	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
2062					  &queue->gref_rx_head) < 0) {
2063		pr_alert("can't alloc rx grant refs\n");
2064		err = -ENOMEM;
2065		goto exit_free_tx;
2066	}
2067
2068	return 0;
2069
2070 exit_free_tx:
2071	gnttab_free_grant_references(queue->gref_tx_head);
2072 exit:
2073	return err;
2074}
2075
2076static int write_queue_xenstore_keys(struct netfront_queue *queue,
2077			   struct xenbus_transaction *xbt, int write_hierarchical)
2078{
2079	/* Write the queue-specific keys into XenStore in the traditional
2080	 * way for a single queue, or in per-queue subkeys for multiple
2081	 * queues.
2082	 */
2083	struct xenbus_device *dev = queue->info->xbdev;
2084	int err;
2085	const char *message;
2086	char *path;
2087	size_t pathsize;
2088
2089	/* Choose the correct place to write the keys */
2090	if (write_hierarchical) {
2091		pathsize = strlen(dev->nodename) + 10;
2092		path = kzalloc(pathsize, GFP_KERNEL);
2093		if (!path) {
2094			err = -ENOMEM;
2095			message = "out of memory while writing ring references";
2096			goto error;
2097		}
2098		snprintf(path, pathsize, "%s/queue-%u",
2099				dev->nodename, queue->id);
2100	} else {
2101		path = (char *)dev->nodename;
2102	}
2103
2104	/* Write ring references */
2105	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
2106			queue->tx_ring_ref);
2107	if (err) {
2108		message = "writing tx-ring-ref";
2109		goto error;
2110	}
2111
2112	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
2113			queue->rx_ring_ref);
2114	if (err) {
2115		message = "writing rx-ring-ref";
2116		goto error;
2117	}
2118
2119	/* Write event channels; taking into account both shared
2120	 * and split event channel scenarios.
2121	 */
2122	if (queue->tx_evtchn == queue->rx_evtchn) {
2123		/* Shared event channel */
2124		err = xenbus_printf(*xbt, path,
2125				"event-channel", "%u", queue->tx_evtchn);
2126		if (err) {
2127			message = "writing event-channel";
2128			goto error;
2129		}
2130	} else {
2131		/* Split event channels */
2132		err = xenbus_printf(*xbt, path,
2133				"event-channel-tx", "%u", queue->tx_evtchn);
2134		if (err) {
2135			message = "writing event-channel-tx";
2136			goto error;
2137		}
2138
2139		err = xenbus_printf(*xbt, path,
2140				"event-channel-rx", "%u", queue->rx_evtchn);
2141		if (err) {
2142			message = "writing event-channel-rx";
2143			goto error;
2144		}
2145	}
2146
2147	if (write_hierarchical)
2148		kfree(path);
2149	return 0;
2150
2151error:
2152	if (write_hierarchical)
2153		kfree(path);
2154	xenbus_dev_fatal(dev, err, "%s", message);
2155	return err;
2156}
2157
2158
2159
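/*
 * One page pool per queue, sized to the rx ring and registered as the
 * XDP memory model of the queue's rxq info, so rx pages can be recycled
 * instead of going back to the page allocator.
 */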
2160static int xennet_create_page_pool(struct netfront_queue *queue)
2161{
2162	int err;
2163	struct page_pool_params pp_params = {
2164		.order = 0,
2165		.flags = 0,
2166		.pool_size = NET_RX_RING_SIZE,
2167		.nid = NUMA_NO_NODE,
2168		.dev = &queue->info->netdev->dev,
2169		.offset = XDP_PACKET_HEADROOM,
2170		.max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2171	};
2172
2173	queue->page_pool = page_pool_create(&pp_params);
2174	if (IS_ERR(queue->page_pool)) {
2175		err = PTR_ERR(queue->page_pool);
2176		queue->page_pool = NULL;
2177		return err;
2178	}
2179
2180	err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2181			       queue->id, 0);
2182	if (err) {
2183		netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2184		goto err_free_pp;
2185	}
2186
2187	err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2188					 MEM_TYPE_PAGE_POOL, queue->page_pool);
2189	if (err) {
2190		netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2191		goto err_unregister_rxq;
2192	}
2193	return 0;
2194
2195err_unregister_rxq:
2196	xdp_rxq_info_unreg(&queue->xdp_rxq);
2197err_free_pp:
2198	page_pool_destroy(queue->page_pool);
2199	queue->page_pool = NULL;
2200	return err;
2201}
2202
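/*
 * Allocate and initialise *num_queues queues.  On partial failure the
 * count is trimmed to the number of queues actually brought up; ending
 * up with zero usable queues is reported as an error.
 */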
2203static int xennet_create_queues(struct netfront_info *info,
2204				unsigned int *num_queues)
2205{
2206	unsigned int i;
2207	int ret;
2208
2209	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2210			       GFP_KERNEL);
2211	if (!info->queues)
2212		return -ENOMEM;
2213
2214	for (i = 0; i < *num_queues; i++) {
2215		struct netfront_queue *queue = &info->queues[i];
2216
2217		queue->id = i;
2218		queue->info = info;
2219
2220		ret = xennet_init_queue(queue);
2221		if (ret < 0) {
2222			dev_warn(&info->xbdev->dev,
2223				 "only created %d queues\n", i);
2224			*num_queues = i;
2225			break;
2226		}
2227
2228		/* use page pool recycling instead of buddy allocator */
2229		ret = xennet_create_page_pool(queue);
2230		if (ret < 0) {
2231			dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2232			*num_queues = i;
2233			return ret;
2234		}
2235
2236		netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
2237		if (netif_running(info->netdev))
2238			napi_enable(&queue->napi);
2239	}
2240
2241	netif_set_real_num_tx_queues(info->netdev, *num_queues);
2242
2243	if (*num_queues == 0) {
2244		dev_err(&info->xbdev->dev, "no queues\n");
2245		return -EINVAL;
2246	}
2247	return 0;
2248}
2249
2250/* Common code used when first setting up, and when resuming. */
2251static int talk_to_netback(struct xenbus_device *dev,
2252			   struct netfront_info *info)
2253{
2254	const char *message;
2255	struct xenbus_transaction xbt;
2256	int err;
2257	unsigned int feature_split_evtchn;
2258	unsigned int i = 0;
2259	unsigned int max_queues = 0;
2260	struct netfront_queue *queue = NULL;
2261	unsigned int num_queues = 1;
2262	u8 addr[ETH_ALEN];
2263
2264	info->netdev->irq = 0;
2265
2266	/* Check if backend is trusted. */
2267	info->bounce = !xennet_trusted ||
2268		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
2269
2270	/* Check if backend supports multiple queues */
2271	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2272					  "multi-queue-max-queues", 1);
2273	num_queues = min(max_queues, xennet_max_queues);
2274
2275	/* Check feature-split-event-channels */
2276	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2277					"feature-split-event-channels", 0);
2278
2279	/* Read mac addr. */
2280	err = xen_net_read_mac(dev, addr);
2281	if (err) {
2282		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2283		goto out_unlocked;
2284	}
2285	eth_hw_addr_set(info->netdev, addr);
2286
2287	info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2288							      "feature-xdp-headroom", 0);
2289	if (info->netback_has_xdp_headroom) {
2290		/* set the current xen-netfront xdp state */
2291		err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2292					  NETBACK_XDP_HEADROOM_ENABLE :
2293					  NETBACK_XDP_HEADROOM_DISABLE);
2294		if (err)
2295			goto out_unlocked;
2296	}
2297
2298	rtnl_lock();
2299	if (info->queues)
2300		xennet_destroy_queues(info);
2301
2302	/* For the case of a reconnect reset the "broken" indicator. */
2303	info->broken = false;
2304
2305	err = xennet_create_queues(info, &num_queues);
2306	if (err < 0) {
2307		xenbus_dev_fatal(dev, err, "creating queues");
2308		kfree(info->queues);
2309		info->queues = NULL;
2310		goto out;
2311	}
2312	rtnl_unlock();
2313
2314	/* Create shared ring, alloc event channel -- for each queue */
2315	for (i = 0; i < num_queues; ++i) {
2316		queue = &info->queues[i];
2317		err = setup_netfront(dev, queue, feature_split_evtchn);
2318		if (err)
2319			goto destroy_ring;
2320	}
2321
2322again:
2323	err = xenbus_transaction_start(&xbt);
2324	if (err) {
2325		xenbus_dev_fatal(dev, err, "starting transaction");
2326		goto destroy_ring;
2327	}
2328
2329	if (xenbus_exists(XBT_NIL,
2330			  info->xbdev->otherend, "multi-queue-max-queues")) {
2331		/* Write the number of queues */
2332		err = xenbus_printf(xbt, dev->nodename,
2333				    "multi-queue-num-queues", "%u", num_queues);
2334		if (err) {
2335			message = "writing multi-queue-num-queues";
2336			goto abort_transaction_no_dev_fatal;
2337		}
2338	}
2339
2340	if (num_queues == 1) {
2341		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2342		if (err)
2343			goto abort_transaction_no_dev_fatal;
2344	} else {
2345		/* Write the keys for each queue */
2346		for (i = 0; i < num_queues; ++i) {
2347			queue = &info->queues[i];
2348			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2349			if (err)
2350				goto abort_transaction_no_dev_fatal;
2351		}
2352	}
2353
2354	/* The remaining keys are not queue-specific */
2355	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2356			    1);
2357	if (err) {
2358		message = "writing request-rx-copy";
2359		goto abort_transaction;
2360	}
2361
2362	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2363	if (err) {
2364		message = "writing feature-rx-notify";
2365		goto abort_transaction;
2366	}
2367
2368	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2369	if (err) {
2370		message = "writing feature-sg";
2371		goto abort_transaction;
2372	}
2373
2374	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2375	if (err) {
2376		message = "writing feature-gso-tcpv4";
2377		goto abort_transaction;
2378	}
2379
2380	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2381	if (err) {
2382		message = "writing feature-gso-tcpv6";
2383		goto abort_transaction;
2384	}
2385
2386	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2387			   "1");
2388	if (err) {
2389		message = "writing feature-ipv6-csum-offload";
2390		goto abort_transaction;
2391	}
2392
2393	err = xenbus_transaction_end(xbt, 0);
2394	if (err) {
2395		if (err == -EAGAIN)
2396			goto again;
2397		xenbus_dev_fatal(dev, err, "completing transaction");
2398		goto destroy_ring;
2399	}
2400
2401	return 0;
2402
2403 abort_transaction:
2404	xenbus_dev_fatal(dev, err, "%s", message);
2405abort_transaction_no_dev_fatal:
2406	xenbus_transaction_end(xbt, 1);
2407 destroy_ring:
2408	xennet_disconnect_backend(info);
2409	rtnl_lock();
2410	xennet_destroy_queues(info);
2411 out:
2412	rtnl_unlock();
2413out_unlocked:
2414	device_unregister(&dev->dev);
2415	return err;
2416}
2417
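/*
 * Bring the device up once the backend reaches InitWait: require the
 * copying receive path, (re)negotiate rings and features via
 * talk_to_netback(), register the net_device on first connect, refresh
 * the feature set, then kick every queue and refill its rx buffers.
 */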
2418static int xennet_connect(struct net_device *dev)
2419{
2420	struct netfront_info *np = netdev_priv(dev);
2421	unsigned int num_queues = 0;
2422	int err;
2423	unsigned int j = 0;
2424	struct netfront_queue *queue = NULL;
2425
2426	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
2427		dev_info(&dev->dev,
2428			 "backend does not support copying receive path\n");
2429		return -ENODEV;
2430	}
2431
2432	err = talk_to_netback(np->xbdev, np);
2433	if (err)
2434		return err;
2435	if (np->netback_has_xdp_headroom)
2436		pr_info("backend supports XDP headroom\n");
2437	if (np->bounce)
2438		dev_info(&np->xbdev->dev,
2439			 "bouncing transmitted data to zeroed pages\n");
2440
2441	/* talk_to_netback() sets the correct number of queues */
2442	num_queues = dev->real_num_tx_queues;
2443
2444	if (dev->reg_state == NETREG_UNINITIALIZED) {
2445		err = register_netdev(dev);
2446		if (err) {
2447			pr_warn("%s: register_netdev err=%d\n", __func__, err);
2448			device_unregister(&np->xbdev->dev);
2449			return err;
2450		}
2451	}
2452
2453	rtnl_lock();
2454	netdev_update_features(dev);
2455	rtnl_unlock();
2456
2457	/*
2458	 * All public and private state should now be sane.  Get
2459	 * ready to start sending and receiving packets and give the driver
2460	 * domain a kick because we've probably just requeued some
2461	 * packets.
2462	 */
2463	netif_tx_lock_bh(np->netdev);
2464	netif_device_attach(np->netdev);
2465	netif_tx_unlock_bh(np->netdev);
2466
2467	netif_carrier_on(np->netdev);
2468	for (j = 0; j < num_queues; ++j) {
2469		queue = &np->queues[j];
2470
2471		notify_remote_via_irq(queue->tx_irq);
2472		if (queue->tx_irq != queue->rx_irq)
2473			notify_remote_via_irq(queue->rx_irq);
2474
2475		spin_lock_bh(&queue->rx_lock);
2476		xennet_alloc_rx_buffers(queue);
2477		spin_unlock_bh(&queue->rx_lock);
2478	}
2479
2480	return 0;
2481}
2482
2483/*
2484 * Callback received when the backend's state changes.
2485 */
2486static void netback_changed(struct xenbus_device *dev,
2487			    enum xenbus_state backend_state)
2488{
2489	struct netfront_info *np = dev_get_drvdata(&dev->dev);
2490	struct net_device *netdev = np->netdev;
2491
2492	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2493
2494	wake_up_all(&module_wq);
2495
2496	switch (backend_state) {
2497	case XenbusStateInitialising:
2498	case XenbusStateInitialised:
2499	case XenbusStateReconfiguring:
2500	case XenbusStateReconfigured:
2501	case XenbusStateUnknown:
2502		break;
2503
2504	case XenbusStateInitWait:
2505		if (dev->state != XenbusStateInitialising)
2506			break;
2507		if (xennet_connect(netdev) != 0)
2508			break;
2509		xenbus_switch_state(dev, XenbusStateConnected);
2510		break;
2511
2512	case XenbusStateConnected:
2513		netdev_notify_peers(netdev);
2514		break;
2515
2516	case XenbusStateClosed:
2517		if (dev->state == XenbusStateClosed)
2518			break;
2519		fallthrough;	/* Missed the backend's CLOSING state */
2520	case XenbusStateClosing:
2521		xenbus_frontend_closed(dev);
2522		break;
2523	}
2524}
2525
2526static const struct xennet_stat {
2527	char name[ETH_GSTRING_LEN];
2528	u16 offset;
2529} xennet_stats[] = {
2530	{
2531		"rx_gso_checksum_fixup",
2532		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2533	},
2534};
2535
2536static int xennet_get_sset_count(struct net_device *dev, int string_set)
2537{
2538	switch (string_set) {
2539	case ETH_SS_STATS:
2540		return ARRAY_SIZE(xennet_stats);
2541	default:
2542		return -EINVAL;
2543	}
2544}
2545
2546static void xennet_get_ethtool_stats(struct net_device *dev,
2547				     struct ethtool_stats *stats, u64 * data)
2548{
2549	void *np = netdev_priv(dev);
2550	int i;
2551
2552	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2553		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2554}
2555
2556static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2557{
2558	int i;
2559
2560	switch (stringset) {
2561	case ETH_SS_STATS:
2562		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2563			memcpy(data + i * ETH_GSTRING_LEN,
2564			       xennet_stats[i].name, ETH_GSTRING_LEN);
2565		break;
2566	}
2567}
2568
2569static const struct ethtool_ops xennet_ethtool_ops =
2570{
2571	.get_link = ethtool_op_get_link,
2572
2573	.get_sset_count = xennet_get_sset_count,
2574	.get_ethtool_stats = xennet_get_ethtool_stats,
2575	.get_strings = xennet_get_strings,
2576	.get_ts_info = ethtool_op_get_ts_info,
2577};
2578
2579#ifdef CONFIG_SYSFS
2580static ssize_t show_rxbuf(struct device *dev,
2581			  struct device_attribute *attr, char *buf)
2582{
2583	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2584}
2585
2586static ssize_t store_rxbuf(struct device *dev,
2587			   struct device_attribute *attr,
2588			   const char *buf, size_t len)
2589{
2590	char *endp;
2591
2592	if (!capable(CAP_NET_ADMIN))
2593		return -EPERM;
2594
2595	simple_strtoul(buf, &endp, 0);
2596	if (endp == buf)
2597		return -EBADMSG;
2598
2599	/* rxbuf_min and rxbuf_max are no longer configurable. */
2600
2601	return len;
2602}
2603
2604static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2605static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2606static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2607
2608static struct attribute *xennet_dev_attrs[] = {
2609	&dev_attr_rxbuf_min.attr,
2610	&dev_attr_rxbuf_max.attr,
2611	&dev_attr_rxbuf_cur.attr,
2612	NULL
2613};
2614
2615static const struct attribute_group xennet_dev_group = {
2616	.attrs = xennet_dev_attrs
2617};
2618#endif /* CONFIG_SYSFS */
2619
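/*
 * Walk the frontend through Closing and then Closed, retrying on timeout
 * until the backend follows, so that xennet_remove() can tear down safely.
 */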
2620static void xennet_bus_close(struct xenbus_device *dev)
2621{
2622	int ret;
2623
2624	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2625		return;
2626	do {
2627		xenbus_switch_state(dev, XenbusStateClosing);
2628		ret = wait_event_timeout(module_wq,
2629				   xenbus_read_driver_state(dev->otherend) ==
2630				   XenbusStateClosing ||
2631				   xenbus_read_driver_state(dev->otherend) ==
2632				   XenbusStateClosed ||
2633				   xenbus_read_driver_state(dev->otherend) ==
2634				   XenbusStateUnknown,
2635				   XENNET_TIMEOUT);
2636	} while (!ret);
2637
2638	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2639		return;
2640
2641	do {
2642		xenbus_switch_state(dev, XenbusStateClosed);
2643		ret = wait_event_timeout(module_wq,
2644				   xenbus_read_driver_state(dev->otherend) ==
2645				   XenbusStateClosed ||
2646				   xenbus_read_driver_state(dev->otherend) ==
2647				   XenbusStateUnknown,
2648				   XENNET_TIMEOUT);
2649	} while (!ret);
2650}
2651
2652static void xennet_remove(struct xenbus_device *dev)
2653{
2654	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2655
2656	xennet_bus_close(dev);
2657	xennet_disconnect_backend(info);
2658
2659	if (info->netdev->reg_state == NETREG_REGISTERED)
2660		unregister_netdev(info->netdev);
2661
2662	if (info->queues) {
2663		rtnl_lock();
2664		xennet_destroy_queues(info);
2665		rtnl_unlock();
2666	}
2667	xennet_free_netdev(info->netdev);
2668}
2669
2670static const struct xenbus_device_id netfront_ids[] = {
2671	{ "vif" },
2672	{ "" }
2673};
2674
2675static struct xenbus_driver netfront_driver = {
2676	.ids = netfront_ids,
2677	.probe = netfront_probe,
2678	.remove = xennet_remove,
2679	.resume = netfront_resume,
2680	.otherend_changed = netback_changed,
2681};
2682
2683static int __init netif_init(void)
2684{
2685	if (!xen_domain())
2686		return -ENODEV;
2687
2688	if (!xen_has_pv_nic_devices())
2689		return -ENODEV;
2690
2691	pr_info("Initialising Xen virtual ethernet driver\n");
2692
2693	/* Allow as many queues as there are CPUs, but at most 8, if the user
2694	 * has not specified a value.
2695	 */
2696	if (xennet_max_queues == 0)
2697		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2698					  num_online_cpus());
2699
2700	return xenbus_register_frontend(&netfront_driver);
2701}
2702module_init(netif_init);
2703
2704
2705static void __exit netif_exit(void)
2706{
2707	xenbus_unregister_driver(&netfront_driver);
2708}
2709module_exit(netif_exit);
2710
2711MODULE_DESCRIPTION("Xen virtual network device frontend");
2712MODULE_LICENSE("GPL");
2713MODULE_ALIAS("xen:vif");
2714MODULE_ALIAS("xennet");