   1/*
   2 * Virtual network driver for conversing with remote driver backends.
   3 *
   4 * Copyright (c) 2002-2005, K A Fraser
   5 * Copyright (c) 2005, XenSource Ltd
   6 *
   7 * This program is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU General Public License version 2
   9 * as published by the Free Software Foundation; or, when distributed
  10 * separately from the Linux kernel or incorporated into other
  11 * software packages, subject to the following license:
  12 *
  13 * Permission is hereby granted, free of charge, to any person obtaining a copy
  14 * of this source file (the "Software"), to deal in the Software without
  15 * restriction, including without limitation the rights to use, copy, modify,
  16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  17 * and to permit persons to whom the Software is furnished to do so, subject to
  18 * the following conditions:
  19 *
  20 * The above copyright notice and this permission notice shall be included in
  21 * all copies or substantial portions of the Software.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  29 * IN THE SOFTWARE.
  30 */
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47#include <linux/bpf.h>
  48#include <net/page_pool.h>
  49#include <linux/bpf_trace.h>
  50
  51#include <xen/xen.h>
  52#include <xen/xenbus.h>
  53#include <xen/events.h>
  54#include <xen/page.h>
  55#include <xen/platform_pci.h>
  56#include <xen/grant_table.h>
  57
  58#include <xen/interface/io/netif.h>
  59#include <xen/interface/memory.h>
  60#include <xen/interface/grant_table.h>
  61
  62/* Module parameters */
  63#define MAX_QUEUES_DEFAULT 8
  64static unsigned int xennet_max_queues;
  65module_param_named(max_queues, xennet_max_queues, uint, 0644);
  66MODULE_PARM_DESC(max_queues,
  67		 "Maximum number of queues per virtual interface");
  68
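/*
 * Usage example (illustrative): the limit can be given at load time,
 * e.g. "modprobe xen-netfront max_queues=4". Since the parameter mode is
 * 0644 it can also be rewritten later via
 * /sys/module/xen_netfront/parameters/max_queues, which only affects
 * interfaces created after the write.
 */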
  69#define XENNET_TIMEOUT  (5 * HZ)
  70
  71static const struct ethtool_ops xennet_ethtool_ops;
  72
  73struct netfront_cb {
  74	int pull_to;
  75};
  76
  77#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  78
  79#define RX_COPY_THRESHOLD 256
  80
  81#define GRANT_INVALID_REF	0
  82
  83#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
  84#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
  85
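/*
 * Worked example (assuming XEN_PAGE_SIZE == 4096): __CONST_RING_SIZE()
 * rounds the number of slots fitting in one shared page down to a power
 * of two, which yields 256 entries for both the tx and rx rings here.
 */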
  86/* Minimum number of Rx slots (includes slot for GSO metadata). */
  87#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
  88
  89/* Queue name is interface name with "-qNNN" appended */
  90#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
  91
  92/* IRQ name is queue name with "-tx" or "-rx" appended */
  93#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
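/*
 * Worked example (hypothetical names): "vif1-q3-rx" -- the "-q3" part
 * fits in the +6 of QUEUE_NAME_SIZE ("-qNNN" plus NUL) and "-rx"/"-tx"
 * in the +3 above.
 */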
  94
  95static DECLARE_WAIT_QUEUE_HEAD(module_wq);
  96
  97struct netfront_stats {
  98	u64			packets;
  99	u64			bytes;
 100	struct u64_stats_sync	syncp;
 101};
 102
 103struct netfront_info;
 104
 105struct netfront_queue {
 106	unsigned int id; /* Queue ID, 0-based */
 107	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
 108	struct netfront_info *info;
 109
 110	struct bpf_prog __rcu *xdp_prog;
 111
 112	struct napi_struct napi;
 113
 114	/* Split event channels support, tx_* == rx_* when using
  115	 * a single event channel.
 116	 */
 117	unsigned int tx_evtchn, rx_evtchn;
 118	unsigned int tx_irq, rx_irq;
 119	/* Only used when split event channels support is enabled */
 120	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 121	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 122
 123	spinlock_t   tx_lock;
 124	struct xen_netif_tx_front_ring tx;
 125	int tx_ring_ref;
 126
 127	/*
 128	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 129	 * are linked from tx_skb_freelist through skb_entry.link.
 130	 *
  131	 *  NB. Freelist index entries are always going to be less than
  132	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal to or
  133	 *  greater than PAGE_OFFSET: we use this property to distinguish
  134	 *  them.
 135	 */
 136	union skb_entry {
 137		struct sk_buff *skb;
 138		unsigned long link;
 139	} tx_skbs[NET_TX_RING_SIZE];
 140	grant_ref_t gref_tx_head;
 141	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 142	struct page *grant_tx_page[NET_TX_RING_SIZE];
 143	unsigned tx_skb_freelist;
 144
 145	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 146	struct xen_netif_rx_front_ring rx;
 147	int rx_ring_ref;
 148
 149	struct timer_list rx_refill_timer;
 150
 151	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 152	grant_ref_t gref_rx_head;
 153	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 154
 155	struct page_pool *page_pool;
 156	struct xdp_rxq_info xdp_rxq;
 157};
 158
 159struct netfront_info {
 160	struct list_head list;
 161	struct net_device *netdev;
 162
 163	struct xenbus_device *xbdev;
 164
 165	/* Multi-queue support */
 166	struct netfront_queue *queues;
 167
 168	/* Statistics */
 169	struct netfront_stats __percpu *rx_stats;
 170	struct netfront_stats __percpu *tx_stats;
 171
 172	/* XDP state */
 173	bool netback_has_xdp_headroom;
 174	bool netfront_xdp_enabled;
 175
 176	atomic_t rx_gso_checksum_fixup;
 177};
 178
 179struct netfront_rx_info {
 180	struct xen_netif_rx_response rx;
 181	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 182};
 183
 184static void skb_entry_set_link(union skb_entry *list, unsigned short id)
 185{
 186	list->link = id;
 187}
 188
 189static int skb_entry_is_link(const union skb_entry *list)
 190{
 191	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
 192	return (unsigned long)list->skb < PAGE_OFFSET;
 193}
 194
 195/*
  196 * Helpers for acquiring and freeing slots in tx_skbs[].
 197 */
 198
 199static void add_id_to_freelist(unsigned *head, union skb_entry *list,
 200			       unsigned short id)
 201{
 202	skb_entry_set_link(&list[id], *head);
 203	*head = id;
 204}
 205
 206static unsigned short get_id_from_freelist(unsigned *head,
 207					   union skb_entry *list)
 208{
 209	unsigned int id = *head;
 210	*head = list[id].link;
 211	return id;
 212}
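/*
 * Illustrative sketch (not part of the driver, kept out of the build):
 * how the two freelist helpers above pair up around a tx_skbs[] slot.
 */
#if 0
static void example_freelist_roundtrip(struct netfront_queue *queue,
				       struct sk_buff *skb)
{
	unsigned short id;

	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
	queue->tx_skbs[id].skb = skb;	/* pointer >= PAGE_OFFSET: in use */
	/* ... slot stays busy while the backend owns the grant ... */
	add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
	/* link index < PAGE_OFFSET again: slot is free */
}
#endif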
 213
 214static int xennet_rxidx(RING_IDX idx)
 215{
 216	return idx & (NET_RX_RING_SIZE - 1);
 217}
 218
 219static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
 220					 RING_IDX ri)
 221{
 222	int i = xennet_rxidx(ri);
 223	struct sk_buff *skb = queue->rx_skbs[i];
 224	queue->rx_skbs[i] = NULL;
 225	return skb;
 226}
 227
 228static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 229					    RING_IDX ri)
 230{
 231	int i = xennet_rxidx(ri);
 232	grant_ref_t ref = queue->grant_rx_ref[i];
 233	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
 234	return ref;
 235}
 236
 237#ifdef CONFIG_SYSFS
 238static const struct attribute_group xennet_dev_group;
 239#endif
 240
 241static bool xennet_can_sg(struct net_device *dev)
 242{
 243	return dev->features & NETIF_F_SG;
 244}
 245
 246
 247static void rx_refill_timeout(struct timer_list *t)
 248{
 249	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
 250	napi_schedule(&queue->napi);
 251}
 252
 253static int netfront_tx_slot_available(struct netfront_queue *queue)
 254{
 255	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
 256		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
 257}
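/*
 * Worked example (assuming NET_TX_RING_SIZE == 256 and
 * XEN_NETIF_NR_SLOTS_MIN == 18): the queue reports room while fewer
 * than 237 requests are in flight, i.e. while a worst-case skb can
 * still be queued without overrunning the ring.
 */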
 258
 259static void xennet_maybe_wake_tx(struct netfront_queue *queue)
 260{
 261	struct net_device *dev = queue->info->netdev;
 262	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
 263
 264	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
 265	    netfront_tx_slot_available(queue) &&
 266	    likely(netif_running(dev)))
 267		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 268}
 269
 270
 271static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 272{
 273	struct sk_buff *skb;
 274	struct page *page;
 275
 276	skb = __netdev_alloc_skb(queue->info->netdev,
 277				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
 278				 GFP_ATOMIC | __GFP_NOWARN);
 279	if (unlikely(!skb))
 280		return NULL;
 281
 282	page = page_pool_dev_alloc_pages(queue->page_pool);
 283	if (unlikely(!page)) {
 284		kfree_skb(skb);
 285		return NULL;
 286	}
 287	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 288
  289	/* Align the IP header to a 16-byte boundary */
 290	skb_reserve(skb, NET_IP_ALIGN);
 291	skb->dev = queue->info->netdev;
 292
 293	return skb;
 294}
 295
 296
 297static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 298{
 299	RING_IDX req_prod = queue->rx.req_prod_pvt;
 300	int notify;
 301	int err = 0;
 302
 303	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 304		return;
 305
 306	for (req_prod = queue->rx.req_prod_pvt;
 307	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
 308	     req_prod++) {
 309		struct sk_buff *skb;
 310		unsigned short id;
 311		grant_ref_t ref;
 312		struct page *page;
 313		struct xen_netif_rx_request *req;
 314
 315		skb = xennet_alloc_one_rx_buffer(queue);
 316		if (!skb) {
 317			err = -ENOMEM;
 318			break;
 319		}
 320
 321		id = xennet_rxidx(req_prod);
 322
 323		BUG_ON(queue->rx_skbs[id]);
 324		queue->rx_skbs[id] = skb;
 325
 326		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
 327		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 328		queue->grant_rx_ref[id] = ref;
 329
 330		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 331
 332		req = RING_GET_REQUEST(&queue->rx, req_prod);
 333		gnttab_page_grant_foreign_access_ref_one(ref,
 334							 queue->info->xbdev->otherend_id,
 335							 page,
 336							 0);
 337		req->id = id;
 338		req->gref = ref;
 339	}
 340
 341	queue->rx.req_prod_pvt = req_prod;
 342
 343	/* Try again later if there are not enough requests or skb allocation
 344	 * failed.
  345	 * "Enough requests" means the sum of newly created slots and
  346	 * the slots still unconsumed at the backend.
 347	 */
 348	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
 349	    unlikely(err)) {
 350		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 351		return;
 352	}
 353
 354	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 355	if (notify)
 356		notify_remote_via_irq(queue->rx_irq);
 357}
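/*
 * Illustrative note: if fewer than NET_RX_SLOTS_MIN slots could be
 * posted, or an allocation failed, the refill is retried from
 * rx_refill_timeout() after HZ/10 jiffies (roughly 100 ms) rather than
 * busy-waiting here.
 */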
 358
 359static int xennet_open(struct net_device *dev)
 360{
 361	struct netfront_info *np = netdev_priv(dev);
 362	unsigned int num_queues = dev->real_num_tx_queues;
 363	unsigned int i = 0;
 364	struct netfront_queue *queue = NULL;
 365
 366	if (!np->queues)
 367		return -ENODEV;
 368
 369	for (i = 0; i < num_queues; ++i) {
 370		queue = &np->queues[i];
 371		napi_enable(&queue->napi);
 372
 373		spin_lock_bh(&queue->rx_lock);
 374		if (netif_carrier_ok(dev)) {
 375			xennet_alloc_rx_buffers(queue);
 376			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
 377			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
 378				napi_schedule(&queue->napi);
 379		}
 380		spin_unlock_bh(&queue->rx_lock);
 381	}
 382
 383	netif_tx_start_all_queues(dev);
 384
 385	return 0;
 386}
 387
 388static void xennet_tx_buf_gc(struct netfront_queue *queue)
 389{
 390	RING_IDX cons, prod;
 391	unsigned short id;
 392	struct sk_buff *skb;
 393	bool more_to_do;
 394
 395	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 396
 397	do {
 398		prod = queue->tx.sring->rsp_prod;
 399		rmb(); /* Ensure we see responses up to 'rp'. */
 400
 401		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 402			struct xen_netif_tx_response *txrsp;
 403
 404			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
 405			if (txrsp->status == XEN_NETIF_RSP_NULL)
 406				continue;
 407
 408			id  = txrsp->id;
 409			skb = queue->tx_skbs[id].skb;
 410			if (unlikely(gnttab_query_foreign_access(
 411				queue->grant_tx_ref[id]) != 0)) {
 412				pr_alert("%s: warning -- grant still in use by backend domain\n",
 413					 __func__);
 414				BUG();
 415			}
 416			gnttab_end_foreign_access_ref(
 417				queue->grant_tx_ref[id], GNTMAP_readonly);
 418			gnttab_release_grant_reference(
 419				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 420			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
 421			queue->grant_tx_page[id] = NULL;
 422			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
 423			dev_kfree_skb_irq(skb);
 424		}
 425
 426		queue->tx.rsp_cons = prod;
 427
 428		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
 429	} while (more_to_do);
 430
 431	xennet_maybe_wake_tx(queue);
 432}
 433
 434struct xennet_gnttab_make_txreq {
 435	struct netfront_queue *queue;
 436	struct sk_buff *skb;
 437	struct page *page;
 438	struct xen_netif_tx_request *tx; /* Last request */
 439	unsigned int size;
 440};
 441
 442static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 443				  unsigned int len, void *data)
 444{
 445	struct xennet_gnttab_make_txreq *info = data;
 446	unsigned int id;
 447	struct xen_netif_tx_request *tx;
 448	grant_ref_t ref;
 449	/* convenient aliases */
 450	struct page *page = info->page;
 451	struct netfront_queue *queue = info->queue;
 452	struct sk_buff *skb = info->skb;
 453
 454	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
 455	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 456	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 457	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 458
 459	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 460					gfn, GNTMAP_readonly);
 461
 462	queue->tx_skbs[id].skb = skb;
 463	queue->grant_tx_page[id] = page;
 464	queue->grant_tx_ref[id] = ref;
 465
 466	tx->id = id;
 467	tx->gref = ref;
 468	tx->offset = offset;
 469	tx->size = len;
 470	tx->flags = 0;
 471
 472	info->tx = tx;
 473	info->size += tx->size;
 474}
 475
 476static struct xen_netif_tx_request *xennet_make_first_txreq(
 477	struct netfront_queue *queue, struct sk_buff *skb,
 478	struct page *page, unsigned int offset, unsigned int len)
 479{
 480	struct xennet_gnttab_make_txreq info = {
 481		.queue = queue,
 482		.skb = skb,
 483		.page = page,
 484		.size = 0,
 485	};
 486
 487	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
 488
 489	return info.tx;
 490}
 491
 492static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 493				  unsigned int len, void *data)
 494{
 495	struct xennet_gnttab_make_txreq *info = data;
 496
 497	info->tx->flags |= XEN_NETTXF_more_data;
 498	skb_get(info->skb);
 499	xennet_tx_setup_grant(gfn, offset, len, data);
 500}
 501
 502static struct xen_netif_tx_request *xennet_make_txreqs(
 503	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
 504	struct sk_buff *skb, struct page *page,
 505	unsigned int offset, unsigned int len)
 506{
 507	struct xennet_gnttab_make_txreq info = {
 508		.queue = queue,
 509		.skb = skb,
 510		.tx = tx,
 511	};
 512
 513	/* Skip unused frames from start of page */
 514	page += offset >> PAGE_SHIFT;
 515	offset &= ~PAGE_MASK;
 516
 517	while (len) {
 518		info.page = page;
 519		info.size = 0;
 520
 521		gnttab_foreach_grant_in_range(page, offset, len,
 522					      xennet_make_one_txreq,
 523					      &info);
 524
 525		page++;
 526		offset = 0;
 527		len -= info.size;
 528	}
 529
 530	return info.tx;
 531}
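/*
 * Worked example (illustrative): called with offset == PAGE_SIZE + 100
 * into a compound page, the adjustment above advances "page" by one and
 * reduces "offset" to 100, so granting starts at the first byte that is
 * actually used and untouched leading pages are never shared.
 */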
 532
 533/*
 534 * Count how many ring slots are required to send this skb. Each frag
 535 * might be a compound page.
 536 */
 537static int xennet_count_skb_slots(struct sk_buff *skb)
 538{
 539	int i, frags = skb_shinfo(skb)->nr_frags;
 540	int slots;
 541
 542	slots = gnttab_count_grant(offset_in_page(skb->data),
 543				   skb_headlen(skb));
 544
 545	for (i = 0; i < frags; i++) {
 546		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 547		unsigned long size = skb_frag_size(frag);
 548		unsigned long offset = skb_frag_off(frag);
 549
 550		/* Skip unused frames from start of page */
 551		offset &= ~PAGE_MASK;
 552
 553		slots += gnttab_count_grant(offset, size);
 554	}
 555
 556	return slots;
 557}
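/*
 * Worked example (illustrative, 4 KiB pages): a 1500-byte linear area
 * starting 100 bytes into a page is covered by a single grant, so one
 * slot; a page-aligned 64 KiB TSO frag costs 16 slots, or 17 when
 * misaligned, which matches the MAX_XEN_SKB_FRAGS bound further down.
 */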
 558
 559static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 560			       struct net_device *sb_dev)
 561{
 562	unsigned int num_queues = dev->real_num_tx_queues;
 563	u32 hash;
 564	u16 queue_idx;
 565
 566	/* First, check if there is only one queue */
 567	if (num_queues == 1) {
 568		queue_idx = 0;
 569	} else {
 570		hash = skb_get_hash(skb);
 571		queue_idx = hash % num_queues;
 572	}
 573
 574	return queue_idx;
 575}
 576
 577static int xennet_xdp_xmit_one(struct net_device *dev,
 578			       struct netfront_queue *queue,
 579			       struct xdp_frame *xdpf)
 580{
 581	struct netfront_info *np = netdev_priv(dev);
 582	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 583	int notify;
 584
 585	xennet_make_first_txreq(queue, NULL,
 586				virt_to_page(xdpf->data),
 587				offset_in_page(xdpf->data),
 588				xdpf->len);
 589
 590	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 591	if (notify)
 592		notify_remote_via_irq(queue->tx_irq);
 593
 594	u64_stats_update_begin(&tx_stats->syncp);
 595	tx_stats->bytes += xdpf->len;
 596	tx_stats->packets++;
 597	u64_stats_update_end(&tx_stats->syncp);
 598
 599	xennet_tx_buf_gc(queue);
 600
 601	return 0;
 602}
 603
 604static int xennet_xdp_xmit(struct net_device *dev, int n,
 605			   struct xdp_frame **frames, u32 flags)
 606{
 607	unsigned int num_queues = dev->real_num_tx_queues;
 608	struct netfront_info *np = netdev_priv(dev);
 609	struct netfront_queue *queue = NULL;
 610	unsigned long irq_flags;
 611	int nxmit = 0;
 612	int i;
 613
 614	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 615		return -EINVAL;
 616
 617	queue = &np->queues[smp_processor_id() % num_queues];
 618
 619	spin_lock_irqsave(&queue->tx_lock, irq_flags);
 620	for (i = 0; i < n; i++) {
 621		struct xdp_frame *xdpf = frames[i];
 622
 623		if (!xdpf)
 624			continue;
 625		if (xennet_xdp_xmit_one(dev, queue, xdpf))
 626			break;
 627		nxmit++;
 628	}
 629	spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
 630
 631	return nxmit;
 632}
 633
 634
 635#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 636
 637static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 638{
 639	struct netfront_info *np = netdev_priv(dev);
 640	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 641	struct xen_netif_tx_request *tx, *first_tx;
 642	unsigned int i;
 643	int notify;
 644	int slots;
 645	struct page *page;
 646	unsigned int offset;
 647	unsigned int len;
 648	unsigned long flags;
 649	struct netfront_queue *queue = NULL;
 650	unsigned int num_queues = dev->real_num_tx_queues;
 651	u16 queue_index;
 652	struct sk_buff *nskb;
 653
 654	/* Drop the packet if no queues are set up */
 655	if (num_queues < 1)
 656		goto drop;
 657	/* Determine which queue to transmit this SKB on */
 658	queue_index = skb_get_queue_mapping(skb);
 659	queue = &np->queues[queue_index];
 660
 661	/* If skb->len is too big for wire format, drop skb and alert
 662	 * user about misconfiguration.
 663	 */
 664	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 665		net_alert_ratelimited(
 666			"xennet: skb->len = %u, too big for wire format\n",
 667			skb->len);
 668		goto drop;
 669	}
 670
 671	slots = xennet_count_skb_slots(skb);
 672	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
 673		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 674				    slots, skb->len);
 675		if (skb_linearize(skb))
 676			goto drop;
 677	}
 678
 679	page = virt_to_page(skb->data);
 680	offset = offset_in_page(skb->data);
 681
 682	/* The first req should be at least ETH_HLEN size or the packet will be
 683	 * dropped by netback.
 684	 */
 685	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
 686		nskb = skb_copy(skb, GFP_ATOMIC);
 687		if (!nskb)
 688			goto drop;
 689		dev_consume_skb_any(skb);
 690		skb = nskb;
 691		page = virt_to_page(skb->data);
 692		offset = offset_in_page(skb->data);
 693	}
 694
 695	len = skb_headlen(skb);
 696
 697	spin_lock_irqsave(&queue->tx_lock, flags);
 698
 699	if (unlikely(!netif_carrier_ok(dev) ||
 700		     (slots > 1 && !xennet_can_sg(dev)) ||
 701		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 702		spin_unlock_irqrestore(&queue->tx_lock, flags);
 703		goto drop;
 704	}
 705
 706	/* First request for the linear area. */
 707	first_tx = tx = xennet_make_first_txreq(queue, skb,
 708						page, offset, len);
 709	offset += tx->size;
 710	if (offset == PAGE_SIZE) {
 711		page++;
 712		offset = 0;
 713	}
 714	len -= tx->size;
 715
 716	if (skb->ip_summed == CHECKSUM_PARTIAL)
 717		/* local packet? */
 718		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
 719	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 720		/* remote but checksummed. */
 721		tx->flags |= XEN_NETTXF_data_validated;
 722
 723	/* Optional extra info after the first request. */
 724	if (skb_shinfo(skb)->gso_size) {
 725		struct xen_netif_extra_info *gso;
 726
 727		gso = (struct xen_netif_extra_info *)
 728			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 729
 730		tx->flags |= XEN_NETTXF_extra_info;
 731
 732		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 733		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 734			XEN_NETIF_GSO_TYPE_TCPV6 :
 735			XEN_NETIF_GSO_TYPE_TCPV4;
 736		gso->u.gso.pad = 0;
 737		gso->u.gso.features = 0;
 738
 739		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 740		gso->flags = 0;
 741	}
 742
 743	/* Requests for the rest of the linear area. */
 744	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
 745
 746	/* Requests for all the frags. */
 747	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 748		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 749		tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
 750					skb_frag_off(frag),
 751					skb_frag_size(frag));
 752	}
 753
 754	/* First request has the packet length. */
 755	first_tx->size = skb->len;
 756
 757	/* timestamp packet in software */
 758	skb_tx_timestamp(skb);
 759
 760	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 761	if (notify)
 762		notify_remote_via_irq(queue->tx_irq);
 763
 764	u64_stats_update_begin(&tx_stats->syncp);
 765	tx_stats->bytes += skb->len;
 766	tx_stats->packets++;
 767	u64_stats_update_end(&tx_stats->syncp);
 768
 769	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 770	xennet_tx_buf_gc(queue);
 771
 772	if (!netfront_tx_slot_available(queue))
 773		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
 774
 775	spin_unlock_irqrestore(&queue->tx_lock, flags);
 776
 777	return NETDEV_TX_OK;
 778
 779 drop:
 780	dev->stats.tx_dropped++;
 781	dev_kfree_skb_any(skb);
 782	return NETDEV_TX_OK;
 783}
 784
 785static int xennet_close(struct net_device *dev)
 786{
 787	struct netfront_info *np = netdev_priv(dev);
 788	unsigned int num_queues = dev->real_num_tx_queues;
 789	unsigned int i;
 790	struct netfront_queue *queue;
 791	netif_tx_stop_all_queues(np->netdev);
 792	for (i = 0; i < num_queues; ++i) {
 793		queue = &np->queues[i];
 794		napi_disable(&queue->napi);
 795	}
 796	return 0;
 797}
 798
 799static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 800				grant_ref_t ref)
 801{
 802	int new = xennet_rxidx(queue->rx.req_prod_pvt);
 803
 804	BUG_ON(queue->rx_skbs[new]);
 805	queue->rx_skbs[new] = skb;
 806	queue->grant_rx_ref[new] = ref;
 807	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
 808	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
 809	queue->rx.req_prod_pvt++;
 810}
 811
 812static int xennet_get_extras(struct netfront_queue *queue,
 813			     struct xen_netif_extra_info *extras,
 814			     RING_IDX rp)
 815
 816{
 817	struct xen_netif_extra_info *extra;
 818	struct device *dev = &queue->info->netdev->dev;
 819	RING_IDX cons = queue->rx.rsp_cons;
 820	int err = 0;
 821
 822	do {
 823		struct sk_buff *skb;
 824		grant_ref_t ref;
 825
 826		if (unlikely(cons + 1 == rp)) {
 827			if (net_ratelimit())
 828				dev_warn(dev, "Missing extra info\n");
 829			err = -EBADR;
 830			break;
 831		}
 832
 833		extra = (struct xen_netif_extra_info *)
 834			RING_GET_RESPONSE(&queue->rx, ++cons);
 835
 836		if (unlikely(!extra->type ||
 837			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 838			if (net_ratelimit())
 839				dev_warn(dev, "Invalid extra type: %d\n",
 840					extra->type);
 841			err = -EINVAL;
 842		} else {
 843			memcpy(&extras[extra->type - 1], extra,
 844			       sizeof(*extra));
 845		}
 846
 847		skb = xennet_get_rx_skb(queue, cons);
 848		ref = xennet_get_rx_ref(queue, cons);
 849		xennet_move_rx_slot(queue, skb, ref);
 850	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
 851
 852	queue->rx.rsp_cons = cons;
 853	return err;
 854}
 855
 856static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
 857		   struct xen_netif_rx_response *rx, struct bpf_prog *prog,
 858		   struct xdp_buff *xdp, bool *need_xdp_flush)
 859{
 860	struct xdp_frame *xdpf;
 861	u32 len = rx->status;
 862	u32 act;
 863	int err;
 864
 865	xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
 866		      &queue->xdp_rxq);
 867	xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
 868			 len, false);
 869
 870	act = bpf_prog_run_xdp(prog, xdp);
 871	switch (act) {
 872	case XDP_TX:
 873		get_page(pdata);
 874		xdpf = xdp_convert_buff_to_frame(xdp);
 875		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
 876		if (unlikely(!err))
 877			xdp_return_frame_rx_napi(xdpf);
 878		else if (unlikely(err < 0))
 879			trace_xdp_exception(queue->info->netdev, prog, act);
 880		break;
 881	case XDP_REDIRECT:
 882		get_page(pdata);
 883		err = xdp_do_redirect(queue->info->netdev, xdp, prog);
 884		*need_xdp_flush = true;
 885		if (unlikely(err))
 886			trace_xdp_exception(queue->info->netdev, prog, act);
 887		break;
 888	case XDP_PASS:
 889	case XDP_DROP:
 890		break;
 891
 892	case XDP_ABORTED:
 893		trace_xdp_exception(queue->info->netdev, prog, act);
 894		break;
 895
 896	default:
 897		bpf_warn_invalid_xdp_action(act);
 898	}
 899
 900	return act;
 901}
 902
 903static int xennet_get_responses(struct netfront_queue *queue,
 904				struct netfront_rx_info *rinfo, RING_IDX rp,
 905				struct sk_buff_head *list,
 906				bool *need_xdp_flush)
 907{
 908	struct xen_netif_rx_response *rx = &rinfo->rx;
 909	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
 910	RING_IDX cons = queue->rx.rsp_cons;
 911	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
 912	struct xen_netif_extra_info *extras = rinfo->extras;
 913	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
 914	struct device *dev = &queue->info->netdev->dev;
 915	struct bpf_prog *xdp_prog;
 916	struct xdp_buff xdp;
 917	unsigned long ret;
 918	int slots = 1;
 919	int err = 0;
 920	u32 verdict;
 921
 922	if (rx->flags & XEN_NETRXF_extra_info) {
 923		err = xennet_get_extras(queue, extras, rp);
 924		if (!err) {
 925			if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
 926				struct xen_netif_extra_info *xdp;
 927
 928				xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
 929				rx->offset = xdp->u.xdp.headroom;
 930			}
 931		}
 932		cons = queue->rx.rsp_cons;
 933	}
 934
 935	for (;;) {
 936		if (unlikely(rx->status < 0 ||
 937			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
 938			if (net_ratelimit())
 939				dev_warn(dev, "rx->offset: %u, size: %d\n",
 940					 rx->offset, rx->status);
 941			xennet_move_rx_slot(queue, skb, ref);
 942			err = -EINVAL;
 943			goto next;
 944		}
 945
 946		/*
 947		 * This definitely indicates a bug, either in this driver or in
 948		 * the backend driver. In future this should flag the bad
 949		 * situation to the system controller to reboot the backend.
 950		 */
 951		if (ref == GRANT_INVALID_REF) {
 952			if (net_ratelimit())
 953				dev_warn(dev, "Bad rx response id %d.\n",
 954					 rx->id);
 955			err = -EINVAL;
 956			goto next;
 957		}
 958
 959		ret = gnttab_end_foreign_access_ref(ref, 0);
 960		BUG_ON(!ret);
 961
 962		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 963
 964		rcu_read_lock();
 965		xdp_prog = rcu_dereference(queue->xdp_prog);
 966		if (xdp_prog) {
 967			if (!(rx->flags & XEN_NETRXF_more_data)) {
 968				/* currently only a single page contains data */
 969				verdict = xennet_run_xdp(queue,
 970							 skb_frag_page(&skb_shinfo(skb)->frags[0]),
 971							 rx, xdp_prog, &xdp, need_xdp_flush);
 972				if (verdict != XDP_PASS)
 973					err = -EINVAL;
 974			} else {
 975				/* drop the frame */
 976				err = -EINVAL;
 977			}
 978		}
 979		rcu_read_unlock();
 980next:
 981		__skb_queue_tail(list, skb);
 982		if (!(rx->flags & XEN_NETRXF_more_data))
 983			break;
 984
 985		if (cons + slots == rp) {
 986			if (net_ratelimit())
 987				dev_warn(dev, "Need more slots\n");
 988			err = -ENOENT;
 989			break;
 990		}
 991
 992		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
 993		skb = xennet_get_rx_skb(queue, cons + slots);
 994		ref = xennet_get_rx_ref(queue, cons + slots);
 995		slots++;
 996	}
 997
 998	if (unlikely(slots > max)) {
 999		if (net_ratelimit())
1000			dev_warn(dev, "Too many slots\n");
1001		err = -E2BIG;
1002	}
1003
1004	if (unlikely(err))
1005		queue->rx.rsp_cons = cons + slots;
1006
1007	return err;
1008}
1009
1010static int xennet_set_skb_gso(struct sk_buff *skb,
1011			      struct xen_netif_extra_info *gso)
1012{
1013	if (!gso->u.gso.size) {
1014		if (net_ratelimit())
1015			pr_warn("GSO size must not be zero\n");
1016		return -EINVAL;
1017	}
1018
1019	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1020	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1021		if (net_ratelimit())
1022			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1023		return -EINVAL;
1024	}
1025
1026	skb_shinfo(skb)->gso_size = gso->u.gso.size;
1027	skb_shinfo(skb)->gso_type =
1028		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1029		SKB_GSO_TCPV4 :
1030		SKB_GSO_TCPV6;
1031
1032	/* Header must be checked, and gso_segs computed. */
1033	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1034	skb_shinfo(skb)->gso_segs = 0;
1035
1036	return 0;
1037}
1038
1039static int xennet_fill_frags(struct netfront_queue *queue,
1040			     struct sk_buff *skb,
1041			     struct sk_buff_head *list)
1042{
1043	RING_IDX cons = queue->rx.rsp_cons;
1044	struct sk_buff *nskb;
1045
1046	while ((nskb = __skb_dequeue(list))) {
1047		struct xen_netif_rx_response *rx =
1048			RING_GET_RESPONSE(&queue->rx, ++cons);
1049		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1050
1051		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1052			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1053
1054			BUG_ON(pull_to < skb_headlen(skb));
1055			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1056		}
1057		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1058			queue->rx.rsp_cons = ++cons + skb_queue_len(list);
1059			kfree_skb(nskb);
1060			return -ENOENT;
1061		}
1062
1063		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1064				skb_frag_page(nfrag),
1065				rx->offset, rx->status, PAGE_SIZE);
1066
1067		skb_shinfo(nskb)->nr_frags = 0;
1068		kfree_skb(nskb);
1069	}
1070
1071	queue->rx.rsp_cons = cons;
1072
1073	return 0;
1074}
1075
1076static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1077{
1078	bool recalculate_partial_csum = false;
1079
1080	/*
1081	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1082	 * peers can fail to set NETRXF_csum_blank when sending a GSO
1083	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1084	 * recalculate the partial checksum.
1085	 */
1086	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1087		struct netfront_info *np = netdev_priv(dev);
1088		atomic_inc(&np->rx_gso_checksum_fixup);
1089		skb->ip_summed = CHECKSUM_PARTIAL;
1090		recalculate_partial_csum = true;
1091	}
1092
1093	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1094	if (skb->ip_summed != CHECKSUM_PARTIAL)
1095		return 0;
1096
1097	return skb_checksum_setup(skb, recalculate_partial_csum);
1098}
1099
1100static int handle_incoming_queue(struct netfront_queue *queue,
1101				 struct sk_buff_head *rxq)
1102{
1103	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1104	int packets_dropped = 0;
1105	struct sk_buff *skb;
1106
1107	while ((skb = __skb_dequeue(rxq)) != NULL) {
1108		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1109
1110		if (pull_to > skb_headlen(skb))
1111			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1112
1113		/* Ethernet work: Delayed to here as it peeks the header. */
1114		skb->protocol = eth_type_trans(skb, queue->info->netdev);
1115		skb_reset_network_header(skb);
1116
1117		if (checksum_setup(queue->info->netdev, skb)) {
1118			kfree_skb(skb);
1119			packets_dropped++;
1120			queue->info->netdev->stats.rx_errors++;
1121			continue;
1122		}
1123
1124		u64_stats_update_begin(&rx_stats->syncp);
1125		rx_stats->packets++;
1126		rx_stats->bytes += skb->len;
1127		u64_stats_update_end(&rx_stats->syncp);
1128
1129		/* Pass it up. */
1130		napi_gro_receive(&queue->napi, skb);
1131	}
1132
1133	return packets_dropped;
1134}
1135
1136static int xennet_poll(struct napi_struct *napi, int budget)
1137{
1138	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1139	struct net_device *dev = queue->info->netdev;
1140	struct sk_buff *skb;
1141	struct netfront_rx_info rinfo;
1142	struct xen_netif_rx_response *rx = &rinfo.rx;
1143	struct xen_netif_extra_info *extras = rinfo.extras;
1144	RING_IDX i, rp;
1145	int work_done;
1146	struct sk_buff_head rxq;
1147	struct sk_buff_head errq;
1148	struct sk_buff_head tmpq;
1149	int err;
1150	bool need_xdp_flush = false;
1151
1152	spin_lock(&queue->rx_lock);
1153
1154	skb_queue_head_init(&rxq);
1155	skb_queue_head_init(&errq);
1156	skb_queue_head_init(&tmpq);
1157
1158	rp = queue->rx.sring->rsp_prod;
1159	rmb(); /* Ensure we see queued responses up to 'rp'. */
1160
1161	i = queue->rx.rsp_cons;
1162	work_done = 0;
1163	while ((i != rp) && (work_done < budget)) {
1164		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
1165		memset(extras, 0, sizeof(rinfo.extras));
1166
1167		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1168					   &need_xdp_flush);
1169
1170		if (unlikely(err)) {
1171err:
1172			while ((skb = __skb_dequeue(&tmpq)))
1173				__skb_queue_tail(&errq, skb);
1174			dev->stats.rx_errors++;
1175			i = queue->rx.rsp_cons;
1176			continue;
1177		}
1178
1179		skb = __skb_dequeue(&tmpq);
1180
1181		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1182			struct xen_netif_extra_info *gso;
1183			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1184
1185			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1186				__skb_queue_head(&tmpq, skb);
1187				queue->rx.rsp_cons += skb_queue_len(&tmpq);
1188				goto err;
1189			}
1190		}
1191
1192		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1193		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1194			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1195
1196		skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1197		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1198		skb->data_len = rx->status;
1199		skb->len += rx->status;
1200
1201		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1202			goto err;
1203
1204		if (rx->flags & XEN_NETRXF_csum_blank)
1205			skb->ip_summed = CHECKSUM_PARTIAL;
1206		else if (rx->flags & XEN_NETRXF_data_validated)
1207			skb->ip_summed = CHECKSUM_UNNECESSARY;
1208
1209		__skb_queue_tail(&rxq, skb);
1210
1211		i = ++queue->rx.rsp_cons;
1212		work_done++;
1213	}
1214	if (need_xdp_flush)
1215		xdp_do_flush();
1216
1217	__skb_queue_purge(&errq);
1218
1219	work_done -= handle_incoming_queue(queue, &rxq);
1220
1221	xennet_alloc_rx_buffers(queue);
1222
1223	if (work_done < budget) {
1224		int more_to_do = 0;
1225
1226		napi_complete_done(napi, work_done);
1227
1228		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1229		if (more_to_do)
1230			napi_schedule(napi);
1231	}
1232
1233	spin_unlock(&queue->rx_lock);
1234
1235	return work_done;
1236}
1237
1238static int xennet_change_mtu(struct net_device *dev, int mtu)
1239{
1240	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1241
1242	if (mtu > max)
1243		return -EINVAL;
1244	dev->mtu = mtu;
1245	return 0;
1246}
1247
1248static void xennet_get_stats64(struct net_device *dev,
1249			       struct rtnl_link_stats64 *tot)
1250{
1251	struct netfront_info *np = netdev_priv(dev);
1252	int cpu;
1253
1254	for_each_possible_cpu(cpu) {
1255		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1256		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1257		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1258		unsigned int start;
1259
1260		do {
1261			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1262			tx_packets = tx_stats->packets;
1263			tx_bytes = tx_stats->bytes;
1264		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1265
1266		do {
1267			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1268			rx_packets = rx_stats->packets;
1269			rx_bytes = rx_stats->bytes;
1270		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1271
1272		tot->rx_packets += rx_packets;
1273		tot->tx_packets += tx_packets;
1274		tot->rx_bytes   += rx_bytes;
1275		tot->tx_bytes   += tx_bytes;
1276	}
1277
1278	tot->rx_errors  = dev->stats.rx_errors;
1279	tot->tx_dropped = dev->stats.tx_dropped;
1280}
1281
1282static void xennet_release_tx_bufs(struct netfront_queue *queue)
1283{
1284	struct sk_buff *skb;
1285	int i;
1286
1287	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1288		/* Skip over entries which are actually freelist references */
1289		if (skb_entry_is_link(&queue->tx_skbs[i]))
1290			continue;
1291
1292		skb = queue->tx_skbs[i].skb;
1293		get_page(queue->grant_tx_page[i]);
1294		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1295					  GNTMAP_readonly,
1296					  (unsigned long)page_address(queue->grant_tx_page[i]));
1297		queue->grant_tx_page[i] = NULL;
1298		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1299		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1300		dev_kfree_skb_irq(skb);
1301	}
1302}
1303
1304static void xennet_release_rx_bufs(struct netfront_queue *queue)
1305{
1306	int id, ref;
1307
1308	spin_lock_bh(&queue->rx_lock);
1309
1310	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1311		struct sk_buff *skb;
1312		struct page *page;
1313
1314		skb = queue->rx_skbs[id];
1315		if (!skb)
1316			continue;
1317
1318		ref = queue->grant_rx_ref[id];
1319		if (ref == GRANT_INVALID_REF)
1320			continue;
1321
1322		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1323
1324		/* gnttab_end_foreign_access() needs a page ref until
1325		 * foreign access is ended (which may be deferred).
1326		 */
1327		get_page(page);
1328		gnttab_end_foreign_access(ref, 0,
1329					  (unsigned long)page_address(page));
1330		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1331
1332		kfree_skb(skb);
1333	}
1334
1335	spin_unlock_bh(&queue->rx_lock);
1336}
1337
1338static netdev_features_t xennet_fix_features(struct net_device *dev,
1339	netdev_features_t features)
1340{
1341	struct netfront_info *np = netdev_priv(dev);
1342
1343	if (features & NETIF_F_SG &&
1344	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1345		features &= ~NETIF_F_SG;
1346
1347	if (features & NETIF_F_IPV6_CSUM &&
1348	    !xenbus_read_unsigned(np->xbdev->otherend,
1349				  "feature-ipv6-csum-offload", 0))
1350		features &= ~NETIF_F_IPV6_CSUM;
1351
1352	if (features & NETIF_F_TSO &&
1353	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1354		features &= ~NETIF_F_TSO;
1355
1356	if (features & NETIF_F_TSO6 &&
1357	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1358		features &= ~NETIF_F_TSO6;
1359
1360	return features;
1361}
1362
1363static int xennet_set_features(struct net_device *dev,
1364	netdev_features_t features)
1365{
1366	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
 1367		netdev_info(dev, "Reducing MTU because no SG offload\n");
1368		dev->mtu = ETH_DATA_LEN;
1369	}
1370
1371	return 0;
1372}
1373
1374static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1375{
1376	struct netfront_queue *queue = dev_id;
1377	unsigned long flags;
1378
1379	spin_lock_irqsave(&queue->tx_lock, flags);
1380	xennet_tx_buf_gc(queue);
1381	spin_unlock_irqrestore(&queue->tx_lock, flags);
1382
1383	return IRQ_HANDLED;
1384}
1385
1386static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1387{
1388	struct netfront_queue *queue = dev_id;
1389	struct net_device *dev = queue->info->netdev;
1390
1391	if (likely(netif_carrier_ok(dev) &&
1392		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1393		napi_schedule(&queue->napi);
1394
1395	return IRQ_HANDLED;
1396}
1397
1398static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1399{
1400	xennet_tx_interrupt(irq, dev_id);
1401	xennet_rx_interrupt(irq, dev_id);
1402	return IRQ_HANDLED;
1403}
1404
1405#ifdef CONFIG_NET_POLL_CONTROLLER
1406static void xennet_poll_controller(struct net_device *dev)
1407{
1408	/* Poll each queue */
1409	struct netfront_info *info = netdev_priv(dev);
1410	unsigned int num_queues = dev->real_num_tx_queues;
1411	unsigned int i;
1412	for (i = 0; i < num_queues; ++i)
1413		xennet_interrupt(0, &info->queues[i]);
1414}
1415#endif
1416
1417#define NETBACK_XDP_HEADROOM_DISABLE	0
1418#define NETBACK_XDP_HEADROOM_ENABLE	1
1419
1420static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1421{
1422	int err;
1423	unsigned short headroom;
1424
1425	headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1426	err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1427			    "xdp-headroom", "%hu",
1428			    headroom);
1429	if (err)
1430		pr_warn("Error writing xdp-headroom\n");
1431
1432	return err;
1433}
1434
1435static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1436			  struct netlink_ext_ack *extack)
1437{
1438	unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1439	struct netfront_info *np = netdev_priv(dev);
1440	struct bpf_prog *old_prog;
1441	unsigned int i, err;
1442
1443	if (dev->mtu > max_mtu) {
1444		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
1445		return -EINVAL;
1446	}
1447
1448	if (!np->netback_has_xdp_headroom)
1449		return 0;
1450
1451	xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1452
1453	err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1454				  NETBACK_XDP_HEADROOM_DISABLE);
1455	if (err)
1456		return err;
1457
1458	/* avoid the race with XDP headroom adjustment */
1459	wait_event(module_wq,
1460		   xenbus_read_driver_state(np->xbdev->otherend) ==
1461		   XenbusStateReconfigured);
1462	np->netfront_xdp_enabled = true;
1463
1464	old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1465
1466	if (prog)
1467		bpf_prog_add(prog, dev->real_num_tx_queues);
1468
1469	for (i = 0; i < dev->real_num_tx_queues; ++i)
1470		rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1471
1472	if (old_prog)
1473		for (i = 0; i < dev->real_num_tx_queues; ++i)
1474			bpf_prog_put(old_prog);
1475
1476	xenbus_switch_state(np->xbdev, XenbusStateConnected);
1477
1478	return 0;
1479}
1480
1481static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1482{
1483	switch (xdp->command) {
1484	case XDP_SETUP_PROG:
1485		return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1486	default:
1487		return -EINVAL;
1488	}
1489}
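/*
 * Usage example (illustrative): a program attached with something like
 * "ip link set dev eth0 xdp obj prog.o sec xdp" arrives here as an
 * XDP_SETUP_PROG command and is propagated to every queue by
 * xennet_xdp_set() above.
 */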
1490
1491static const struct net_device_ops xennet_netdev_ops = {
1492	.ndo_open            = xennet_open,
1493	.ndo_stop            = xennet_close,
1494	.ndo_start_xmit      = xennet_start_xmit,
1495	.ndo_change_mtu	     = xennet_change_mtu,
1496	.ndo_get_stats64     = xennet_get_stats64,
1497	.ndo_set_mac_address = eth_mac_addr,
1498	.ndo_validate_addr   = eth_validate_addr,
1499	.ndo_fix_features    = xennet_fix_features,
1500	.ndo_set_features    = xennet_set_features,
1501	.ndo_select_queue    = xennet_select_queue,
1502	.ndo_bpf            = xennet_xdp,
1503	.ndo_xdp_xmit	    = xennet_xdp_xmit,
1504#ifdef CONFIG_NET_POLL_CONTROLLER
1505	.ndo_poll_controller = xennet_poll_controller,
1506#endif
1507};
1508
1509static void xennet_free_netdev(struct net_device *netdev)
1510{
1511	struct netfront_info *np = netdev_priv(netdev);
1512
1513	free_percpu(np->rx_stats);
1514	free_percpu(np->tx_stats);
1515	free_netdev(netdev);
1516}
1517
1518static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1519{
1520	int err;
1521	struct net_device *netdev;
1522	struct netfront_info *np;
1523
1524	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1525	if (!netdev)
1526		return ERR_PTR(-ENOMEM);
1527
1528	np                   = netdev_priv(netdev);
1529	np->xbdev            = dev;
1530
1531	np->queues = NULL;
1532
1533	err = -ENOMEM;
1534	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1535	if (np->rx_stats == NULL)
1536		goto exit;
1537	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1538	if (np->tx_stats == NULL)
1539		goto exit;
1540
1541	netdev->netdev_ops	= &xennet_netdev_ops;
1542
1543	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1544				  NETIF_F_GSO_ROBUST;
1545	netdev->hw_features	= NETIF_F_SG |
1546				  NETIF_F_IPV6_CSUM |
1547				  NETIF_F_TSO | NETIF_F_TSO6;
1548
 1549	/*
 1550	 * Assume that all hw features are available for now. This set
 1551	 * will be adjusted by the call to netdev_update_features() in
 1552	 * xennet_connect(), which is the earliest point where we can
 1553	 * negotiate with the backend regarding supported features.
 1554	 */
1555	netdev->features |= netdev->hw_features;
1556
1557	netdev->ethtool_ops = &xennet_ethtool_ops;
1558	netdev->min_mtu = ETH_MIN_MTU;
1559	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1560	SET_NETDEV_DEV(netdev, &dev->dev);
1561
1562	np->netdev = netdev;
1563	np->netfront_xdp_enabled = false;
1564
1565	netif_carrier_off(netdev);
1566
1567	do {
1568		xenbus_switch_state(dev, XenbusStateInitialising);
1569		err = wait_event_timeout(module_wq,
1570				 xenbus_read_driver_state(dev->otherend) !=
1571				 XenbusStateClosed &&
1572				 xenbus_read_driver_state(dev->otherend) !=
1573				 XenbusStateUnknown, XENNET_TIMEOUT);
1574	} while (!err);
1575
1576	return netdev;
1577
1578 exit:
1579	xennet_free_netdev(netdev);
1580	return ERR_PTR(err);
1581}
1582
1583/*
1584 * Entry point to this code when a new device is created.  Allocate the basic
1585 * structures and the ring buffers for communication with the backend, and
1586 * inform the backend of the appropriate details for those.
1587 */
1588static int netfront_probe(struct xenbus_device *dev,
1589			  const struct xenbus_device_id *id)
1590{
1591	int err;
1592	struct net_device *netdev;
1593	struct netfront_info *info;
1594
1595	netdev = xennet_create_dev(dev);
1596	if (IS_ERR(netdev)) {
1597		err = PTR_ERR(netdev);
1598		xenbus_dev_fatal(dev, err, "creating netdev");
1599		return err;
1600	}
1601
1602	info = netdev_priv(netdev);
1603	dev_set_drvdata(&dev->dev, info);
1604#ifdef CONFIG_SYSFS
1605	info->netdev->sysfs_groups[0] = &xennet_dev_group;
1606#endif
1607
1608	return 0;
1609}
1610
1611static void xennet_end_access(int ref, void *page)
1612{
1613	/* This frees the page as a side-effect */
1614	if (ref != GRANT_INVALID_REF)
1615		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1616}
1617
1618static void xennet_disconnect_backend(struct netfront_info *info)
1619{
1620	unsigned int i = 0;
1621	unsigned int num_queues = info->netdev->real_num_tx_queues;
1622
1623	netif_carrier_off(info->netdev);
1624
1625	for (i = 0; i < num_queues && info->queues; ++i) {
1626		struct netfront_queue *queue = &info->queues[i];
1627
1628		del_timer_sync(&queue->rx_refill_timer);
1629
1630		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1631			unbind_from_irqhandler(queue->tx_irq, queue);
1632		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1633			unbind_from_irqhandler(queue->tx_irq, queue);
1634			unbind_from_irqhandler(queue->rx_irq, queue);
1635		}
1636		queue->tx_evtchn = queue->rx_evtchn = 0;
1637		queue->tx_irq = queue->rx_irq = 0;
1638
1639		if (netif_running(info->netdev))
1640			napi_synchronize(&queue->napi);
1641
1642		xennet_release_tx_bufs(queue);
1643		xennet_release_rx_bufs(queue);
1644		gnttab_free_grant_references(queue->gref_tx_head);
1645		gnttab_free_grant_references(queue->gref_rx_head);
1646
1647		/* End access and free the pages */
1648		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1649		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1650
1651		queue->tx_ring_ref = GRANT_INVALID_REF;
1652		queue->rx_ring_ref = GRANT_INVALID_REF;
1653		queue->tx.sring = NULL;
1654		queue->rx.sring = NULL;
1655
1656		page_pool_destroy(queue->page_pool);
1657	}
1658}
1659
1660/*
1661 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1662 * driver restart.  We tear down our netif structure and recreate it, but
1663 * leave the device-layer structures intact so that this is transparent to the
1664 * rest of the kernel.
1665 */
1666static int netfront_resume(struct xenbus_device *dev)
1667{
1668	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1669
1670	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1671
1672	xennet_disconnect_backend(info);
1673	return 0;
1674}
1675
1676static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1677{
1678	char *s, *e, *macstr;
1679	int i;
1680
1681	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1682	if (IS_ERR(macstr))
1683		return PTR_ERR(macstr);
1684
1685	for (i = 0; i < ETH_ALEN; i++) {
1686		mac[i] = simple_strtoul(s, &e, 16);
1687		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1688			kfree(macstr);
1689			return -ENOENT;
1690		}
1691		s = e+1;
1692	}
1693
1694	kfree(macstr);
1695	return 0;
1696}
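/*
 * Example (illustrative): the toolstack writes the MAC under the
 * device's "mac" node as a string such as "00:16:3e:12:34:56"; the loop
 * above consumes one colon-separated hex octet per iteration and
 * rejects anything else.
 */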
1697
1698static int setup_netfront_single(struct netfront_queue *queue)
1699{
1700	int err;
1701
1702	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1703	if (err < 0)
1704		goto fail;
1705
1706	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1707					xennet_interrupt,
1708					0, queue->info->netdev->name, queue);
1709	if (err < 0)
1710		goto bind_fail;
1711	queue->rx_evtchn = queue->tx_evtchn;
1712	queue->rx_irq = queue->tx_irq = err;
1713
1714	return 0;
1715
1716bind_fail:
1717	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1718	queue->tx_evtchn = 0;
1719fail:
1720	return err;
1721}
1722
1723static int setup_netfront_split(struct netfront_queue *queue)
1724{
1725	int err;
1726
1727	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1728	if (err < 0)
1729		goto fail;
1730	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1731	if (err < 0)
1732		goto alloc_rx_evtchn_fail;
1733
1734	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1735		 "%s-tx", queue->name);
1736	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1737					xennet_tx_interrupt,
1738					0, queue->tx_irq_name, queue);
1739	if (err < 0)
1740		goto bind_tx_fail;
1741	queue->tx_irq = err;
1742
1743	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1744		 "%s-rx", queue->name);
1745	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1746					xennet_rx_interrupt,
1747					0, queue->rx_irq_name, queue);
1748	if (err < 0)
1749		goto bind_rx_fail;
1750	queue->rx_irq = err;
1751
1752	return 0;
1753
1754bind_rx_fail:
1755	unbind_from_irqhandler(queue->tx_irq, queue);
1756	queue->tx_irq = 0;
1757bind_tx_fail:
1758	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1759	queue->rx_evtchn = 0;
1760alloc_rx_evtchn_fail:
1761	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1762	queue->tx_evtchn = 0;
1763fail:
1764	return err;
1765}
1766
1767static int setup_netfront(struct xenbus_device *dev,
1768			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1769{
1770	struct xen_netif_tx_sring *txs;
1771	struct xen_netif_rx_sring *rxs;
1772	grant_ref_t gref;
1773	int err;
1774
1775	queue->tx_ring_ref = GRANT_INVALID_REF;
1776	queue->rx_ring_ref = GRANT_INVALID_REF;
1777	queue->rx.sring = NULL;
1778	queue->tx.sring = NULL;
1779
1780	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1781	if (!txs) {
1782		err = -ENOMEM;
1783		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1784		goto fail;
1785	}
1786	SHARED_RING_INIT(txs);
1787	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1788
1789	err = xenbus_grant_ring(dev, txs, 1, &gref);
1790	if (err < 0)
1791		goto grant_tx_ring_fail;
1792	queue->tx_ring_ref = gref;
1793
1794	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1795	if (!rxs) {
1796		err = -ENOMEM;
1797		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1798		goto alloc_rx_ring_fail;
1799	}
1800	SHARED_RING_INIT(rxs);
1801	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1802
1803	err = xenbus_grant_ring(dev, rxs, 1, &gref);
1804	if (err < 0)
1805		goto grant_rx_ring_fail;
1806	queue->rx_ring_ref = gref;
1807
1808	if (feature_split_evtchn)
1809		err = setup_netfront_split(queue);
 1810	/* Set up a single event channel if
 1811	 *  a) feature-split-event-channels == 0, or
 1812	 *  b) feature-split-event-channels == 1 but the split setup failed
 1813	 */
1814	if (!feature_split_evtchn || err)
1815		err = setup_netfront_single(queue);
1816
1817	if (err)
1818		goto alloc_evtchn_fail;
1819
1820	return 0;
1821
1822	/* If we fail to setup netfront, it is safe to just revoke access to
1823	 * granted pages because backend is not accessing it at this point.
1824	 */
1825alloc_evtchn_fail:
1826	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1827grant_rx_ring_fail:
1828	free_page((unsigned long)rxs);
1829alloc_rx_ring_fail:
1830	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1831grant_tx_ring_fail:
1832	free_page((unsigned long)txs);
1833fail:
1834	return err;
1835}
1836
1837/* Queue-specific initialisation
1838 * This used to be done in xennet_create_dev() but must now
1839 * be run per-queue.
1840 */
1841static int xennet_init_queue(struct netfront_queue *queue)
1842{
1843	unsigned short i;
1844	int err = 0;
1845	char *devid;
1846
1847	spin_lock_init(&queue->tx_lock);
1848	spin_lock_init(&queue->rx_lock);
1849
1850	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
1851
1852	devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
1853	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
1854		 devid, queue->id);
1855
1856	/* Initialise tx_skbs as a free chain containing every entry. */
1857	queue->tx_skb_freelist = 0;
1858	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1859		skb_entry_set_link(&queue->tx_skbs[i], i+1);
1860		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1861		queue->grant_tx_page[i] = NULL;
1862	}
1863
1864	/* Clear out rx_skbs */
1865	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1866		queue->rx_skbs[i] = NULL;
1867		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1868	}
1869
1870	/* A grant for every tx ring slot */
1871	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1872					  &queue->gref_tx_head) < 0) {
1873		pr_alert("can't alloc tx grant refs\n");
1874		err = -ENOMEM;
1875		goto exit;
1876	}
1877
1878	/* A grant for every rx ring slot */
1879	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1880					  &queue->gref_rx_head) < 0) {
1881		pr_alert("can't alloc rx grant refs\n");
1882		err = -ENOMEM;
1883		goto exit_free_tx;
1884	}
1885
1886	return 0;
1887
1888 exit_free_tx:
1889	gnttab_free_grant_references(queue->gref_tx_head);
1890 exit:
1891	return err;
1892}
1893
1894static int write_queue_xenstore_keys(struct netfront_queue *queue,
1895			   struct xenbus_transaction *xbt, int write_hierarchical)
1896{
1897	/* Write the queue-specific keys into XenStore in the traditional
1898	 * way for a single queue, or under per-queue subkeys for multiple
1899	 * queues.
1900	 */
1901	struct xenbus_device *dev = queue->info->xbdev;
1902	int err;
1903	const char *message;
1904	char *path;
1905	size_t pathsize;
1906
1907	/* Choose the correct place to write the keys */
1908	if (write_hierarchical) {
1909		pathsize = strlen(dev->nodename) + 10;
1910		path = kzalloc(pathsize, GFP_KERNEL);
1911		if (!path) {
1912			err = -ENOMEM;
1913			message = "out of memory while writing ring references";
1914			goto error;
1915		}
1916		snprintf(path, pathsize, "%s/queue-%u",
1917				dev->nodename, queue->id);
1918	} else {
1919		path = (char *)dev->nodename;
1920	}
1921
1922	/* Write ring references */
1923	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1924			queue->tx_ring_ref);
1925	if (err) {
1926		message = "writing tx-ring-ref";
1927		goto error;
1928	}
1929
1930	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1931			queue->rx_ring_ref);
1932	if (err) {
1933		message = "writing rx-ring-ref";
1934		goto error;
1935	}
1936
1937	/* Write the event channels, taking into account both the shared
1938	 * and the split event channel scenarios.
1939	 */
1940	if (queue->tx_evtchn == queue->rx_evtchn) {
1941		/* Shared event channel */
1942		err = xenbus_printf(*xbt, path,
1943				"event-channel", "%u", queue->tx_evtchn);
1944		if (err) {
1945			message = "writing event-channel";
1946			goto error;
1947		}
1948	} else {
1949		/* Split event channels */
1950		err = xenbus_printf(*xbt, path,
1951				"event-channel-tx", "%u", queue->tx_evtchn);
1952		if (err) {
1953			message = "writing event-channel-tx";
1954			goto error;
1955		}
1956
1957		err = xenbus_printf(*xbt, path,
1958				"event-channel-rx", "%u", queue->rx_evtchn);
1959		if (err) {
1960			message = "writing event-channel-rx";
1961			goto error;
1962		}
1963	}
1964
1965	if (write_hierarchical)
1966		kfree(path);
1967	return 0;
1968
1969error:
1970	if (write_hierarchical)
1971		kfree(path);
1972	xenbus_dev_fatal(dev, err, "%s", message);
1973	return err;
1974}
1975
1976static void xennet_destroy_queues(struct netfront_info *info)
1977{
1978	unsigned int i;
1979
1980	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1981		struct netfront_queue *queue = &info->queues[i];
1982
1983		if (netif_running(info->netdev))
1984			napi_disable(&queue->napi);
1985		netif_napi_del(&queue->napi);
1986	}
1987
1988	kfree(info->queues);
1989	info->queues = NULL;
1990}
1991
1992
1993
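/* Give each rx queue a page pool sized to the ring and register it as
 * the queue's XDP memory model, so pages dropped or redirected by an
 * XDP program are recycled through the pool rather than returned to
 * the page allocator.  XDP_PACKET_HEADROOM is reserved in every page.
 */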
1994static int xennet_create_page_pool(struct netfront_queue *queue)
1995{
1996	int err;
1997	struct page_pool_params pp_params = {
1998		.order = 0,
1999		.flags = 0,
2000		.pool_size = NET_RX_RING_SIZE,
2001		.nid = NUMA_NO_NODE,
2002		.dev = &queue->info->netdev->dev,
2003		.offset = XDP_PACKET_HEADROOM,
2004		.max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2005	};
2006
2007	queue->page_pool = page_pool_create(&pp_params);
2008	if (IS_ERR(queue->page_pool)) {
2009		err = PTR_ERR(queue->page_pool);
2010		queue->page_pool = NULL;
2011		return err;
2012	}
2013
2014	err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2015			       queue->id, 0);
2016	if (err) {
2017		netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2018		goto err_free_pp;
2019	}
2020
2021	err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2022					 MEM_TYPE_PAGE_POOL, queue->page_pool);
2023	if (err) {
2024		netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2025		goto err_unregister_rxq;
2026	}
2027	return 0;
2028
2029err_unregister_rxq:
2030	xdp_rxq_info_unreg(&queue->xdp_rxq);
2031err_free_pp:
2032	page_pool_destroy(queue->page_pool);
2033	queue->page_pool = NULL;
2034	return err;
2035}
2036
2037static int xennet_create_queues(struct netfront_info *info,
2038				unsigned int *num_queues)
2039{
2040	unsigned int i;
2041	int ret;
2042
2043	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2044			       GFP_KERNEL);
2045	if (!info->queues)
2046		return -ENOMEM;
2047
2048	for (i = 0; i < *num_queues; i++) {
2049		struct netfront_queue *queue = &info->queues[i];
2050
2051		queue->id = i;
2052		queue->info = info;
2053
2054		ret = xennet_init_queue(queue);
2055		if (ret < 0) {
2056			dev_warn(&info->xbdev->dev,
2057				 "only created %d queues\n", i);
2058			*num_queues = i;
2059			break;
2060		}
2061
2062		/* use page pool recycling instead of buddy allocator */
2063		ret = xennet_create_page_pool(queue);
2064		if (ret < 0) {
2065			dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2066			*num_queues = i;
2067			return ret;
2068		}
2069
2070		netif_napi_add(queue->info->netdev, &queue->napi,
2071			       xennet_poll, 64);
2072		if (netif_running(info->netdev))
2073			napi_enable(&queue->napi);
2074	}
2075
2076	netif_set_real_num_tx_queues(info->netdev, *num_queues);
2077
2078	if (*num_queues == 0) {
2079		dev_err(&info->xbdev->dev, "no queues\n");
2080		return -EINVAL;
2081	}
2082	return 0;
2083}
2084
2085/* Common code used when first setting up, and when resuming. */
2086static int talk_to_netback(struct xenbus_device *dev,
2087			   struct netfront_info *info)
2088{
2089	const char *message;
2090	struct xenbus_transaction xbt;
2091	int err;
2092	unsigned int feature_split_evtchn;
2093	unsigned int i = 0;
2094	unsigned int max_queues = 0;
2095	struct netfront_queue *queue = NULL;
2096	unsigned int num_queues = 1;
2097
2098	info->netdev->irq = 0;
2099
2100	/* Check if backend supports multiple queues */
2101	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2102					  "multi-queue-max-queues", 1);
2103	num_queues = min(max_queues, xennet_max_queues);
2104
2105	/* Check feature-split-event-channels */
2106	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2107					"feature-split-event-channels", 0);
2108
2109	/* Read mac addr. */
2110	err = xen_net_read_mac(dev, info->netdev->dev_addr);
2111	if (err) {
2112		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2113		goto out_unlocked;
2114	}
2115
2116	info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2117							      "feature-xdp-headroom", 0);
2118	if (info->netback_has_xdp_headroom) {
2119		/* set the current xen-netfront xdp state */
2120		err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2121					  NETBACK_XDP_HEADROOM_ENABLE :
2122					  NETBACK_XDP_HEADROOM_DISABLE);
2123		if (err)
2124			goto out_unlocked;
2125	}
2126
2127	rtnl_lock();
2128	if (info->queues)
2129		xennet_destroy_queues(info);
2130
2131	err = xennet_create_queues(info, &num_queues);
2132	if (err < 0) {
2133		xenbus_dev_fatal(dev, err, "creating queues");
2134		kfree(info->queues);
2135		info->queues = NULL;
2136		goto out;
2137	}
2138	rtnl_unlock();
2139
2140	/* Create the shared rings and allocate event channels -- for each queue */
2141	for (i = 0; i < num_queues; ++i) {
2142		queue = &info->queues[i];
2143		err = setup_netfront(dev, queue, feature_split_evtchn);
2144		if (err)
2145			goto destroy_ring;
2146	}
2147
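	/* The xenstore writes below run in a single transaction.  If the
	 * transaction ends with -EAGAIN, another writer raced with us and
	 * everything is retried from this point.
	 */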
2148again:
2149	err = xenbus_transaction_start(&xbt);
2150	if (err) {
2151		xenbus_dev_fatal(dev, err, "starting transaction");
2152		goto destroy_ring;
2153	}
2154
2155	if (xenbus_exists(XBT_NIL,
2156			  info->xbdev->otherend, "multi-queue-max-queues")) {
2157		/* Write the number of queues */
2158		err = xenbus_printf(xbt, dev->nodename,
2159				    "multi-queue-num-queues", "%u", num_queues);
2160		if (err) {
2161			message = "writing multi-queue-num-queues";
2162			goto abort_transaction_no_dev_fatal;
2163		}
2164	}
2165
2166	if (num_queues == 1) {
2167		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2168		if (err)
2169			goto abort_transaction_no_dev_fatal;
2170	} else {
2171		/* Write the keys for each queue */
2172		for (i = 0; i < num_queues; ++i) {
2173			queue = &info->queues[i];
2174			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2175			if (err)
2176				goto abort_transaction_no_dev_fatal;
2177		}
2178	}
2179
2180	/* The remaining keys are not queue-specific */
2181	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2182			    1);
2183	if (err) {
2184		message = "writing request-rx-copy";
2185		goto abort_transaction;
2186	}
2187
2188	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2189	if (err) {
2190		message = "writing feature-rx-notify";
2191		goto abort_transaction;
2192	}
2193
2194	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2195	if (err) {
2196		message = "writing feature-sg";
2197		goto abort_transaction;
2198	}
2199
2200	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2201	if (err) {
2202		message = "writing feature-gso-tcpv4";
2203		goto abort_transaction;
2204	}
2205
2206	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2207	if (err) {
2208		message = "writing feature-gso-tcpv6";
2209		goto abort_transaction;
2210	}
2211
2212	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2213			   "1");
2214	if (err) {
2215		message = "writing feature-ipv6-csum-offload";
2216		goto abort_transaction;
2217	}
2218
2219	err = xenbus_transaction_end(xbt, 0);
2220	if (err) {
2221		if (err == -EAGAIN)
2222			goto again;
2223		xenbus_dev_fatal(dev, err, "completing transaction");
2224		goto destroy_ring;
2225	}
2226
2227	return 0;
2228
2229 abort_transaction:
2230	xenbus_dev_fatal(dev, err, "%s", message);
2231abort_transaction_no_dev_fatal:
2232	xenbus_transaction_end(xbt, 1);
2233 destroy_ring:
2234	xennet_disconnect_backend(info);
2235	rtnl_lock();
2236	xennet_destroy_queues(info);
2237 out:
2238	rtnl_unlock();
2239out_unlocked:
2240	device_unregister(&dev->dev);
2241	return err;
2242}
2243
2244static int xennet_connect(struct net_device *dev)
2245{
2246	struct netfront_info *np = netdev_priv(dev);
2247	unsigned int num_queues = 0;
2248	int err;
2249	unsigned int j = 0;
2250	struct netfront_queue *queue = NULL;
2251
2252	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
2253		dev_info(&dev->dev,
2254			 "backend does not support copying receive path\n");
2255		return -ENODEV;
2256	}
2257
2258	err = talk_to_netback(np->xbdev, np);
2259	if (err)
2260		return err;
2261	if (np->netback_has_xdp_headroom)
2262		pr_info("backend supports XDP headroom\n");
2263
2264	/* talk_to_netback() sets the correct number of queues */
2265	num_queues = dev->real_num_tx_queues;
2266
2267	if (dev->reg_state == NETREG_UNINITIALIZED) {
2268		err = register_netdev(dev);
2269		if (err) {
2270			pr_warn("%s: register_netdev err=%d\n", __func__, err);
2271			device_unregister(&np->xbdev->dev);
2272			return err;
2273		}
2274	}
2275
2276	rtnl_lock();
2277	netdev_update_features(dev);
2278	rtnl_unlock();
2279
2280	/*
2281	 * All public and private state should now be sane.  Get
2282	 * ready to start sending and receiving packets and give the driver
2283	 * domain a kick because we've probably just requeued some
2284	 * packets.
2285	 */
2286	netif_carrier_on(np->netdev);
2287	for (j = 0; j < num_queues; ++j) {
2288		queue = &np->queues[j];
2289
2290		notify_remote_via_irq(queue->tx_irq);
2291		if (queue->tx_irq != queue->rx_irq)
2292			notify_remote_via_irq(queue->rx_irq);
2293
2294		spin_lock_irq(&queue->tx_lock);
2295		xennet_tx_buf_gc(queue);
2296		spin_unlock_irq(&queue->tx_lock);
2297
2298		spin_lock_bh(&queue->rx_lock);
2299		xennet_alloc_rx_buffers(queue);
2300		spin_unlock_bh(&queue->rx_lock);
2301	}
2302
2303	return 0;
2304}
2305
2306/*
2307 * Callback received when the backend's state changes.
2308 */
2309static void netback_changed(struct xenbus_device *dev,
2310			    enum xenbus_state backend_state)
2311{
2312	struct netfront_info *np = dev_get_drvdata(&dev->dev);
2313	struct net_device *netdev = np->netdev;
2314
2315	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2316
2317	wake_up_all(&module_wq);
2318
2319	switch (backend_state) {
2320	case XenbusStateInitialising:
2321	case XenbusStateInitialised:
2322	case XenbusStateReconfiguring:
2323	case XenbusStateReconfigured:
2324	case XenbusStateUnknown:
2325		break;
2326
2327	case XenbusStateInitWait:
2328		if (dev->state != XenbusStateInitialising)
2329			break;
2330		if (xennet_connect(netdev) != 0)
2331			break;
2332		xenbus_switch_state(dev, XenbusStateConnected);
2333		break;
2334
2335	case XenbusStateConnected:
2336		netdev_notify_peers(netdev);
2337		break;
2338
2339	case XenbusStateClosed:
2340		if (dev->state == XenbusStateClosed)
2341			break;
2342		fallthrough;	/* Missed the backend's CLOSING state */
2343	case XenbusStateClosing:
2344		xenbus_frontend_closed(dev);
2345		break;
2346	}
2347}
2348
2349static const struct xennet_stat {
2350	char name[ETH_GSTRING_LEN];
2351	u16 offset;
2352} xennet_stats[] = {
2353	{
2354		"rx_gso_checksum_fixup",
2355		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2356	},
2357};
2358
2359static int xennet_get_sset_count(struct net_device *dev, int string_set)
2360{
2361	switch (string_set) {
2362	case ETH_SS_STATS:
2363		return ARRAY_SIZE(xennet_stats);
2364	default:
2365		return -EINVAL;
2366	}
2367}
2368
2369static void xennet_get_ethtool_stats(struct net_device *dev,
2370				     struct ethtool_stats *stats, u64 * data)
2371{
2372	void *np = netdev_priv(dev);
2373	int i;
2374
2375	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2376		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2377}
2378
2379static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2380{
2381	int i;
2382
2383	switch (stringset) {
2384	case ETH_SS_STATS:
2385		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2386			memcpy(data + i * ETH_GSTRING_LEN,
2387			       xennet_stats[i].name, ETH_GSTRING_LEN);
2388		break;
2389	}
2390}
2391
2392static const struct ethtool_ops xennet_ethtool_ops =
2393{
2394	.get_link = ethtool_op_get_link,
2395
2396	.get_sset_count = xennet_get_sset_count,
2397	.get_ethtool_stats = xennet_get_ethtool_stats,
2398	.get_strings = xennet_get_strings,
2399	.get_ts_info = ethtool_op_get_ts_info,
2400};
2401
2402#ifdef CONFIG_SYSFS
2403static ssize_t show_rxbuf(struct device *dev,
2404			  struct device_attribute *attr, char *buf)
2405{
2406	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2407}
2408
2409static ssize_t store_rxbuf(struct device *dev,
2410			   struct device_attribute *attr,
2411			   const char *buf, size_t len)
2412{
2413	char *endp;
2414
2415	if (!capable(CAP_NET_ADMIN))
2416		return -EPERM;
2417
2418	simple_strtoul(buf, &endp, 0);
2419	if (endp == buf)
2420		return -EBADMSG;
2421
2422	/* rxbuf_min and rxbuf_max are no longer configurable. */
2423
2424	return len;
2425}
2426
2427static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2428static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2429static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2430
2431static struct attribute *xennet_dev_attrs[] = {
2432	&dev_attr_rxbuf_min.attr,
2433	&dev_attr_rxbuf_max.attr,
2434	&dev_attr_rxbuf_cur.attr,
2435	NULL
2436};
2437
2438static const struct attribute_group xennet_dev_group = {
2439	.attrs = xennet_dev_attrs
2440};
2441#endif /* CONFIG_SYSFS */
2442
2443static void xennet_bus_close(struct xenbus_device *dev)
2444{
2445	int ret;
2446
2447	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2448		return;
2449	do {
2450		xenbus_switch_state(dev, XenbusStateClosing);
2451		ret = wait_event_timeout(module_wq,
2452				   xenbus_read_driver_state(dev->otherend) ==
2453				   XenbusStateClosing ||
2454				   xenbus_read_driver_state(dev->otherend) ==
2455				   XenbusStateClosed ||
2456				   xenbus_read_driver_state(dev->otherend) ==
2457				   XenbusStateUnknown,
2458				   XENNET_TIMEOUT);
2459	} while (!ret);
2460
2461	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2462		return;
2463
2464	do {
2465		xenbus_switch_state(dev, XenbusStateClosed);
2466		ret = wait_event_timeout(module_wq,
2467				   xenbus_read_driver_state(dev->otherend) ==
2468				   XenbusStateClosed ||
2469				   xenbus_read_driver_state(dev->otherend) ==
2470				   XenbusStateUnknown,
2471				   XENNET_TIMEOUT);
2472	} while (!ret);
2473}
2474
2475static int xennet_remove(struct xenbus_device *dev)
2476{
2477	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2478
2479	xennet_bus_close(dev);
2480	xennet_disconnect_backend(info);
2481
2482	if (info->netdev->reg_state == NETREG_REGISTERED)
2483		unregister_netdev(info->netdev);
2484
2485	if (info->queues) {
2486		rtnl_lock();
2487		xennet_destroy_queues(info);
2488		rtnl_unlock();
2489	}
2490	xennet_free_netdev(info->netdev);
2491
2492	return 0;
2493}
2494
2495static const struct xenbus_device_id netfront_ids[] = {
2496	{ "vif" },
2497	{ "" }
2498};
2499
2500static struct xenbus_driver netfront_driver = {
2501	.ids = netfront_ids,
2502	.probe = netfront_probe,
2503	.remove = xennet_remove,
2504	.resume = netfront_resume,
2505	.otherend_changed = netback_changed,
2506};
2507
2508static int __init netif_init(void)
2509{
2510	if (!xen_domain())
2511		return -ENODEV;
2512
2513	if (!xen_has_pv_nic_devices())
2514		return -ENODEV;
2515
2516	pr_info("Initialising Xen virtual ethernet driver\n");
2517
2518	/* Allow as many queues as there are CPUs, but at most 8, if the user
2519	 * has not specified a value.
2520	 */
2521	if (xennet_max_queues == 0)
2522		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2523					  num_online_cpus());
2524
2525	return xenbus_register_frontend(&netfront_driver);
2526}
2527module_init(netif_init);
2528
2529
2530static void __exit netif_exit(void)
2531{
2532	xenbus_unregister_driver(&netfront_driver);
2533}
2534module_exit(netif_exit);
2535
2536MODULE_DESCRIPTION("Xen virtual network device frontend");
2537MODULE_LICENSE("GPL");
2538MODULE_ALIAS("xen:vif");
2539MODULE_ALIAS("xennet");
v6.9.4
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/module.h>
  35#include <linux/kernel.h>
  36#include <linux/netdevice.h>
  37#include <linux/etherdevice.h>
  38#include <linux/skbuff.h>
  39#include <linux/ethtool.h>
  40#include <linux/if_ether.h>
  41#include <net/tcp.h>
  42#include <linux/udp.h>
  43#include <linux/moduleparam.h>
  44#include <linux/mm.h>
  45#include <linux/slab.h>
  46#include <net/ip.h>
  47#include <linux/bpf.h>
  48#include <net/page_pool/types.h>
  49#include <linux/bpf_trace.h>
  50
  51#include <xen/xen.h>
  52#include <xen/xenbus.h>
  53#include <xen/events.h>
  54#include <xen/page.h>
  55#include <xen/platform_pci.h>
  56#include <xen/grant_table.h>
  57
  58#include <xen/interface/io/netif.h>
  59#include <xen/interface/memory.h>
  60#include <xen/interface/grant_table.h>
  61
  62/* Module parameters */
  63#define MAX_QUEUES_DEFAULT 8
  64static unsigned int xennet_max_queues;
  65module_param_named(max_queues, xennet_max_queues, uint, 0644);
  66MODULE_PARM_DESC(max_queues,
  67		 "Maximum number of queues per virtual interface");
  68
  69static bool __read_mostly xennet_trusted = true;
  70module_param_named(trusted, xennet_trusted, bool, 0644);
  71MODULE_PARM_DESC(trusted, "Is the backend trusted");
  72
  73#define XENNET_TIMEOUT  (5 * HZ)
  74
  75static const struct ethtool_ops xennet_ethtool_ops;
  76
  77struct netfront_cb {
  78	int pull_to;
  79};
  80
  81#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
  82
  83#define RX_COPY_THRESHOLD 256
  84
  85#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
  86#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
  87
  88/* Minimum number of Rx slots (includes slot for GSO metadata). */
  89#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
  90
  91/* Queue name is interface name with "-qNNN" appended */
  92#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
  93
  94/* IRQ name is queue name with "-tx" or "-rx" appended */
  95#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
  96
  97static DECLARE_WAIT_QUEUE_HEAD(module_wq);
  98
  99struct netfront_stats {
 100	u64			packets;
 101	u64			bytes;
 102	struct u64_stats_sync	syncp;
 103};
 104
 105struct netfront_info;
 106
 107struct netfront_queue {
 108	unsigned int id; /* Queue ID, 0-based */
 109	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
 110	struct netfront_info *info;
 111
 112	struct bpf_prog __rcu *xdp_prog;
 113
 114	struct napi_struct napi;
 115
 116	/* Split event channels support, tx_* == rx_* when using
 117	 * single event channel.
 118	 */
 119	unsigned int tx_evtchn, rx_evtchn;
 120	unsigned int tx_irq, rx_irq;
 121	/* Only used when split event channels support is enabled */
 122	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
 123	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
 124
 125	spinlock_t   tx_lock;
 126	struct xen_netif_tx_front_ring tx;
 127	int tx_ring_ref;
 128
 129	/*
 130	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
 131	 * are linked from tx_skb_freelist through tx_link.
 132	 */
 133	struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
 134	unsigned short tx_link[NET_TX_RING_SIZE];
 135#define TX_LINK_NONE 0xffff
 136#define TX_PENDING   0xfffe
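	/* tx_link[] threads two id lists through one array: the free list
	 * (head tx_skb_freelist) and the not-yet-exposed pending queue
	 * (head tx_pend_queue).  TX_LINK_NONE terminates a list, and
	 * TX_PENDING marks a request the backend may be processing, the
	 * only state in which xennet_tx_buf_gc() accepts a response.
	 */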
 137	grant_ref_t gref_tx_head;
 138	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 139	struct page *grant_tx_page[NET_TX_RING_SIZE];
 140	unsigned tx_skb_freelist;
 141	unsigned int tx_pend_queue;
 142
 143	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 144	struct xen_netif_rx_front_ring rx;
 145	int rx_ring_ref;
 146
 147	struct timer_list rx_refill_timer;
 148
 149	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
 150	grant_ref_t gref_rx_head;
 151	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 152
 153	unsigned int rx_rsp_unconsumed;
 154	spinlock_t rx_cons_lock;
 155
 156	struct page_pool *page_pool;
 157	struct xdp_rxq_info xdp_rxq;
 158};
 159
 160struct netfront_info {
 161	struct list_head list;
 162	struct net_device *netdev;
 163
 164	struct xenbus_device *xbdev;
 165
 166	/* Multi-queue support */
 167	struct netfront_queue *queues;
 168
 169	/* Statistics */
 170	struct netfront_stats __percpu *rx_stats;
 171	struct netfront_stats __percpu *tx_stats;
 172
 173	/* XDP state */
 174	bool netback_has_xdp_headroom;
 175	bool netfront_xdp_enabled;
 176
 177	/* Is device behaving sane? */
 178	bool broken;
 179
 180	/* Should skbs be bounced into a zeroed buffer? */
 181	bool bounce;
 182
 183	atomic_t rx_gso_checksum_fixup;
 184};
 185
 186struct netfront_rx_info {
 187	struct xen_netif_rx_response rx;
 188	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 189};
 190
 191/*
 192 * Helpers for acquiring and freeing slots in tx_skbs[].
 193 */
 194
 195static void add_id_to_list(unsigned *head, unsigned short *list,
 196			   unsigned short id)
 197{
 198	list[id] = *head;
 199	*head = id;
 200}
 201
 202static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
 203{
 204	unsigned int id = *head;
 205
 206	if (id != TX_LINK_NONE) {
 207		*head = list[id];
 208		list[id] = TX_LINK_NONE;
 209	}
 210	return id;
 211}
 212
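/* The rx ring size is a power of two, so masking an index is the same
 * as reducing it modulo NET_RX_RING_SIZE.
 */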
 213static int xennet_rxidx(RING_IDX idx)
 214{
 215	return idx & (NET_RX_RING_SIZE - 1);
 216}
 217
 218static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
 219					 RING_IDX ri)
 220{
 221	int i = xennet_rxidx(ri);
 222	struct sk_buff *skb = queue->rx_skbs[i];
 223	queue->rx_skbs[i] = NULL;
 224	return skb;
 225}
 226
 227static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
 228					    RING_IDX ri)
 229{
 230	int i = xennet_rxidx(ri);
 231	grant_ref_t ref = queue->grant_rx_ref[i];
 232	queue->grant_rx_ref[i] = INVALID_GRANT_REF;
 233	return ref;
 234}
 235
 236#ifdef CONFIG_SYSFS
 237static const struct attribute_group xennet_dev_group;
 238#endif
 239
 240static bool xennet_can_sg(struct net_device *dev)
 241{
 242	return dev->features & NETIF_F_SG;
 243}
 244
 245
 246static void rx_refill_timeout(struct timer_list *t)
 247{
 248	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
 249	napi_schedule(&queue->napi);
 250}
 251
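/* Report the tx ring as writable only while more than
 * XEN_NETIF_NR_SLOTS_MIN + 1 slots are free, enough for a worst-case
 * packet and its optional extra-info slot.
 */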
 252static int netfront_tx_slot_available(struct netfront_queue *queue)
 253{
 254	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
 255		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
 256}
 257
 258static void xennet_maybe_wake_tx(struct netfront_queue *queue)
 259{
 260	struct net_device *dev = queue->info->netdev;
 261	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
 262
 263	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
 264	    netfront_tx_slot_available(queue) &&
 265	    likely(netif_running(dev)))
 266		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
 267}
 268
 269
 270static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
 271{
 272	struct sk_buff *skb;
 273	struct page *page;
 274
 275	skb = __netdev_alloc_skb(queue->info->netdev,
 276				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
 277				 GFP_ATOMIC | __GFP_NOWARN);
 278	if (unlikely(!skb))
 279		return NULL;
 280
 281	page = page_pool_alloc_pages(queue->page_pool,
 282				     GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
 283	if (unlikely(!page)) {
 284		kfree_skb(skb);
 285		return NULL;
 286	}
 287	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 288	skb_mark_for_recycle(skb);
 289
 290	/* Align the IP header to a 16-byte boundary */
 291	skb_reserve(skb, NET_IP_ALIGN);
 292	skb->dev = queue->info->netdev;
 293
 294	return skb;
 295}
 296
 297
 298static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 299{
 300	RING_IDX req_prod = queue->rx.req_prod_pvt;
 301	int notify;
 302	int err = 0;
 303
 304	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
 305		return;
 306
 307	for (req_prod = queue->rx.req_prod_pvt;
 308	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
 309	     req_prod++) {
 310		struct sk_buff *skb;
 311		unsigned short id;
 312		grant_ref_t ref;
 313		struct page *page;
 314		struct xen_netif_rx_request *req;
 315
 316		skb = xennet_alloc_one_rx_buffer(queue);
 317		if (!skb) {
 318			err = -ENOMEM;
 319			break;
 320		}
 321
 322		id = xennet_rxidx(req_prod);
 323
 324		BUG_ON(queue->rx_skbs[id]);
 325		queue->rx_skbs[id] = skb;
 326
 327		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
 328		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 329		queue->grant_rx_ref[id] = ref;
 330
 331		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 332
 333		req = RING_GET_REQUEST(&queue->rx, req_prod);
 334		gnttab_page_grant_foreign_access_ref_one(ref,
 335							 queue->info->xbdev->otherend_id,
 336							 page,
 337							 0);
 338		req->id = id;
 339		req->gref = ref;
 340	}
 341
 342	queue->rx.req_prod_pvt = req_prod;
 343
 344	/* Try again later if there are not enough requests or skb allocation
 345	 * failed.
 346	 * "Enough requests" means the sum of newly created slots and the
 347	 * slots still unconsumed by the backend reaches NET_RX_SLOTS_MIN.
 348	 */
 349	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
 350	    unlikely(err)) {
 351		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
 352		return;
 353	}
 354
 355	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 356	if (notify)
 357		notify_remote_via_irq(queue->rx_irq);
 358}
 359
 360static int xennet_open(struct net_device *dev)
 361{
 362	struct netfront_info *np = netdev_priv(dev);
 363	unsigned int num_queues = dev->real_num_tx_queues;
 364	unsigned int i = 0;
 365	struct netfront_queue *queue = NULL;
 366
 367	if (!np->queues || np->broken)
 368		return -ENODEV;
 369
 370	for (i = 0; i < num_queues; ++i) {
 371		queue = &np->queues[i];
 372		napi_enable(&queue->napi);
 373
 374		spin_lock_bh(&queue->rx_lock);
 375		if (netif_carrier_ok(dev)) {
 376			xennet_alloc_rx_buffers(queue);
 377			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
 378			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
 379				napi_schedule(&queue->napi);
 380		}
 381		spin_unlock_bh(&queue->rx_lock);
 382	}
 383
 384	netif_tx_start_all_queues(dev);
 385
 386	return 0;
 387}
 388
 389static bool xennet_tx_buf_gc(struct netfront_queue *queue)
 390{
 391	RING_IDX cons, prod;
 392	unsigned short id;
 393	struct sk_buff *skb;
 394	bool more_to_do;
 395	bool work_done = false;
 396	const struct device *dev = &queue->info->netdev->dev;
 397
 398	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 399
 400	do {
 401		prod = queue->tx.sring->rsp_prod;
 402		if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
 403			dev_alert(dev, "Illegal number of responses %u\n",
 404				  prod - queue->tx.rsp_cons);
 405			goto err;
 406		}
 407		rmb(); /* Ensure we see responses up to 'rp'. */
 408
 409		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
 410			struct xen_netif_tx_response txrsp;
 411
 412			work_done = true;
 413
 414			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
 415			if (txrsp.status == XEN_NETIF_RSP_NULL)
 416				continue;
 417
 418			id = txrsp.id;
 419			if (id >= RING_SIZE(&queue->tx)) {
 420				dev_alert(dev,
 421					  "Response has incorrect id (%u)\n",
 422					  id);
 423				goto err;
 424			}
 425			if (queue->tx_link[id] != TX_PENDING) {
 426				dev_alert(dev,
 427					  "Response for inactive request\n");
 428				goto err;
 429			}
 430
 431			queue->tx_link[id] = TX_LINK_NONE;
 432			skb = queue->tx_skbs[id];
 433			queue->tx_skbs[id] = NULL;
 434			if (unlikely(!gnttab_end_foreign_access_ref(
 435				queue->grant_tx_ref[id]))) {
 436				dev_alert(dev,
 437					  "Grant still in use by backend domain\n");
 438				goto err;
 439			}
 440			gnttab_release_grant_reference(
 441				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 442			queue->grant_tx_ref[id] = INVALID_GRANT_REF;
 443			queue->grant_tx_page[id] = NULL;
 444			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
 445			dev_kfree_skb_irq(skb);
 446		}
 447
 448		queue->tx.rsp_cons = prod;
 449
 450		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
 451	} while (more_to_do);
 452
 453	xennet_maybe_wake_tx(queue);
 454
 455	return work_done;
 456
 457 err:
 458	queue->info->broken = true;
 459	dev_alert(dev, "Disabled for further use\n");
 460
 461	return work_done;
 462}
 463
 464struct xennet_gnttab_make_txreq {
 465	struct netfront_queue *queue;
 466	struct sk_buff *skb;
 467	struct page *page;
 468	struct xen_netif_tx_request *tx;      /* Last request on ring page */
 469	struct xen_netif_tx_request tx_local; /* Last request, local copy */
 470	unsigned int size;
 471};
 472
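/* The struct above carries per-packet state into the grant iteration
 * callbacks: each call of xennet_tx_setup_grant() claims one grant
 * reference, emits one tx request, and accumulates the bytes covered
 * in info->size.
 */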
 473static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 474				  unsigned int len, void *data)
 475{
 476	struct xennet_gnttab_make_txreq *info = data;
 477	unsigned int id;
 478	struct xen_netif_tx_request *tx;
 479	grant_ref_t ref;
 480	/* convenient aliases */
 481	struct page *page = info->page;
 482	struct netfront_queue *queue = info->queue;
 483	struct sk_buff *skb = info->skb;
 484
 485	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
 486	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 487	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 488	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 489
 490	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 491					gfn, GNTMAP_readonly);
 492
 493	queue->tx_skbs[id] = skb;
 494	queue->grant_tx_page[id] = page;
 495	queue->grant_tx_ref[id] = ref;
 496
 497	info->tx_local.id = id;
 498	info->tx_local.gref = ref;
 499	info->tx_local.offset = offset;
 500	info->tx_local.size = len;
 501	info->tx_local.flags = 0;
 502
 503	*tx = info->tx_local;
 504
 505	/*
 506	 * Put the request on the pending queue; it will be marked pending
 507	 * when the producer index is about to be raised.
 508	 */
 509	add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
 510
 511	info->tx = tx;
 512	info->size += info->tx_local.size;
 513}
 514
 515static struct xen_netif_tx_request *xennet_make_first_txreq(
 516	struct xennet_gnttab_make_txreq *info,
 517	unsigned int offset, unsigned int len)
 518{
 519	info->size = 0;
 520
 521	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
 522
 523	return info->tx;
 524}
 525
 526static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 527				  unsigned int len, void *data)
 528{
 529	struct xennet_gnttab_make_txreq *info = data;
 530
 531	info->tx->flags |= XEN_NETTXF_more_data;
 532	skb_get(info->skb);
 533	xennet_tx_setup_grant(gfn, offset, len, data);
 534}
 535
 536static void xennet_make_txreqs(
 537	struct xennet_gnttab_make_txreq *info,
 538	struct page *page,
 539	unsigned int offset, unsigned int len)
 540{
 541	/* Skip unused frames from start of page */
 542	page += offset >> PAGE_SHIFT;
 543	offset &= ~PAGE_MASK;
 544
 545	while (len) {
 546		info->page = page;
 547		info->size = 0;
 548
 549		gnttab_foreach_grant_in_range(page, offset, len,
 550					      xennet_make_one_txreq,
 551					      info);
 552
 553		page++;
 554		offset = 0;
 555		len -= info->size;
 556	}
 557}
 558
 559/*
 560 * Count how many ring slots are required to send this skb. Each frag
 561 * might be a compound page.
 562 */
 563static int xennet_count_skb_slots(struct sk_buff *skb)
 564{
 565	int i, frags = skb_shinfo(skb)->nr_frags;
 566	int slots;
 567
 568	slots = gnttab_count_grant(offset_in_page(skb->data),
 569				   skb_headlen(skb));
 570
 571	for (i = 0; i < frags; i++) {
 572		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 573		unsigned long size = skb_frag_size(frag);
 574		unsigned long offset = skb_frag_off(frag);
 575
 576		/* Skip unused frames from start of page */
 577		offset &= ~PAGE_MASK;
 578
 579		slots += gnttab_count_grant(offset, size);
 580	}
 581
 582	return slots;
 583}
 584
 585static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 586			       struct net_device *sb_dev)
 587{
 588	unsigned int num_queues = dev->real_num_tx_queues;
 589	u32 hash;
 590	u16 queue_idx;
 591
 592	/* First, check if there is only one queue */
 593	if (num_queues == 1) {
 594		queue_idx = 0;
 595	} else {
 596		hash = skb_get_hash(skb);
 597		queue_idx = hash % num_queues;
 598	}
 599
 600	return queue_idx;
 601}
 602
 603static void xennet_mark_tx_pending(struct netfront_queue *queue)
 604{
 605	unsigned int i;
 606
 607	while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
 608	       TX_LINK_NONE)
 609		queue->tx_link[i] = TX_PENDING;
 610}
 611
 612static int xennet_xdp_xmit_one(struct net_device *dev,
 613			       struct netfront_queue *queue,
 614			       struct xdp_frame *xdpf)
 615{
 616	struct netfront_info *np = netdev_priv(dev);
 617	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 618	struct xennet_gnttab_make_txreq info = {
 619		.queue = queue,
 620		.skb = NULL,
 621		.page = virt_to_page(xdpf->data),
 622	};
 623	int notify;
 624
 625	xennet_make_first_txreq(&info,
 626				offset_in_page(xdpf->data),
 627				xdpf->len);
 628
 629	xennet_mark_tx_pending(queue);
 630
 631	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 632	if (notify)
 633		notify_remote_via_irq(queue->tx_irq);
 634
 635	u64_stats_update_begin(&tx_stats->syncp);
 636	tx_stats->bytes += xdpf->len;
 637	tx_stats->packets++;
 638	u64_stats_update_end(&tx_stats->syncp);
 639
 640	xennet_tx_buf_gc(queue);
 641
 642	return 0;
 643}
 644
 645static int xennet_xdp_xmit(struct net_device *dev, int n,
 646			   struct xdp_frame **frames, u32 flags)
 647{
 648	unsigned int num_queues = dev->real_num_tx_queues;
 649	struct netfront_info *np = netdev_priv(dev);
 650	struct netfront_queue *queue = NULL;
 651	unsigned long irq_flags;
 652	int nxmit = 0;
 653	int i;
 654
 655	if (unlikely(np->broken))
 656		return -ENODEV;
 657	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 658		return -EINVAL;
 659
 660	queue = &np->queues[smp_processor_id() % num_queues];
 661
 662	spin_lock_irqsave(&queue->tx_lock, irq_flags);
 663	for (i = 0; i < n; i++) {
 664		struct xdp_frame *xdpf = frames[i];
 665
 666		if (!xdpf)
 667			continue;
 668		if (xennet_xdp_xmit_one(dev, queue, xdpf))
 669			break;
 670		nxmit++;
 671	}
 672	spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
 673
 674	return nxmit;
 675}
 676
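/* Copy an skb into fresh, zeroed, XEN_PAGE_SIZE-aligned storage, so
 * that granting the backing pages to a possibly untrusted backend
 * cannot expose unrelated data sharing those pages (see the caller in
 * xennet_start_xmit()).
 */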
 677static struct sk_buff *bounce_skb(const struct sk_buff *skb)
 678{
 679	unsigned int headerlen = skb_headroom(skb);
 680	/* Align size to allocate full pages and avoid contiguous data leaks */
 681	unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
 682				  XEN_PAGE_SIZE);
 683	struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
 684
 685	if (!n)
 686		return NULL;
 687
 688	if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
 689		WARN_ONCE(1, "misaligned skb allocated\n");
 690		kfree_skb(n);
 691		return NULL;
 692	}
 693
 694	/* Set the data pointer */
 695	skb_reserve(n, headerlen);
 696	/* Set the tail pointer and length */
 697	skb_put(n, skb->len);
 698
 699	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
 700
 701	skb_copy_header(n, skb);
 702	return n;
 703}
 704
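/* Worst-case slot count for one packet: a 64 KiB frame spans
 * 65536 / XEN_PAGE_SIZE pages, plus one more when the data does not
 * start on a page boundary.
 */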
 705#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 706
 707static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 708{
 709	struct netfront_info *np = netdev_priv(dev);
 710	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
 711	struct xen_netif_tx_request *first_tx;
 712	unsigned int i;
 713	int notify;
 714	int slots;
 715	struct page *page;
 716	unsigned int offset;
 717	unsigned int len;
 718	unsigned long flags;
 719	struct netfront_queue *queue = NULL;
 720	struct xennet_gnttab_make_txreq info = { };
 721	unsigned int num_queues = dev->real_num_tx_queues;
 722	u16 queue_index;
 723	struct sk_buff *nskb;
 724
 725	/* Drop the packet if no queues are set up */
 726	if (num_queues < 1)
 727		goto drop;
 728	if (unlikely(np->broken))
 729		goto drop;
 730	/* Determine which queue to transmit this SKB on */
 731	queue_index = skb_get_queue_mapping(skb);
 732	queue = &np->queues[queue_index];
 733
 734	/* If skb->len is too big for wire format, drop skb and alert
 735	 * user about misconfiguration.
 736	 */
 737	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
 738		net_alert_ratelimited(
 739			"xennet: skb->len = %u, too big for wire format\n",
 740			skb->len);
 741		goto drop;
 742	}
 743
 744	slots = xennet_count_skb_slots(skb);
 745	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
 746		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
 747				    slots, skb->len);
 748		if (skb_linearize(skb))
 749			goto drop;
 750	}
 751
 752	page = virt_to_page(skb->data);
 753	offset = offset_in_page(skb->data);
 754
 755	/* The first req should be at least ETH_HLEN size or the packet will be
 756	 * dropped by netback.
 757	 *
 758	 * If the backend is not trusted bounce all data to zeroed pages to
 759	 * avoid exposing contiguous data on the granted page not belonging to
 760	 * the skb.
 761	 */
 762	if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
 763		nskb = bounce_skb(skb);
 764		if (!nskb)
 765			goto drop;
 766		dev_consume_skb_any(skb);
 767		skb = nskb;
 768		page = virt_to_page(skb->data);
 769		offset = offset_in_page(skb->data);
 770	}
 771
 772	len = skb_headlen(skb);
 773
 774	spin_lock_irqsave(&queue->tx_lock, flags);
 775
 776	if (unlikely(!netif_carrier_ok(dev) ||
 777		     (slots > 1 && !xennet_can_sg(dev)) ||
 778		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 779		spin_unlock_irqrestore(&queue->tx_lock, flags);
 780		goto drop;
 781	}
 782
 783	/* First request for the linear area. */
 784	info.queue = queue;
 785	info.skb = skb;
 786	info.page = page;
 787	first_tx = xennet_make_first_txreq(&info, offset, len);
 788	offset += info.tx_local.size;
 789	if (offset == PAGE_SIZE) {
 790		page++;
 791		offset = 0;
 792	}
 793	len -= info.tx_local.size;
 794
 795	if (skb->ip_summed == CHECKSUM_PARTIAL)
 796		/* local packet? */
 797		first_tx->flags |= XEN_NETTXF_csum_blank |
 798				   XEN_NETTXF_data_validated;
 799	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 800		/* remote but checksummed. */
 801		first_tx->flags |= XEN_NETTXF_data_validated;
 802
 803	/* Optional extra info after the first request. */
 804	if (skb_shinfo(skb)->gso_size) {
 805		struct xen_netif_extra_info *gso;
 806
 807		gso = (struct xen_netif_extra_info *)
 808			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 809
 810		first_tx->flags |= XEN_NETTXF_extra_info;
 811
 812		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 813		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
 814			XEN_NETIF_GSO_TYPE_TCPV6 :
 815			XEN_NETIF_GSO_TYPE_TCPV4;
 816		gso->u.gso.pad = 0;
 817		gso->u.gso.features = 0;
 818
 819		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 820		gso->flags = 0;
 821	}
 822
 823	/* Requests for the rest of the linear area. */
 824	xennet_make_txreqs(&info, page, offset, len);
 825
 826	/* Requests for all the frags. */
 827	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 828		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 829		xennet_make_txreqs(&info, skb_frag_page(frag),
 830					skb_frag_off(frag),
 831					skb_frag_size(frag));
 832	}
 833
 834	/* First request has the packet length. */
 835	first_tx->size = skb->len;
 836
 837	/* timestamp packet in software */
 838	skb_tx_timestamp(skb);
 839
 840	xennet_mark_tx_pending(queue);
 841
 842	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 843	if (notify)
 844		notify_remote_via_irq(queue->tx_irq);
 845
 846	u64_stats_update_begin(&tx_stats->syncp);
 847	tx_stats->bytes += skb->len;
 848	tx_stats->packets++;
 849	u64_stats_update_end(&tx_stats->syncp);
 850
 851	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 852	xennet_tx_buf_gc(queue);
 853
 854	if (!netfront_tx_slot_available(queue))
 855		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
 856
 857	spin_unlock_irqrestore(&queue->tx_lock, flags);
 858
 859	return NETDEV_TX_OK;
 860
 861 drop:
 862	dev->stats.tx_dropped++;
 863	dev_kfree_skb_any(skb);
 864	return NETDEV_TX_OK;
 865}
 866
 867static int xennet_close(struct net_device *dev)
 868{
 869	struct netfront_info *np = netdev_priv(dev);
 870	unsigned int num_queues = dev->real_num_tx_queues;
 871	unsigned int i;
 872	struct netfront_queue *queue;
 873	netif_tx_stop_all_queues(np->netdev);
 874	for (i = 0; i < num_queues; ++i) {
 875		queue = &np->queues[i];
 876		napi_disable(&queue->napi);
 877	}
 878	return 0;
 879}
 880
 881static void xennet_destroy_queues(struct netfront_info *info)
 882{
 883	unsigned int i;
 884
 885	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
 886		struct netfront_queue *queue = &info->queues[i];
 887
 888		if (netif_running(info->netdev))
 889			napi_disable(&queue->napi);
 890		netif_napi_del(&queue->napi);
 891	}
 892
 893	kfree(info->queues);
 894	info->queues = NULL;
 895}
 896
 897static void xennet_uninit(struct net_device *dev)
 898{
 899	struct netfront_info *np = netdev_priv(dev);
 900	xennet_destroy_queues(np);
 901}
 902
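/* Update rx.rsp_cons together with the cached count of unconsumed
 * responses under rx_cons_lock, so that the two values stay consistent
 * for readers outside the napi poll loop.
 */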
 903static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
 904{
 905	unsigned long flags;
 906
 907	spin_lock_irqsave(&queue->rx_cons_lock, flags);
 908	queue->rx.rsp_cons = val;
 909	queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
 910	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
 911}
 912
 913static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
 914				grant_ref_t ref)
 915{
 916	int new = xennet_rxidx(queue->rx.req_prod_pvt);
 917
 918	BUG_ON(queue->rx_skbs[new]);
 919	queue->rx_skbs[new] = skb;
 920	queue->grant_rx_ref[new] = ref;
 921	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
 922	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
 923	queue->rx.req_prod_pvt++;
 924}
 925
 926static int xennet_get_extras(struct netfront_queue *queue,
 927			     struct xen_netif_extra_info *extras,
 928			     RING_IDX rp)
 929
 930{
 931	struct xen_netif_extra_info extra;
 932	struct device *dev = &queue->info->netdev->dev;
 933	RING_IDX cons = queue->rx.rsp_cons;
 934	int err = 0;
 935
 936	do {
 937		struct sk_buff *skb;
 938		grant_ref_t ref;
 939
 940		if (unlikely(cons + 1 == rp)) {
 941			if (net_ratelimit())
 942				dev_warn(dev, "Missing extra info\n");
 943			err = -EBADR;
 944			break;
 945		}
 946
 947		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
 948
 949		if (unlikely(!extra.type ||
 950			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 951			if (net_ratelimit())
 952				dev_warn(dev, "Invalid extra type: %d\n",
 953					 extra.type);
 954			err = -EINVAL;
 955		} else {
 956			extras[extra.type - 1] = extra;
 957		}
 958
 959		skb = xennet_get_rx_skb(queue, cons);
 960		ref = xennet_get_rx_ref(queue, cons);
 961		xennet_move_rx_slot(queue, skb, ref);
 962	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 963
 964	xennet_set_rx_rsp_cons(queue, cons);
 965	return err;
 966}
 967
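/* Run the attached XDP program on one receive page.  XDP_TX and
 * XDP_REDIRECT take an extra page reference because the buffer leaves
 * the rx path; the caller drops the packet on any verdict other than
 * XDP_PASS.
 */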
 968static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
 969		   struct xen_netif_rx_response *rx, struct bpf_prog *prog,
 970		   struct xdp_buff *xdp, bool *need_xdp_flush)
 971{
 972	struct xdp_frame *xdpf;
 973	u32 len = rx->status;
 974	u32 act;
 975	int err;
 976
 977	xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
 978		      &queue->xdp_rxq);
 979	xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
 980			 len, false);
 981
 982	act = bpf_prog_run_xdp(prog, xdp);
 983	switch (act) {
 984	case XDP_TX:
 985		get_page(pdata);
 986		xdpf = xdp_convert_buff_to_frame(xdp);
 987		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
 988		if (unlikely(!err))
 989			xdp_return_frame_rx_napi(xdpf);
 990		else if (unlikely(err < 0))
 991			trace_xdp_exception(queue->info->netdev, prog, act);
 992		break;
 993	case XDP_REDIRECT:
 994		get_page(pdata);
 995		err = xdp_do_redirect(queue->info->netdev, xdp, prog);
 996		*need_xdp_flush = true;
 997		if (unlikely(err))
 998			trace_xdp_exception(queue->info->netdev, prog, act);
 999		break;
1000	case XDP_PASS:
1001	case XDP_DROP:
1002		break;
1003
1004	case XDP_ABORTED:
1005		trace_xdp_exception(queue->info->netdev, prog, act);
1006		break;
1007
1008	default:
1009		bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act);
1010	}
1011
1012	return act;
1013}
1014
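/* Consume the chain of responses that make up one packet.  The first
 * slot may carry extra-info segments; XEN_NETRXF_more_data links any
 * further slots, whose skbs are collected on @list for
 * xennet_fill_frags() to merge into the head skb.
 */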
1015static int xennet_get_responses(struct netfront_queue *queue,
1016				struct netfront_rx_info *rinfo, RING_IDX rp,
1017				struct sk_buff_head *list,
1018				bool *need_xdp_flush)
1019{
1020	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1021	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
1022	RING_IDX cons = queue->rx.rsp_cons;
1023	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
1024	struct xen_netif_extra_info *extras = rinfo->extras;
1025	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
1026	struct device *dev = &queue->info->netdev->dev;
1027	struct bpf_prog *xdp_prog;
1028	struct xdp_buff xdp;
1029	int slots = 1;
1030	int err = 0;
1031	u32 verdict;
1032
1033	if (rx->flags & XEN_NETRXF_extra_info) {
1034		err = xennet_get_extras(queue, extras, rp);
1035		if (!err) {
1036			if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
1037				struct xen_netif_extra_info *xdp;
1038
1039				xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
1040				rx->offset = xdp->u.xdp.headroom;
1041			}
1042		}
1043		cons = queue->rx.rsp_cons;
1044	}
1045
1046	for (;;) {
1047		/*
1048		 * This definitely indicates a bug, either in this driver or in
1049		 * the backend driver. In future this should flag the bad
1050		 * situation to the system controller to reboot the backend.
1051		 */
1052		if (ref == INVALID_GRANT_REF) {
1053			if (net_ratelimit())
1054				dev_warn(dev, "Bad rx response id %d.\n",
1055					 rx->id);
1056			err = -EINVAL;
1057			goto next;
1058		}
1059
1060		if (unlikely(rx->status < 0 ||
1061			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
1062			if (net_ratelimit())
1063				dev_warn(dev, "rx->offset: %u, size: %d\n",
1064					 rx->offset, rx->status);
1065			xennet_move_rx_slot(queue, skb, ref);
1066			err = -EINVAL;
1067			goto next;
1068		}
1069
1070		if (!gnttab_end_foreign_access_ref(ref)) {
1071			dev_alert(dev,
1072				  "Grant still in use by backend domain\n");
1073			queue->info->broken = true;
1074			dev_alert(dev, "Disabled for further use\n");
1075			return -EINVAL;
1076		}
1077
1078		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1079
1080		rcu_read_lock();
1081		xdp_prog = rcu_dereference(queue->xdp_prog);
1082		if (xdp_prog) {
1083			if (!(rx->flags & XEN_NETRXF_more_data)) {
1084				/* currently only a single page contains data */
1085				verdict = xennet_run_xdp(queue,
1086							 skb_frag_page(&skb_shinfo(skb)->frags[0]),
1087							 rx, xdp_prog, &xdp, need_xdp_flush);
1088				if (verdict != XDP_PASS)
1089					err = -EINVAL;
1090			} else {
1091				/* drop the frame */
1092				err = -EINVAL;
1093			}
1094		}
1095		rcu_read_unlock();
1096
1097		__skb_queue_tail(list, skb);
1098
1099next:
1100		if (!(rx->flags & XEN_NETRXF_more_data))
1101			break;
1102
1103		if (cons + slots == rp) {
1104			if (net_ratelimit())
1105				dev_warn(dev, "Need more slots\n");
1106			err = -ENOENT;
1107			break;
1108		}
1109
1110		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1111		rx = &rx_local;
1112		skb = xennet_get_rx_skb(queue, cons + slots);
1113		ref = xennet_get_rx_ref(queue, cons + slots);
1114		slots++;
1115	}
1116
1117	if (unlikely(slots > max)) {
1118		if (net_ratelimit())
1119			dev_warn(dev, "Too many slots\n");
1120		err = -E2BIG;
1121	}
1122
1123	if (unlikely(err))
1124		xennet_set_rx_rsp_cons(queue, cons + slots);
1125
1126	return err;
1127}
1128
1129static int xennet_set_skb_gso(struct sk_buff *skb,
1130			      struct xen_netif_extra_info *gso)
1131{
1132	if (!gso->u.gso.size) {
1133		if (net_ratelimit())
1134			pr_warn("GSO size must not be zero\n");
1135		return -EINVAL;
1136	}
1137
1138	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1139	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1140		if (net_ratelimit())
1141			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1142		return -EINVAL;
1143	}
1144
1145	skb_shinfo(skb)->gso_size = gso->u.gso.size;
1146	skb_shinfo(skb)->gso_type =
1147		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1148		SKB_GSO_TCPV4 :
1149		SKB_GSO_TCPV6;
1150
1151	/* Header must be checked, and gso_segs computed. */
1152	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1153	skb_shinfo(skb)->gso_segs = 0;
1154
1155	return 0;
1156}
1157
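/* Attach the pages of the skbs queued on @list as fragments of @skb,
 * consuming one rx response per queued skb and pulling data into the
 * linear area when all fragment slots are in use.
 */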
1158static int xennet_fill_frags(struct netfront_queue *queue,
1159			     struct sk_buff *skb,
1160			     struct sk_buff_head *list)
1161{
1162	RING_IDX cons = queue->rx.rsp_cons;
1163	struct sk_buff *nskb;
1164
1165	while ((nskb = __skb_dequeue(list))) {
1166		struct xen_netif_rx_response rx;
1167		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1168
1169		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1170
1171		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1172			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1173
1174			BUG_ON(pull_to < skb_headlen(skb));
1175			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1176		}
1177		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1178			xennet_set_rx_rsp_cons(queue,
1179					       ++cons + skb_queue_len(list));
1180			kfree_skb(nskb);
1181			return -ENOENT;
1182		}
1183
1184		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1185				skb_frag_page(nfrag),
1186				rx.offset, rx.status, PAGE_SIZE);
1187
1188		skb_shinfo(nskb)->nr_frags = 0;
1189		kfree_skb(nskb);
1190	}
1191
1192	xennet_set_rx_rsp_cons(queue, cons);
1193
1194	return 0;
1195}
1196
1197static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1198{
1199	bool recalculate_partial_csum = false;
1200
1201	/*
1202	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1203	 * peers can fail to set NETRXF_csum_blank when sending a GSO
1204	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1205	 * recalculate the partial checksum.
1206	 */
1207	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1208		struct netfront_info *np = netdev_priv(dev);
1209		atomic_inc(&np->rx_gso_checksum_fixup);
1210		skb->ip_summed = CHECKSUM_PARTIAL;
1211		recalculate_partial_csum = true;
1212	}
1213
1214	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1215	if (skb->ip_summed != CHECKSUM_PARTIAL)
1216		return 0;
1217
1218	return skb_checksum_setup(skb, recalculate_partial_csum);
1219}
1220
1221static int handle_incoming_queue(struct netfront_queue *queue,
1222				 struct sk_buff_head *rxq)
1223{
1224	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1225	int packets_dropped = 0;
1226	struct sk_buff *skb;
1227
1228	while ((skb = __skb_dequeue(rxq)) != NULL) {
1229		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1230
1231		if (pull_to > skb_headlen(skb))
1232			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1233
1234		/* Ethernet work: Delayed to here as it peeks the header. */
1235		skb->protocol = eth_type_trans(skb, queue->info->netdev);
1236		skb_reset_network_header(skb);
1237
1238		if (checksum_setup(queue->info->netdev, skb)) {
1239			kfree_skb(skb);
1240			packets_dropped++;
1241			queue->info->netdev->stats.rx_errors++;
1242			continue;
1243		}
1244
1245		u64_stats_update_begin(&rx_stats->syncp);
1246		rx_stats->packets++;
1247		rx_stats->bytes += skb->len;
1248		u64_stats_update_end(&rx_stats->syncp);
1249
1250		/* Pass it up. */
1251		napi_gro_receive(&queue->napi, skb);
1252	}
1253
1254	return packets_dropped;
1255}
1256
1257static int xennet_poll(struct napi_struct *napi, int budget)
1258{
1259	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1260	struct net_device *dev = queue->info->netdev;
1261	struct sk_buff *skb;
1262	struct netfront_rx_info rinfo;
1263	struct xen_netif_rx_response *rx = &rinfo.rx;
1264	struct xen_netif_extra_info *extras = rinfo.extras;
1265	RING_IDX i, rp;
1266	int work_done;
1267	struct sk_buff_head rxq;
1268	struct sk_buff_head errq;
1269	struct sk_buff_head tmpq;
1270	int err;
1271	bool need_xdp_flush = false;
1272
1273	spin_lock(&queue->rx_lock);
1274
1275	skb_queue_head_init(&rxq);
1276	skb_queue_head_init(&errq);
1277	skb_queue_head_init(&tmpq);
1278
1279	rp = queue->rx.sring->rsp_prod;
1280	if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1281		dev_alert(&dev->dev, "Illegal number of responses %u\n",
1282			  rp - queue->rx.rsp_cons);
1283		queue->info->broken = true;
1284		spin_unlock(&queue->rx_lock);
1285		return 0;
1286	}
1287	rmb(); /* Ensure we see queued responses up to 'rp'. */
1288
1289	i = queue->rx.rsp_cons;
1290	work_done = 0;
1291	while ((i != rp) && (work_done < budget)) {
1292		RING_COPY_RESPONSE(&queue->rx, i, rx);
1293		memset(extras, 0, sizeof(rinfo.extras));
1294
1295		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1296					   &need_xdp_flush);
1297
1298		if (unlikely(err)) {
1299			if (queue->info->broken) {
1300				spin_unlock(&queue->rx_lock);
1301				return 0;
1302			}
1303err:
1304			while ((skb = __skb_dequeue(&tmpq)))
1305				__skb_queue_tail(&errq, skb);
1306			dev->stats.rx_errors++;
1307			i = queue->rx.rsp_cons;
1308			continue;
1309		}
1310
1311		skb = __skb_dequeue(&tmpq);
1312
1313		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1314			struct xen_netif_extra_info *gso;
1315			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1316
1317			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1318				__skb_queue_head(&tmpq, skb);
1319				xennet_set_rx_rsp_cons(queue,
1320						       queue->rx.rsp_cons +
1321						       skb_queue_len(&tmpq));
1322				goto err;
1323			}
1324		}
1325
1326		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1327		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1328			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1329
1330		skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1331		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1332		skb->data_len = rx->status;
1333		skb->len += rx->status;
1334
1335		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1336			goto err;
1337
1338		if (rx->flags & XEN_NETRXF_csum_blank)
1339			skb->ip_summed = CHECKSUM_PARTIAL;
1340		else if (rx->flags & XEN_NETRXF_data_validated)
1341			skb->ip_summed = CHECKSUM_UNNECESSARY;
1342
1343		__skb_queue_tail(&rxq, skb);
1344
1345		i = queue->rx.rsp_cons + 1;
1346		xennet_set_rx_rsp_cons(queue, i);
1347		work_done++;
1348	}
1349	if (need_xdp_flush)
1350		xdp_do_flush();
1351
1352	__skb_queue_purge(&errq);
1353
1354	work_done -= handle_incoming_queue(queue, &rxq);
1355
1356	xennet_alloc_rx_buffers(queue);
1357
1358	if (work_done < budget) {
1359		int more_to_do = 0;
1360
1361		napi_complete_done(napi, work_done);
1362
1363		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1364		if (more_to_do)
1365			napi_schedule(napi);
1366	}
1367
1368	spin_unlock(&queue->rx_lock);
1369
1370	return work_done;
1371}
1372
1373static int xennet_change_mtu(struct net_device *dev, int mtu)
1374{
1375	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1376
1377	if (mtu > max)
1378		return -EINVAL;
1379	dev->mtu = mtu;
1380	return 0;
1381}
1382
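/*
 * Sum the per-CPU tx/rx counters.  Each pair is read inside a
 * u64_stats seqcount loop so 64-bit values cannot be torn on 32-bit
 * machines; the read is retried if a writer got in mid-fetch.
 */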
1383static void xennet_get_stats64(struct net_device *dev,
1384			       struct rtnl_link_stats64 *tot)
1385{
1386	struct netfront_info *np = netdev_priv(dev);
1387	int cpu;
1388
1389	for_each_possible_cpu(cpu) {
1390		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1391		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1392		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1393		unsigned int start;
1394
1395		do {
1396			start = u64_stats_fetch_begin(&tx_stats->syncp);
1397			tx_packets = tx_stats->packets;
1398			tx_bytes = tx_stats->bytes;
1399		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
1400
1401		do {
1402			start = u64_stats_fetch_begin(&rx_stats->syncp);
1403			rx_packets = rx_stats->packets;
1404			rx_bytes = rx_stats->bytes;
1405		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
1406
1407		tot->rx_packets += rx_packets;
1408		tot->tx_packets += tx_packets;
1409		tot->rx_bytes   += rx_bytes;
1410		tot->tx_bytes   += tx_bytes;
1411	}
1412
1413	tot->rx_errors  = dev->stats.rx_errors;
1414	tot->tx_dropped = dev->stats.tx_dropped;
1415}
1416
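/*
 * Reclaim every tx ring slot that still holds an skb: revoke the grant,
 * return the slot to the free list and drop the skb.  The extra
 * get_page() keeps the page alive in case ending foreign access has to
 * be deferred.
 */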
1417static void xennet_release_tx_bufs(struct netfront_queue *queue)
1418{
1419	struct sk_buff *skb;
1420	int i;
1421
1422	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1423		/* Skip over entries which are actually freelist references */
1424		if (!queue->tx_skbs[i])
1425			continue;
1426
1427		skb = queue->tx_skbs[i];
1428		queue->tx_skbs[i] = NULL;
1429		get_page(queue->grant_tx_page[i]);
1430		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1431					  queue->grant_tx_page[i]);
1432		queue->grant_tx_page[i] = NULL;
1433		queue->grant_tx_ref[i] = INVALID_GRANT_REF;
1434		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1435		dev_kfree_skb_irq(skb);
1436	}
1437}
1438
1439static void xennet_release_rx_bufs(struct netfront_queue *queue)
1440{
1441	int id, ref;
1442
1443	spin_lock_bh(&queue->rx_lock);
1444
1445	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1446		struct sk_buff *skb;
1447		struct page *page;
1448
1449		skb = queue->rx_skbs[id];
1450		if (!skb)
1451			continue;
1452
1453		ref = queue->grant_rx_ref[id];
1454		if (ref == INVALID_GRANT_REF)
1455			continue;
1456
1457		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1458
1459		/* gnttab_end_foreign_access() needs a page ref until
1460		 * foreign access is ended (which may be deferred).
1461		 */
1462		get_page(page);
1463		gnttab_end_foreign_access(ref, page);
1464		queue->grant_rx_ref[id] = INVALID_GRANT_REF;
1465
1466		kfree_skb(skb);
1467	}
1468
1469	spin_unlock_bh(&queue->rx_lock);
1470}
1471
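/*
 * Mask out any offloads the backend has not advertised; each feature is
 * gated on the corresponding "feature-*" node in the backend's xenstore
 * directory.
 */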
1472static netdev_features_t xennet_fix_features(struct net_device *dev,
1473	netdev_features_t features)
1474{
1475	struct netfront_info *np = netdev_priv(dev);
1476
1477	if (features & NETIF_F_SG &&
1478	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1479		features &= ~NETIF_F_SG;
1480
1481	if (features & NETIF_F_IPV6_CSUM &&
1482	    !xenbus_read_unsigned(np->xbdev->otherend,
1483				  "feature-ipv6-csum-offload", 0))
1484		features &= ~NETIF_F_IPV6_CSUM;
1485
1486	if (features & NETIF_F_TSO &&
1487	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1488		features &= ~NETIF_F_TSO;
1489
1490	if (features & NETIF_F_TSO6 &&
1491	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1492		features &= ~NETIF_F_TSO6;
1493
1494	return features;
1495}
1496
1497static int xennet_set_features(struct net_device *dev,
1498	netdev_features_t features)
1499{
1500	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1501		netdev_info(dev, "Reducing MTU because no SG offload\n");
1502		dev->mtu = ETH_DATA_LEN;
1503	}
1504
1505	return 0;
1506}
1507
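/*
 * Bottom half shared by the tx interrupt handlers.  Returns false if
 * the device is marked broken, in which case the caller must not issue
 * the late EOI; *eoi is cleared only when completed transmissions were
 * actually garbage collected.
 */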
1508static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1509{
1510	unsigned long flags;
1511
1512	if (unlikely(queue->info->broken))
1513		return false;
1514
1515	spin_lock_irqsave(&queue->tx_lock, flags);
1516	if (xennet_tx_buf_gc(queue))
1517		*eoi = 0;
1518	spin_unlock_irqrestore(&queue->tx_lock, flags);
1519
1520	return true;
1521}
1522
1523static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1524{
1525	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1526
1527	if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1528		xen_irq_lateeoi(irq, eoiflag);
1529
1530	return IRQ_HANDLED;
1531}
1532
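/*
 * Bottom half shared by the rx interrupt handlers.  A higher count of
 * unconsumed responses means genuine new work: clear the spurious-EOI
 * hint and kick NAPI.  A count that went down means the backend moved
 * the producer index backwards, so the device is marked broken and
 * disabled.
 */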
1533static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1534{
1535	unsigned int work_queued;
1536	unsigned long flags;
1537
1538	if (unlikely(queue->info->broken))
1539		return false;
1540
1541	spin_lock_irqsave(&queue->rx_cons_lock, flags);
1542	work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
1543	if (work_queued > queue->rx_rsp_unconsumed) {
1544		queue->rx_rsp_unconsumed = work_queued;
1545		*eoi = 0;
1546	} else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1547		const struct device *dev = &queue->info->netdev->dev;
1548
1549		spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1550		dev_alert(dev, "RX producer index going backwards\n");
1551		dev_alert(dev, "Disabled for further use\n");
1552		queue->info->broken = true;
1553		return false;
1554	}
1555	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1556
1557	if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1558		napi_schedule(&queue->napi);
1559
1560	return true;
1561}
1562
1563static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1564{
1565	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1566
1567	if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1568		xen_irq_lateeoi(irq, eoiflag);
1569
1570	return IRQ_HANDLED;
1571}
1572
1573static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1574{
1575	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1576
1577	if (xennet_handle_tx(dev_id, &eoiflag) &&
1578	    xennet_handle_rx(dev_id, &eoiflag))
1579		xen_irq_lateeoi(irq, eoiflag);
1580
1581	return IRQ_HANDLED;
1582}
1583
1584#ifdef CONFIG_NET_POLL_CONTROLLER
1585static void xennet_poll_controller(struct net_device *dev)
1586{
1587	/* Poll each queue */
1588	struct netfront_info *info = netdev_priv(dev);
1589	unsigned int num_queues = dev->real_num_tx_queues;
1590	unsigned int i;
1591
1592	if (info->broken)
1593		return;
1594
1595	for (i = 0; i < num_queues; ++i)
1596		xennet_interrupt(0, &info->queues[i]);
1597}
1598#endif
1599
1600#define NETBACK_XDP_HEADROOM_DISABLE	0
1601#define NETBACK_XDP_HEADROOM_ENABLE	1
1602
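/*
 * Tell the backend, via the frontend's own xenbus node, whether
 * received packets should be prefixed with XDP_PACKET_HEADROOM bytes
 * of headroom.
 */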
1603static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1604{
1605	int err;
1606	unsigned short headroom;
1607
1608	headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1609	err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1610			    "xdp-headroom", "%hu",
1611			    headroom);
1612	if (err)
1613		pr_warn("Error writing xdp-headroom\n");
1614
1615	return err;
1616}
1617
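/*
 * Install or remove an XDP program.  The request is a no-op when the
 * backend lacks "feature-xdp-headroom"; otherwise the device round
 * trips through the Reconfiguring/Reconfigured xenbus states so both
 * ends agree on the buffer layout before the per-queue program
 * pointers are swapped.
 */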
1618static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1619			  struct netlink_ext_ack *extack)
1620{
1621	unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1622	struct netfront_info *np = netdev_priv(dev);
1623	struct bpf_prog *old_prog;
1624	unsigned int i, err;
1625
1626	if (dev->mtu > max_mtu) {
1627		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
1628		return -EINVAL;
1629	}
1630
1631	if (!np->netback_has_xdp_headroom)
1632		return 0;
1633
1634	xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1635
1636	err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1637				  NETBACK_XDP_HEADROOM_DISABLE);
1638	if (err)
1639		return err;
1640
1641	/* avoid the race with XDP headroom adjustment */
1642	wait_event(module_wq,
1643		   xenbus_read_driver_state(np->xbdev->otherend) ==
1644		   XenbusStateReconfigured);
1645	np->netfront_xdp_enabled = true;
1646
1647	old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1648
1649	if (prog)
1650		bpf_prog_add(prog, dev->real_num_tx_queues);
1651
1652	for (i = 0; i < dev->real_num_tx_queues; ++i)
1653		rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1654
1655	if (old_prog)
1656		for (i = 0; i < dev->real_num_tx_queues; ++i)
1657			bpf_prog_put(old_prog);
1658
1659	xenbus_switch_state(np->xbdev, XenbusStateConnected);
1660
1661	return 0;
1662}
1663
1664static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1665{
1666	struct netfront_info *np = netdev_priv(dev);
1667
1668	if (np->broken)
1669		return -ENODEV;
1670
1671	switch (xdp->command) {
1672	case XDP_SETUP_PROG:
1673		return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1674	default:
1675		return -EINVAL;
1676	}
1677}
1678
1679static const struct net_device_ops xennet_netdev_ops = {
1680	.ndo_uninit          = xennet_uninit,
1681	.ndo_open            = xennet_open,
1682	.ndo_stop            = xennet_close,
1683	.ndo_start_xmit      = xennet_start_xmit,
1684	.ndo_change_mtu	     = xennet_change_mtu,
1685	.ndo_get_stats64     = xennet_get_stats64,
1686	.ndo_set_mac_address = eth_mac_addr,
1687	.ndo_validate_addr   = eth_validate_addr,
1688	.ndo_fix_features    = xennet_fix_features,
1689	.ndo_set_features    = xennet_set_features,
1690	.ndo_select_queue    = xennet_select_queue,
1691	.ndo_bpf            = xennet_xdp,
1692	.ndo_xdp_xmit	    = xennet_xdp_xmit,
1693#ifdef CONFIG_NET_POLL_CONTROLLER
1694	.ndo_poll_controller = xennet_poll_controller,
1695#endif
1696};
1697
1698static void xennet_free_netdev(struct net_device *netdev)
1699{
1700	struct netfront_info *np = netdev_priv(netdev);
1701
1702	free_percpu(np->rx_stats);
1703	free_percpu(np->tx_stats);
1704	free_netdev(netdev);
1705}
1706
1707static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1708{
1709	int err;
1710	struct net_device *netdev;
1711	struct netfront_info *np;
1712
1713	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1714	if (!netdev)
1715		return ERR_PTR(-ENOMEM);
1716
1717	np                   = netdev_priv(netdev);
1718	np->xbdev            = dev;
1719
1720	np->queues = NULL;
1721
1722	err = -ENOMEM;
1723	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1724	if (np->rx_stats == NULL)
1725		goto exit;
1726	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1727	if (np->tx_stats == NULL)
1728		goto exit;
1729
1730	netdev->netdev_ops	= &xennet_netdev_ops;
1731
1732	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1733				  NETIF_F_GSO_ROBUST;
1734	netdev->hw_features	= NETIF_F_SG |
1735				  NETIF_F_IPV6_CSUM |
1736				  NETIF_F_TSO | NETIF_F_TSO6;
1737
1738	/*
1739	 * Assume that all hw features are available for now. This set
1740	 * will be adjusted by the call to netdev_update_features() in
1741	 * xennet_connect() which is the earliest point where we can
1742	 * negotiate with the backend regarding supported features.
1743	 */
1744	netdev->features |= netdev->hw_features;
1745	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
1746			       NETDEV_XDP_ACT_NDO_XMIT;
1747
1748	netdev->ethtool_ops = &xennet_ethtool_ops;
1749	netdev->min_mtu = ETH_MIN_MTU;
1750	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1751	SET_NETDEV_DEV(netdev, &dev->dev);
1752
1753	np->netdev = netdev;
1754	np->netfront_xdp_enabled = false;
1755
1756	netif_carrier_off(netdev);
1757
1758	do {
1759		xenbus_switch_state(dev, XenbusStateInitialising);
1760		err = wait_event_timeout(module_wq,
1761				 xenbus_read_driver_state(dev->otherend) !=
1762				 XenbusStateClosed &&
1763				 xenbus_read_driver_state(dev->otherend) !=
1764				 XenbusStateUnknown, XENNET_TIMEOUT);
1765	} while (!err);
1766
1767	return netdev;
1768
1769 exit:
1770	xennet_free_netdev(netdev);
1771	return ERR_PTR(err);
1772}
1773
1774/*
1775 * Entry point to this code when a new device is created.  Allocate the basic
1776 * structures and the ring buffers for communication with the backend, and
1777 * inform the backend of the appropriate details for those.
1778 */
1779static int netfront_probe(struct xenbus_device *dev,
1780			  const struct xenbus_device_id *id)
1781{
1782	int err;
1783	struct net_device *netdev;
1784	struct netfront_info *info;
1785
1786	netdev = xennet_create_dev(dev);
1787	if (IS_ERR(netdev)) {
1788		err = PTR_ERR(netdev);
1789		xenbus_dev_fatal(dev, err, "creating netdev");
1790		return err;
1791	}
1792
1793	info = netdev_priv(netdev);
1794	dev_set_drvdata(&dev->dev, info);
1795#ifdef CONFIG_SYSFS
1796	info->netdev->sysfs_groups[0] = &xennet_dev_group;
1797#endif
1798
1799	return 0;
1800}
1801
1802static void xennet_end_access(int ref, void *page)
1803{
1804	/* This frees the page as a side-effect */
1805	if (ref != INVALID_GRANT_REF)
1806		gnttab_end_foreign_access(ref, virt_to_page(page));
1807}
1808
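/*
 * Sever the connection to the backend: drop the carrier, unbind the
 * event-channel irqs, reclaim outstanding tx/rx buffers and grant
 * references, then release the shared rings and per-queue page pools.
 */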
1809static void xennet_disconnect_backend(struct netfront_info *info)
1810{
1811	unsigned int i = 0;
1812	unsigned int num_queues = info->netdev->real_num_tx_queues;
1813
1814	netif_carrier_off(info->netdev);
1815
1816	for (i = 0; i < num_queues && info->queues; ++i) {
1817		struct netfront_queue *queue = &info->queues[i];
1818
1819		del_timer_sync(&queue->rx_refill_timer);
1820
1821		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1822			unbind_from_irqhandler(queue->tx_irq, queue);
1823		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1824			unbind_from_irqhandler(queue->tx_irq, queue);
1825			unbind_from_irqhandler(queue->rx_irq, queue);
1826		}
1827		queue->tx_evtchn = queue->rx_evtchn = 0;
1828		queue->tx_irq = queue->rx_irq = 0;
1829
1830		if (netif_running(info->netdev))
1831			napi_synchronize(&queue->napi);
1832
1833		xennet_release_tx_bufs(queue);
1834		xennet_release_rx_bufs(queue);
1835		gnttab_free_grant_references(queue->gref_tx_head);
1836		gnttab_free_grant_references(queue->gref_rx_head);
1837
1838		/* End access and free the pages */
1839		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1840		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1841
1842		queue->tx_ring_ref = INVALID_GRANT_REF;
1843		queue->rx_ring_ref = INVALID_GRANT_REF;
1844		queue->tx.sring = NULL;
1845		queue->rx.sring = NULL;
1846
1847		page_pool_destroy(queue->page_pool);
1848	}
1849}
1850
1851/*
1852 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1853 * driver restart.  We tear down our netif structure and recreate it, but
1854 * leave the device-layer structures intact so that this is transparent to the
1855 * rest of the kernel.
1856 */
1857static int netfront_resume(struct xenbus_device *dev)
1858{
1859	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1860
1861	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1862
1863	netif_tx_lock_bh(info->netdev);
1864	netif_device_detach(info->netdev);
1865	netif_tx_unlock_bh(info->netdev);
1866
1867	xennet_disconnect_backend(info);
1868
1869	rtnl_lock();
1870	if (info->queues)
1871		xennet_destroy_queues(info);
1872	rtnl_unlock();
1873
1874	return 0;
1875}
1876
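/*
 * Parse the backend-supplied "mac" node, which holds the address as
 * six colon-separated hex octets ("xx:xx:xx:xx:xx:xx").
 */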
1877static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1878{
1879	char *s, *e, *macstr;
1880	int i;
1881
1882	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1883	if (IS_ERR(macstr))
1884		return PTR_ERR(macstr);
1885
1886	for (i = 0; i < ETH_ALEN; i++) {
1887		mac[i] = simple_strtoul(s, &e, 16);
1888		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1889			kfree(macstr);
1890			return -ENOENT;
1891		}
1892		s = e+1;
1893	}
1894
1895	kfree(macstr);
1896	return 0;
1897}
1898
1899static int setup_netfront_single(struct netfront_queue *queue)
1900{
1901	int err;
1902
1903	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1904	if (err < 0)
1905		goto fail;
1906
1907	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1908						xennet_interrupt, 0,
1909						queue->info->netdev->name,
1910						queue);
1911	if (err < 0)
1912		goto bind_fail;
1913	queue->rx_evtchn = queue->tx_evtchn;
1914	queue->rx_irq = queue->tx_irq = err;
1915
1916	return 0;
1917
1918bind_fail:
1919	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1920	queue->tx_evtchn = 0;
1921fail:
1922	return err;
1923}
1924
1925static int setup_netfront_split(struct netfront_queue *queue)
1926{
1927	int err;
1928
1929	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1930	if (err < 0)
1931		goto fail;
1932	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1933	if (err < 0)
1934		goto alloc_rx_evtchn_fail;
1935
1936	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1937		 "%s-tx", queue->name);
1938	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1939						xennet_tx_interrupt, 0,
1940						queue->tx_irq_name, queue);
1941	if (err < 0)
1942		goto bind_tx_fail;
1943	queue->tx_irq = err;
1944
1945	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1946		 "%s-rx", queue->name);
1947	err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1948						xennet_rx_interrupt, 0,
1949						queue->rx_irq_name, queue);
1950	if (err < 0)
1951		goto bind_rx_fail;
1952	queue->rx_irq = err;
1953
1954	return 0;
1955
1956bind_rx_fail:
1957	unbind_from_irqhandler(queue->tx_irq, queue);
1958	queue->tx_irq = 0;
1959bind_tx_fail:
1960	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1961	queue->rx_evtchn = 0;
1962alloc_rx_evtchn_fail:
1963	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1964	queue->tx_evtchn = 0;
1965fail:
1966	return err;
1967}
1968
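/*
 * Allocate the grant-shared tx and rx rings and bind the event
 * channel(s): separate tx/rx channels when the backend advertises
 * feature-split-event-channels, otherwise a single shared channel.
 */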
1969static int setup_netfront(struct xenbus_device *dev,
1970			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1971{
1972	struct xen_netif_tx_sring *txs;
1973	struct xen_netif_rx_sring *rxs;
1974	int err;
1975
1976	queue->tx_ring_ref = INVALID_GRANT_REF;
1977	queue->rx_ring_ref = INVALID_GRANT_REF;
1978	queue->rx.sring = NULL;
1979	queue->tx.sring = NULL;
1980
1981	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
1982				1, &queue->tx_ring_ref);
1983	if (err)
1984		goto fail;
1985
1986	XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1987
1988	err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
1989				1, &queue->rx_ring_ref);
1990	if (err)
1991		goto fail;
1992
1993	XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1994
1995	if (feature_split_evtchn)
1996		err = setup_netfront_split(queue);
1997	/* Set up a single event channel if:
1998	 *  a) feature-split-event-channels == 0, or
1999	 *  b) feature-split-event-channels == 1 but split setup failed.
2000	 */
2001	if (!feature_split_evtchn || err)
2002		err = setup_netfront_single(queue);
2003
2004	if (err)
2005		goto fail;
2006
2007	return 0;
2008
2009 fail:
2010	xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
2011	xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
2012
2013	return err;
2014}
2015
2016/* Queue-specific initialisation
2017 * This used to be done in xennet_create_dev() but must now
2018 * be run per-queue.
2019 */
2020static int xennet_init_queue(struct netfront_queue *queue)
2021{
2022	unsigned short i;
2023	int err = 0;
2024	char *devid;
2025
2026	spin_lock_init(&queue->tx_lock);
2027	spin_lock_init(&queue->rx_lock);
2028	spin_lock_init(&queue->rx_cons_lock);
2029
2030	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
2031
2032	devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
2033	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2034		 devid, queue->id);
2035
2036	/* Initialise tx_skb_freelist as a free chain containing every entry. */
2037	queue->tx_skb_freelist = 0;
2038	queue->tx_pend_queue = TX_LINK_NONE;
2039	for (i = 0; i < NET_TX_RING_SIZE; i++) {
2040		queue->tx_link[i] = i + 1;
2041		queue->grant_tx_ref[i] = INVALID_GRANT_REF;
2042		queue->grant_tx_page[i] = NULL;
2043	}
2044	queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2045
2046	/* Clear out rx_skbs */
2047	for (i = 0; i < NET_RX_RING_SIZE; i++) {
2048		queue->rx_skbs[i] = NULL;
2049		queue->grant_rx_ref[i] = INVALID_GRANT_REF;
2050	}
2051
2052	/* A grant for every tx ring slot */
2053	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2054					  &queue->gref_tx_head) < 0) {
2055		pr_alert("can't alloc tx grant refs\n");
2056		err = -ENOMEM;
2057		goto exit;
2058	}
2059
2060	/* A grant for every rx ring slot */
2061	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
2062					  &queue->gref_rx_head) < 0) {
2063		pr_alert("can't alloc rx grant refs\n");
2064		err = -ENOMEM;
2065		goto exit_free_tx;
2066	}
2067
2068	return 0;
2069
2070 exit_free_tx:
2071	gnttab_free_grant_references(queue->gref_tx_head);
2072 exit:
2073	return err;
2074}
2075
2076static int write_queue_xenstore_keys(struct netfront_queue *queue,
2077			   struct xenbus_transaction *xbt, int write_hierarchical)
2078{
2079	/* Write the queue-specific keys into XenStore in the traditional
2080	 * way for a single queue, or in per-queue subkeys for multiple
2081	 * queues.
2082	 */
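	/*
	 * Illustrative layout: flat single-queue form on the left,
	 * hierarchical per-queue form on the right (either form may use
	 * a shared "event-channel" or split "-tx"/"-rx" channels):
	 *
	 *   <nodename>/tx-ring-ref     <nodename>/queue-0/tx-ring-ref
	 *   <nodename>/rx-ring-ref     <nodename>/queue-0/rx-ring-ref
	 *   <nodename>/event-channel   <nodename>/queue-0/event-channel-tx
	 *                              <nodename>/queue-0/event-channel-rx
	 */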
2083	struct xenbus_device *dev = queue->info->xbdev;
2084	int err;
2085	const char *message;
2086	char *path;
2087	size_t pathsize;
2088
2089	/* Choose the correct place to write the keys */
2090	if (write_hierarchical) {
2091		pathsize = strlen(dev->nodename) + 10;
2092		path = kzalloc(pathsize, GFP_KERNEL);
2093		if (!path) {
2094			err = -ENOMEM;
2095			message = "out of memory while writing ring references";
2096			goto error;
2097		}
2098		snprintf(path, pathsize, "%s/queue-%u",
2099				dev->nodename, queue->id);
2100	} else {
2101		path = (char *)dev->nodename;
2102	}
2103
2104	/* Write ring references */
2105	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
2106			queue->tx_ring_ref);
2107	if (err) {
2108		message = "writing tx-ring-ref";
2109		goto error;
2110	}
2111
2112	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
2113			queue->rx_ring_ref);
2114	if (err) {
2115		message = "writing rx-ring-ref";
2116		goto error;
2117	}
2118
2119	/* Write event channels; taking into account both shared
2120	 * and split event channel scenarios.
2121	 */
2122	if (queue->tx_evtchn == queue->rx_evtchn) {
2123		/* Shared event channel */
2124		err = xenbus_printf(*xbt, path,
2125				"event-channel", "%u", queue->tx_evtchn);
2126		if (err) {
2127			message = "writing event-channel";
2128			goto error;
2129		}
2130	} else {
2131		/* Split event channels */
2132		err = xenbus_printf(*xbt, path,
2133				"event-channel-tx", "%u", queue->tx_evtchn);
2134		if (err) {
2135			message = "writing event-channel-tx";
2136			goto error;
2137		}
2138
2139		err = xenbus_printf(*xbt, path,
2140				"event-channel-rx", "%u", queue->rx_evtchn);
2141		if (err) {
2142			message = "writing event-channel-rx";
2143			goto error;
2144		}
2145	}
2146
2147	if (write_hierarchical)
2148		kfree(path);
2149	return 0;
2150
2151error:
2152	if (write_hierarchical)
2153		kfree(path);
2154	xenbus_dev_fatal(dev, err, "%s", message);
2155	return err;
2156}
2157
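/*
 * Per-queue page pool for rx buffers: order-0 pages, pool sized to the
 * rx ring, with XDP_PACKET_HEADROOM reserved in each page so an XDP
 * program can grow the packet.  The pool is registered as the memory
 * model of the queue's xdp_rxq so redirected buffers are recycled.
 */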
2160static int xennet_create_page_pool(struct netfront_queue *queue)
2161{
2162	int err;
2163	struct page_pool_params pp_params = {
2164		.order = 0,
2165		.flags = 0,
2166		.pool_size = NET_RX_RING_SIZE,
2167		.nid = NUMA_NO_NODE,
2168		.dev = &queue->info->netdev->dev,
2169		.offset = XDP_PACKET_HEADROOM,
2170		.max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2171	};
2172
2173	queue->page_pool = page_pool_create(&pp_params);
2174	if (IS_ERR(queue->page_pool)) {
2175		err = PTR_ERR(queue->page_pool);
2176		queue->page_pool = NULL;
2177		return err;
2178	}
2179
2180	err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2181			       queue->id, 0);
2182	if (err) {
2183		netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2184		goto err_free_pp;
2185	}
2186
2187	err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2188					 MEM_TYPE_PAGE_POOL, queue->page_pool);
2189	if (err) {
2190		netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2191		goto err_unregister_rxq;
2192	}
2193	return 0;
2194
2195err_unregister_rxq:
2196	xdp_rxq_info_unreg(&queue->xdp_rxq);
2197err_free_pp:
2198	page_pool_destroy(queue->page_pool);
2199	queue->page_pool = NULL;
2200	return err;
2201}
2202
2203static int xennet_create_queues(struct netfront_info *info,
2204				unsigned int *num_queues)
2205{
2206	unsigned int i;
2207	int ret;
2208
2209	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2210			       GFP_KERNEL);
2211	if (!info->queues)
2212		return -ENOMEM;
2213
2214	for (i = 0; i < *num_queues; i++) {
2215		struct netfront_queue *queue = &info->queues[i];
2216
2217		queue->id = i;
2218		queue->info = info;
2219
2220		ret = xennet_init_queue(queue);
2221		if (ret < 0) {
2222			dev_warn(&info->xbdev->dev,
2223				 "only created %d queues\n", i);
2224			*num_queues = i;
2225			break;
2226		}
2227
2228		/* use page pool recycling instead of buddy allocator */
2229		ret = xennet_create_page_pool(queue);
2230		if (ret < 0) {
2231			dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2232			*num_queues = i;
2233			return ret;
2234		}
2235
2236		netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
2237		if (netif_running(info->netdev))
2238			napi_enable(&queue->napi);
2239	}
2240
2241	netif_set_real_num_tx_queues(info->netdev, *num_queues);
2242
2243	if (*num_queues == 0) {
2244		dev_err(&info->xbdev->dev, "no queues\n");
2245		return -EINVAL;
2246	}
2247	return 0;
2248}
2249
2250/* Common code used when first setting up, and when resuming. */
2251static int talk_to_netback(struct xenbus_device *dev,
2252			   struct netfront_info *info)
2253{
2254	const char *message;
2255	struct xenbus_transaction xbt;
2256	int err;
2257	unsigned int feature_split_evtchn;
2258	unsigned int i = 0;
2259	unsigned int max_queues = 0;
2260	struct netfront_queue *queue = NULL;
2261	unsigned int num_queues = 1;
2262	u8 addr[ETH_ALEN];
2263
2264	info->netdev->irq = 0;
2265
2266	/* Check if backend is trusted. */
2267	info->bounce = !xennet_trusted ||
2268		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
2269
2270	/* Check if backend supports multiple queues */
2271	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2272					  "multi-queue-max-queues", 1);
2273	num_queues = min(max_queues, xennet_max_queues);
2274
2275	/* Check feature-split-event-channels */
2276	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2277					"feature-split-event-channels", 0);
2278
2279	/* Read mac addr. */
2280	err = xen_net_read_mac(dev, addr);
2281	if (err) {
2282		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2283		goto out_unlocked;
2284	}
2285	eth_hw_addr_set(info->netdev, addr);
2286
2287	info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2288							      "feature-xdp-headroom", 0);
2289	if (info->netback_has_xdp_headroom) {
2290		/* set the current xen-netfront xdp state */
2291		err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2292					  NETBACK_XDP_HEADROOM_ENABLE :
2293					  NETBACK_XDP_HEADROOM_DISABLE);
2294		if (err)
2295			goto out_unlocked;
2296	}
2297
2298	rtnl_lock();
2299	if (info->queues)
2300		xennet_destroy_queues(info);
2301
2302	/* In the case of a reconnect, reset the "broken" indicator. */
2303	info->broken = false;
2304
2305	err = xennet_create_queues(info, &num_queues);
2306	if (err < 0) {
2307		xenbus_dev_fatal(dev, err, "creating queues");
2308		kfree(info->queues);
2309		info->queues = NULL;
2310		goto out;
2311	}
2312	rtnl_unlock();
2313
2314	/* Create shared ring, alloc event channel -- for each queue */
2315	for (i = 0; i < num_queues; ++i) {
2316		queue = &info->queues[i];
2317		err = setup_netfront(dev, queue, feature_split_evtchn);
2318		if (err)
2319			goto destroy_ring;
2320	}
2321
2322again:
2323	err = xenbus_transaction_start(&xbt);
2324	if (err) {
2325		xenbus_dev_fatal(dev, err, "starting transaction");
2326		goto destroy_ring;
2327	}
2328
2329	if (xenbus_exists(XBT_NIL,
2330			  info->xbdev->otherend, "multi-queue-max-queues")) {
2331		/* Write the number of queues */
2332		err = xenbus_printf(xbt, dev->nodename,
2333				    "multi-queue-num-queues", "%u", num_queues);
2334		if (err) {
2335			message = "writing multi-queue-num-queues";
2336			goto abort_transaction_no_dev_fatal;
2337		}
2338	}
2339
2340	if (num_queues == 1) {
2341		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2342		if (err)
2343			goto abort_transaction_no_dev_fatal;
2344	} else {
2345		/* Write the keys for each queue */
2346		for (i = 0; i < num_queues; ++i) {
2347			queue = &info->queues[i];
2348			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2349			if (err)
2350				goto abort_transaction_no_dev_fatal;
2351		}
2352	}
2353
2354	/* The remaining keys are not queue-specific */
2355	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2356			    1);
2357	if (err) {
2358		message = "writing request-rx-copy";
2359		goto abort_transaction;
2360	}
2361
2362	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2363	if (err) {
2364		message = "writing feature-rx-notify";
2365		goto abort_transaction;
2366	}
2367
2368	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2369	if (err) {
2370		message = "writing feature-sg";
2371		goto abort_transaction;
2372	}
2373
2374	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2375	if (err) {
2376		message = "writing feature-gso-tcpv4";
2377		goto abort_transaction;
2378	}
2379
2380	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2381	if (err) {
2382		message = "writing feature-gso-tcpv6";
2383		goto abort_transaction;
2384	}
2385
2386	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2387			   "1");
2388	if (err) {
2389		message = "writing feature-ipv6-csum-offload";
2390		goto abort_transaction;
2391	}
2392
2393	err = xenbus_transaction_end(xbt, 0);
2394	if (err) {
2395		if (err == -EAGAIN)
2396			goto again;
2397		xenbus_dev_fatal(dev, err, "completing transaction");
2398		goto destroy_ring;
2399	}
2400
2401	return 0;
2402
2403 abort_transaction:
2404	xenbus_dev_fatal(dev, err, "%s", message);
2405abort_transaction_no_dev_fatal:
2406	xenbus_transaction_end(xbt, 1);
2407 destroy_ring:
2408	xennet_disconnect_backend(info);
2409	rtnl_lock();
2410	xennet_destroy_queues(info);
2411 out:
2412	rtnl_unlock();
2413out_unlocked:
2414	device_unregister(&dev->dev);
2415	return err;
2416}
2417
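/*
 * Bring the interface up against a (re)connected backend: renegotiate
 * everything through talk_to_netback(), register the netdev on first
 * connect, refresh the feature set, then notify every queue's event
 * channel since packets may have been queued while the link was down.
 */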
2418static int xennet_connect(struct net_device *dev)
2419{
2420	struct netfront_info *np = netdev_priv(dev);
2421	unsigned int num_queues = 0;
2422	int err;
2423	unsigned int j = 0;
2424	struct netfront_queue *queue = NULL;
2425
2426	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
2427		dev_info(&dev->dev,
2428			 "backend does not support copying receive path\n");
2429		return -ENODEV;
2430	}
2431
2432	err = talk_to_netback(np->xbdev, np);
2433	if (err)
2434		return err;
2435	if (np->netback_has_xdp_headroom)
2436		pr_info("backend supports XDP headroom\n");
2437	if (np->bounce)
2438		dev_info(&np->xbdev->dev,
2439			 "bouncing transmitted data to zeroed pages\n");
2440
2441	/* talk_to_netback() sets the correct number of queues */
2442	num_queues = dev->real_num_tx_queues;
2443
2444	if (dev->reg_state == NETREG_UNINITIALIZED) {
2445		err = register_netdev(dev);
2446		if (err) {
2447			pr_warn("%s: register_netdev err=%d\n", __func__, err);
2448			device_unregister(&np->xbdev->dev);
2449			return err;
2450		}
2451	}
2452
2453	rtnl_lock();
2454	netdev_update_features(dev);
2455	rtnl_unlock();
2456
2457	/*
2458	 * All public and private state should now be sane.  Get
2459	 * ready to start sending and receiving packets and give the driver
2460	 * domain a kick because we've probably just requeued some
2461	 * packets.
2462	 */
2463	netif_tx_lock_bh(np->netdev);
2464	netif_device_attach(np->netdev);
2465	netif_tx_unlock_bh(np->netdev);
2466
2467	netif_carrier_on(np->netdev);
2468	for (j = 0; j < num_queues; ++j) {
2469		queue = &np->queues[j];
2470
2471		notify_remote_via_irq(queue->tx_irq);
2472		if (queue->tx_irq != queue->rx_irq)
2473			notify_remote_via_irq(queue->rx_irq);
2474
2475		spin_lock_bh(&queue->rx_lock);
2476		xennet_alloc_rx_buffers(queue);
2477		spin_unlock_bh(&queue->rx_lock);
2478	}
2479
2480	return 0;
2481}
2482
2483/*
2484 * Callback received when the backend's state changes.
2485 */
2486static void netback_changed(struct xenbus_device *dev,
2487			    enum xenbus_state backend_state)
2488{
2489	struct netfront_info *np = dev_get_drvdata(&dev->dev);
2490	struct net_device *netdev = np->netdev;
2491
2492	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2493
2494	wake_up_all(&module_wq);
2495
2496	switch (backend_state) {
2497	case XenbusStateInitialising:
2498	case XenbusStateInitialised:
2499	case XenbusStateReconfiguring:
2500	case XenbusStateReconfigured:
2501	case XenbusStateUnknown:
2502		break;
2503
2504	case XenbusStateInitWait:
2505		if (dev->state != XenbusStateInitialising)
2506			break;
2507		if (xennet_connect(netdev) != 0)
2508			break;
2509		xenbus_switch_state(dev, XenbusStateConnected);
2510		break;
2511
2512	case XenbusStateConnected:
2513		netdev_notify_peers(netdev);
2514		break;
2515
2516	case XenbusStateClosed:
2517		if (dev->state == XenbusStateClosed)
2518			break;
2519		fallthrough;	/* Missed the backend's CLOSING state */
2520	case XenbusStateClosing:
2521		xenbus_frontend_closed(dev);
2522		break;
2523	}
2524}
2525
2526static const struct xennet_stat {
2527	char name[ETH_GSTRING_LEN];
2528	u16 offset;
2529} xennet_stats[] = {
2530	{
2531		"rx_gso_checksum_fixup",
2532		offsetof(struct netfront_info, rx_gso_checksum_fixup)
2533	},
2534};
2535
2536static int xennet_get_sset_count(struct net_device *dev, int string_set)
2537{
2538	switch (string_set) {
2539	case ETH_SS_STATS:
2540		return ARRAY_SIZE(xennet_stats);
2541	default:
2542		return -EINVAL;
2543	}
2544}
2545
2546static void xennet_get_ethtool_stats(struct net_device *dev,
2547				     struct ethtool_stats *stats, u64 *data)
2548{
2549	void *np = netdev_priv(dev);
2550	int i;
2551
2552	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2553		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2554}
2555
2556static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2557{
2558	int i;
2559
2560	switch (stringset) {
2561	case ETH_SS_STATS:
2562		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2563			memcpy(data + i * ETH_GSTRING_LEN,
2564			       xennet_stats[i].name, ETH_GSTRING_LEN);
2565		break;
2566	}
2567}
2568
2569static const struct ethtool_ops xennet_ethtool_ops =
2570{
2571	.get_link = ethtool_op_get_link,
2572
2573	.get_sset_count = xennet_get_sset_count,
2574	.get_ethtool_stats = xennet_get_ethtool_stats,
2575	.get_strings = xennet_get_strings,
2576	.get_ts_info = ethtool_op_get_ts_info,
2577};
2578
2579#ifdef CONFIG_SYSFS
2580static ssize_t show_rxbuf(struct device *dev,
2581			  struct device_attribute *attr, char *buf)
2582{
2583	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2584}
2585
2586static ssize_t store_rxbuf(struct device *dev,
2587			   struct device_attribute *attr,
2588			   const char *buf, size_t len)
2589{
2590	char *endp;
2591
2592	if (!capable(CAP_NET_ADMIN))
2593		return -EPERM;
2594
2595	simple_strtoul(buf, &endp, 0);
2596	if (endp == buf)
2597		return -EBADMSG;
2598
2599	/* rxbuf_min and rxbuf_max are no longer configurable. */
2600
2601	return len;
2602}
2603
2604static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2605static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2606static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2607
2608static struct attribute *xennet_dev_attrs[] = {
2609	&dev_attr_rxbuf_min.attr,
2610	&dev_attr_rxbuf_max.attr,
2611	&dev_attr_rxbuf_cur.attr,
2612	NULL
2613};
2614
2615static const struct attribute_group xennet_dev_group = {
2616	.attrs = xennet_dev_attrs
2617};
2618#endif /* CONFIG_SYSFS */
2619
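/*
 * Walk the backend down to XenbusStateClosed before tearing the device
 * apart, re-asserting our Closing/Closed state every XENNET_TIMEOUT
 * until the backend follows (or vanishes into XenbusStateUnknown).
 */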
2620static void xennet_bus_close(struct xenbus_device *dev)
2621{
2622	int ret;
2623
2624	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2625		return;
2626	do {
2627		xenbus_switch_state(dev, XenbusStateClosing);
2628		ret = wait_event_timeout(module_wq,
2629				   xenbus_read_driver_state(dev->otherend) ==
2630				   XenbusStateClosing ||
2631				   xenbus_read_driver_state(dev->otherend) ==
2632				   XenbusStateClosed ||
2633				   xenbus_read_driver_state(dev->otherend) ==
2634				   XenbusStateUnknown,
2635				   XENNET_TIMEOUT);
2636	} while (!ret);
2637
2638	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2639		return;
2640
2641	do {
2642		xenbus_switch_state(dev, XenbusStateClosed);
2643		ret = wait_event_timeout(module_wq,
2644				   xenbus_read_driver_state(dev->otherend) ==
2645				   XenbusStateClosed ||
2646				   xenbus_read_driver_state(dev->otherend) ==
2647				   XenbusStateUnknown,
2648				   XENNET_TIMEOUT);
2649	} while (!ret);
2650}
2651
2652static void xennet_remove(struct xenbus_device *dev)
2653{
2654	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2655
2656	xennet_bus_close(dev);
2657	xennet_disconnect_backend(info);
2658
2659	if (info->netdev->reg_state == NETREG_REGISTERED)
2660		unregister_netdev(info->netdev);
2661
2662	if (info->queues) {
2663		rtnl_lock();
2664		xennet_destroy_queues(info);
2665		rtnl_unlock();
2666	}
2667	xennet_free_netdev(info->netdev);
2668}
2669
2670static const struct xenbus_device_id netfront_ids[] = {
2671	{ "vif" },
2672	{ "" }
2673};
2674
2675static struct xenbus_driver netfront_driver = {
2676	.ids = netfront_ids,
2677	.probe = netfront_probe,
2678	.remove = xennet_remove,
2679	.resume = netfront_resume,
2680	.otherend_changed = netback_changed,
2681};
2682
2683static int __init netif_init(void)
2684{
2685	if (!xen_domain())
2686		return -ENODEV;
2687
2688	if (!xen_has_pv_nic_devices())
2689		return -ENODEV;
2690
2691	pr_info("Initialising Xen virtual ethernet driver\n");
2692
2693	/* Allow as many queues as there are CPUs, up to a maximum of 8,
2694	 * if the user has not specified a value.
2695	 */
2696	if (xennet_max_queues == 0)
2697		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2698					  num_online_cpus());
2699
2700	return xenbus_register_frontend(&netfront_driver);
2701}
2702module_init(netif_init);
2703
2704
2705static void __exit netif_exit(void)
2706{
2707	xenbus_unregister_driver(&netfront_driver);
2708}
2709module_exit(netif_exit);
2710
2711MODULE_DESCRIPTION("Xen virtual network device frontend");
2712MODULE_LICENSE("GPL");
2713MODULE_ALIAS("xen:vif");
2714MODULE_ALIAS("xennet");