v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* A network driver using virtio.
   3 *
   4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
   5 */
   6//#define DEBUG
   7#include <linux/netdevice.h>
   8#include <linux/etherdevice.h>
   9#include <linux/ethtool.h>
  10#include <linux/module.h>
  11#include <linux/virtio.h>
  12#include <linux/virtio_net.h>
  13#include <linux/bpf.h>
  14#include <linux/bpf_trace.h>
  15#include <linux/scatterlist.h>
  16#include <linux/if_vlan.h>
  17#include <linux/slab.h>
  18#include <linux/cpu.h>
  19#include <linux/average.h>
  20#include <linux/filter.h>
  21#include <linux/kernel.h>
  22#include <linux/dim.h>
  23#include <net/route.h>
  24#include <net/xdp.h>
  25#include <net/net_failover.h>
  26#include <net/netdev_rx_queue.h>
  27
  28static int napi_weight = NAPI_POLL_WEIGHT;
  29module_param(napi_weight, int, 0444);
  30
  31static bool csum = true, gso = true, napi_tx = true;
  32module_param(csum, bool, 0444);
  33module_param(gso, bool, 0444);
  34module_param(napi_tx, bool, 0644);
  35
  36/* FIXME: MTU in config. */
  37#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
  38#define GOOD_COPY_LEN	128
  39
  40#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
  41
  42/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
  43#define VIRTIO_XDP_HEADROOM 256
  44
  45/* Separating two types of XDP xmit */
  46#define VIRTIO_XDP_TX		BIT(0)
  47#define VIRTIO_XDP_REDIR	BIT(1)
  48
  49#define VIRTIO_XDP_FLAG	BIT(0)
  50
  51/* RX packet size EWMA. The average packet size is used to determine the packet
  52 * buffer size when refilling RX rings. As the entire RX ring may be refilled
  53 * at once, the weight is chosen so that the EWMA will be insensitive to short-
  54 * term, transient changes in packet size.
  55 */
  56DECLARE_EWMA(pkt_len, 0, 64)
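/* A minimal usage sketch (not part of this file) of the helpers generated by
 * DECLARE_EWMA() above; ewma_pkt_len_init/add/read come from <linux/average.h>.
 * With a weight reciprocal of 64, each new sample moves the average by ~1/64,
 * so short bursts of unusual packet sizes barely disturb it. The function name
 * below is illustrative only.
 */
static inline unsigned long pkt_len_ewma_sketch(struct ewma_pkt_len *avg,
						unsigned long sample)
{
	ewma_pkt_len_add(avg, sample);		/* feed one RX packet length */
	return ewma_pkt_len_read(avg);		/* smoothed average for refill sizing */
}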
  57
  58#define VIRTNET_DRIVER_VERSION "1.0.0"
  59
  60static const unsigned long guest_offloads[] = {
  61	VIRTIO_NET_F_GUEST_TSO4,
  62	VIRTIO_NET_F_GUEST_TSO6,
  63	VIRTIO_NET_F_GUEST_ECN,
  64	VIRTIO_NET_F_GUEST_UFO,
  65	VIRTIO_NET_F_GUEST_CSUM,
  66	VIRTIO_NET_F_GUEST_USO4,
  67	VIRTIO_NET_F_GUEST_USO6,
  68	VIRTIO_NET_F_GUEST_HDRLEN
  69};
  70
  71#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
  72				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
  73				(1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
  74				(1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
  75				(1ULL << VIRTIO_NET_F_GUEST_USO4) | \
  76				(1ULL << VIRTIO_NET_F_GUEST_USO6))
  77
  78struct virtnet_stat_desc {
  79	char desc[ETH_GSTRING_LEN];
  80	size_t offset;
  81};
  82
  83struct virtnet_sq_stats {
  84	struct u64_stats_sync syncp;
  85	u64_stats_t packets;
  86	u64_stats_t bytes;
  87	u64_stats_t xdp_tx;
  88	u64_stats_t xdp_tx_drops;
  89	u64_stats_t kicks;
  90	u64_stats_t tx_timeouts;
  91};
  92
  93struct virtnet_rq_stats {
  94	struct u64_stats_sync syncp;
  95	u64_stats_t packets;
  96	u64_stats_t bytes;
  97	u64_stats_t drops;
  98	u64_stats_t xdp_packets;
  99	u64_stats_t xdp_tx;
 100	u64_stats_t xdp_redirects;
 101	u64_stats_t xdp_drops;
 102	u64_stats_t kicks;
 103};
 104
 105#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
 106#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
 107
 108static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
 109	{ "packets",		VIRTNET_SQ_STAT(packets) },
 110	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
 111	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
 112	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
 113	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
 114	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
 115};
 116
 117static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 118	{ "packets",		VIRTNET_RQ_STAT(packets) },
 119	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
 120	{ "drops",		VIRTNET_RQ_STAT(drops) },
 121	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
 122	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
 123	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
 124	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
 125	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
 126};
 127
 128#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
 129#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
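/* Sketch of how these name/offset tables are typically consumed when emitting
 * per-queue ethtool statistics (an assumption about usage; the helper name
 * below is illustrative and not part of the driver).
 */
static inline u64 virtnet_read_sq_stat_sketch(const struct virtnet_sq_stats *stats,
					      const struct virtnet_stat_desc *desc)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		val = u64_stats_read((const u64_stats_t *)((const u8 *)stats + desc->offset));
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	return val;
}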
 130
 131struct virtnet_interrupt_coalesce {
 132	u32 max_packets;
 133	u32 max_usecs;
 134};
 135
 136/* The dma information of pages allocated at a time. */
 137struct virtnet_rq_dma {
 138	dma_addr_t addr;
 139	u32 ref;
 140	u16 len;
 141	u16 need_sync;
 142};
 143
 144/* Internal representation of a send virtqueue */
 145struct send_queue {
 146	/* Virtqueue associated with this send_queue */
 147	struct virtqueue *vq;
 148
 149	/* TX: fragments + linear part + virtio header */
 150	struct scatterlist sg[MAX_SKB_FRAGS + 2];
 151
 152	/* Name of the send queue: output.$index */
 153	char name[16];
 154
 155	struct virtnet_sq_stats stats;
 156
 157	struct virtnet_interrupt_coalesce intr_coal;
 158
 159	struct napi_struct napi;
 160
 161	/* Record whether sq is in reset state. */
 162	bool reset;
 163};
 164
 165/* Internal representation of a receive virtqueue */
 166struct receive_queue {
 167	/* Virtqueue associated with this receive_queue */
 168	struct virtqueue *vq;
 169
 170	struct napi_struct napi;
 171
 172	struct bpf_prog __rcu *xdp_prog;
 173
 174	struct virtnet_rq_stats stats;
 175
 176	/* The number of rx notifications */
 177	u16 calls;
 178
 179	/* Is dynamic interrupt moderation enabled? */
 180	bool dim_enabled;
 181
 182	/* Dynamic Interrupt Moderation */
 183	struct dim dim;
 184
 185	u32 packets_in_napi;
 186
 187	struct virtnet_interrupt_coalesce intr_coal;
 188
 189	/* Chain pages by the private ptr. */
 190	struct page *pages;
 191
 192	/* Average packet length for mergeable receive buffers. */
 193	struct ewma_pkt_len mrg_avg_pkt_len;
 194
 195	/* Page frag for packet buffer allocation. */
 196	struct page_frag alloc_frag;
 197
 198	/* RX: fragments + linear part + virtio header */
 199	struct scatterlist sg[MAX_SKB_FRAGS + 2];
 200
 201	/* Min single buffer size for mergeable buffers case. */
 202	unsigned int min_buf_len;
 203
 204	/* Name of this receive queue: input.$index */
 205	char name[16];
 206
 207	struct xdp_rxq_info xdp_rxq;
 208
 209	/* Record the last dma info to free after new pages is allocated. */
 210	struct virtnet_rq_dma *last_dma;
 211
 212	/* Do dma by self */
 213	bool do_dma;
 214};
 215
 216/* This structure can hold an RSS message with maximum settings for the indirection table and key size.
 217 * Note that the default structure describing the RSS configuration, virtio_net_rss_config,
 218 * contains the same info but can't hold the table values.
 219 * In any case, the structure is passed to the virtio device through sg_buf, split into parts,
 220 * because the table sizes may differ according to the device configuration.
 221 */
 222#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
 223#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
 224struct virtio_net_ctrl_rss {
 225	u32 hash_types;
 226	u16 indirection_table_mask;
 227	u16 unclassified_queue;
 228	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
 229	u16 max_tx_vq;
 230	u8 hash_key_length;
 231	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
 232};
 233
 234/* Control VQ buffers: protected by the rtnl lock */
 235struct control_buf {
 236	struct virtio_net_ctrl_hdr hdr;
 237	virtio_net_ctrl_ack status;
 238	struct virtio_net_ctrl_mq mq;
 239	u8 promisc;
 240	u8 allmulti;
 241	__virtio16 vid;
 242	__virtio64 offloads;
 243	struct virtio_net_ctrl_rss rss;
 244	struct virtio_net_ctrl_coal_tx coal_tx;
 245	struct virtio_net_ctrl_coal_rx coal_rx;
 246	struct virtio_net_ctrl_coal_vq coal_vq;
 247};
 248
 249struct virtnet_info {
 250	struct virtio_device *vdev;
 251	struct virtqueue *cvq;
 252	struct net_device *dev;
 253	struct send_queue *sq;
 254	struct receive_queue *rq;
 255	unsigned int status;
 256
 257	/* Max # of queue pairs supported by the device */
 258	u16 max_queue_pairs;
 259
 260	/* # of queue pairs currently used by the driver */
 261	u16 curr_queue_pairs;
 262
 263	/* # of XDP queue pairs currently used by the driver */
 264	u16 xdp_queue_pairs;
 265
 266	/* xdp_queue_pairs may be 0 even when XDP is already loaded, so track it with this flag. */
 267	bool xdp_enabled;
 268
 269	/* I like... big packets and I cannot lie! */
 270	bool big_packets;
 271
 272	/* number of sg entries allocated for big packets */
 273	unsigned int big_packets_num_skbfrags;
 274
 275	/* Host will merge rx buffers for big packets (shake it! shake it!) */
 276	bool mergeable_rx_bufs;
 277
 278	/* Host supports rss and/or hash report */
 279	bool has_rss;
 280	bool has_rss_hash_report;
 281	u8 rss_key_size;
 282	u16 rss_indir_table_size;
 283	u32 rss_hash_types_supported;
 284	u32 rss_hash_types_saved;
 285
 286	/* Has control virtqueue */
 287	bool has_cvq;
 288
 289	/* Host can handle any s/g split between our header and packet data */
 290	bool any_header_sg;
 291
 292	/* Packet virtio header size */
 293	u8 hdr_len;
 294
 295	/* Work struct for delayed refilling if we run low on memory. */
 296	struct delayed_work refill;
 297
 298	/* Is delayed refill enabled? */
 299	bool refill_enabled;
 300
 301	/* The lock to synchronize the access to refill_enabled */
 302	spinlock_t refill_lock;
 303
 304	/* Work struct for config space updates */
 305	struct work_struct config_work;
 306
 307	/* Is the affinity hint set for the virtqueues? */
 308	bool affinity_hint_set;
 309
 310	/* CPU hotplug instances for online & dead */
 311	struct hlist_node node;
 312	struct hlist_node node_dead;
 313
 314	struct control_buf *ctrl;
 315
 316	/* Ethtool settings */
 317	u8 duplex;
 318	u32 speed;
 319
 320	/* Is rx dynamic interrupt moderation enabled? */
 321	bool rx_dim_enabled;
 322
 323	/* Interrupt coalescing settings */
 324	struct virtnet_interrupt_coalesce intr_coal_tx;
 325	struct virtnet_interrupt_coalesce intr_coal_rx;
 326
 327	unsigned long guest_offloads;
 328	unsigned long guest_offloads_capable;
 329
 330	/* failover when STANDBY feature enabled */
 331	struct failover *failover;
 332};
 333
 334struct padded_vnet_hdr {
 335	struct virtio_net_hdr_v1_hash hdr;
 336	/*
 337	 * hdr is in a separate sg buffer, and data sg buffer shares same page
 338	 * with this header sg. This padding makes next sg 16 byte aligned
 339	 * after the header.
 340	 */
 341	char padding[12];
 342};
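/* Sanity-check sketch (not in the driver): virtio_net_hdr_v1_hash is 20 bytes,
 * so the 12 bytes of padding above bring the structure to 32 bytes and keep a
 * data sg entry that follows the header 16-byte aligned.
 */
static_assert(sizeof(struct padded_vnet_hdr) % 16 == 0,
	      "padded_vnet_hdr must keep the following sg entry 16-byte aligned");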
 343
 344struct virtio_net_common_hdr {
 345	union {
 346		struct virtio_net_hdr hdr;
 347		struct virtio_net_hdr_mrg_rxbuf	mrg_hdr;
 348		struct virtio_net_hdr_v1_hash hash_v1_hdr;
 349	};
 350};
 351
 352static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 353
 354static bool is_xdp_frame(void *ptr)
 355{
 356	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
 357}
 358
 359static void *xdp_to_ptr(struct xdp_frame *ptr)
 360{
 361	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
 362}
 363
 364static struct xdp_frame *ptr_to_xdp(void *ptr)
 365{
 366	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
 367}
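/* Illustrative round trip for the pointer tagging above (a sketch, not driver
 * code): xdp_frame pointers are at least 2-byte aligned, so bit 0 is free to
 * mark a queued buffer as an XDP frame rather than an sk_buff.
 */
static inline bool xdp_ptr_tag_roundtrip_sketch(struct xdp_frame *frame)
{
	void *tagged = xdp_to_ptr(frame);

	return is_xdp_frame(tagged) && ptr_to_xdp(tagged) == frame;
}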
 368
 369/* Converting between virtqueue no. and kernel tx/rx queue no.
 370 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 371 */
 372static int vq2txq(struct virtqueue *vq)
 373{
 374	return (vq->index - 1) / 2;
 375}
 376
 377static int txq2vq(int txq)
 378{
 379	return txq * 2 + 1;
 380}
 381
 382static int vq2rxq(struct virtqueue *vq)
 383{
 384	return vq->index / 2;
 385}
 386
 387static int rxq2vq(int rxq)
 388{
 389	return rxq * 2;
 390}
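/* Worked example of the mapping above (a sketch; the helper below is
 * illustrative only): for queue pair 1 the rx virtqueue index is 2 and the
 * tx virtqueue index is 3, matching the 2N/2N+1 layout in the comment.
 */
static inline bool vq_index_mapping_sketch(void)
{
	return rxq2vq(1) == 2 && txq2vq(1) == 3;
}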
 391
 392static inline struct virtio_net_common_hdr *
 393skb_vnet_common_hdr(struct sk_buff *skb)
 394{
 395	return (struct virtio_net_common_hdr *)skb->cb;
 396}
 397
 398/*
 399 * private is used to chain pages for big packets; put the whole
 400 * most recently used list at the beginning for reuse
 401 */
 402static void give_pages(struct receive_queue *rq, struct page *page)
 403{
 404	struct page *end;
 405
 406	/* Find end of list, sew whole thing into vi->rq.pages. */
 407	for (end = page; end->private; end = (struct page *)end->private);
 408	end->private = (unsigned long)rq->pages;
 409	rq->pages = page;
 410}
 411
 412static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
 413{
 414	struct page *p = rq->pages;
 415
 416	if (p) {
 417		rq->pages = (struct page *)p->private;
 418		/* clear private here, it is used to chain pages */
 419		p->private = 0;
 420	} else
 421		p = alloc_page(gfp_mask);
 422	return p;
 423}
 424
 425static void virtnet_rq_free_buf(struct virtnet_info *vi,
 426				struct receive_queue *rq, void *buf)
 427{
 428	if (vi->mergeable_rx_bufs)
 429		put_page(virt_to_head_page(buf));
 430	else if (vi->big_packets)
 431		give_pages(rq, buf);
 432	else
 433		put_page(virt_to_head_page(buf));
 434}
 435
 436static void enable_delayed_refill(struct virtnet_info *vi)
 437{
 438	spin_lock_bh(&vi->refill_lock);
 439	vi->refill_enabled = true;
 440	spin_unlock_bh(&vi->refill_lock);
 441}
 442
 443static void disable_delayed_refill(struct virtnet_info *vi)
 444{
 445	spin_lock_bh(&vi->refill_lock);
 446	vi->refill_enabled = false;
 447	spin_unlock_bh(&vi->refill_lock);
 448}
 449
 450static void virtqueue_napi_schedule(struct napi_struct *napi,
 451				    struct virtqueue *vq)
 452{
 453	if (napi_schedule_prep(napi)) {
 454		virtqueue_disable_cb(vq);
 455		__napi_schedule(napi);
 456	}
 457}
 458
 459static bool virtqueue_napi_complete(struct napi_struct *napi,
 460				    struct virtqueue *vq, int processed)
 461{
 462	int opaque;
 463
 464	opaque = virtqueue_enable_cb_prepare(vq);
 465	if (napi_complete_done(napi, processed)) {
 466		if (unlikely(virtqueue_poll(vq, opaque)))
 467			virtqueue_napi_schedule(napi, vq);
 468		else
 469			return true;
 470	} else {
 471		virtqueue_disable_cb(vq);
 472	}
 473
 474	return false;
 475}
 476
 477static void skb_xmit_done(struct virtqueue *vq)
 478{
 479	struct virtnet_info *vi = vq->vdev->priv;
 480	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
 481
 482	/* Suppress further interrupts. */
 483	virtqueue_disable_cb(vq);
 484
 485	if (napi->weight)
 486		virtqueue_napi_schedule(napi, vq);
 487	else
 488		/* We were probably waiting for more output buffers. */
 489		netif_wake_subqueue(vi->dev, vq2txq(vq));
 490}
 491
 492#define MRG_CTX_HEADER_SHIFT 22
 493static void *mergeable_len_to_ctx(unsigned int truesize,
 494				  unsigned int headroom)
 495{
 496	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
 497}
 498
 499static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
 500{
 501	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
 502}
 503
 504static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
 505{
 506	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
 507}
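/* Round-trip sketch (not driver code) for the mergeable-buffer context above:
 * headroom lives in the bits above MRG_CTX_HEADER_SHIFT, truesize in the bits
 * below it, so both survive being squeezed into one pointer-sized value as
 * long as truesize fits in 22 bits.
 */
static inline bool mrg_ctx_roundtrip_sketch(unsigned int truesize,
					    unsigned int headroom)
{
	void *ctx = mergeable_len_to_ctx(truesize, headroom);

	return mergeable_ctx_to_truesize(ctx) == truesize &&
	       mergeable_ctx_to_headroom(ctx) == headroom;
}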
 508
 509static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
 510					 unsigned int headroom,
 511					 unsigned int len)
 512{
 513	struct sk_buff *skb;
 514
 515	skb = build_skb(buf, buflen);
 516	if (unlikely(!skb))
 517		return NULL;
 518
 519	skb_reserve(skb, headroom);
 520	skb_put(skb, len);
 521
 522	return skb;
 523}
 524
 525/* Called from bottom half context */
 526static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 527				   struct receive_queue *rq,
 528				   struct page *page, unsigned int offset,
 529				   unsigned int len, unsigned int truesize,
 530				   unsigned int headroom)
 531{
 532	struct sk_buff *skb;
 533	struct virtio_net_common_hdr *hdr;
 534	unsigned int copy, hdr_len, hdr_padded_len;
 535	struct page *page_to_free = NULL;
 536	int tailroom, shinfo_size;
 537	char *p, *hdr_p, *buf;
 538
 539	p = page_address(page) + offset;
 540	hdr_p = p;
 541
 542	hdr_len = vi->hdr_len;
 543	if (vi->mergeable_rx_bufs)
 544		hdr_padded_len = hdr_len;
 545	else
 546		hdr_padded_len = sizeof(struct padded_vnet_hdr);
 547
 548	buf = p - headroom;
 549	len -= hdr_len;
 550	offset += hdr_padded_len;
 551	p += hdr_padded_len;
 552	tailroom = truesize - headroom  - hdr_padded_len - len;
 553
 554	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 555
 556	/* copy small packet so we can reuse these pages */
 557	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
 558		skb = virtnet_build_skb(buf, truesize, p - buf, len);
 559		if (unlikely(!skb))
 560			return NULL;
 561
 562		page = (struct page *)page->private;
 563		if (page)
 564			give_pages(rq, page);
 565		goto ok;
 566	}
 567
 568	/* copy small packet so we can reuse these pages for small data */
 569	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
 570	if (unlikely(!skb))
 571		return NULL;
 572
 573	/* Copy all frame if it fits skb->head, otherwise
 574	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
 575	 */
 576	if (len <= skb_tailroom(skb))
 577		copy = len;
 578	else
 579		copy = ETH_HLEN;
 580	skb_put_data(skb, p, copy);
 581
 582	len -= copy;
 583	offset += copy;
 584
 585	if (vi->mergeable_rx_bufs) {
 586		if (len)
 587			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 588		else
 589			page_to_free = page;
 590		goto ok;
 591	}
 592
 593	/*
 594	 * Verify that we can indeed put this data into a skb.
 595	 * This is here to handle cases when the device erroneously
 596	 * tries to receive more than is possible. This is usually
 597	 * the case of a broken device.
 598	 */
 599	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
 600		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
 601		dev_kfree_skb(skb);
 602		return NULL;
 603	}
 604	BUG_ON(offset >= PAGE_SIZE);
 605	while (len) {
 606		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
 607		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
 608				frag_size, truesize);
 609		len -= frag_size;
 610		page = (struct page *)page->private;
 611		offset = 0;
 612	}
 613
 614	if (page)
 615		give_pages(rq, page);
 616
 617ok:
 618	hdr = skb_vnet_common_hdr(skb);
 619	memcpy(hdr, hdr_p, hdr_len);
 620	if (page_to_free)
 621		put_page(page_to_free);
 622
 623	return skb;
 624}
 625
 626static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
 627{
 628	struct page *page = virt_to_head_page(buf);
 629	struct virtnet_rq_dma *dma;
 630	void *head;
 631	int offset;
 632
 633	head = page_address(page);
 634
 635	dma = head;
 636
 637	--dma->ref;
 638
 639	if (dma->need_sync && len) {
 640		offset = buf - (head + sizeof(*dma));
 641
 642		virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
 643							offset, len,
 644							DMA_FROM_DEVICE);
 645	}
 646
 647	if (dma->ref)
 648		return;
 649
 650	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
 651					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 652	put_page(page);
 653}
 654
 655static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
 656{
 657	void *buf;
 658
 659	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
 660	if (buf && rq->do_dma)
 661		virtnet_rq_unmap(rq, buf, *len);
 662
 663	return buf;
 664}
 665
 666static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
 667{
 668	struct virtnet_rq_dma *dma;
 669	dma_addr_t addr;
 670	u32 offset;
 671	void *head;
 672
 673	if (!rq->do_dma) {
 674		sg_init_one(rq->sg, buf, len);
 675		return;
 676	}
 677
 678	head = page_address(rq->alloc_frag.page);
 679
 680	offset = buf - head;
 681
 682	dma = head;
 683
 684	addr = dma->addr - sizeof(*dma) + offset;
 685
 686	sg_init_table(rq->sg, 1);
 687	rq->sg[0].dma_address = addr;
 688	rq->sg[0].length = len;
 689}
 690
 691static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 692{
 693	struct page_frag *alloc_frag = &rq->alloc_frag;
 694	struct virtnet_rq_dma *dma;
 695	void *buf, *head;
 696	dma_addr_t addr;
 697
 698	if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
 699		return NULL;
 700
 701	head = page_address(alloc_frag->page);
 702
 703	if (rq->do_dma) {
 704		dma = head;
 705
 706		/* new pages */
 707		if (!alloc_frag->offset) {
 708			if (rq->last_dma) {
 709				/* Now, the new page is allocated, the last dma
 710				 * will not be used. So the dma can be unmapped
 711				 * if the ref is 0.
 712				 */
 713				virtnet_rq_unmap(rq, rq->last_dma, 0);
 714				rq->last_dma = NULL;
 715			}
 716
 717			dma->len = alloc_frag->size - sizeof(*dma);
 718
 719			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
 720							      dma->len, DMA_FROM_DEVICE, 0);
 721			if (virtqueue_dma_mapping_error(rq->vq, addr))
 722				return NULL;
 723
 724			dma->addr = addr;
 725			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
 726
 727			/* Add a reference to dma to prevent the entire dma from
 728			 * being released during error handling. This reference
 729			 * will be freed after the pages are no longer used.
 730			 */
 731			get_page(alloc_frag->page);
 732			dma->ref = 1;
 733			alloc_frag->offset = sizeof(*dma);
 734
 735			rq->last_dma = dma;
 736		}
 737
 738		++dma->ref;
 739	}
 740
 741	buf = head + alloc_frag->offset;
 742
 743	get_page(alloc_frag->page);
 744	alloc_frag->offset += size;
 745
 746	return buf;
 747}
 748
 749static void virtnet_rq_set_premapped(struct virtnet_info *vi)
 750{
 751	int i;
 752
 753	/* disable for big mode */
 754	if (!vi->mergeable_rx_bufs && vi->big_packets)
 755		return;
 756
 757	for (i = 0; i < vi->max_queue_pairs; i++) {
 758		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
 759			continue;
 760
 761		vi->rq[i].do_dma = true;
 762	}
 763}
 764
 765static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
 766{
 767	struct virtnet_info *vi = vq->vdev->priv;
 768	struct receive_queue *rq;
 769	int i = vq2rxq(vq);
 770
 771	rq = &vi->rq[i];
 772
 773	if (rq->do_dma)
 774		virtnet_rq_unmap(rq, buf, 0);
 775
 776	virtnet_rq_free_buf(vi, rq, buf);
 777}
 778
 779static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 780{
 781	unsigned int len;
 782	unsigned int packets = 0;
 783	unsigned int bytes = 0;
 784	void *ptr;
 785
 786	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
 787		if (likely(!is_xdp_frame(ptr))) {
 788			struct sk_buff *skb = ptr;
 789
 790			pr_debug("Sent skb %p\n", skb);
 791
 792			bytes += skb->len;
 793			napi_consume_skb(skb, in_napi);
 794		} else {
 795			struct xdp_frame *frame = ptr_to_xdp(ptr);
 796
 797			bytes += xdp_get_frame_len(frame);
 798			xdp_return_frame(frame);
 799		}
 800		packets++;
 801	}
 802
 803	/* Avoid overhead when no packets have been processed; this
 804	 * happens when called speculatively from start_xmit.
 805	 */
 806	if (!packets)
 807		return;
 808
 809	u64_stats_update_begin(&sq->stats.syncp);
 810	u64_stats_add(&sq->stats.bytes, bytes);
 811	u64_stats_add(&sq->stats.packets, packets);
 812	u64_stats_update_end(&sq->stats.syncp);
 813}
 814
 815static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
 816{
 817	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
 818		return false;
 819	else if (q < vi->curr_queue_pairs)
 820		return true;
 821	else
 822		return false;
 823}
 824
 825static void check_sq_full_and_disable(struct virtnet_info *vi,
 826				      struct net_device *dev,
 827				      struct send_queue *sq)
 828{
 829	bool use_napi = sq->napi.weight;
 830	int qnum;
 831
 832	qnum = sq - vi->sq;
 833
 834	/* If running out of space, stop queue to avoid getting packets that we
 835	 * are then unable to transmit.
 836	 * An alternative would be to force queuing layer to requeue the skb by
 837	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
 838	 * returned in a normal path of operation: it means that driver is not
 839	 * maintaining the TX queue stop/start state properly, and causes
 840	 * the stack to do a non-trivial amount of useless work.
 841	 * Since most packets only take 1 or 2 ring slots, stopping the queue
 842	 * early means 16 slots are typically wasted.
 843	 */
 844	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
 845		netif_stop_subqueue(dev, qnum);
 846		if (use_napi) {
 847			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
 848				virtqueue_napi_schedule(&sq->napi, sq->vq);
 849		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 850			/* More just got used, free them then recheck. */
 851			free_old_xmit_skbs(sq, false);
 852			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 853				netif_start_subqueue(dev, qnum);
 854				virtqueue_disable_cb(sq->vq);
 855			}
 856		}
 857	}
 858}
 859
 860static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 861				   struct send_queue *sq,
 862				   struct xdp_frame *xdpf)
 863{
 864	struct virtio_net_hdr_mrg_rxbuf *hdr;
 865	struct skb_shared_info *shinfo;
 866	u8 nr_frags = 0;
 867	int err, i;
 868
 869	if (unlikely(xdpf->headroom < vi->hdr_len))
 870		return -EOVERFLOW;
 871
 872	if (unlikely(xdp_frame_has_frags(xdpf))) {
 873		shinfo = xdp_get_shared_info_from_frame(xdpf);
 874		nr_frags = shinfo->nr_frags;
 875	}
 876
 877	/* In wrapping function virtnet_xdp_xmit(), we need to free
 878	 * up the pending old buffers, where we need to calculate the
 879	 * position of skb_shared_info in xdp_get_frame_len() and
 880	 * xdp_return_frame(), which will involve xdpf->data and
 881	 * xdpf->headroom. Therefore, we need to update the value of
 882	 * headroom synchronously here.
 883	 */
 884	xdpf->headroom -= vi->hdr_len;
 885	xdpf->data -= vi->hdr_len;
 886	/* Zero header and leave csum up to XDP layers */
 887	hdr = xdpf->data;
 888	memset(hdr, 0, vi->hdr_len);
 889	xdpf->len   += vi->hdr_len;
 890
 891	sg_init_table(sq->sg, nr_frags + 1);
 892	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
 893	for (i = 0; i < nr_frags; i++) {
 894		skb_frag_t *frag = &shinfo->frags[i];
 895
 896		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
 897			    skb_frag_size(frag), skb_frag_off(frag));
 898	}
 899
 900	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
 901				   xdp_to_ptr(xdpf), GFP_ATOMIC);
 902	if (unlikely(err))
 903		return -ENOSPC; /* Caller handle free/refcnt */
 904
 905	return 0;
 906}
 907
 908/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 909 * the current cpu, so it does not need to be locked.
 910 *
 911 * Here we use a macro instead of inline functions because we have to deal with
 912 * three issues at the same time: 1. the choice of sq. 2. judge and execute the
 913 * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
 914 * functions to perfectly solve these three problems at the same time.
 915 */
 916#define virtnet_xdp_get_sq(vi) ({                                       \
 917	int cpu = smp_processor_id();                                   \
 918	struct netdev_queue *txq;                                       \
 919	typeof(vi) v = (vi);                                            \
 920	unsigned int qp;                                                \
 921									\
 922	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
 923		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
 924		qp += cpu;                                              \
 925		txq = netdev_get_tx_queue(v->dev, qp);                  \
 926		__netif_tx_acquire(txq);                                \
 927	} else {                                                        \
 928		qp = cpu % v->curr_queue_pairs;                         \
 929		txq = netdev_get_tx_queue(v->dev, qp);                  \
 930		__netif_tx_lock(txq, cpu);                              \
 931	}                                                               \
 932	v->sq + qp;                                                     \
 933})
 934
 935#define virtnet_xdp_put_sq(vi, q) {                                     \
 936	struct netdev_queue *txq;                                       \
 937	typeof(vi) v = (vi);                                            \
 938									\
 939	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
 940	if (v->curr_queue_pairs > nr_cpu_ids)                           \
 941		__netif_tx_release(txq);                                \
 942	else                                                            \
 943		__netif_tx_unlock(txq);                                 \
 944}
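/* Usage sketch for the macro pair above (mirrors virtnet_xdp_xmit() below; the
 * function name is illustrative only): every virtnet_xdp_get_sq() must be
 * paired with virtnet_xdp_put_sq() on the same path so the conditionally taken
 * txq lock is always released.
 */
static int virtnet_xdp_sq_usage_sketch(struct virtnet_info *vi)
{
	struct send_queue *sq = virtnet_xdp_get_sq(vi);

	/* ... add xdp_frames to sq->vq and kick it here ... */

	virtnet_xdp_put_sq(vi, sq);
	return 0;
}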
 945
 946static int virtnet_xdp_xmit(struct net_device *dev,
 947			    int n, struct xdp_frame **frames, u32 flags)
 948{
 949	struct virtnet_info *vi = netdev_priv(dev);
 950	struct receive_queue *rq = vi->rq;
 951	struct bpf_prog *xdp_prog;
 952	struct send_queue *sq;
 953	unsigned int len;
 954	int packets = 0;
 955	int bytes = 0;
 956	int nxmit = 0;
 957	int kicks = 0;
 958	void *ptr;
 959	int ret;
 960	int i;
 961
 962	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
 963	 * indicates XDP resources have been successfully allocated.
 964	 */
 965	xdp_prog = rcu_access_pointer(rq->xdp_prog);
 966	if (!xdp_prog)
 967		return -ENXIO;
 968
 969	sq = virtnet_xdp_get_sq(vi);
 970
 971	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
 972		ret = -EINVAL;
 973		goto out;
 974	}
 975
 976	/* Free up any pending old buffers before queueing new ones. */
 977	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
 978		if (likely(is_xdp_frame(ptr))) {
 979			struct xdp_frame *frame = ptr_to_xdp(ptr);
 980
 981			bytes += xdp_get_frame_len(frame);
 982			xdp_return_frame(frame);
 983		} else {
 984			struct sk_buff *skb = ptr;
 985
 986			bytes += skb->len;
 987			napi_consume_skb(skb, false);
 988		}
 989		packets++;
 990	}
 991
 992	for (i = 0; i < n; i++) {
 993		struct xdp_frame *xdpf = frames[i];
 994
 995		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
 996			break;
 997		nxmit++;
 998	}
 999	ret = nxmit;
1000
1001	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1002		check_sq_full_and_disable(vi, dev, sq);
1003
1004	if (flags & XDP_XMIT_FLUSH) {
1005		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1006			kicks = 1;
1007	}
1008out:
1009	u64_stats_update_begin(&sq->stats.syncp);
1010	u64_stats_add(&sq->stats.bytes, bytes);
1011	u64_stats_add(&sq->stats.packets, packets);
1012	u64_stats_add(&sq->stats.xdp_tx, n);
1013	u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
1014	u64_stats_add(&sq->stats.kicks, kicks);
1015	u64_stats_update_end(&sq->stats.syncp);
1016
1017	virtnet_xdp_put_sq(vi, sq);
1018	return ret;
1019}
1020
1021static void put_xdp_frags(struct xdp_buff *xdp)
1022{
1023	struct skb_shared_info *shinfo;
1024	struct page *xdp_page;
1025	int i;
1026
1027	if (xdp_buff_has_frags(xdp)) {
1028		shinfo = xdp_get_shared_info_from_buff(xdp);
1029		for (i = 0; i < shinfo->nr_frags; i++) {
1030			xdp_page = skb_frag_page(&shinfo->frags[i]);
1031			put_page(xdp_page);
1032		}
1033	}
1034}
1035
1036static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
1037			       struct net_device *dev,
1038			       unsigned int *xdp_xmit,
1039			       struct virtnet_rq_stats *stats)
1040{
1041	struct xdp_frame *xdpf;
1042	int err;
1043	u32 act;
1044
1045	act = bpf_prog_run_xdp(xdp_prog, xdp);
1046	u64_stats_inc(&stats->xdp_packets);
1047
1048	switch (act) {
1049	case XDP_PASS:
1050		return act;
1051
1052	case XDP_TX:
1053		u64_stats_inc(&stats->xdp_tx);
1054		xdpf = xdp_convert_buff_to_frame(xdp);
1055		if (unlikely(!xdpf)) {
1056			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1057			return XDP_DROP;
1058		}
1059
1060		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1061		if (unlikely(!err)) {
1062			xdp_return_frame_rx_napi(xdpf);
1063		} else if (unlikely(err < 0)) {
1064			trace_xdp_exception(dev, xdp_prog, act);
1065			return XDP_DROP;
1066		}
1067		*xdp_xmit |= VIRTIO_XDP_TX;
1068		return act;
1069
1070	case XDP_REDIRECT:
1071		u64_stats_inc(&stats->xdp_redirects);
1072		err = xdp_do_redirect(dev, xdp, xdp_prog);
1073		if (err)
1074			return XDP_DROP;
1075
1076		*xdp_xmit |= VIRTIO_XDP_REDIR;
1077		return act;
1078
1079	default:
1080		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
1081		fallthrough;
1082	case XDP_ABORTED:
1083		trace_xdp_exception(dev, xdp_prog, act);
1084		fallthrough;
1085	case XDP_DROP:
1086		return XDP_DROP;
1087	}
1088}
1089
1090static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1091{
1092	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
1093}
1094
1095/* We copy the packet for XDP in the following cases:
1096 *
1097 * 1) Packet is scattered across multiple rx buffers.
1098 * 2) Headroom space is insufficient.
1099 *
1100 * This is inefficient but it's a temporary condition that
1101 * we hit right after XDP is enabled and until queue is refilled
1102 * with large buffers with sufficient headroom - so it should affect
1103 * at most queue size packets.
1104 * Afterwards, the conditions to enable
1105 * XDP should preclude the underlying device from sending packets
1106 * across multiple buffers (num_buf > 1), and we make sure buffers
1107 * have enough headroom.
1108 */
1109static struct page *xdp_linearize_page(struct receive_queue *rq,
1110				       int *num_buf,
1111				       struct page *p,
1112				       int offset,
1113				       int page_off,
1114				       unsigned int *len)
1115{
1116	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1117	struct page *page;
1118
1119	if (page_off + *len + tailroom > PAGE_SIZE)
1120		return NULL;
1121
1122	page = alloc_page(GFP_ATOMIC);
1123	if (!page)
1124		return NULL;
1125
1126	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
1127	page_off += *len;
1128
1129	while (--*num_buf) {
1130		unsigned int buflen;
1131		void *buf;
1132		int off;
1133
1134		buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1135		if (unlikely(!buf))
1136			goto err_buf;
1137
1138		p = virt_to_head_page(buf);
1139		off = buf - page_address(p);
1140
1141		/* guard against a misconfigured or uncooperative backend that
1142		 * is sending packets larger than the MTU.
1143		 */
1144		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
1145			put_page(p);
1146			goto err_buf;
1147		}
1148
1149		memcpy(page_address(page) + page_off,
1150		       page_address(p) + off, buflen);
1151		page_off += buflen;
1152		put_page(p);
1153	}
1154
1155	/* Headroom does not contribute to packet length */
1156	*len = page_off - VIRTIO_XDP_HEADROOM;
1157	return page;
1158err_buf:
1159	__free_pages(page, 0);
1160	return NULL;
1161}
1162
1163static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
1164					       unsigned int xdp_headroom,
1165					       void *buf,
1166					       unsigned int len)
1167{
1168	unsigned int header_offset;
1169	unsigned int headroom;
1170	unsigned int buflen;
1171	struct sk_buff *skb;
1172
1173	header_offset = VIRTNET_RX_PAD + xdp_headroom;
1174	headroom = vi->hdr_len + header_offset;
1175	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1176		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1177
1178	skb = virtnet_build_skb(buf, buflen, headroom, len);
1179	if (unlikely(!skb))
1180		return NULL;
1181
1182	buf += header_offset;
1183	memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
1184
1185	return skb;
1186}
1187
1188static struct sk_buff *receive_small_xdp(struct net_device *dev,
1189					 struct virtnet_info *vi,
1190					 struct receive_queue *rq,
1191					 struct bpf_prog *xdp_prog,
1192					 void *buf,
1193					 unsigned int xdp_headroom,
1194					 unsigned int len,
1195					 unsigned int *xdp_xmit,
1196					 struct virtnet_rq_stats *stats)
1197{
1198	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
1199	unsigned int headroom = vi->hdr_len + header_offset;
1200	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1201	struct page *page = virt_to_head_page(buf);
1202	struct page *xdp_page;
1203	unsigned int buflen;
1204	struct xdp_buff xdp;
1205	struct sk_buff *skb;
1206	unsigned int metasize = 0;
1207	u32 act;
1208
1209	if (unlikely(hdr->hdr.gso_type))
1210		goto err_xdp;
1211
1212	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1213		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1214
1215	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
1216		int offset = buf - page_address(page) + header_offset;
1217		unsigned int tlen = len + vi->hdr_len;
1218		int num_buf = 1;
1219
1220		xdp_headroom = virtnet_get_headroom(vi);
1221		header_offset = VIRTNET_RX_PAD + xdp_headroom;
1222		headroom = vi->hdr_len + header_offset;
1223		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1224			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1225		xdp_page = xdp_linearize_page(rq, &num_buf, page,
1226					      offset, header_offset,
1227					      &tlen);
1228		if (!xdp_page)
1229			goto err_xdp;
1230
1231		buf = page_address(xdp_page);
1232		put_page(page);
1233		page = xdp_page;
1234	}
1235
1236	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1237	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1238			 xdp_headroom, len, true);
1239
1240	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1241
1242	switch (act) {
1243	case XDP_PASS:
1244		/* Recalculate length in case bpf program changed it */
1245		len = xdp.data_end - xdp.data;
1246		metasize = xdp.data - xdp.data_meta;
1247		break;
1248
1249	case XDP_TX:
1250	case XDP_REDIRECT:
1251		goto xdp_xmit;
1252
1253	default:
1254		goto err_xdp;
1255	}
1256
1257	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
1258	if (unlikely(!skb))
1259		goto err;
1260
1261	if (metasize)
1262		skb_metadata_set(skb, metasize);
1263
1264	return skb;
1265
1266err_xdp:
1267	u64_stats_inc(&stats->xdp_drops);
1268err:
1269	u64_stats_inc(&stats->drops);
1270	put_page(page);
1271xdp_xmit:
1272	return NULL;
1273}
1274
1275static struct sk_buff *receive_small(struct net_device *dev,
1276				     struct virtnet_info *vi,
1277				     struct receive_queue *rq,
1278				     void *buf, void *ctx,
1279				     unsigned int len,
1280				     unsigned int *xdp_xmit,
1281				     struct virtnet_rq_stats *stats)
1282{
1283	unsigned int xdp_headroom = (unsigned long)ctx;
1284	struct page *page = virt_to_head_page(buf);
1285	struct sk_buff *skb;
1286
1287	len -= vi->hdr_len;
1288	u64_stats_add(&stats->bytes, len);
1289
1290	if (unlikely(len > GOOD_PACKET_LEN)) {
1291		pr_debug("%s: rx error: len %u exceeds max size %d\n",
1292			 dev->name, len, GOOD_PACKET_LEN);
1293		DEV_STATS_INC(dev, rx_length_errors);
1294		goto err;
1295	}
1296
1297	if (unlikely(vi->xdp_enabled)) {
1298		struct bpf_prog *xdp_prog;
1299
1300		rcu_read_lock();
1301		xdp_prog = rcu_dereference(rq->xdp_prog);
1302		if (xdp_prog) {
1303			skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
1304						xdp_headroom, len, xdp_xmit,
1305						stats);
1306			rcu_read_unlock();
1307			return skb;
1308		}
1309		rcu_read_unlock();
1310	}
1311
1312	skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
1313	if (likely(skb))
1314		return skb;
1315
1316err:
1317	u64_stats_inc(&stats->drops);
1318	put_page(page);
1319	return NULL;
1320}
1321
1322static struct sk_buff *receive_big(struct net_device *dev,
1323				   struct virtnet_info *vi,
1324				   struct receive_queue *rq,
1325				   void *buf,
1326				   unsigned int len,
1327				   struct virtnet_rq_stats *stats)
1328{
1329	struct page *page = buf;
1330	struct sk_buff *skb =
1331		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
1332
1333	u64_stats_add(&stats->bytes, len - vi->hdr_len);
1334	if (unlikely(!skb))
1335		goto err;
1336
1337	return skb;
1338
1339err:
1340	u64_stats_inc(&stats->drops);
1341	give_pages(rq, page);
1342	return NULL;
1343}
1344
1345static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
1346			       struct net_device *dev,
1347			       struct virtnet_rq_stats *stats)
1348{
1349	struct page *page;
1350	void *buf;
1351	int len;
1352
1353	while (num_buf-- > 1) {
1354		buf = virtnet_rq_get_buf(rq, &len, NULL);
1355		if (unlikely(!buf)) {
1356			pr_debug("%s: rx error: %d buffers missing\n",
1357				 dev->name, num_buf);
1358			DEV_STATS_INC(dev, rx_length_errors);
1359			break;
1360		}
1361		u64_stats_add(&stats->bytes, len);
1362		page = virt_to_head_page(buf);
1363		put_page(page);
1364	}
1365}
1366
1367/* Why not use xdp_build_skb_from_frame() ?
1368 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
1369 * virtio-net there are 2 points that do not match its requirements:
1370 *  1. The size of the prefilled buffer is not fixed before xdp is set.
1371 *  2. xdp_build_skb_from_frame() does more checks that we don't need,
1372 *     like eth_type_trans() (which virtio-net does in receive_buf()).
1373 */
1374static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
1375					       struct virtnet_info *vi,
1376					       struct xdp_buff *xdp,
1377					       unsigned int xdp_frags_truesz)
1378{
1379	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
1380	unsigned int headroom, data_len;
1381	struct sk_buff *skb;
1382	int metasize;
1383	u8 nr_frags;
1384
1385	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
1386		pr_debug("Error building skb as missing reserved tailroom for xdp");
1387		return NULL;
1388	}
1389
1390	if (unlikely(xdp_buff_has_frags(xdp)))
1391		nr_frags = sinfo->nr_frags;
1392
1393	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
1394	if (unlikely(!skb))
1395		return NULL;
1396
1397	headroom = xdp->data - xdp->data_hard_start;
1398	data_len = xdp->data_end - xdp->data;
1399	skb_reserve(skb, headroom);
1400	__skb_put(skb, data_len);
1401
1402	metasize = xdp->data - xdp->data_meta;
1403	metasize = metasize > 0 ? metasize : 0;
1404	if (metasize)
1405		skb_metadata_set(skb, metasize);
1406
1407	if (unlikely(xdp_buff_has_frags(xdp)))
1408		xdp_update_skb_shared_info(skb, nr_frags,
1409					   sinfo->xdp_frags_size,
1410					   xdp_frags_truesz,
1411					   xdp_buff_is_frag_pfmemalloc(xdp));
1412
1413	return skb;
1414}
1415
1416/* TODO: build xdp in big mode */
1417static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
1418				      struct virtnet_info *vi,
1419				      struct receive_queue *rq,
1420				      struct xdp_buff *xdp,
1421				      void *buf,
1422				      unsigned int len,
1423				      unsigned int frame_sz,
1424				      int *num_buf,
1425				      unsigned int *xdp_frags_truesize,
1426				      struct virtnet_rq_stats *stats)
1427{
1428	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1429	unsigned int headroom, tailroom, room;
1430	unsigned int truesize, cur_frag_size;
1431	struct skb_shared_info *shinfo;
1432	unsigned int xdp_frags_truesz = 0;
1433	struct page *page;
1434	skb_frag_t *frag;
1435	int offset;
1436	void *ctx;
1437
1438	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
1439	xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
1440			 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
1441
1442	if (!*num_buf)
1443		return 0;
1444
1445	if (*num_buf > 1) {
1446		/* If we want to build multi-buffer xdp, we need
1447		 * to specify that the flags of xdp_buff have the
1448		 * XDP_FLAGS_HAS_FRAG bit.
1449		 */
1450		if (!xdp_buff_has_frags(xdp))
1451			xdp_buff_set_frags_flag(xdp);
1452
1453		shinfo = xdp_get_shared_info_from_buff(xdp);
1454		shinfo->nr_frags = 0;
1455		shinfo->xdp_frags_size = 0;
1456	}
1457
1458	if (*num_buf > MAX_SKB_FRAGS + 1)
1459		return -EINVAL;
1460
1461	while (--*num_buf > 0) {
1462		buf = virtnet_rq_get_buf(rq, &len, &ctx);
1463		if (unlikely(!buf)) {
1464			pr_debug("%s: rx error: %d buffers out of %d missing\n",
1465				 dev->name, *num_buf,
1466				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
1467			DEV_STATS_INC(dev, rx_length_errors);
1468			goto err;
1469		}
1470
1471		u64_stats_add(&stats->bytes, len);
1472		page = virt_to_head_page(buf);
1473		offset = buf - page_address(page);
1474
1475		truesize = mergeable_ctx_to_truesize(ctx);
1476		headroom = mergeable_ctx_to_headroom(ctx);
1477		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1478		room = SKB_DATA_ALIGN(headroom + tailroom);
1479
1480		cur_frag_size = truesize;
1481		xdp_frags_truesz += cur_frag_size;
1482		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
1483			put_page(page);
1484			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1485				 dev->name, len, (unsigned long)(truesize - room));
1486			DEV_STATS_INC(dev, rx_length_errors);
1487			goto err;
1488		}
1489
1490		frag = &shinfo->frags[shinfo->nr_frags++];
1491		skb_frag_fill_page_desc(frag, page, offset, len);
1492		if (page_is_pfmemalloc(page))
1493			xdp_buff_set_frag_pfmemalloc(xdp);
1494
1495		shinfo->xdp_frags_size += len;
1496	}
1497
1498	*xdp_frags_truesize = xdp_frags_truesz;
1499	return 0;
1500
1501err:
1502	put_xdp_frags(xdp);
1503	return -EINVAL;
1504}
1505
1506static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
1507				   struct receive_queue *rq,
1508				   struct bpf_prog *xdp_prog,
1509				   void *ctx,
1510				   unsigned int *frame_sz,
1511				   int *num_buf,
1512				   struct page **page,
1513				   int offset,
1514				   unsigned int *len,
1515				   struct virtio_net_hdr_mrg_rxbuf *hdr)
1516{
1517	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1518	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1519	struct page *xdp_page;
1520	unsigned int xdp_room;
1521
1522	/* Transient failure which in theory could occur if
1523	 * in-flight packets from before XDP was enabled reach
1524	 * the receive path after XDP is loaded.
1525	 */
1526	if (unlikely(hdr->hdr.gso_type))
1527		return NULL;
1528
1529	/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
1530	 * with headroom may add a hole in truesize, which
1531	 * makes their length exceed PAGE_SIZE. So we disabled the
1532	 * hole mechanism for xdp. See add_recvbuf_mergeable().
1533	 */
1534	*frame_sz = truesize;
1535
1536	if (likely(headroom >= virtnet_get_headroom(vi) &&
1537		   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
1538		return page_address(*page) + offset;
1539	}
1540
1541	/* This happens when headroom is not enough because
1542	 * the buffer was prefilled before XDP was set.
1543	 * This should only happen for the first several packets.
1544	 * In fact, vq reset can be used here to help us clean up
1545	 * the prefilled buffers, but many existing devices do not
1546	 * support it, and we don't want to bother users who are
1547	 * using xdp normally.
1548	 */
1549	if (!xdp_prog->aux->xdp_has_frags) {
1550		/* linearize data for XDP */
1551		xdp_page = xdp_linearize_page(rq, num_buf,
1552					      *page, offset,
1553					      VIRTIO_XDP_HEADROOM,
1554					      len);
1555		if (!xdp_page)
1556			return NULL;
1557	} else {
1558		xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1559					  sizeof(struct skb_shared_info));
1560		if (*len + xdp_room > PAGE_SIZE)
1561			return NULL;
1562
1563		xdp_page = alloc_page(GFP_ATOMIC);
1564		if (!xdp_page)
1565			return NULL;
1566
1567		memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1568		       page_address(*page) + offset, *len);
1569	}
1570
1571	*frame_sz = PAGE_SIZE;
1572
1573	put_page(*page);
1574
1575	*page = xdp_page;
1576
1577	return page_address(*page) + VIRTIO_XDP_HEADROOM;
1578}
1579
1580static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1581					     struct virtnet_info *vi,
1582					     struct receive_queue *rq,
1583					     struct bpf_prog *xdp_prog,
1584					     void *buf,
1585					     void *ctx,
1586					     unsigned int len,
1587					     unsigned int *xdp_xmit,
1588					     struct virtnet_rq_stats *stats)
1589{
1590	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1591	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1592	struct page *page = virt_to_head_page(buf);
1593	int offset = buf - page_address(page);
1594	unsigned int xdp_frags_truesz = 0;
1595	struct sk_buff *head_skb;
1596	unsigned int frame_sz;
1597	struct xdp_buff xdp;
1598	void *data;
1599	u32 act;
1600	int err;
1601
1602	data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1603				     offset, &len, hdr);
1604	if (unlikely(!data))
1605		goto err_xdp;
1606
1607	err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1608					 &num_buf, &xdp_frags_truesz, stats);
1609	if (unlikely(err))
1610		goto err_xdp;
1611
1612	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1613
1614	switch (act) {
1615	case XDP_PASS:
1616		head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1617		if (unlikely(!head_skb))
1618			break;
1619		return head_skb;
1620
1621	case XDP_TX:
1622	case XDP_REDIRECT:
1623		return NULL;
1624
1625	default:
1626		break;
1627	}
1628
1629	put_xdp_frags(&xdp);
1630
1631err_xdp:
1632	put_page(page);
1633	mergeable_buf_free(rq, num_buf, dev, stats);
1634
1635	u64_stats_inc(&stats->xdp_drops);
1636	u64_stats_inc(&stats->drops);
1637	return NULL;
1638}
1639
1640static struct sk_buff *receive_mergeable(struct net_device *dev,
1641					 struct virtnet_info *vi,
1642					 struct receive_queue *rq,
1643					 void *buf,
1644					 void *ctx,
1645					 unsigned int len,
1646					 unsigned int *xdp_xmit,
1647					 struct virtnet_rq_stats *stats)
1648{
1649	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1650	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1651	struct page *page = virt_to_head_page(buf);
1652	int offset = buf - page_address(page);
1653	struct sk_buff *head_skb, *curr_skb;
1654	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1655	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1656	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1657	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1658
1659	head_skb = NULL;
1660	u64_stats_add(&stats->bytes, len - vi->hdr_len);
1661
1662	if (unlikely(len > truesize - room)) {
1663		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1664			 dev->name, len, (unsigned long)(truesize - room));
1665		DEV_STATS_INC(dev, rx_length_errors);
1666		goto err_skb;
1667	}
1668
1669	if (unlikely(vi->xdp_enabled)) {
1670		struct bpf_prog *xdp_prog;
1671
1672		rcu_read_lock();
1673		xdp_prog = rcu_dereference(rq->xdp_prog);
1674		if (xdp_prog) {
1675			head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1676							 len, xdp_xmit, stats);
1677			rcu_read_unlock();
1678			return head_skb;
1679		}
1680		rcu_read_unlock();
1681	}
1682
1683	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1684	curr_skb = head_skb;
1685
1686	if (unlikely(!curr_skb))
1687		goto err_skb;
1688	while (--num_buf) {
1689		int num_skb_frags;
1690
1691		buf = virtnet_rq_get_buf(rq, &len, &ctx);
1692		if (unlikely(!buf)) {
1693			pr_debug("%s: rx error: %d buffers out of %d missing\n",
1694				 dev->name, num_buf,
1695				 virtio16_to_cpu(vi->vdev,
1696						 hdr->num_buffers));
1697			DEV_STATS_INC(dev, rx_length_errors);
1698			goto err_buf;
1699		}
1700
1701		u64_stats_add(&stats->bytes, len);
1702		page = virt_to_head_page(buf);
1703
1704		truesize = mergeable_ctx_to_truesize(ctx);
1705		headroom = mergeable_ctx_to_headroom(ctx);
1706		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1707		room = SKB_DATA_ALIGN(headroom + tailroom);
1708		if (unlikely(len > truesize - room)) {
1709			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1710				 dev->name, len, (unsigned long)(truesize - room));
1711			DEV_STATS_INC(dev, rx_length_errors);
1712			goto err_skb;
1713		}
1714
1715		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
1716		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1717			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
1718
1719			if (unlikely(!nskb))
1720				goto err_skb;
1721			if (curr_skb == head_skb)
1722				skb_shinfo(curr_skb)->frag_list = nskb;
1723			else
1724				curr_skb->next = nskb;
1725			curr_skb = nskb;
1726			head_skb->truesize += nskb->truesize;
1727			num_skb_frags = 0;
1728		}
1729		if (curr_skb != head_skb) {
1730			head_skb->data_len += len;
1731			head_skb->len += len;
1732			head_skb->truesize += truesize;
1733		}
1734		offset = buf - page_address(page);
1735		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1736			put_page(page);
1737			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1738					     len, truesize);
1739		} else {
1740			skb_add_rx_frag(curr_skb, num_skb_frags, page,
1741					offset, len, truesize);
1742		}
1743	}
1744
1745	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
1746	return head_skb;
1747
1748err_skb:
1749	put_page(page);
1750	mergeable_buf_free(rq, num_buf, dev, stats);
1751
1752err_buf:
1753	u64_stats_inc(&stats->drops);
1754	dev_kfree_skb(head_skb);
1755	return NULL;
1756}
1757
1758static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
1759				struct sk_buff *skb)
1760{
1761	enum pkt_hash_types rss_hash_type;
1762
1763	if (!hdr_hash || !skb)
1764		return;
1765
1766	switch (__le16_to_cpu(hdr_hash->hash_report)) {
1767	case VIRTIO_NET_HASH_REPORT_TCPv4:
1768	case VIRTIO_NET_HASH_REPORT_UDPv4:
1769	case VIRTIO_NET_HASH_REPORT_TCPv6:
1770	case VIRTIO_NET_HASH_REPORT_UDPv6:
1771	case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
1772	case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
1773		rss_hash_type = PKT_HASH_TYPE_L4;
1774		break;
1775	case VIRTIO_NET_HASH_REPORT_IPv4:
1776	case VIRTIO_NET_HASH_REPORT_IPv6:
1777	case VIRTIO_NET_HASH_REPORT_IPv6_EX:
1778		rss_hash_type = PKT_HASH_TYPE_L3;
1779		break;
1780	case VIRTIO_NET_HASH_REPORT_NONE:
1781	default:
1782		rss_hash_type = PKT_HASH_TYPE_NONE;
1783	}
1784	skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
1785}
1786
1787static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1788			void *buf, unsigned int len, void **ctx,
1789			unsigned int *xdp_xmit,
1790			struct virtnet_rq_stats *stats)
1791{
1792	struct net_device *dev = vi->dev;
1793	struct sk_buff *skb;
1794	struct virtio_net_common_hdr *hdr;
1795
1796	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1797		pr_debug("%s: short packet %i\n", dev->name, len);
1798		DEV_STATS_INC(dev, rx_length_errors);
1799		virtnet_rq_free_buf(vi, rq, buf);
1800		return;
1801	}
1802
1803	if (vi->mergeable_rx_bufs)
1804		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1805					stats);
1806	else if (vi->big_packets)
1807		skb = receive_big(dev, vi, rq, buf, len, stats);
1808	else
1809		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1810
1811	if (unlikely(!skb))
1812		return;
1813
1814	hdr = skb_vnet_common_hdr(skb);
1815	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1816		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
1817
1818	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
1819		skb->ip_summed = CHECKSUM_UNNECESSARY;
1820
1821	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1822				  virtio_is_little_endian(vi->vdev))) {
1823		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1824				     dev->name, hdr->hdr.gso_type,
1825				     hdr->hdr.gso_size);
1826		goto frame_err;
1827	}
1828
1829	skb_record_rx_queue(skb, vq2rxq(rq->vq));
1830	skb->protocol = eth_type_trans(skb, dev);
1831	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1832		 ntohs(skb->protocol), skb->len, skb->pkt_type);
1833
1834	napi_gro_receive(&rq->napi, skb);
1835	return;
1836
1837frame_err:
1838	DEV_STATS_INC(dev, rx_frame_errors);
1839	dev_kfree_skb(skb);
1840}
1841
1842/* Unlike mergeable buffers, all buffers are allocated to the
1843 * same size, except for the headroom. For this reason we do
1844 * not need to use mergeable_len_to_ctx here - it is enough
1845 * to store the headroom as the context ignoring the truesize.
1846 */
1847static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1848			     gfp_t gfp)
1849{
1850	char *buf;
1851	unsigned int xdp_headroom = virtnet_get_headroom(vi);
1852	void *ctx = (void *)(unsigned long)xdp_headroom;
1853	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
1854	int err;
1855
1856	len = SKB_DATA_ALIGN(len) +
1857	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1858
1859	buf = virtnet_rq_alloc(rq, len, gfp);
1860	if (unlikely(!buf))
1861		return -ENOMEM;
1862
1863	virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
1864			       vi->hdr_len + GOOD_PACKET_LEN);
1865
1866	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1867	if (err < 0) {
1868		if (rq->do_dma)
1869			virtnet_rq_unmap(rq, buf, 0);
1870		put_page(virt_to_head_page(buf));
1871	}
1872
1873	return err;
1874}
1875
1876static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1877			   gfp_t gfp)
1878{
1879	struct page *first, *list = NULL;
1880	char *p;
1881	int i, err, offset;
1882
1883	sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
1884
1885	/* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
1886	for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
1887		first = get_a_page(rq, gfp);
1888		if (!first) {
1889			if (list)
1890				give_pages(rq, list);
1891			return -ENOMEM;
1892		}
1893		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
1894
1895		/* chain new page in list head to match sg */
1896		first->private = (unsigned long)list;
1897		list = first;
1898	}
1899
1900	first = get_a_page(rq, gfp);
1901	if (!first) {
1902		give_pages(rq, list);
1903		return -ENOMEM;
1904	}
1905	p = page_address(first);
1906
1907	/* rq->sg[0], rq->sg[1] share the same page */
1908	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
1909	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
1910
1911	/* rq->sg[1] for data packet, from offset */
1912	offset = sizeof(struct padded_vnet_hdr);
1913	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
1914
1915	/* chain first in list head */
1916	first->private = (unsigned long)list;
1917	err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
1918				  first, gfp);
1919	if (err < 0)
1920		give_pages(rq, first);
1921
1922	return err;
1923}
1924
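/* Pick the length of the next mergeable receive buffer. When head/tail
 * room is reserved (room != 0, i.e. an XDP program is attached) the buffer
 * spans the whole page minus that room. Otherwise the length follows the
 * EWMA of recent packet sizes, clamped to [min_buf_len, PAGE_SIZE - hdr_len]
 * and rounded up to the cache line size; for example, a 12-byte
 * virtio_net_hdr_mrg_rxbuf and a 1500-byte average give
 * ALIGN(1512, L1_CACHE_BYTES).
 */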
1925static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
1926					  struct ewma_pkt_len *avg_pkt_len,
1927					  unsigned int room)
1928{
1929	struct virtnet_info *vi = rq->vq->vdev->priv;
1930	const size_t hdr_len = vi->hdr_len;
1931	unsigned int len;
1932
1933	if (room)
1934		return PAGE_SIZE - room;
1935
1936	len = hdr_len +	clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1937				rq->min_buf_len, PAGE_SIZE - hdr_len);
1938
1939	return ALIGN(len, L1_CACHE_BYTES);
1940}
1941
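/* Post one mergeable receive buffer. The buffer is carved from the
 * queue's page_frag allocator at the EWMA-chosen length plus any XDP
 * head/tailroom; the virtqueue context packs both the buffer's truesize
 * and the headroom (via mergeable_len_to_ctx()) so the receive path can
 * recover them without touching the buffer data.
 */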
1942static int add_recvbuf_mergeable(struct virtnet_info *vi,
1943				 struct receive_queue *rq, gfp_t gfp)
1944{
1945	struct page_frag *alloc_frag = &rq->alloc_frag;
1946	unsigned int headroom = virtnet_get_headroom(vi);
1947	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1948	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1949	unsigned int len, hole;
1950	void *ctx;
1951	char *buf;
1952	int err;
1953
1954	/* Extra tailroom is needed to satisfy XDP's assumption. This
1955	 * means rx frag coalescing won't work, but since we've
1956	 * disabled GSO for XDP, it won't be a big issue.
1957	 */
1958	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1959
1960	buf = virtnet_rq_alloc(rq, len + room, gfp);
1961	if (unlikely(!buf))
1962		return -ENOMEM;
1963
1964	buf += headroom; /* advance address leaving hole at front of pkt */
1965	hole = alloc_frag->size - alloc_frag->offset;
1966	if (hole < len + room) {
1967		/* To avoid internal fragmentation, if there is very likely not
1968		 * enough space for another buffer, add the remaining space to
1969		 * the current buffer.
1970		 * XDP core assumes that frame_size of xdp_buff and the length
1971		 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
1972		 */
1973		if (!headroom)
1974			len += hole;
1975		alloc_frag->offset += hole;
1976	}
1977
1978	virtnet_rq_init_one_sg(rq, buf, len);
1979
1980	ctx = mergeable_len_to_ctx(len + room, headroom);
1981	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1982	if (err < 0) {
1983		if (rq->do_dma)
1984			virtnet_rq_unmap(rq, buf, 0);
1985		put_page(virt_to_head_page(buf));
1986	}
1987
1988	return err;
1989}
1990
1991/*
1992 * Returns false if we couldn't fill entirely (OOM).
1993 *
1994 * Normally run in the receive path, but can also be run from ndo_open
1995 * before we're receiving packets, or from refill_work which is
1996 * careful to disable receiving (using napi_disable).
1997 */
1998static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1999			  gfp_t gfp)
2000{
2001	int err;
2002	bool oom;
2003
2004	do {
2005		if (vi->mergeable_rx_bufs)
2006			err = add_recvbuf_mergeable(vi, rq, gfp);
2007		else if (vi->big_packets)
2008			err = add_recvbuf_big(vi, rq, gfp);
2009		else
2010			err = add_recvbuf_small(vi, rq, gfp);
2011
2012		oom = err == -ENOMEM;
2013		if (err)
2014			break;
2015	} while (rq->vq->num_free);
2016	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2017		unsigned long flags;
2018
2019		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2020		u64_stats_inc(&rq->stats.kicks);
2021		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2022	}
2023
2024	return !oom;
2025}
2026
2027static void skb_recv_done(struct virtqueue *rvq)
2028{
2029	struct virtnet_info *vi = rvq->vdev->priv;
2030	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2031
2032	rq->calls++;
2033	virtqueue_napi_schedule(&rq->napi, rvq);
2034}
2035
2036static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
2037{
2038	napi_enable(napi);
2039
2040	/* If all buffers were filled by the other side before we enabled NAPI,
2041	 * we won't get another interrupt, so process any outstanding packets now.
2042	 * Calling local_bh_enable afterwards triggers softIRQ processing.
2043	 */
2044	local_bh_disable();
2045	virtqueue_napi_schedule(napi, vq);
2046	local_bh_enable();
2047}
2048
2049static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2050				   struct virtqueue *vq,
2051				   struct napi_struct *napi)
2052{
2053	if (!napi->weight)
2054		return;
2055
2056	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2057	 * enable the feature if this is likely affine with the transmit path.
2058	 */
2059	if (!vi->affinity_hint_set) {
2060		napi->weight = 0;
2061		return;
2062	}
2063
2064	return virtnet_napi_enable(vq, napi);
2065}
2066
2067static void virtnet_napi_tx_disable(struct napi_struct *napi)
2068{
2069	if (napi->weight)
2070		napi_disable(napi);
2071}
2072
2073static void refill_work(struct work_struct *work)
2074{
2075	struct virtnet_info *vi =
2076		container_of(work, struct virtnet_info, refill.work);
2077	bool still_empty;
2078	int i;
2079
2080	for (i = 0; i < vi->curr_queue_pairs; i++) {
2081		struct receive_queue *rq = &vi->rq[i];
2082
2083		napi_disable(&rq->napi);
2084		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2085		virtnet_napi_enable(rq->vq, &rq->napi);
2086
2087		/* In theory, this can happen: if we don't get any buffers in,
2088		 * we will *never* try to fill again.
2089		 */
2090		if (still_empty)
2091			schedule_delayed_work(&vi->refill, HZ/2);
2092	}
2093}
2094
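/* NAPI receive worker: pull up to budget completed buffers off the
 * virtqueue and feed them to receive_buf(). If more than half of
 * min(budget, ring size) descriptors are free, attempt an atomic refill
 * and fall back to the refill workqueue on failure; the per-poll counters
 * are then folded into the queue's u64_stats under syncp.
 */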
2095static int virtnet_receive(struct receive_queue *rq, int budget,
2096			   unsigned int *xdp_xmit)
2097{
2098	struct virtnet_info *vi = rq->vq->vdev->priv;
2099	struct virtnet_rq_stats stats = {};
2100	unsigned int len;
2101	int packets = 0;
2102	void *buf;
2103	int i;
2104
2105	if (!vi->big_packets || vi->mergeable_rx_bufs) {
2106		void *ctx;
2107
2108		while (packets < budget &&
2109		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2110			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
2111			packets++;
2112		}
2113	} else {
2114		while (packets < budget &&
2115		       (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
2116			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
2117			packets++;
2118		}
2119	}
2120
2121	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
2122		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2123			spin_lock(&vi->refill_lock);
2124			if (vi->refill_enabled)
2125				schedule_delayed_work(&vi->refill, 0);
2126			spin_unlock(&vi->refill_lock);
2127		}
2128	}
2129
2130	u64_stats_set(&stats.packets, packets);
2131	u64_stats_update_begin(&rq->stats.syncp);
2132	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
2133		size_t offset = virtnet_rq_stats_desc[i].offset;
2134		u64_stats_t *item, *src;
2135
2136		item = (u64_stats_t *)((u8 *)&rq->stats + offset);
2137		src = (u64_stats_t *)((u8 *)&stats + offset);
2138		u64_stats_add(item, u64_stats_read(src));
2139	}
2140	u64_stats_update_end(&rq->stats.syncp);
2141
2142	return packets;
2143}
2144
2145static void virtnet_poll_cleantx(struct receive_queue *rq)
2146{
2147	struct virtnet_info *vi = rq->vq->vdev->priv;
2148	unsigned int index = vq2rxq(rq->vq);
2149	struct send_queue *sq = &vi->sq[index];
2150	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
2151
2152	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2153		return;
2154
2155	if (__netif_tx_trylock(txq)) {
2156		if (sq->reset) {
2157			__netif_tx_unlock(txq);
2158			return;
2159		}
2160
2161		do {
2162			virtqueue_disable_cb(sq->vq);
2163			free_old_xmit_skbs(sq, true);
2164		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2165
2166		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2167			netif_tx_wake_queue(txq);
2168
2169		__netif_tx_unlock(txq);
2170	}
2171}
2172
2173static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
2174{
2175	struct dim_sample cur_sample = {};
2176
2177	if (!rq->packets_in_napi)
2178		return;
2179
2180	u64_stats_update_begin(&rq->stats.syncp);
2181	dim_update_sample(rq->calls,
2182			  u64_stats_read(&rq->stats.packets),
2183			  u64_stats_read(&rq->stats.bytes),
2184			  &cur_sample);
2185	u64_stats_update_end(&rq->stats.syncp);
2186
2187	net_dim(&rq->dim, cur_sample);
2188	rq->packets_in_napi = 0;
2189}
2190
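/* RX NAPI poll: first reclaim completed TX descriptors of the paired send
 * queue, then receive up to budget packets. XDP_REDIRECT traffic is
 * flushed once per poll, NAPI is completed (and DIM fed a fresh sample)
 * only when the budget was not exhausted, and a single kick covers any
 * XDP_TX frames queued during the poll.
 */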
2191static int virtnet_poll(struct napi_struct *napi, int budget)
2192{
2193	struct receive_queue *rq =
2194		container_of(napi, struct receive_queue, napi);
2195	struct virtnet_info *vi = rq->vq->vdev->priv;
2196	struct send_queue *sq;
2197	unsigned int received;
2198	unsigned int xdp_xmit = 0;
2199	bool napi_complete;
2200
2201	virtnet_poll_cleantx(rq);
2202
2203	received = virtnet_receive(rq, budget, &xdp_xmit);
2204	rq->packets_in_napi += received;
2205
2206	if (xdp_xmit & VIRTIO_XDP_REDIR)
2207		xdp_do_flush();
2208
2209	/* Out of packets? */
2210	if (received < budget) {
2211		napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
2212		if (napi_complete && rq->dim_enabled)
2213			virtnet_rx_dim_update(vi, rq);
2214	}
2215
2216	if (xdp_xmit & VIRTIO_XDP_TX) {
2217		sq = virtnet_xdp_get_sq(vi);
2218		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2219			u64_stats_update_begin(&sq->stats.syncp);
2220			u64_stats_inc(&sq->stats.kicks);
2221			u64_stats_update_end(&sq->stats.syncp);
2222		}
2223		virtnet_xdp_put_sq(vi, sq);
2224	}
2225
2226	return received;
2227}
2228
2229static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
2230{
2231	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
2232	napi_disable(&vi->rq[qp_index].napi);
2233	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2234}
2235
2236static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
2237{
2238	struct net_device *dev = vi->dev;
2239	int err;
2240
2241	err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
2242			       vi->rq[qp_index].napi.napi_id);
2243	if (err < 0)
2244		return err;
2245
2246	err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
2247					 MEM_TYPE_PAGE_SHARED, NULL);
2248	if (err < 0)
2249		goto err_xdp_reg_mem_model;
2250
2251	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
2252	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
2253
2254	return 0;
2255
2256err_xdp_reg_mem_model:
2257	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2258	return err;
2259}
2260
2261static int virtnet_open(struct net_device *dev)
2262{
2263	struct virtnet_info *vi = netdev_priv(dev);
2264	int i, err;
2265
2266	enable_delayed_refill(vi);
2267
2268	for (i = 0; i < vi->max_queue_pairs; i++) {
2269		if (i < vi->curr_queue_pairs)
2270			/* Make sure we have some buffers: if OOM, use the workqueue. */
2271			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2272				schedule_delayed_work(&vi->refill, 0);
2273
2274		err = virtnet_enable_queue_pair(vi, i);
2275		if (err < 0)
2276			goto err_enable_qp;
2277	}
2278
2279	return 0;
2280
2281err_enable_qp:
2282	disable_delayed_refill(vi);
2283	cancel_delayed_work_sync(&vi->refill);
2284
2285	for (i--; i >= 0; i--) {
2286		virtnet_disable_queue_pair(vi, i);
2287		cancel_work_sync(&vi->rq[i].dim.work);
2288	}
2289
2290	return err;
2291}
2292
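/* TX NAPI poll. Completed skbs are freed under the tx queue lock with
 * callbacks disabled; the callback is then re-armed with
 * virtqueue_enable_cb_prepare()/virtqueue_poll() so a completion racing
 * with napi_complete_done() reschedules NAPI instead of being lost.
 * XDP queues never enable callbacks and complete immediately.
 */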
2293static int virtnet_poll_tx(struct napi_struct *napi, int budget)
2294{
2295	struct send_queue *sq = container_of(napi, struct send_queue, napi);
2296	struct virtnet_info *vi = sq->vq->vdev->priv;
2297	unsigned int index = vq2txq(sq->vq);
2298	struct netdev_queue *txq;
2299	int opaque;
2300	bool done;
2301
2302	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2303		/* We don't need to enable cb for XDP */
2304		napi_complete_done(napi, 0);
2305		return 0;
2306	}
2307
2308	txq = netdev_get_tx_queue(vi->dev, index);
2309	__netif_tx_lock(txq, raw_smp_processor_id());
2310	virtqueue_disable_cb(sq->vq);
2311	free_old_xmit_skbs(sq, true);
2312
2313	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2314		netif_tx_wake_queue(txq);
2315
2316	opaque = virtqueue_enable_cb_prepare(sq->vq);
2317
2318	done = napi_complete_done(napi, 0);
2319
2320	if (!done)
2321		virtqueue_disable_cb(sq->vq);
2322
2323	__netif_tx_unlock(txq);
2324
2325	if (done) {
2326		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
2327			if (napi_schedule_prep(napi)) {
2328				__netif_tx_lock(txq, raw_smp_processor_id());
2329				virtqueue_disable_cb(sq->vq);
2330				__netif_tx_unlock(txq);
2331				__napi_schedule(napi);
2332			}
2333		}
2334	}
2335
2336	return 0;
2337}
2338
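/* Build the scatterlist for one outgoing skb. If the device accepts the
 * header anywhere (any_header_sg) and the skb has suitably aligned,
 * uncloned headroom, the virtio-net header is pushed in front of the
 * packet so header and data share sg entries; otherwise the header lives
 * in the skb's cb area and gets its own sg[0].
 */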
2339static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2340{
2341	struct virtio_net_hdr_mrg_rxbuf *hdr;
2342	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
2343	struct virtnet_info *vi = sq->vq->vdev->priv;
2344	int num_sg;
2345	unsigned hdr_len = vi->hdr_len;
2346	bool can_push;
2347
2348	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2349
2350	can_push = vi->any_header_sg &&
2351		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
2352		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
2353	/* Even if we can, don't push here yet as this would skew
2354	 * csum_start offset below. */
2355	if (can_push)
2356		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
2357	else
2358		hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
2359
2360	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
2361				    virtio_is_little_endian(vi->vdev), false,
2362				    0))
2363		return -EPROTO;
2364
2365	if (vi->mergeable_rx_bufs)
2366		hdr->num_buffers = 0;
2367
2368	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2369	if (can_push) {
2370		__skb_push(skb, hdr_len);
2371		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2372		if (unlikely(num_sg < 0))
2373			return num_sg;
2374		/* Pull header back to avoid skew in tx bytes calculations. */
2375		__skb_pull(skb, hdr_len);
2376	} else {
2377		sg_set_buf(sq->sg, hdr, hdr_len);
2378		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2379		if (unlikely(num_sg < 0))
2380			return num_sg;
2381		num_sg++;
2382	}
2383	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
2384}
2385
2386static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
2387{
2388	struct virtnet_info *vi = netdev_priv(dev);
2389	int qnum = skb_get_queue_mapping(skb);
2390	struct send_queue *sq = &vi->sq[qnum];
2391	int err;
2392	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
2393	bool kick = !netdev_xmit_more();
2394	bool use_napi = sq->napi.weight;
2395
2396	/* Free up any pending old buffers before queueing new ones. */
2397	do {
2398		if (use_napi)
2399			virtqueue_disable_cb(sq->vq);
2400
2401		free_old_xmit_skbs(sq, false);
2402
2403	} while (use_napi && kick &&
2404	       unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2405
2406	/* timestamp packet in software */
2407	skb_tx_timestamp(skb);
2408
2409	/* Try to transmit */
2410	err = xmit_skb(sq, skb);
2411
2412	/* This should not happen! */
2413	if (unlikely(err)) {
2414		DEV_STATS_INC(dev, tx_fifo_errors);
2415		if (net_ratelimit())
2416			dev_warn(&dev->dev,
2417				 "Unexpected TXQ (%d) queue failure: %d\n",
2418				 qnum, err);
2419		DEV_STATS_INC(dev, tx_dropped);
2420		dev_kfree_skb_any(skb);
2421		return NETDEV_TX_OK;
2422	}
2423
2424	/* Don't wait up for transmitted skbs to be freed. */
2425	if (!use_napi) {
2426		skb_orphan(skb);
2427		nf_reset_ct(skb);
2428	}
2429
2430	check_sq_full_and_disable(vi, dev, sq);
2431
2432	if (kick || netif_xmit_stopped(txq)) {
2433		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2434			u64_stats_update_begin(&sq->stats.syncp);
2435			u64_stats_inc(&sq->stats.kicks);
2436			u64_stats_update_end(&sq->stats.syncp);
2437		}
2438	}
2439
2440	return NETDEV_TX_OK;
2441}
2442
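/* Resize the RX ring. The queue's NAPI (and pending DIM work) is stopped
 * around virtqueue_resize(); buffers released by the resize go through
 * virtnet_rq_unmap_free_buf(), and the ring is refilled (or the refill
 * work scheduled) before NAPI is re-enabled.
 */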
2443static int virtnet_rx_resize(struct virtnet_info *vi,
2444			     struct receive_queue *rq, u32 ring_num)
2445{
2446	bool running = netif_running(vi->dev);
2447	int err, qindex;
2448
2449	qindex = rq - vi->rq;
2450
2451	if (running) {
2452		napi_disable(&rq->napi);
2453		cancel_work_sync(&rq->dim.work);
2454	}
2455
2456	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
2457	if (err)
2458		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
2459
2460	if (!try_fill_recv(vi, rq, GFP_KERNEL))
2461		schedule_delayed_work(&vi->refill, 0);
2462
2463	if (running)
2464		virtnet_napi_enable(rq->vq, &rq->napi);
2465	return err;
2466}
2467
2468static int virtnet_tx_resize(struct virtnet_info *vi,
2469			     struct send_queue *sq, u32 ring_num)
2470{
2471	bool running = netif_running(vi->dev);
2472	struct netdev_queue *txq;
2473	int err, qindex;
2474
2475	qindex = sq - vi->sq;
2476
2477	if (running)
2478		virtnet_napi_tx_disable(&sq->napi);
2479
2480	txq = netdev_get_tx_queue(vi->dev, qindex);
2481
2482	/* 1. wait for all in-flight xmit to complete
2483	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
2484	 */
2485	__netif_tx_lock_bh(txq);
2486
2487	/* Prevent rx poll from accessing sq. */
2488	sq->reset = true;
2489
2490	/* Prevent the upper layer from trying to send packets. */
2491	netif_stop_subqueue(vi->dev, qindex);
2492
2493	__netif_tx_unlock_bh(txq);
2494
2495	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2496	if (err)
2497		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
2498
2499	__netif_tx_lock_bh(txq);
2500	sq->reset = false;
2501	netif_tx_wake_queue(txq);
2502	__netif_tx_unlock_bh(txq);
2503
2504	if (running)
2505		virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2506	return err;
2507}
2508
2509/*
2510 * Send command via the control virtqueue and check status.  Commands
2511 * supported by the hypervisor, as indicated by feature bits, should
2512 * never fail unless improperly formatted.
2513 */
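/* Typical usage (sketch, mirroring _virtnet_set_queues() below):
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
 *	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
 *				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg))
 *		return -EINVAL;
 *
 * The request on the control virtqueue is always laid out as
 * [class/cmd header][optional out sg][status byte written by the device].
 */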
2514static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2515				 struct scatterlist *out)
2516{
2517	struct scatterlist *sgs[4], hdr, stat;
2518	unsigned out_num = 0, tmp;
2519	int ret;
2520
2521	/* Caller should know better */
2522	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
2523
2524	vi->ctrl->status = ~0;
2525	vi->ctrl->hdr.class = class;
2526	vi->ctrl->hdr.cmd = cmd;
2527	/* Add header */
2528	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2529	sgs[out_num++] = &hdr;
2530
2531	if (out)
2532		sgs[out_num++] = out;
2533
2534	/* Add return status. */
2535	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2536	sgs[out_num] = &stat;
2537
2538	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
2539	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
2540	if (ret < 0) {
2541		dev_warn(&vi->vdev->dev,
2542			 "Failed to add sgs for command vq: %d\n.", ret);
2543		return false;
2544	}
2545
2546	if (unlikely(!virtqueue_kick(vi->cvq)))
2547		return vi->ctrl->status == VIRTIO_NET_OK;
2548
2549	/* Spin for a response; the kick causes an ioport write, trapping
2550	 * into the hypervisor, so the request should be handled immediately.
2551	 */
2552	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2553	       !virtqueue_is_broken(vi->cvq))
2554		cpu_relax();
2555
2556	return vi->ctrl->status == VIRTIO_NET_OK;
2557}
2558
2559static int virtnet_set_mac_address(struct net_device *dev, void *p)
2560{
2561	struct virtnet_info *vi = netdev_priv(dev);
2562	struct virtio_device *vdev = vi->vdev;
2563	int ret;
2564	struct sockaddr *addr;
2565	struct scatterlist sg;
2566
2567	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2568		return -EOPNOTSUPP;
2569
2570	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
2571	if (!addr)
2572		return -ENOMEM;
2573
2574	ret = eth_prepare_mac_addr_change(dev, addr);
2575	if (ret)
2576		goto out;
2577
2578	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
2579		sg_init_one(&sg, addr->sa_data, dev->addr_len);
2580		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2581					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
2582			dev_warn(&vdev->dev,
2583				 "Failed to set mac address by vq command.\n");
2584			ret = -EINVAL;
2585			goto out;
2586		}
2587	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
2588		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2589		unsigned int i;
2590
2591		/* Naturally, this has an atomicity problem. */
2592		for (i = 0; i < dev->addr_len; i++)
2593			virtio_cwrite8(vdev,
2594				       offsetof(struct virtio_net_config, mac) +
2595				       i, addr->sa_data[i]);
2596	}
2597
2598	eth_commit_mac_addr_change(dev, p);
2599	ret = 0;
2600
2601out:
2602	kfree(addr);
2603	return ret;
2604}
2605
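/* ndo_get_stats64: sum the per-queue counters. Each queue is read inside
 * a u64_stats_fetch_begin()/u64_stats_fetch_retry() loop so a concurrent
 * writer cannot leave us with a torn 64-bit value on 32-bit machines.
 */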
2606static void virtnet_stats(struct net_device *dev,
2607			  struct rtnl_link_stats64 *tot)
2608{
2609	struct virtnet_info *vi = netdev_priv(dev);
2610	unsigned int start;
2611	int i;
2612
2613	for (i = 0; i < vi->max_queue_pairs; i++) {
2614		u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
2615		struct receive_queue *rq = &vi->rq[i];
2616		struct send_queue *sq = &vi->sq[i];
2617
2618		do {
2619			start = u64_stats_fetch_begin(&sq->stats.syncp);
2620			tpackets = u64_stats_read(&sq->stats.packets);
2621			tbytes   = u64_stats_read(&sq->stats.bytes);
2622			terrors  = u64_stats_read(&sq->stats.tx_timeouts);
2623		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
2624
2625		do {
2626			start = u64_stats_fetch_begin(&rq->stats.syncp);
2627			rpackets = u64_stats_read(&rq->stats.packets);
2628			rbytes   = u64_stats_read(&rq->stats.bytes);
2629			rdrops   = u64_stats_read(&rq->stats.drops);
2630		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
2631
2632		tot->rx_packets += rpackets;
2633		tot->tx_packets += tpackets;
2634		tot->rx_bytes   += rbytes;
2635		tot->tx_bytes   += tbytes;
2636		tot->rx_dropped += rdrops;
2637		tot->tx_errors  += terrors;
2638	}
2639
2640	tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
2641	tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
2642	tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
2643	tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
2644}
2645
2646static void virtnet_ack_link_announce(struct virtnet_info *vi)
2647{
2648	rtnl_lock();
2649	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2650				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
2651		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2652	rtnl_unlock();
2653}
2654
2655static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2656{
2657	struct scatterlist sg;
2658	struct net_device *dev = vi->dev;
2659
2660	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2661		return 0;
2662
2663	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
2664	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
2665
2666	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2667				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2668		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
2669			 queue_pairs);
2670		return -EINVAL;
2671	} else {
2672		vi->curr_queue_pairs = queue_pairs;
2673		/* virtnet_open() will refill when the device is brought up. */
2674		if (dev->flags & IFF_UP)
2675			schedule_delayed_work(&vi->refill, 0);
2676	}
2677
2678	return 0;
2679}
2680
2681static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2682{
2683	int err;
2684
2685	rtnl_lock();
2686	err = _virtnet_set_queues(vi, queue_pairs);
2687	rtnl_unlock();
2688	return err;
2689}
2690
2691static int virtnet_close(struct net_device *dev)
2692{
2693	struct virtnet_info *vi = netdev_priv(dev);
2694	int i;
2695
2696	/* Make sure NAPI doesn't schedule refill work */
2697	disable_delayed_refill(vi);
2698	/* Make sure refill_work doesn't re-enable napi! */
2699	cancel_delayed_work_sync(&vi->refill);
2700
2701	for (i = 0; i < vi->max_queue_pairs; i++) {
2702		virtnet_disable_queue_pair(vi, i);
2703		cancel_work_sync(&vi->rq[i].dim.work);
2704	}
2705
2706	return 0;
2707}
2708
2709static void virtnet_set_rx_mode(struct net_device *dev)
2710{
2711	struct virtnet_info *vi = netdev_priv(dev);
2712	struct scatterlist sg[2];
2713	struct virtio_net_ctrl_mac *mac_data;
2714	struct netdev_hw_addr *ha;
2715	int uc_count;
2716	int mc_count;
2717	void *buf;
2718	int i;
2719
2720	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
2721	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
2722		return;
2723
2724	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
2725	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2726
2727	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
2728
2729	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2730				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
2731		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
2732			 vi->ctrl->promisc ? "en" : "dis");
2733
2734	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
2735
2736	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2737				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2738		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
2739			 vi->ctrl->allmulti ? "en" : "dis");
2740
2741	uc_count = netdev_uc_count(dev);
2742	mc_count = netdev_mc_count(dev);
2743	/* MAC filter - use one buffer for both lists */
2744	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2745		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
2746	mac_data = buf;
2747	if (!buf)
2748		return;
2749
2750	sg_init_table(sg, 2);
2751
2752	/* Store the unicast list and count in the front of the buffer */
2753	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2754	i = 0;
2755	netdev_for_each_uc_addr(ha, dev)
2756		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2757
2758	sg_set_buf(&sg[0], mac_data,
2759		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2760
2761	/* multicast list and count fill the end */
2762	mac_data = (void *)&mac_data->macs[uc_count][0];
2763
2764	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2765	i = 0;
2766	netdev_for_each_mc_addr(ha, dev)
2767		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2768
2769	sg_set_buf(&sg[1], mac_data,
2770		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2771
2772	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2773				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
2774		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2775
2776	kfree(buf);
2777}
2778
2779static int virtnet_vlan_rx_add_vid(struct net_device *dev,
2780				   __be16 proto, u16 vid)
2781{
2782	struct virtnet_info *vi = netdev_priv(dev);
2783	struct scatterlist sg;
2784
2785	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2786	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2787
2788	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2789				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
2790		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
2791	return 0;
2792}
2793
2794static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
2795				    __be16 proto, u16 vid)
2796{
2797	struct virtnet_info *vi = netdev_priv(dev);
2798	struct scatterlist sg;
2799
2800	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2801	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2802
2803	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2804				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
2805		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
2806	return 0;
2807}
2808
2809static void virtnet_clean_affinity(struct virtnet_info *vi)
2810{
2811	int i;
2812
2813	if (vi->affinity_hint_set) {
2814		for (i = 0; i < vi->max_queue_pairs; i++) {
2815			virtqueue_set_affinity(vi->rq[i].vq, NULL);
2816			virtqueue_set_affinity(vi->sq[i].vq, NULL);
2817		}
2818
2819		vi->affinity_hint_set = false;
2820	}
2821}
2822
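/* Spread queue pairs across the online CPUs: each pair gets a contiguous
 * group of roughly num_cpu / curr_queue_pairs CPUs, with the remainder
 * handed out one extra CPU per leading queue (e.g. 8 CPUs and 3 queue
 * pairs give groups of 3, 3 and 2). The mask is used both as the
 * virtqueue affinity hint and as the XPS map for the queue.
 */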
2823static void virtnet_set_affinity(struct virtnet_info *vi)
2824{
2825	cpumask_var_t mask;
2826	int stragglers;
2827	int group_size;
2828	int i, j, cpu;
2829	int num_cpu;
2830	int stride;
2831
2832	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2833		virtnet_clean_affinity(vi);
2834		return;
2835	}
2836
2837	num_cpu = num_online_cpus();
2838	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2839	stragglers = num_cpu >= vi->curr_queue_pairs ?
2840			num_cpu % vi->curr_queue_pairs :
2841			0;
2842	cpu = cpumask_first(cpu_online_mask);
2843
2844	for (i = 0; i < vi->curr_queue_pairs; i++) {
2845		group_size = stride + (i < stragglers ? 1 : 0);
2846
2847		for (j = 0; j < group_size; j++) {
2848			cpumask_set_cpu(cpu, mask);
2849			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
2850						nr_cpu_ids, false);
2851		}
2852		virtqueue_set_affinity(vi->rq[i].vq, mask);
2853		virtqueue_set_affinity(vi->sq[i].vq, mask);
2854		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2855		cpumask_clear(mask);
2856	}
2857
2858	vi->affinity_hint_set = true;
2859	free_cpumask_var(mask);
2860}
2861
2862static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
2863{
2864	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2865						   node);
2866	virtnet_set_affinity(vi);
2867	return 0;
2868}
2869
2870static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
2871{
2872	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2873						   node_dead);
2874	virtnet_set_affinity(vi);
2875	return 0;
2876}
2877
2878static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
2879{
2880	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2881						   node);
2882
2883	virtnet_clean_affinity(vi);
2884	return 0;
2885}
2886
2887static enum cpuhp_state virtionet_online;
2888
2889static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2890{
2891	int ret;
2892
2893	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2894	if (ret)
2895		return ret;
2896	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2897					       &vi->node_dead);
2898	if (!ret)
2899		return ret;
2900	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2901	return ret;
2902}
2903
2904static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2905{
2906	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2907	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2908					    &vi->node_dead);
2909}
2910
2911static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2912					 u16 vqn, u32 max_usecs, u32 max_packets)
2913{
2914	struct scatterlist sgs;
2915
2916	vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
2917	vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
2918	vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
2919	sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
2920
2921	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
2922				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
2923				  &sgs))
2924		return -EINVAL;
2925
2926	return 0;
2927}
2928
2929static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2930					    u16 queue, u32 max_usecs,
2931					    u32 max_packets)
2932{
2933	int err;
2934
2935	err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
2936					    max_usecs, max_packets);
2937	if (err)
2938		return err;
2939
2940	vi->rq[queue].intr_coal.max_usecs = max_usecs;
2941	vi->rq[queue].intr_coal.max_packets = max_packets;
2942
2943	return 0;
2944}
2945
2946static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2947					    u16 queue, u32 max_usecs,
2948					    u32 max_packets)
2949{
2950	int err;
2951
2952	err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
2953					    max_usecs, max_packets);
2954	if (err)
2955		return err;
2956
2957	vi->sq[queue].intr_coal.max_usecs = max_usecs;
2958	vi->sq[queue].intr_coal.max_packets = max_packets;
2959
2960	return 0;
2961}
2962
2963static void virtnet_get_ringparam(struct net_device *dev,
2964				  struct ethtool_ringparam *ring,
2965				  struct kernel_ethtool_ringparam *kernel_ring,
2966				  struct netlink_ext_ack *extack)
2967{
2968	struct virtnet_info *vi = netdev_priv(dev);
2969
2970	ring->rx_max_pending = vi->rq[0].vq->num_max;
2971	ring->tx_max_pending = vi->sq[0].vq->num_max;
2972	ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2973	ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2974}
2975
2976static int virtnet_set_ringparam(struct net_device *dev,
2977				 struct ethtool_ringparam *ring,
2978				 struct kernel_ethtool_ringparam *kernel_ring,
2979				 struct netlink_ext_ack *extack)
2980{
2981	struct virtnet_info *vi = netdev_priv(dev);
2982	u32 rx_pending, tx_pending;
2983	struct receive_queue *rq;
2984	struct send_queue *sq;
2985	int i, err;
2986
2987	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2988		return -EINVAL;
2989
2990	rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2991	tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2992
2993	if (ring->rx_pending == rx_pending &&
2994	    ring->tx_pending == tx_pending)
2995		return 0;
2996
2997	if (ring->rx_pending > vi->rq[0].vq->num_max)
2998		return -EINVAL;
2999
3000	if (ring->tx_pending > vi->sq[0].vq->num_max)
3001		return -EINVAL;
3002
3003	for (i = 0; i < vi->max_queue_pairs; i++) {
3004		rq = vi->rq + i;
3005		sq = vi->sq + i;
3006
3007		if (ring->tx_pending != tx_pending) {
3008			err = virtnet_tx_resize(vi, sq, ring->tx_pending);
3009			if (err)
3010				return err;
3011
3012			/* Upon disabling and re-enabling a transmit virtqueue, the device must
3013			 * set the coalescing parameters of the virtqueue to those configured
3014			 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
3015			 * did not set any TX coalescing parameters, to 0.
3016			 */
3017			err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
3018							       vi->intr_coal_tx.max_usecs,
3019							       vi->intr_coal_tx.max_packets);
3020			if (err)
3021				return err;
3022		}
3023
3024		if (ring->rx_pending != rx_pending) {
3025			err = virtnet_rx_resize(vi, rq, ring->rx_pending);
3026			if (err)
3027				return err;
3028
3029			/* The reason is the same as for the transmit virtqueue reset */
3030			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
3031							       vi->intr_coal_rx.max_usecs,
3032							       vi->intr_coal_rx.max_packets);
3033			if (err)
3034				return err;
3035		}
3036	}
3037
3038	return 0;
3039}
3040
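/* Push the current RSS/hash configuration to the device. The command
 * payload is split over four scatterlist entries that together cover
 * struct virtio_net_ctrl_rss: the leading fields, the indirection table,
 * the max_tx_vq/key-length fields, and finally the hash key.
 */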
3041static bool virtnet_commit_rss_command(struct virtnet_info *vi)
3042{
3043	struct net_device *dev = vi->dev;
3044	struct scatterlist sgs[4];
3045	unsigned int sg_buf_size;
3046
3047	/* prepare sgs */
3048	sg_init_table(sgs, 4);
3049
3050	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
3051	sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
3052
3053	sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
3054	sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
3055
3056	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
3057			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
3058	sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
3059
3060	sg_buf_size = vi->rss_key_size;
3061	sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
3062
3063	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
3064				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
3065				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
3066		dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
3067		return false;
3068	}
3069	return true;
3070}
3071
3072static void virtnet_init_default_rss(struct virtnet_info *vi)
3073{
3074	u32 indir_val = 0;
3075	int i = 0;
3076
3077	vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
3078	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
3079	vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
3080						? vi->rss_indir_table_size - 1 : 0;
3081	vi->ctrl->rss.unclassified_queue = 0;
3082
3083	for (; i < vi->rss_indir_table_size; ++i) {
3084		indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
3085		vi->ctrl->rss.indirection_table[i] = indir_val;
3086	}
3087
3088	vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
3089	vi->ctrl->rss.hash_key_length = vi->rss_key_size;
3090
3091	netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
3092}
3093
3094static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
3095{
3096	info->data = 0;
3097	switch (info->flow_type) {
3098	case TCP_V4_FLOW:
3099		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
3100			info->data = RXH_IP_SRC | RXH_IP_DST |
3101						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3102		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3103			info->data = RXH_IP_SRC | RXH_IP_DST;
3104		}
3105		break;
3106	case TCP_V6_FLOW:
3107		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
3108			info->data = RXH_IP_SRC | RXH_IP_DST |
3109						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3110		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3111			info->data = RXH_IP_SRC | RXH_IP_DST;
3112		}
3113		break;
3114	case UDP_V4_FLOW:
3115		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
3116			info->data = RXH_IP_SRC | RXH_IP_DST |
3117						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3118		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3119			info->data = RXH_IP_SRC | RXH_IP_DST;
3120		}
3121		break;
3122	case UDP_V6_FLOW:
3123		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
3124			info->data = RXH_IP_SRC | RXH_IP_DST |
3125						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3126		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3127			info->data = RXH_IP_SRC | RXH_IP_DST;
3128		}
3129		break;
3130	case IPV4_FLOW:
3131		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
3132			info->data = RXH_IP_SRC | RXH_IP_DST;
3133
3134		break;
3135	case IPV6_FLOW:
3136		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
3137			info->data = RXH_IP_SRC | RXH_IP_DST;
3138
3139		break;
3140	default:
3141		info->data = 0;
3142		break;
3143	}
3144}
3145
3146static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
3147{
3148	u32 new_hashtypes = vi->rss_hash_types_saved;
3149	bool is_disable = info->data & RXH_DISCARD;
3150	bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
3151
3152	/* supports only 'sd' (IP src/dst), 'sdfn' (+ L4 ports) and 'r' (discard) */
3153	if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
3154		return false;
3155
3156	switch (info->flow_type) {
3157	case TCP_V4_FLOW:
3158		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
3159		if (!is_disable)
3160			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3161				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
3162		break;
3163	case UDP_V4_FLOW:
3164		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
3165		if (!is_disable)
3166			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3167				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
3168		break;
3169	case IPV4_FLOW:
3170		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3171		if (!is_disable)
3172			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3173		break;
3174	case TCP_V6_FLOW:
3175		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
3176		if (!is_disable)
3177			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3178				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
3179		break;
3180	case UDP_V6_FLOW:
3181		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
3182		if (!is_disable)
3183			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3184				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
3185		break;
3186	case IPV6_FLOW:
3187		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3188		if (!is_disable)
3189			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3190		break;
3191	default:
3192		/* unsupported flow */
3193		return false;
3194	}
3195
3196	/* reject if any requested hash type is not supported by the device */
3197	if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
3198		return false;
3199
3200	if (new_hashtypes != vi->rss_hash_types_saved) {
3201		vi->rss_hash_types_saved = new_hashtypes;
3202		vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3203		if (vi->dev->features & NETIF_F_RXHASH)
3204			return virtnet_commit_rss_command(vi);
3205	}
3206
3207	return true;
3208}
3209
3210static void virtnet_get_drvinfo(struct net_device *dev,
3211				struct ethtool_drvinfo *info)
3212{
3213	struct virtnet_info *vi = netdev_priv(dev);
3214	struct virtio_device *vdev = vi->vdev;
3215
3216	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
3217	strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
3218	strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
3219
3220}
3221
3222/* TODO: Eliminate OOO packets during switching */
3223static int virtnet_set_channels(struct net_device *dev,
3224				struct ethtool_channels *channels)
3225{
3226	struct virtnet_info *vi = netdev_priv(dev);
3227	u16 queue_pairs = channels->combined_count;
3228	int err;
3229
3230	/* We don't support separate rx/tx channels.
3231	 * We don't allow setting 'other' channels.
3232	 */
3233	if (channels->rx_count || channels->tx_count || channels->other_count)
3234		return -EINVAL;
3235
3236	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
3237		return -EINVAL;
3238
3239	/* For now we don't support modifying channels while XDP is loaded.
3240	 * Also, when XDP is loaded all RX queues have XDP programs, so we only
3241	 * need to check a single RX queue.
3242	 */
3243	if (vi->rq[0].xdp_prog)
3244		return -EINVAL;
3245
3246	cpus_read_lock();
3247	err = _virtnet_set_queues(vi, queue_pairs);
3248	if (err) {
3249		cpus_read_unlock();
3250		goto err;
3251	}
3252	virtnet_set_affinity(vi);
3253	cpus_read_unlock();
3254
3255	netif_set_real_num_tx_queues(dev, queue_pairs);
3256	netif_set_real_num_rx_queues(dev, queue_pairs);
3257 err:
3258	return err;
3259}
3260
3261static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3262{
3263	struct virtnet_info *vi = netdev_priv(dev);
3264	unsigned int i, j;
3265	u8 *p = data;
3266
3267	switch (stringset) {
3268	case ETH_SS_STATS:
3269		for (i = 0; i < vi->curr_queue_pairs; i++) {
3270			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
3271				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
3272						virtnet_rq_stats_desc[j].desc);
3273		}
3274
3275		for (i = 0; i < vi->curr_queue_pairs; i++) {
3276			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
3277				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
3278						virtnet_sq_stats_desc[j].desc);
3279		}
3280		break;
3281	}
3282}
3283
3284static int virtnet_get_sset_count(struct net_device *dev, int sset)
3285{
3286	struct virtnet_info *vi = netdev_priv(dev);
3287
3288	switch (sset) {
3289	case ETH_SS_STATS:
3290		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
3291					       VIRTNET_SQ_STATS_LEN);
3292	default:
3293		return -EOPNOTSUPP;
3294	}
3295}
3296
3297static void virtnet_get_ethtool_stats(struct net_device *dev,
3298				      struct ethtool_stats *stats, u64 *data)
3299{
3300	struct virtnet_info *vi = netdev_priv(dev);
3301	unsigned int idx = 0, start, i, j;
3302	const u8 *stats_base;
3303	const u64_stats_t *p;
3304	size_t offset;
3305
3306	for (i = 0; i < vi->curr_queue_pairs; i++) {
3307		struct receive_queue *rq = &vi->rq[i];
3308
3309		stats_base = (const u8 *)&rq->stats;
3310		do {
3311			start = u64_stats_fetch_begin(&rq->stats.syncp);
3312			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
3313				offset = virtnet_rq_stats_desc[j].offset;
3314				p = (const u64_stats_t *)(stats_base + offset);
3315				data[idx + j] = u64_stats_read(p);
3316			}
3317		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3318		idx += VIRTNET_RQ_STATS_LEN;
3319	}
3320
3321	for (i = 0; i < vi->curr_queue_pairs; i++) {
3322		struct send_queue *sq = &vi->sq[i];
3323
3324		stats_base = (const u8 *)&sq->stats;
3325		do {
3326			start = u64_stats_fetch_begin(&sq->stats.syncp);
3327			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
3328				offset = virtnet_sq_stats_desc[j].offset;
3329				p = (const u64_stats_t *)(stats_base + offset);
3330				data[idx + j] = u64_stats_read(p);
3331			}
3332		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3333		idx += VIRTNET_SQ_STATS_LEN;
3334	}
3335}
3336
3337static void virtnet_get_channels(struct net_device *dev,
3338				 struct ethtool_channels *channels)
3339{
3340	struct virtnet_info *vi = netdev_priv(dev);
3341
3342	channels->combined_count = vi->curr_queue_pairs;
3343	channels->max_combined = vi->max_queue_pairs;
3344	channels->max_other = 0;
3345	channels->rx_count = 0;
3346	channels->tx_count = 0;
3347	channels->other_count = 0;
3348}
3349
3350static int virtnet_set_link_ksettings(struct net_device *dev,
3351				      const struct ethtool_link_ksettings *cmd)
3352{
3353	struct virtnet_info *vi = netdev_priv(dev);
3354
3355	return ethtool_virtdev_set_link_ksettings(dev, cmd,
3356						  &vi->speed, &vi->duplex);
3357}
3358
3359static int virtnet_get_link_ksettings(struct net_device *dev,
3360				      struct ethtool_link_ksettings *cmd)
3361{
3362	struct virtnet_info *vi = netdev_priv(dev);
3363
3364	cmd->base.speed = vi->speed;
3365	cmd->base.duplex = vi->duplex;
3366	cmd->base.port = PORT_OTHER;
3367
3368	return 0;
3369}
3370
3371static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
3372					  struct ethtool_coalesce *ec)
3373{
3374	struct scatterlist sgs_tx;
3375	int i;
3376
3377	vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
3378	vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
3379	sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
3380
3381	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3382				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
3383				  &sgs_tx))
3384		return -EINVAL;
3385
3386	vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
3387	vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
3388	for (i = 0; i < vi->max_queue_pairs; i++) {
3389		vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3390		vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3391	}
3392
3393	return 0;
3394}
3395
3396static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
3397					  struct ethtool_coalesce *ec)
3398{
3399	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
3400	struct scatterlist sgs_rx;
3401	int i;
3402
3403	if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3404		return -EOPNOTSUPP;
3405
3406	if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
3407			       ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
3408		return -EINVAL;
3409
3410	if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
3411		vi->rx_dim_enabled = true;
3412		for (i = 0; i < vi->max_queue_pairs; i++)
3413			vi->rq[i].dim_enabled = true;
3414		return 0;
3415	}
3416
3417	if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
3418		vi->rx_dim_enabled = false;
3419		for (i = 0; i < vi->max_queue_pairs; i++)
3420			vi->rq[i].dim_enabled = false;
3421	}
3422
3423	/* Since the per-queue coalescing params can be set,
3424	 * we need to apply the new global params even if they
3425	 * are unchanged.
3426	 */
3427	vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
3428	vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
3429	sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
3430
3431	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3432				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
3433				  &sgs_rx))
3434		return -EINVAL;
3435
3436	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
3437	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
3438	for (i = 0; i < vi->max_queue_pairs; i++) {
3439		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3440		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3441	}
3442
3443	return 0;
3444}
3445
3446static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
3447				       struct ethtool_coalesce *ec)
3448{
3449	int err;
3450
3451	err = virtnet_send_tx_notf_coal_cmds(vi, ec);
3452	if (err)
3453		return err;
3454
3455	err = virtnet_send_rx_notf_coal_cmds(vi, ec);
3456	if (err)
3457		return err;
3458
3459	return 0;
3460}
3461
3462static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
3463					     struct ethtool_coalesce *ec,
3464					     u16 queue)
3465{
3466	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
3467	bool cur_rx_dim = vi->rq[queue].dim_enabled;
3468	u32 max_usecs, max_packets;
3469	int err;
3470
3471	max_usecs = vi->rq[queue].intr_coal.max_usecs;
3472	max_packets = vi->rq[queue].intr_coal.max_packets;
3473
3474	if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
3475			       ec->rx_max_coalesced_frames != max_packets))
3476		return -EINVAL;
3477
3478	if (rx_ctrl_dim_on && !cur_rx_dim) {
3479		vi->rq[queue].dim_enabled = true;
3480		return 0;
3481	}
3482
3483	if (!rx_ctrl_dim_on && cur_rx_dim)
3484		vi->rq[queue].dim_enabled = false;
3485
3486	/* If no params are updated, userspace ethtool will
3487	 * reject the modification.
3488	 */
3489	err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
3490					       ec->rx_coalesce_usecs,
3491					       ec->rx_max_coalesced_frames);
3492	if (err)
3493		return err;
3494
3495	return 0;
3496}
3497
3498static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
3499					  struct ethtool_coalesce *ec,
3500					  u16 queue)
3501{
3502	int err;
3503
3504	err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
3505	if (err)
3506		return err;
3507
3508	err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
3509					       ec->tx_coalesce_usecs,
3510					       ec->tx_max_coalesced_frames);
3511	if (err)
3512		return err;
3513
3514	return 0;
3515}
3516
3517static void virtnet_rx_dim_work(struct work_struct *work)
3518{
3519	struct dim *dim = container_of(work, struct dim, work);
3520	struct receive_queue *rq = container_of(dim,
3521			struct receive_queue, dim);
3522	struct virtnet_info *vi = rq->vq->vdev->priv;
3523	struct net_device *dev = vi->dev;
3524	struct dim_cq_moder update_moder;
3525	int i, qnum, err;
3526
3527	if (!rtnl_trylock())
3528		return;
3529
3530	/* Each rxq's work is queued by "net_dim()->schedule_work()"
3531	 * in response to NAPI traffic changes. Note that dim->profile_ix
3532	 * for each rxq is updated prior to the queuing action.
3533	 * So we only need to traverse and update profiles for all rxqs
3534	 * in this work item, which holds rtnl_lock.
3535	 */
3536	for (i = 0; i < vi->curr_queue_pairs; i++) {
3537		rq = &vi->rq[i];
3538		dim = &rq->dim;
3539		qnum = rq - vi->rq;
3540
3541		if (!rq->dim_enabled)
3542			continue;
3543
3544		update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
3545		if (update_moder.usec != rq->intr_coal.max_usecs ||
3546		    update_moder.pkts != rq->intr_coal.max_packets) {
3547			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
3548							       update_moder.usec,
3549							       update_moder.pkts);
3550			if (err)
3551				pr_debug("%s: Failed to send dim parameters on rxq%d\n",
3552					 dev->name, qnum);
3553			dim->state = DIM_START_MEASURE;
3554		}
3555	}
3556
3557	rtnl_unlock();
3558}
3559
3560static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
3561{
3562	/* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
3563	 * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
3564	 */
3565	if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
3566		return -EOPNOTSUPP;
3567
3568	if (ec->tx_max_coalesced_frames > 1 ||
3569	    ec->rx_max_coalesced_frames != 1)
3570		return -EINVAL;
3571
3572	return 0;
3573}
3574
3575static int virtnet_should_update_vq_weight(int dev_flags, int weight,
3576					   int vq_weight, bool *should_update)
3577{
3578	if (weight ^ vq_weight) {
3579		if (dev_flags & IFF_UP)
3580			return -EBUSY;
3581		*should_update = true;
3582	}
3583
3584	return 0;
3585}
3586
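/* ethtool -C handler. tx-frames doubles as the TX NAPI switch: a non-zero
 * value selects NAPI_POLL_WEIGHT, zero disables TX NAPI, and the weight
 * may only change while the interface is down. The coalescing parameters
 * themselves are forwarded to the device only when VIRTIO_NET_F_NOTF_COAL
 * was negotiated.
 */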
3587static int virtnet_set_coalesce(struct net_device *dev,
3588				struct ethtool_coalesce *ec,
3589				struct kernel_ethtool_coalesce *kernel_coal,
3590				struct netlink_ext_ack *extack)
3591{
3592	struct virtnet_info *vi = netdev_priv(dev);
3593	int ret, queue_number, napi_weight;
3594	bool update_napi = false;
3595
3596	/* Can't change NAPI weight if the link is up */
3597	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3598	for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
3599		ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3600						      vi->sq[queue_number].napi.weight,
3601						      &update_napi);
3602		if (ret)
3603			return ret;
3604
3605		if (update_napi) {
3606			/* All queues in [queue_number, vi->max_queue_pairs) will be
3607			 * updated for the sake of simplicity, even though this might not be necessary.
3608			 */
3609			break;
3610		}
3611	}
3612
3613	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
3614		ret = virtnet_send_notf_coal_cmds(vi, ec);
3615	else
3616		ret = virtnet_coal_params_supported(ec);
3617
3618	if (ret)
3619		return ret;
3620
3621	if (update_napi) {
3622		for (; queue_number < vi->max_queue_pairs; queue_number++)
3623			vi->sq[queue_number].napi.weight = napi_weight;
3624	}
3625
3626	return ret;
3627}
3628
3629static int virtnet_get_coalesce(struct net_device *dev,
3630				struct ethtool_coalesce *ec,
3631				struct kernel_ethtool_coalesce *kernel_coal,
3632				struct netlink_ext_ack *extack)
3633{
3634	struct virtnet_info *vi = netdev_priv(dev);
3635
3636	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
3637		ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
3638		ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
3639		ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
3640		ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
3641		ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
3642	} else {
3643		ec->rx_max_coalesced_frames = 1;
3644
3645		if (vi->sq[0].napi.weight)
3646			ec->tx_max_coalesced_frames = 1;
3647	}
3648
3649	return 0;
3650}
3651
3652static int virtnet_set_per_queue_coalesce(struct net_device *dev,
3653					  u32 queue,
3654					  struct ethtool_coalesce *ec)
3655{
3656	struct virtnet_info *vi = netdev_priv(dev);
3657	int ret, napi_weight;
3658	bool update_napi = false;
3659
3660	if (queue >= vi->max_queue_pairs)
3661		return -EINVAL;
3662
3663	/* Can't change NAPI weight if the link is up */
3664	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3665	ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3666					      vi->sq[queue].napi.weight,
3667					      &update_napi);
3668	if (ret)
3669		return ret;
3670
3671	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3672		ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
3673	else
3674		ret = virtnet_coal_params_supported(ec);
3675
3676	if (ret)
3677		return ret;
3678
3679	if (update_napi)
3680		vi->sq[queue].napi.weight = napi_weight;
3681
3682	return 0;
3683}
3684
3685static int virtnet_get_per_queue_coalesce(struct net_device *dev,
3686					  u32 queue,
3687					  struct ethtool_coalesce *ec)
3688{
3689	struct virtnet_info *vi = netdev_priv(dev);
3690
3691	if (queue >= vi->max_queue_pairs)
3692		return -EINVAL;
3693
3694	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
3695		ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
3696		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
3697		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
3698		ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
3699		ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
3700	} else {
3701		ec->rx_max_coalesced_frames = 1;
3702
3703		if (vi->sq[queue].napi.weight)
3704			ec->tx_max_coalesced_frames = 1;
3705	}
3706
3707	return 0;
3708}
3709
3710static void virtnet_init_settings(struct net_device *dev)
3711{
3712	struct virtnet_info *vi = netdev_priv(dev);
3713
3714	vi->speed = SPEED_UNKNOWN;
3715	vi->duplex = DUPLEX_UNKNOWN;
3716}
3717
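/* Refresh the advertised link speed/duplex from the device config space
 * when VIRTIO_NET_F_SPEED_DUPLEX has been negotiated.
 */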
3718static void virtnet_update_settings(struct virtnet_info *vi)
3719{
3720	u32 speed;
3721	u8 duplex;
3722
3723	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3724		return;
3725
3726	virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3727
3728	if (ethtool_validate_speed(speed))
3729		vi->speed = speed;
3730
3731	virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3732
3733	if (ethtool_validate_duplex(duplex))
3734		vi->duplex = duplex;
3735}
3736
3737static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
3738{
3739	return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
3740}
3741
3742static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
3743{
3744	return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
3745}
3746
3747static int virtnet_get_rxfh(struct net_device *dev,
3748			    struct ethtool_rxfh_param *rxfh)
3749{
3750	struct virtnet_info *vi = netdev_priv(dev);
3751	int i;
3752
3753	if (rxfh->indir) {
3754		for (i = 0; i < vi->rss_indir_table_size; ++i)
3755			rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
3756	}
3757
3758	if (rxfh->key)
3759		memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
3760
3761	rxfh->hfunc = ETH_RSS_HASH_TOP;
3762
3763	return 0;
3764}
3765
3766static int virtnet_set_rxfh(struct net_device *dev,
3767			    struct ethtool_rxfh_param *rxfh,
3768			    struct netlink_ext_ack *extack)
3769{
3770	struct virtnet_info *vi = netdev_priv(dev);
3771	int i;
3772
3773	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
3774	    rxfh->hfunc != ETH_RSS_HASH_TOP)
3775		return -EOPNOTSUPP;
3776
3777	if (rxfh->indir) {
3778		for (i = 0; i < vi->rss_indir_table_size; ++i)
3779			vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
3780	}
3781	if (rxfh->key)
3782		memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
3783
3784	virtnet_commit_rss_command(vi);
3785
3786	return 0;
3787}
3788
3789static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
3790{
3791	struct virtnet_info *vi = netdev_priv(dev);
3792	int rc = 0;
3793
3794	switch (info->cmd) {
3795	case ETHTOOL_GRXRINGS:
3796		info->data = vi->curr_queue_pairs;
3797		break;
3798	case ETHTOOL_GRXFH:
3799		virtnet_get_hashflow(vi, info);
3800		break;
3801	default:
3802		rc = -EOPNOTSUPP;
3803	}
3804
3805	return rc;
3806}
3807
3808static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3809{
3810	struct virtnet_info *vi = netdev_priv(dev);
3811	int rc = 0;
3812
3813	switch (info->cmd) {
3814	case ETHTOOL_SRXFH:
3815		if (!virtnet_set_hashflow(vi, info))
3816			rc = -EINVAL;
3817
3818		break;
3819	default:
3820		rc = -EOPNOTSUPP;
3821	}
3822
3823	return rc;
3824}
3825
3826static const struct ethtool_ops virtnet_ethtool_ops = {
3827	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
3828		ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
3829	.get_drvinfo = virtnet_get_drvinfo,
3830	.get_link = ethtool_op_get_link,
3831	.get_ringparam = virtnet_get_ringparam,
3832	.set_ringparam = virtnet_set_ringparam,
3833	.get_strings = virtnet_get_strings,
3834	.get_sset_count = virtnet_get_sset_count,
3835	.get_ethtool_stats = virtnet_get_ethtool_stats,
3836	.set_channels = virtnet_set_channels,
3837	.get_channels = virtnet_get_channels,
3838	.get_ts_info = ethtool_op_get_ts_info,
3839	.get_link_ksettings = virtnet_get_link_ksettings,
3840	.set_link_ksettings = virtnet_set_link_ksettings,
3841	.set_coalesce = virtnet_set_coalesce,
3842	.get_coalesce = virtnet_get_coalesce,
3843	.set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
3844	.get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
3845	.get_rxfh_key_size = virtnet_get_rxfh_key_size,
3846	.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
3847	.get_rxfh = virtnet_get_rxfh,
3848	.set_rxfh = virtnet_set_rxfh,
3849	.get_rxnfc = virtnet_get_rxnfc,
3850	.set_rxnfc = virtnet_set_rxnfc,
3851};
3852
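/* Quiesce the device before a reset or suspend: flush the config work,
 * detach the netdev and close the queues if the interface was running.
 */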
3853static void virtnet_freeze_down(struct virtio_device *vdev)
3854{
3855	struct virtnet_info *vi = vdev->priv;
3856
3857	/* Make sure no work handler is accessing the device */
3858	flush_work(&vi->config_work);
3859
3860	netif_tx_lock_bh(vi->dev);
3861	netif_device_detach(vi->dev);
3862	netif_tx_unlock_bh(vi->dev);
3863	if (netif_running(vi->dev))
3864		virtnet_close(vi->dev);
3865}
3866
3867static int init_vqs(struct virtnet_info *vi);
3868
3869static int virtnet_restore_up(struct virtio_device *vdev)
3870{
3871	struct virtnet_info *vi = vdev->priv;
3872	int err;
3873
3874	err = init_vqs(vi);
3875	if (err)
3876		return err;
3877
3878	virtio_device_ready(vdev);
3879
3880	enable_delayed_refill(vi);
3881
3882	if (netif_running(vi->dev)) {
3883		err = virtnet_open(vi->dev);
3884		if (err)
3885			return err;
3886	}
3887
3888	netif_tx_lock_bh(vi->dev);
3889	netif_device_attach(vi->dev);
3890	netif_tx_unlock_bh(vi->dev);
3891	return err;
3892}
3893
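/* Program the guest offload feature bits through the
 * VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET control-queue command.
 */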
3894static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
3895{
3896	struct scatterlist sg;
3897	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3898
3899	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3900
3901	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
3902				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
3903		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
3904		return -EINVAL;
3905	}
3906
3907	return 0;
3908}
3909
3910static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
3911{
3912	u64 offloads = 0;
3913
3914	if (!vi->guest_offloads)
3915		return 0;
3916
3917	return virtnet_set_guest_offloads(vi, offloads);
3918}
3919
3920static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
3921{
3922	u64 offloads = vi->guest_offloads;
3923
3924	if (!vi->guest_offloads)
3925		return 0;
3926
3927	return virtnet_set_guest_offloads(vi, offloads);
3928}
3929
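/* Attach or detach an XDP program. Extra send queues are reserved for
 * XDP_TX where possible, and guest offloads are cleared while a program
 * is attached (and restored when it is removed).
 */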
3930static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
3931			   struct netlink_ext_ack *extack)
3932{
3933	unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
3934					   sizeof(struct skb_shared_info));
3935	unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
3936	struct virtnet_info *vi = netdev_priv(dev);
3937	struct bpf_prog *old_prog;
3938	u16 xdp_qp = 0, curr_qp;
3939	int i, err;
3940
3941	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
3942	    && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3943	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3944	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
3945		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
3946		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
3947		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
3948		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
3949		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
3950		return -EOPNOTSUPP;
3951	}
3952
3953	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
3954		NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
3955		return -EINVAL;
3956	}
3957
3958	if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
3959		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
3960		netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
3961		return -EINVAL;
3962	}
3963
3964	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3965	if (prog)
3966		xdp_qp = nr_cpu_ids;
3967
3968	/* XDP requires extra queues for XDP_TX */
3969	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
3970		netdev_warn_once(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
3971				 curr_qp + xdp_qp, vi->max_queue_pairs);
3972		xdp_qp = 0;
3973	}
3974
3975	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
3976	if (!prog && !old_prog)
3977		return 0;
3978
3979	if (prog)
3980		bpf_prog_add(prog, vi->max_queue_pairs - 1);
3981
3982	/* Make sure NAPI is not using any XDP TX queues for RX. */
3983	if (netif_running(dev)) {
3984		for (i = 0; i < vi->max_queue_pairs; i++) {
3985			napi_disable(&vi->rq[i].napi);
3986			virtnet_napi_tx_disable(&vi->sq[i].napi);
3987		}
3988	}
3989
3990	if (!prog) {
3991		for (i = 0; i < vi->max_queue_pairs; i++) {
3992			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3993			if (i == 0)
3994				virtnet_restore_guest_offloads(vi);
3995		}
3996		synchronize_net();
3997	}
3998
3999	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
4000	if (err)
4001		goto err;
4002	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
4003	vi->xdp_queue_pairs = xdp_qp;
4004
4005	if (prog) {
4006		vi->xdp_enabled = true;
4007		for (i = 0; i < vi->max_queue_pairs; i++) {
4008			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
4009			if (i == 0 && !old_prog)
4010				virtnet_clear_guest_offloads(vi);
4011		}
4012		if (!old_prog)
4013			xdp_features_set_redirect_target(dev, true);
4014	} else {
4015		xdp_features_clear_redirect_target(dev);
4016		vi->xdp_enabled = false;
4017	}
4018
4019	for (i = 0; i < vi->max_queue_pairs; i++) {
4020		if (old_prog)
4021			bpf_prog_put(old_prog);
4022		if (netif_running(dev)) {
4023			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
4024			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
4025					       &vi->sq[i].napi);
4026		}
4027	}
4028
4029	return 0;
4030
4031err:
4032	if (!prog) {
4033		virtnet_clear_guest_offloads(vi);
4034		for (i = 0; i < vi->max_queue_pairs; i++)
4035			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
4036	}
4037
4038	if (netif_running(dev)) {
4039		for (i = 0; i < vi->max_queue_pairs; i++) {
4040			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
4041			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
4042					       &vi->sq[i].napi);
4043		}
4044	}
4045	if (prog)
4046		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
4047	return err;
4048}
4049
4050static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4051{
4052	switch (xdp->command) {
4053	case XDP_SETUP_PROG:
4054		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
4055	default:
4056		return -EINVAL;
4057	}
4058}
4059
4060static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
4061				      size_t len)
4062{
4063	struct virtnet_info *vi = netdev_priv(dev);
4064	int ret;
4065
4066	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
4067		return -EOPNOTSUPP;
4068
4069	ret = snprintf(buf, len, "sby");
4070	if (ret >= len)
4071		return -EOPNOTSUPP;
4072
4073	return 0;
4074}
4075
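/* Handle runtime feature toggles: NETIF_F_GRO_HW is mapped onto the guest
 * GSO offload bits and NETIF_F_RXHASH reprograms the RSS hash types.
 */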
4076static int virtnet_set_features(struct net_device *dev,
4077				netdev_features_t features)
4078{
4079	struct virtnet_info *vi = netdev_priv(dev);
4080	u64 offloads;
4081	int err;
4082
4083	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
4084		if (vi->xdp_enabled)
4085			return -EBUSY;
4086
4087		if (features & NETIF_F_GRO_HW)
4088			offloads = vi->guest_offloads_capable;
4089		else
4090			offloads = vi->guest_offloads_capable &
4091				   ~GUEST_OFFLOAD_GRO_HW_MASK;
4092
4093		err = virtnet_set_guest_offloads(vi, offloads);
4094		if (err)
4095			return err;
4096		vi->guest_offloads = offloads;
4097	}
4098
4099	if ((dev->features ^ features) & NETIF_F_RXHASH) {
4100		if (features & NETIF_F_RXHASH)
4101			vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
4102		else
4103			vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
4104
4105		if (!virtnet_commit_rss_command(vi))
4106			return -EINVAL;
4107	}
4108
4109	return 0;
4110}
4111
4112static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
4113{
4114	struct virtnet_info *priv = netdev_priv(dev);
4115	struct send_queue *sq = &priv->sq[txqueue];
4116	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
4117
4118	u64_stats_update_begin(&sq->stats.syncp);
4119	u64_stats_inc(&sq->stats.tx_timeouts);
4120	u64_stats_update_end(&sq->stats.syncp);
4121
4122	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
4123		   txqueue, sq->name, sq->vq->index, sq->vq->name,
4124		   jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
4125}
4126
4127static const struct net_device_ops virtnet_netdev = {
4128	.ndo_open            = virtnet_open,
4129	.ndo_stop   	     = virtnet_close,
4130	.ndo_start_xmit      = start_xmit,
4131	.ndo_validate_addr   = eth_validate_addr,
4132	.ndo_set_mac_address = virtnet_set_mac_address,
4133	.ndo_set_rx_mode     = virtnet_set_rx_mode,
4134	.ndo_get_stats64     = virtnet_stats,
4135	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
4136	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
4137	.ndo_bpf		= virtnet_xdp,
4138	.ndo_xdp_xmit		= virtnet_xdp_xmit,
4139	.ndo_features_check	= passthru_features_check,
4140	.ndo_get_phys_port_name	= virtnet_get_phys_port_name,
4141	.ndo_set_features	= virtnet_set_features,
4142	.ndo_tx_timeout		= virtnet_tx_timeout,
4143};
4144
4145static void virtnet_config_changed_work(struct work_struct *work)
4146{
4147	struct virtnet_info *vi =
4148		container_of(work, struct virtnet_info, config_work);
4149	u16 v;
4150
4151	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
4152				 struct virtio_net_config, status, &v) < 0)
4153		return;
4154
4155	if (v & VIRTIO_NET_S_ANNOUNCE) {
4156		netdev_notify_peers(vi->dev);
4157		virtnet_ack_link_announce(vi);
4158	}
4159
4160	/* Ignore unknown (future) status bits */
4161	v &= VIRTIO_NET_S_LINK_UP;
4162
4163	if (vi->status == v)
4164		return;
4165
4166	vi->status = v;
4167
4168	if (vi->status & VIRTIO_NET_S_LINK_UP) {
4169		virtnet_update_settings(vi);
4170		netif_carrier_on(vi->dev);
4171		netif_tx_wake_all_queues(vi->dev);
4172	} else {
4173		netif_carrier_off(vi->dev);
4174		netif_tx_stop_all_queues(vi->dev);
4175	}
4176}
4177
4178static void virtnet_config_changed(struct virtio_device *vdev)
4179{
4180	struct virtnet_info *vi = vdev->priv;
4181
4182	schedule_work(&vi->config_work);
4183}
4184
4185static void virtnet_free_queues(struct virtnet_info *vi)
4186{
4187	int i;
4188
4189	for (i = 0; i < vi->max_queue_pairs; i++) {
4190		__netif_napi_del(&vi->rq[i].napi);
4191		__netif_napi_del(&vi->sq[i].napi);
4192	}
4193
4194	/* We called __netif_napi_del(), so we must respect an RCU
4195	 * grace period before freeing vi->rq.
4196	 */
4197	synchronize_net();
4198
4199	kfree(vi->rq);
4200	kfree(vi->sq);
4201	kfree(vi->ctrl);
4202}
4203
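/* Free the pages chained on each receive queue and drop any attached
 * XDP program; runs under the rtnl lock (see free_receive_bufs()).
 */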
4204static void _free_receive_bufs(struct virtnet_info *vi)
4205{
4206	struct bpf_prog *old_prog;
4207	int i;
4208
4209	for (i = 0; i < vi->max_queue_pairs; i++) {
4210		while (vi->rq[i].pages)
4211			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
4212
4213		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
4214		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
4215		if (old_prog)
4216			bpf_prog_put(old_prog);
4217	}
4218}
4219
4220static void free_receive_bufs(struct virtnet_info *vi)
4221{
4222	rtnl_lock();
4223	_free_receive_bufs(vi);
4224	rtnl_unlock();
4225}
4226
4227static void free_receive_page_frags(struct virtnet_info *vi)
4228{
4229	int i;
4230	for (i = 0; i < vi->max_queue_pairs; i++)
4231		if (vi->rq[i].alloc_frag.page) {
4232			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
4233				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
4234			put_page(vi->rq[i].alloc_frag.page);
4235		}
4236}
4237
4238static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
4239{
4240	if (!is_xdp_frame(buf))
4241		dev_kfree_skb(buf);
4242	else
4243		xdp_return_frame(ptr_to_xdp(buf));
4244}
4245
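/* Detach and free any buffers still queued in the send and receive
 * virtqueues when the device is being torn down.
 */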
4246static void free_unused_bufs(struct virtnet_info *vi)
4247{
4248	void *buf;
4249	int i;
4250
4251	for (i = 0; i < vi->max_queue_pairs; i++) {
4252		struct virtqueue *vq = vi->sq[i].vq;
4253		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4254			virtnet_sq_free_unused_buf(vq, buf);
4255		cond_resched();
4256	}
4257
4258	for (i = 0; i < vi->max_queue_pairs; i++) {
4259		struct virtqueue *vq = vi->rq[i].vq;
4260
4261		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4262			virtnet_rq_unmap_free_buf(vq, buf);
4263		cond_resched();
4264	}
4265}
4266
4267static void virtnet_del_vqs(struct virtnet_info *vi)
4268{
4269	struct virtio_device *vdev = vi->vdev;
4270
4271	virtnet_clean_affinity(vi);
4272
4273	vdev->config->del_vqs(vdev);
4274
4275	virtnet_free_queues(vi);
4276}
4277
4278/* How large should a single buffer be so a queue full of these can fit at
4279 * least one full packet?
4280 * Logic below assumes the mergeable buffer header is used.
4281 */
4282static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
4283{
4284	const unsigned int hdr_len = vi->hdr_len;
4285	unsigned int rq_size = virtqueue_get_vring_size(vq);
4286	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
4287	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
4288	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
4289
4290	return max(max(min_buf_len, hdr_len) - hdr_len,
4291		   (unsigned int)GOOD_PACKET_LEN);
4292}
4293
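/* Allocate the find_vqs() parameter arrays, create all virtqueues
 * (the RX/TX pairs plus an optional control vq) and bind them to the
 * per-queue state.
 */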
4294static int virtnet_find_vqs(struct virtnet_info *vi)
4295{
4296	vq_callback_t **callbacks;
4297	struct virtqueue **vqs;
4298	const char **names;
4299	int ret = -ENOMEM;
4300	int total_vqs;
4301	bool *ctx;
4302	u16 i;
4303
4304	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
4305	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
4306	 * possible control vq.
4307	 */
4308	total_vqs = vi->max_queue_pairs * 2 +
4309		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
4310
4311	/* Allocate space for find_vqs parameters */
4312	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
4313	if (!vqs)
4314		goto err_vq;
4315	callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
4316	if (!callbacks)
4317		goto err_callback;
4318	names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
4319	if (!names)
4320		goto err_names;
4321	if (!vi->big_packets || vi->mergeable_rx_bufs) {
4322		ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
4323		if (!ctx)
4324			goto err_ctx;
4325	} else {
4326		ctx = NULL;
4327	}
4328
4329	/* Parameters for control virtqueue, if any */
4330	if (vi->has_cvq) {
4331		callbacks[total_vqs - 1] = NULL;
4332		names[total_vqs - 1] = "control";
4333	}
4334
4335	/* Allocate/initialize parameters for send/receive virtqueues */
4336	for (i = 0; i < vi->max_queue_pairs; i++) {
4337		callbacks[rxq2vq(i)] = skb_recv_done;
4338		callbacks[txq2vq(i)] = skb_xmit_done;
4339		sprintf(vi->rq[i].name, "input.%u", i);
4340		sprintf(vi->sq[i].name, "output.%u", i);
4341		names[rxq2vq(i)] = vi->rq[i].name;
4342		names[txq2vq(i)] = vi->sq[i].name;
4343		if (ctx)
4344			ctx[rxq2vq(i)] = true;
4345	}
4346
4347	ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
4348				  names, ctx, NULL);
4349	if (ret)
4350		goto err_find;
4351
4352	if (vi->has_cvq) {
4353		vi->cvq = vqs[total_vqs - 1];
4354		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
4355			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4356	}
4357
4358	for (i = 0; i < vi->max_queue_pairs; i++) {
4359		vi->rq[i].vq = vqs[rxq2vq(i)];
4360		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
4361		vi->sq[i].vq = vqs[txq2vq(i)];
4362	}
4363
4364	/* Success path: ret == 0 here; fall through and free the temporary arrays. */
4365
4366
4367err_find:
4368	kfree(ctx);
4369err_ctx:
4370	kfree(names);
4371err_names:
4372	kfree(callbacks);
4373err_callback:
4374	kfree(vqs);
4375err_vq:
4376	return ret;
4377}
4378
4379static int virtnet_alloc_queues(struct virtnet_info *vi)
4380{
4381	int i;
4382
4383	if (vi->has_cvq) {
4384		vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
4385		if (!vi->ctrl)
4386			goto err_ctrl;
4387	} else {
4388		vi->ctrl = NULL;
4389	}
4390	vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
4391	if (!vi->sq)
4392		goto err_sq;
4393	vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
4394	if (!vi->rq)
4395		goto err_rq;
4396
4397	INIT_DELAYED_WORK(&vi->refill, refill_work);
4398	for (i = 0; i < vi->max_queue_pairs; i++) {
4399		vi->rq[i].pages = NULL;
4400		netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
4401				      napi_weight);
4402		netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
4403					 virtnet_poll_tx,
4404					 napi_tx ? napi_weight : 0);
4405
4406		INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
4407		vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4408
4409		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
4410		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
4411		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
4412
4413		u64_stats_init(&vi->rq[i].stats.syncp);
4414		u64_stats_init(&vi->sq[i].stats.syncp);
4415	}
4416
4417	return 0;
4418
4419err_rq:
4420	kfree(vi->sq);
4421err_sq:
4422	kfree(vi->ctrl);
4423err_ctrl:
4424	return -ENOMEM;
4425}
4426
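/* Allocate the rq/sq arrays, create the virtqueues and set the initial
 * CPU affinity hints.
 */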
4427static int init_vqs(struct virtnet_info *vi)
4428{
4429	int ret;
4430
4431	/* Allocate send & receive queues */
4432	ret = virtnet_alloc_queues(vi);
4433	if (ret)
4434		goto err;
4435
4436	ret = virtnet_find_vqs(vi);
4437	if (ret)
4438		goto err_free;
4439
4440	virtnet_rq_set_premapped(vi);
4441
4442	cpus_read_lock();
4443	virtnet_set_affinity(vi);
4444	cpus_read_unlock();
4445
4446	return 0;
4447
4448err_free:
4449	virtnet_free_queues(vi);
4450err:
4451	return ret;
4452}
4453
4454#ifdef CONFIG_SYSFS
4455static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
4456		char *buf)
4457{
4458	struct virtnet_info *vi = netdev_priv(queue->dev);
4459	unsigned int queue_index = get_netdev_rx_queue_index(queue);
4460	unsigned int headroom = virtnet_get_headroom(vi);
4461	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
4462	struct ewma_pkt_len *avg;
4463
4464	BUG_ON(queue_index >= vi->max_queue_pairs);
4465	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
4466	return sprintf(buf, "%u\n",
4467		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
4468				       SKB_DATA_ALIGN(headroom + tailroom)));
4469}
4470
4471static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
4472	__ATTR_RO(mergeable_rx_buffer_size);
4473
4474static struct attribute *virtio_net_mrg_rx_attrs[] = {
4475	&mergeable_rx_buffer_size_attribute.attr,
4476	NULL
4477};
4478
4479static const struct attribute_group virtio_net_mrg_rx_group = {
4480	.name = "virtio_net",
4481	.attrs = virtio_net_mrg_rx_attrs
4482};
4483#endif
4484
4485static bool virtnet_fail_on_feature(struct virtio_device *vdev,
4486				    unsigned int fbit,
4487				    const char *fname, const char *dname)
4488{
4489	if (!virtio_has_feature(vdev, fbit))
4490		return false;
4491
4492	dev_err(&vdev->dev, "device advertises feature %s but not %s",
4493		fname, dname);
4494
4495	return true;
4496}
4497
4498#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
4499	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
4500
4501static bool virtnet_validate_features(struct virtio_device *vdev)
4502{
4503	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
4504	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
4505			     "VIRTIO_NET_F_CTRL_VQ") ||
4506	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
4507			     "VIRTIO_NET_F_CTRL_VQ") ||
4508	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
4509			     "VIRTIO_NET_F_CTRL_VQ") ||
4510	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
4511	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
4512			     "VIRTIO_NET_F_CTRL_VQ") ||
4513	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
4514			     "VIRTIO_NET_F_CTRL_VQ") ||
4515	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
4516			     "VIRTIO_NET_F_CTRL_VQ") ||
4517	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
4518			     "VIRTIO_NET_F_CTRL_VQ") ||
4519	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
4520			     "VIRTIO_NET_F_CTRL_VQ"))) {
4521		return false;
4522	}
4523
4524	return true;
4525}
4526
4527#define MIN_MTU ETH_MIN_MTU
4528#define MAX_MTU ETH_MAX_MTU
4529
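/* Validation callback run before probe(): sanity-check the negotiated
 * features and the MTU advertised in the config space.
 */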
4530static int virtnet_validate(struct virtio_device *vdev)
4531{
4532	if (!vdev->config->get) {
4533		dev_err(&vdev->dev, "%s failure: config access disabled\n",
4534			__func__);
4535		return -EINVAL;
4536	}
4537
4538	if (!virtnet_validate_features(vdev))
4539		return -EINVAL;
4540
4541	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4542		int mtu = virtio_cread16(vdev,
4543					 offsetof(struct virtio_net_config,
4544						  mtu));
4545		if (mtu < MIN_MTU)
4546			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
4547	}
4548
4549	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
4550	    !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4551		dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
4552		__virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
4553	}
4554
4555	return 0;
4556}
4557
4558static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
4559{
4560	return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4561		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
4562		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
4563		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4564		(virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
4565		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
4566}
4567
4568static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
4569{
4570	bool guest_gso = virtnet_check_guest_gso(vi);
4571
4572	/* If the device can receive ANY guest GSO packets, regardless of MTU,
4573	 * allocate buffers of the maximum size; otherwise limit them to the
4574	 * MTU size only.
4575	 */
4576	if (mtu > ETH_DATA_LEN || guest_gso) {
4577		vi->big_packets = true;
4578		vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
4579	}
4580}
4581
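/* Device probe: read the config space, set up the net_device features,
 * allocate the virtqueues and register the network interface.
 */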
4582static int virtnet_probe(struct virtio_device *vdev)
4583{
4584	int i, err = -ENOMEM;
4585	struct net_device *dev;
4586	struct virtnet_info *vi;
4587	u16 max_queue_pairs;
4588	int mtu = 0;
4589
4590	/* Find if host supports multiqueue/rss virtio_net device */
4591	max_queue_pairs = 1;
4592	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4593		max_queue_pairs =
4594		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
4595
4596	/* We need at least 2 queues */
4597	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
4598	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
4599	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4600		max_queue_pairs = 1;
4601
4602	/* Allocate ourselves a network device with room for our info */
4603	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
4604	if (!dev)
4605		return -ENOMEM;
4606
4607	/* Set up network device as normal. */
4608	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
4609			   IFF_TX_SKB_NO_LINEAR;
4610	dev->netdev_ops = &virtnet_netdev;
4611	dev->features = NETIF_F_HIGHDMA;
4612
4613	dev->ethtool_ops = &virtnet_ethtool_ops;
4614	SET_NETDEV_DEV(dev, &vdev->dev);
4615
4616	/* Do we support "hardware" checksums? */
4617	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
4618		/* This opens up the world of extra features. */
4619		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4620		if (csum)
4621			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4622
4623		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
4624			dev->hw_features |= NETIF_F_TSO
4625				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
4626		}
4627		/* Individual feature bits: what can host handle? */
4628		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
4629			dev->hw_features |= NETIF_F_TSO;
4630		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
4631			dev->hw_features |= NETIF_F_TSO6;
4632		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
4633			dev->hw_features |= NETIF_F_TSO_ECN;
4634		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
4635			dev->hw_features |= NETIF_F_GSO_UDP_L4;
4636
4637		dev->features |= NETIF_F_GSO_ROBUST;
4638
4639		if (gso)
4640			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
4641		/* (!csum && gso) case will be fixed by register_netdev() */
4642	}
4643	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
4644		dev->features |= NETIF_F_RXCSUM;
4645	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4646	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
4647		dev->features |= NETIF_F_GRO_HW;
4648	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
4649		dev->hw_features |= NETIF_F_GRO_HW;
4650
4651	dev->vlan_features = dev->features;
4652	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
4653
4654	/* MTU range: 68 - 65535 */
4655	dev->min_mtu = MIN_MTU;
4656	dev->max_mtu = MAX_MTU;
4657
4658	/* Configuration may specify what MAC to use.  Otherwise random. */
4659	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4660		u8 addr[ETH_ALEN];
4661
4662		virtio_cread_bytes(vdev,
4663				   offsetof(struct virtio_net_config, mac),
4664				   addr, ETH_ALEN);
4665		eth_hw_addr_set(dev, addr);
4666	} else {
4667		eth_hw_addr_random(dev);
4668		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
4669			 dev->dev_addr);
4670	}
4671
4672	/* Set up our device-specific information */
4673	vi = netdev_priv(dev);
4674	vi->dev = dev;
4675	vi->vdev = vdev;
4676	vdev->priv = vi;
4677
4678	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
4679	spin_lock_init(&vi->refill_lock);
4680
4681	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
4682		vi->mergeable_rx_bufs = true;
4683		dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
4684	}
4685
4686	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
4687		vi->has_rss_hash_report = true;
4688
4689	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4690		vi->has_rss = true;
4691
4692	if (vi->has_rss || vi->has_rss_hash_report) {
4693		vi->rss_indir_table_size =
4694			virtio_cread16(vdev, offsetof(struct virtio_net_config,
4695				rss_max_indirection_table_length));
4696		vi->rss_key_size =
4697			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
4698
4699		vi->rss_hash_types_supported =
4700		    virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
4701		vi->rss_hash_types_supported &=
4702				~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
4703				  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
4704				  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
4705
4706		dev->hw_features |= NETIF_F_RXHASH;
4707	}
4708
4709	if (vi->has_rss_hash_report)
4710		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
4711	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
4712		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4713		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
4714	else
4715		vi->hdr_len = sizeof(struct virtio_net_hdr);
4716
4717	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
4718	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4719		vi->any_header_sg = true;
4720
4721	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4722		vi->has_cvq = true;
4723
4724	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4725		mtu = virtio_cread16(vdev,
4726				     offsetof(struct virtio_net_config,
4727					      mtu));
4728		if (mtu < dev->min_mtu) {
4729			/* Should never trigger: MTU was previously validated
4730			 * in virtnet_validate.
4731			 */
4732			dev_err(&vdev->dev,
4733				"device MTU appears to have changed: it is now %d < %d",
4734				mtu, dev->min_mtu);
4735			err = -EINVAL;
4736			goto free;
4737		}
4738
4739		dev->mtu = mtu;
4740		dev->max_mtu = mtu;
4741	}
4742
4743	virtnet_set_big_packets(vi, mtu);
4744
4745	if (vi->any_header_sg)
4746		dev->needed_headroom = vi->hdr_len;
4747
4748	/* Enable multiqueue by default */
4749	if (num_online_cpus() >= max_queue_pairs)
4750		vi->curr_queue_pairs = max_queue_pairs;
4751	else
4752		vi->curr_queue_pairs = num_online_cpus();
4753	vi->max_queue_pairs = max_queue_pairs;
4754
4755	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
4756	err = init_vqs(vi);
4757	if (err)
4758		goto free;
4759
4760	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4761		vi->intr_coal_rx.max_usecs = 0;
4762		vi->intr_coal_tx.max_usecs = 0;
4763		vi->intr_coal_rx.max_packets = 0;
4764
4765		/* Keep the default values of the coalescing parameters
4766		 * aligned with the default napi_tx state.
4767		 */
4768		if (vi->sq[0].napi.weight)
4769			vi->intr_coal_tx.max_packets = 1;
4770		else
4771			vi->intr_coal_tx.max_packets = 0;
4772	}
4773
4774	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
4775		/* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
4776		for (i = 0; i < vi->max_queue_pairs; i++)
4777			if (vi->sq[i].napi.weight)
4778				vi->sq[i].intr_coal.max_packets = 1;
4779	}
4780
4781#ifdef CONFIG_SYSFS
4782	if (vi->mergeable_rx_bufs)
4783		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
4784#endif
4785	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
4786	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
4787
4788	virtnet_init_settings(dev);
4789
4790	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4791		vi->failover = net_failover_create(vi->dev);
4792		if (IS_ERR(vi->failover)) {
4793			err = PTR_ERR(vi->failover);
4794			goto free_vqs;
4795		}
4796	}
4797
4798	if (vi->has_rss || vi->has_rss_hash_report)
4799		virtnet_init_default_rss(vi);
4800
4801	/* serialize netdev register + virtio_device_ready() with ndo_open() */
4802	rtnl_lock();
4803
4804	err = register_netdevice(dev);
4805	if (err) {
4806		pr_debug("virtio_net: registering device failed\n");
4807		rtnl_unlock();
4808		goto free_failover;
4809	}
4810
4811	virtio_device_ready(vdev);
4812
4813	_virtnet_set_queues(vi, vi->curr_queue_pairs);
4814
4815	/* A random MAC address has been assigned; notify the device.
4816	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is absent,
4817	 * because many devices work fine without the MAC being set explicitly.
4818	 */
4819	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
4820	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
4821		struct scatterlist sg;
4822
4823		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
4824		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
4825					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
4826			pr_debug("virtio_net: setting MAC address failed\n");
4827			rtnl_unlock();
4828			err = -EINVAL;
4829			goto free_unregister_netdev;
4830		}
4831	}
4832
4833	rtnl_unlock();
4834
4835	err = virtnet_cpu_notif_add(vi);
4836	if (err) {
4837		pr_debug("virtio_net: registering cpu notifier failed\n");
4838		goto free_unregister_netdev;
4839	}
4840
4841	/* Assume link up if the device can't report link status,
4842	 * otherwise get link status from config. */
4843	netif_carrier_off(dev);
4844	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
4845		schedule_work(&vi->config_work);
4846	} else {
4847		vi->status = VIRTIO_NET_S_LINK_UP;
4848		virtnet_update_settings(vi);
4849		netif_carrier_on(dev);
4850	}
4851
4852	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
4853		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
4854			set_bit(guest_offloads[i], &vi->guest_offloads);
4855	vi->guest_offloads_capable = vi->guest_offloads;
4856
4857	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
4858		 dev->name, max_queue_pairs);
4859
4860	return 0;
4861
4862free_unregister_netdev:
4863	unregister_netdev(dev);
4864free_failover:
4865	net_failover_destroy(vi->failover);
4866free_vqs:
4867	virtio_reset_device(vdev);
4868	cancel_delayed_work_sync(&vi->refill);
4869	free_receive_page_frags(vi);
4870	virtnet_del_vqs(vi);
4871free:
4872	free_netdev(dev);
4873	return err;
4874}
4875
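/* Reset the device and release all buffers and virtqueues; shared by the
 * remove and freeze paths.
 */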
4876static void remove_vq_common(struct virtnet_info *vi)
4877{
4878	virtio_reset_device(vi->vdev);
4879
4880	/* Free unused buffers in both send and recv, if any. */
4881	free_unused_bufs(vi);
4882
4883	free_receive_bufs(vi);
4884
4885	free_receive_page_frags(vi);
4886
4887	virtnet_del_vqs(vi);
4888}
4889
4890static void virtnet_remove(struct virtio_device *vdev)
4891{
4892	struct virtnet_info *vi = vdev->priv;
4893
4894	virtnet_cpu_notif_remove(vi);
4895
4896	/* Make sure no work handler is accessing the device. */
4897	flush_work(&vi->config_work);
4898
4899	unregister_netdev(vi->dev);
4900
4901	net_failover_destroy(vi->failover);
4902
4903	remove_vq_common(vi);
4904
4905	free_netdev(vi->dev);
4906}
4907
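/* PM freeze: quiesce the device and tear down the virtqueues; they are
 * re-created in virtnet_restore().
 */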
4908static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
4909{
4910	struct virtnet_info *vi = vdev->priv;
4911
4912	virtnet_cpu_notif_remove(vi);
4913	virtnet_freeze_down(vdev);
4914	remove_vq_common(vi);
4915
4916	return 0;
4917}
4918
4919static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
4920{
4921	struct virtnet_info *vi = vdev->priv;
4922	int err;
4923
4924	err = virtnet_restore_up(vdev);
4925	if (err)
4926		return err;
4927	virtnet_set_queues(vi, vi->curr_queue_pairs);
4928
4929	err = virtnet_cpu_notif_add(vi);
4930	if (err) {
4931		virtnet_freeze_down(vdev);
4932		remove_vq_common(vi);
4933		return err;
4934	}
4935
4936	return 0;
4937}
4938
4939static struct virtio_device_id id_table[] = {
4940	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
4941	{ 0 },
4942};
4943
4944#define VIRTNET_FEATURES \
4945	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
4946	VIRTIO_NET_F_MAC, \
4947	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
4948	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
4949	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
4950	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
4951	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
4952	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
4953	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
4954	VIRTIO_NET_F_CTRL_MAC_ADDR, \
4955	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
4956	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
4957	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
4958	VIRTIO_NET_F_VQ_NOTF_COAL, \
4959	VIRTIO_NET_F_GUEST_HDRLEN
4960
4961static unsigned int features[] = {
4962	VIRTNET_FEATURES,
4963};
4964
4965static unsigned int features_legacy[] = {
4966	VIRTNET_FEATURES,
4967	VIRTIO_NET_F_GSO,
4968	VIRTIO_F_ANY_LAYOUT,
4969};
4970
4971static struct virtio_driver virtio_net_driver = {
4972	.feature_table = features,
4973	.feature_table_size = ARRAY_SIZE(features),
4974	.feature_table_legacy = features_legacy,
4975	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
4976	.driver.name =	KBUILD_MODNAME,
4977	.driver.owner =	THIS_MODULE,
4978	.id_table =	id_table,
4979	.validate =	virtnet_validate,
4980	.probe =	virtnet_probe,
4981	.remove =	virtnet_remove,
4982	.config_changed = virtnet_config_changed,
4983#ifdef CONFIG_PM_SLEEP
4984	.freeze =	virtnet_freeze,
4985	.restore =	virtnet_restore,
4986#endif
4987};
4988
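/* Module init: register the CPU hotplug states used for queue affinity,
 * then register the virtio driver itself.
 */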
4989static __init int virtio_net_driver_init(void)
4990{
4991	int ret;
4992
4993	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
4994				      virtnet_cpu_online,
4995				      virtnet_cpu_down_prep);
4996	if (ret < 0)
4997		goto out;
4998	virtionet_online = ret;
4999	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
5000				      NULL, virtnet_cpu_dead);
5001	if (ret)
5002		goto err_dead;
5003	ret = register_virtio_driver(&virtio_net_driver);
5004	if (ret)
5005		goto err_virtio;
5006	return 0;
5007err_virtio:
5008	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
5009err_dead:
5010	cpuhp_remove_multi_state(virtionet_online);
5011out:
5012	return ret;
5013}
5014module_init(virtio_net_driver_init);
5015
5016static __exit void virtio_net_driver_exit(void)
5017{
5018	unregister_virtio_driver(&virtio_net_driver);
5019	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
5020	cpuhp_remove_multi_state(virtionet_online);
5021}
5022module_exit(virtio_net_driver_exit);
5023
5024MODULE_DEVICE_TABLE(virtio, id_table);
5025MODULE_DESCRIPTION("Virtio network driver");
5026MODULE_LICENSE("GPL");
v4.10.11
 
   1/* A network driver using virtio.
   2 *
   3 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation; either version 2 of the License, or
   8 * (at your option) any later version.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
  17 */
  18//#define DEBUG
  19#include <linux/netdevice.h>
  20#include <linux/etherdevice.h>
  21#include <linux/ethtool.h>
  22#include <linux/module.h>
  23#include <linux/virtio.h>
  24#include <linux/virtio_net.h>
  25#include <linux/bpf.h>
 
  26#include <linux/scatterlist.h>
  27#include <linux/if_vlan.h>
  28#include <linux/slab.h>
  29#include <linux/cpu.h>
  30#include <linux/average.h>
  31#include <net/busy_poll.h>
 
 
 
 
 
 
  32
  33static int napi_weight = NAPI_POLL_WEIGHT;
  34module_param(napi_weight, int, 0444);
  35
  36static bool csum = true, gso = true;
  37module_param(csum, bool, 0444);
  38module_param(gso, bool, 0444);
 
  39
  40/* FIXME: MTU in config. */
  41#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
  42#define GOOD_COPY_LEN	128
  43
 
 
 
 
 
 
 
 
 
 
 
  44/* RX packet size EWMA. The average packet size is used to determine the packet
  45 * buffer size when refilling RX rings. As the entire RX ring may be refilled
  46 * at once, the weight is chosen so that the EWMA will be insensitive to short-
  47 * term, transient changes in packet size.
  48 */
  49DECLARE_EWMA(pkt_len, 1, 64)
 
 
 
 
 
 
 
 
 
 
 
 
 
  50
  51/* With mergeable buffers we align buffer address and use the low bits to
  52 * encode its true size. Buffer size is up to 1 page so we need to align to
  53 * square root of page size to ensure we reserve enough bits to encode the true
  54 * size.
  55 */
  56#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  57
  58/* Minimum alignment for mergeable packet buffers. */
  59#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
  60				   1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
 
 
 
 
 
  61
  62#define VIRTNET_DRIVER_VERSION "1.0.0"
 
 
 
 
 
 
 
 
 
 
 
 
  63
  64struct virtnet_stats {
  65	struct u64_stats_sync tx_syncp;
  66	struct u64_stats_sync rx_syncp;
  67	u64 tx_bytes;
  68	u64 tx_packets;
  69
  70	u64 rx_bytes;
  71	u64 rx_packets;
 
 
 
 
  72};
  73
  74/* Internal representation of a send virtqueue */
  75struct send_queue {
  76	/* Virtqueue associated with this send _queue */
  77	struct virtqueue *vq;
  78
  79	/* TX: fragments + linear part + virtio header */
  80	struct scatterlist sg[MAX_SKB_FRAGS + 2];
  81
  82	/* Name of the send queue: output.$index */
  83	char name[40];
 
 
 
 
 
 
 
 
 
  84};
  85
  86/* Internal representation of a receive virtqueue */
  87struct receive_queue {
  88	/* Virtqueue associated with this receive_queue */
  89	struct virtqueue *vq;
  90
  91	struct napi_struct napi;
  92
  93	struct bpf_prog __rcu *xdp_prog;
  94
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  95	/* Chain pages by the private ptr. */
  96	struct page *pages;
  97
  98	/* Average packet length for mergeable receive buffers. */
  99	struct ewma_pkt_len mrg_avg_pkt_len;
 100
 101	/* Page frag for packet buffer allocation. */
 102	struct page_frag alloc_frag;
 103
 104	/* RX: fragments + linear part + virtio header */
 105	struct scatterlist sg[MAX_SKB_FRAGS + 2];
 106
 
 
 
 107	/* Name of this receive queue: input.$index */
 108	char name[40];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 109};
 110
 111struct virtnet_info {
 112	struct virtio_device *vdev;
 113	struct virtqueue *cvq;
 114	struct net_device *dev;
 115	struct send_queue *sq;
 116	struct receive_queue *rq;
 117	unsigned int status;
 118
 119	/* Max # of queue pairs supported by the device */
 120	u16 max_queue_pairs;
 121
 122	/* # of queue pairs currently used by the driver */
 123	u16 curr_queue_pairs;
 124
 125	/* # of XDP queue pairs currently used by the driver */
 126	u16 xdp_queue_pairs;
 127
 
 
 
 128	/* I like... big packets and I cannot lie! */
 129	bool big_packets;
 130
 
 
 
 131	/* Host will merge rx buffers for big packets (shake it! shake it!) */
 132	bool mergeable_rx_bufs;
 133
 
 
 
 
 
 
 
 
 134	/* Has control virtqueue */
 135	bool has_cvq;
 136
 137	/* Host can handle any s/g split between our header and packet data */
 138	bool any_header_sg;
 139
 140	/* Packet virtio header size */
 141	u8 hdr_len;
 142
 143	/* Active statistics */
 144	struct virtnet_stats __percpu *stats;
 
 
 
 145
 146	/* Work struct for refilling if we run low on memory. */
 147	struct delayed_work refill;
 148
 149	/* Work struct for config space updates */
 150	struct work_struct config_work;
 151
 152	/* Does the affinity hint is set for virtqueues? */
 153	bool affinity_hint_set;
 154
 155	/* CPU hotplug instances for online & dead */
 156	struct hlist_node node;
 157	struct hlist_node node_dead;
 158
 159	/* Control VQ buffers: protected by the rtnl lock */
 160	struct virtio_net_ctrl_hdr ctrl_hdr;
 161	virtio_net_ctrl_ack ctrl_status;
 162	struct virtio_net_ctrl_mq ctrl_mq;
 163	u8 ctrl_promisc;
 164	u8 ctrl_allmulti;
 165	u16 ctrl_vid;
 166
 167	/* Ethtool settings */
 168	u8 duplex;
 169	u32 speed;
 
 
 
 
 
 
 
 
 
 
 
 
 
 170};
 171
 172struct padded_vnet_hdr {
 173	struct virtio_net_hdr_mrg_rxbuf hdr;
 174	/*
 175	 * hdr is in a separate sg buffer, and data sg buffer shares same page
 176	 * with this header sg. This padding makes next sg 16 byte aligned
 177	 * after the header.
 178	 */
 179	char padding[4];
 
 
 
 
 
 
 
 
 180};
 181
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 182/* Converting between virtqueue no. and kernel tx/rx queue no.
 183 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 184 */
 185static int vq2txq(struct virtqueue *vq)
 186{
 187	return (vq->index - 1) / 2;
 188}
 189
 190static int txq2vq(int txq)
 191{
 192	return txq * 2 + 1;
 193}
 194
 195static int vq2rxq(struct virtqueue *vq)
 196{
 197	return vq->index / 2;
 198}
 199
 200static int rxq2vq(int rxq)
 201{
 202	return rxq * 2;
 203}
 204
 205static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
 
 206{
 207	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
 208}
 209
 210/*
 211 * private is used to chain pages for big packets, put the whole
 212 * most recent used list in the beginning for reuse
 213 */
 214static void give_pages(struct receive_queue *rq, struct page *page)
 215{
 216	struct page *end;
 217
 218	/* Find end of list, sew whole thing into vi->rq.pages. */
 219	for (end = page; end->private; end = (struct page *)end->private);
 220	end->private = (unsigned long)rq->pages;
 221	rq->pages = page;
 222}
 223
 224static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
 225{
 226	struct page *p = rq->pages;
 227
 228	if (p) {
 229		rq->pages = (struct page *)p->private;
 230		/* clear private here, it is used to chain pages */
 231		p->private = 0;
 232	} else
 233		p = alloc_page(gfp_mask);
 234	return p;
 235}
 236
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 237static void skb_xmit_done(struct virtqueue *vq)
 238{
 239	struct virtnet_info *vi = vq->vdev->priv;
 
 240
 241	/* Suppress further interrupts. */
 242	virtqueue_disable_cb(vq);
 243
 244	/* We were probably waiting for more output buffers. */
 245	netif_wake_subqueue(vi->dev, vq2txq(vq));
 
 
 
 246}
 247
 248static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
 
 
 249{
 250	unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
 251	return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
 252}
 253
 254static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
 255{
 256	return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
 
 257
 
 
 
 258}
 259
 260static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
 
 
 261{
 262	unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
 263	return (unsigned long)buf | (size - 1);
 
 
 
 
 
 
 
 
 264}
 265
 266/* Called from bottom half context */
 267static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 268				   struct receive_queue *rq,
 269				   struct page *page, unsigned int offset,
 270				   unsigned int len, unsigned int truesize)
 
 271{
 272	struct sk_buff *skb;
 273	struct virtio_net_hdr_mrg_rxbuf *hdr;
 274	unsigned int copy, hdr_len, hdr_padded_len;
 275	char *p;
 
 
 276
 277	p = page_address(page) + offset;
 278
 279	/* copy small packet so we can reuse these pages for small data */
 280	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
 281	if (unlikely(!skb))
 282		return NULL;
 283
 284	hdr = skb_vnet_hdr(skb);
 285
 286	hdr_len = vi->hdr_len;
 287	if (vi->mergeable_rx_bufs)
 288		hdr_padded_len = sizeof *hdr;
 289	else
 290		hdr_padded_len = sizeof(struct padded_vnet_hdr);
 291
 292	memcpy(hdr, p, hdr_len);
 293
 294	len -= hdr_len;
 295	offset += hdr_padded_len;
 296	p += hdr_padded_len;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 297
 298	copy = len;
 299	if (copy > skb_tailroom(skb))
 300		copy = skb_tailroom(skb);
 301	memcpy(skb_put(skb, copy), p, copy);
 
 
 
 
 302
 303	len -= copy;
 304	offset += copy;
 305
 306	if (vi->mergeable_rx_bufs) {
 307		if (len)
 308			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 309		else
 310			put_page(page);
 311		return skb;
 312	}
 313
 314	/*
 315	 * Verify that we can indeed put this data into a skb.
 316	 * This is here to handle cases when the device erroneously
 317	 * tries to receive more than is possible. This is usually
 318	 * the case of a broken device.
 319	 */
 320	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
 321		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
 322		dev_kfree_skb(skb);
 323		return NULL;
 324	}
 325	BUG_ON(offset >= PAGE_SIZE);
 326	while (len) {
 327		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
 328		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
 329				frag_size, truesize);
 330		len -= frag_size;
 331		page = (struct page *)page->private;
 332		offset = 0;
 333	}
 334
 335	if (page)
 336		give_pages(rq, page);
 337
 
 
 
 
 
 
 338	return skb;
 339}
 340
 341static void virtnet_xdp_xmit(struct virtnet_info *vi,
 342			     struct receive_queue *rq,
 343			     struct send_queue *sq,
 344			     struct xdp_buff *xdp,
 345			     void *data)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 346{
 347	struct virtio_net_hdr_mrg_rxbuf *hdr;
 348	unsigned int num_sg, len;
 349	void *xdp_sent;
 350	int err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 351
 352	/* Free up any pending old buffers before queueing new ones. */
 353	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
 354		if (vi->mergeable_rx_bufs) {
 355			struct page *sent_page = virt_to_head_page(xdp_sent);
 356
 357			put_page(sent_page);
 358		} else { /* small buffer */
 359			struct sk_buff *skb = xdp_sent;
 
 360
 361			kfree_skb(skb);
 
 362		}
 
 363	}
 364
 365	if (vi->mergeable_rx_bufs) {
 366		/* Zero header and leave csum up to XDP layers */
 367		hdr = xdp->data;
 368		memset(hdr, 0, vi->hdr_len);
 369
 370		num_sg = 1;
 371		sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
 372	} else { /* small buffer */
 373		struct sk_buff *skb = data;
 374
 375		/* Zero header and leave csum up to XDP layers */
 376		hdr = skb_vnet_hdr(skb);
 377		memset(hdr, 0, vi->hdr_len);
 378
 379		num_sg = 2;
 380		sg_init_table(sq->sg, 2);
 381		sg_set_buf(sq->sg, hdr, vi->hdr_len);
 382		skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
 383	}
 384	err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
 385				   data, GFP_ATOMIC);
 386	if (unlikely(err)) {
 387		if (vi->mergeable_rx_bufs) {
 388			struct page *page = virt_to_head_page(xdp->data);
 389
 390			put_page(page);
 391		} else /* small buffer */
 392			kfree_skb(data);
 393		return; // On error abort to avoid unnecessary kick
 394	}
 
 
 
 
 
 
 
 
 
 
 
 
 395
 396	virtqueue_kick(sq->vq);
 
 
 
 
 
 
 
 
 
 
 
 
 397}
 398
 399static u32 do_xdp_prog(struct virtnet_info *vi,
 400		       struct receive_queue *rq,
 401		       struct bpf_prog *xdp_prog,
 402		       void *data, int len)
 403{
 404	int hdr_padded_len;
 405	struct xdp_buff xdp;
 406	void *buf;
 407	unsigned int qp;
 408	u32 act;
 409
 410	if (vi->mergeable_rx_bufs) {
 411		hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 412		xdp.data = data + hdr_padded_len;
 413		xdp.data_end = xdp.data + (len - vi->hdr_len);
 414		buf = data;
 415	} else { /* small buffers */
 416		struct sk_buff *skb = data;
 417
 418		xdp.data = skb->data;
 419		xdp.data_end = xdp.data + len;
 420		buf = skb->data;
 421	}
 422
 423	act = bpf_prog_run_xdp(xdp_prog, &xdp);
 424	switch (act) {
 425	case XDP_PASS:
 426		return XDP_PASS;
 
 427	case XDP_TX:
 428		qp = vi->curr_queue_pairs -
 429			vi->xdp_queue_pairs +
 430			smp_processor_id();
 431		xdp.data = buf;
 432		virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
 433		return XDP_TX;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 434	default:
 435		bpf_warn_invalid_xdp_action(act);
 
 436	case XDP_ABORTED:
 
 
 437	case XDP_DROP:
 438		return XDP_DROP;
 439	}
 440}
 441
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 442static struct sk_buff *receive_small(struct net_device *dev,
 443				     struct virtnet_info *vi,
 444				     struct receive_queue *rq,
 445				     void *buf, unsigned int len)
 
 
 
 446{
 447	struct sk_buff * skb = buf;
 448	struct bpf_prog *xdp_prog;
 
 449
 450	len -= vi->hdr_len;
 451	skb_trim(skb, len);
 
 
 
 
 
 
 
 452
 453	rcu_read_lock();
 454	xdp_prog = rcu_dereference(rq->xdp_prog);
 455	if (xdp_prog) {
 456		struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
 457		u32 act;
 458
 459		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
 460			goto err_xdp;
 461		act = do_xdp_prog(vi, rq, xdp_prog, skb, len);
 462		switch (act) {
 463		case XDP_PASS:
 464			break;
 465		case XDP_TX:
 466			rcu_read_unlock();
 467			goto xdp_xmit;
 468		case XDP_DROP:
 469		default:
 470			goto err_xdp;
 471		}
 
 472	}
 473	rcu_read_unlock();
 474
 475	return skb;
 
 
 476
 477err_xdp:
 478	rcu_read_unlock();
 479	dev->stats.rx_dropped++;
 480	kfree_skb(skb);
 481xdp_xmit:
 482	return NULL;
 483}
 484
 485static struct sk_buff *receive_big(struct net_device *dev,
 486				   struct virtnet_info *vi,
 487				   struct receive_queue *rq,
 488				   void *buf,
 489				   unsigned int len)
 
 490{
 491	struct page *page = buf;
 492	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
 
 493
 
 494	if (unlikely(!skb))
 495		goto err;
 496
 497	return skb;
 498
 499err:
 500	dev->stats.rx_dropped++;
 501	give_pages(rq, page);
 502	return NULL;
 503}
 504
  505/* The conditions to enable XDP should preclude the underlying device from
  506 * sending packets across multiple buffers (num_buf > 1). However, per the
  507 * spec this does not appear to be illegal, merely against convention.
  508 * So, to avoid making the system unresponsive, the packets are linearized
  509 * into a page and the XDP program is run. This is extremely slow, and we
  510 * warn the user to fix it as soon as possible. Fixing it may require
  511 * examining the underlying hardware to determine why multiple buffers
  512 * are being received, or simply loading the XDP program in the ingress
  513 * stack after the skb is built, because there is no advantage to running
  514 * it here anymore.
 515 */
 516static struct page *xdp_linearize_page(struct receive_queue *rq,
 517				       u16 *num_buf,
 518				       struct page *p,
 519				       int offset,
 520				       unsigned int *len)
 521{
 522	struct page *page = alloc_page(GFP_ATOMIC);
 523	unsigned int page_off = 0;
 
 
 
 524
 525	if (!page)
 526		return NULL;
 527
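     	/* Copy the head buffer into the fresh page, then append the
     	 * remaining merge buffers after it below.
     	 */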
 528	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
 529	page_off += *len;
 530
 531	while (--*num_buf) {
 532		unsigned int buflen;
 533		unsigned long ctx;
 534		void *buf;
 535		int off;
 536
 537		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
 538		if (unlikely(!ctx))
 539			goto err_buf;
 540
 541		buf = mergeable_ctx_to_buf_address(ctx);
 542		p = virt_to_head_page(buf);
 543		off = buf - page_address(p);
 544
 545		/* guard against a misconfigured or uncooperative backend that
  546		 * is sending packets larger than the MTU.
 547		 */
 548		if ((page_off + buflen) > PAGE_SIZE) {
 549			put_page(p);
 550			goto err_buf;
 551		}
 552
 553		memcpy(page_address(page) + page_off,
 554		       page_address(p) + off, buflen);
 555		page_off += buflen;
 556		put_page(p);
 557	}
 558
 559	*len = page_off;
 560	return page;
 561err_buf:
 562	__free_pages(page, 0);
 563	return NULL;
 564}
 565
 566static struct sk_buff *receive_mergeable(struct net_device *dev,
 567					 struct virtnet_info *vi,
 568					 struct receive_queue *rq,
 569					 unsigned long ctx,
 570					 unsigned int len)
 571{
 572	void *buf = mergeable_ctx_to_buf_address(ctx);
 573	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
 574	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
 575	struct page *page = virt_to_head_page(buf);
 576	int offset = buf - page_address(page);
 577	struct sk_buff *head_skb, *curr_skb;
 578	struct bpf_prog *xdp_prog;
 579	unsigned int truesize;
 
 
 580
 581	head_skb = NULL;
 
 582
 583	rcu_read_lock();
 584	xdp_prog = rcu_dereference(rq->xdp_prog);
 585	if (xdp_prog) {
 586		struct page *xdp_page;
 587		u32 act;
 588
 589		/* This happens when rx buffer size is underestimated */
 590		if (unlikely(num_buf > 1)) {
 591			/* linearize data for XDP */
 592			xdp_page = xdp_linearize_page(rq, &num_buf,
 593						      page, offset, &len);
 594			if (!xdp_page)
 595				goto err_xdp;
 596			offset = 0;
 597		} else {
 598			xdp_page = page;
 599		}
 600
 601		/* Transient failure which in theory could occur if
 602		 * in-flight packets from before XDP was enabled reach
 603		 * the receive path after XDP is loaded. In practice I
 604		 * was not able to create this condition.
 605		 */
 606		if (unlikely(hdr->hdr.gso_type))
 607			goto err_xdp;
 608
 609		act = do_xdp_prog(vi, rq, xdp_prog,
 610				  page_address(xdp_page) + offset, len);
 611		switch (act) {
 612		case XDP_PASS:
 613			/* We can only create skb based on xdp_page. */
 614			if (unlikely(xdp_page != page)) {
 615				rcu_read_unlock();
 616				put_page(page);
 617				head_skb = page_to_skb(vi, rq, xdp_page,
 618						       0, len, PAGE_SIZE);
 619				ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
 620				return head_skb;
 621			}
 622			break;
 623		case XDP_TX:
 624			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
 625			if (unlikely(xdp_page != page))
 626				goto err_xdp;
 627			rcu_read_unlock();
 628			goto xdp_xmit;
 629		case XDP_DROP:
 630		default:
 631			if (unlikely(xdp_page != page))
 632				__free_pages(xdp_page, 0);
 633			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
 634			goto err_xdp;
 635		}
 
 636	}
 637	rcu_read_unlock();
 638
 639	truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
 640	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
 641	curr_skb = head_skb;
 642
 643	if (unlikely(!curr_skb))
 644		goto err_skb;
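     	/* Each remaining buffer of this packet is appended to the head skb
     	 * as a page fragment; once MAX_SKB_FRAGS is reached, continue on a
     	 * new skb chained via frag_list.
     	 */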
 645	while (--num_buf) {
 646		int num_skb_frags;
 647
 648		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
 649		if (unlikely(!ctx)) {
 650			pr_debug("%s: rx error: %d buffers out of %d missing\n",
 651				 dev->name, num_buf,
 652				 virtio16_to_cpu(vi->vdev,
 653						 hdr->num_buffers));
 654			dev->stats.rx_length_errors++;
 655			goto err_buf;
 656		}
 657
 658		buf = mergeable_ctx_to_buf_address(ctx);
 659		page = virt_to_head_page(buf);
 660
 661		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
 662		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
 663			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
 664
 665			if (unlikely(!nskb))
 666				goto err_skb;
 667			if (curr_skb == head_skb)
 668				skb_shinfo(curr_skb)->frag_list = nskb;
 669			else
 670				curr_skb->next = nskb;
 671			curr_skb = nskb;
 672			head_skb->truesize += nskb->truesize;
 673			num_skb_frags = 0;
 674		}
 675		truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
 676		if (curr_skb != head_skb) {
 677			head_skb->data_len += len;
 678			head_skb->len += len;
 679			head_skb->truesize += truesize;
 680		}
 681		offset = buf - page_address(page);
 682		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
 683			put_page(page);
 684			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
 685					     len, truesize);
 686		} else {
 687			skb_add_rx_frag(curr_skb, num_skb_frags, page,
 688					offset, len, truesize);
 689		}
 690	}
 691
 692	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
 693	return head_skb;
 694
 695err_xdp:
 696	rcu_read_unlock();
 697err_skb:
 698	put_page(page);
 699	while (--num_buf) {
 700		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
 701		if (unlikely(!ctx)) {
 702			pr_debug("%s: rx error: %d buffers missing\n",
 703				 dev->name, num_buf);
 704			dev->stats.rx_length_errors++;
 705			break;
 706		}
 707		page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
 708		put_page(page);
 709	}
 710err_buf:
 711	dev->stats.rx_dropped++;
 712	dev_kfree_skb(head_skb);
 713xdp_xmit:
 714	return NULL;
 715}
 716
 717static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 718			void *buf, unsigned int len)
 719{
 720	struct net_device *dev = vi->dev;
 721	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 722	struct sk_buff *skb;
 723	struct virtio_net_hdr_mrg_rxbuf *hdr;
 724
 725	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
 726		pr_debug("%s: short packet %i\n", dev->name, len);
 727		dev->stats.rx_length_errors++;
 728		if (vi->mergeable_rx_bufs) {
 729			unsigned long ctx = (unsigned long)buf;
 730			void *base = mergeable_ctx_to_buf_address(ctx);
 731			put_page(virt_to_head_page(base));
 732		} else if (vi->big_packets) {
 733			give_pages(rq, buf);
 734		} else {
 735			dev_kfree_skb(buf);
 736		}
 737		return;
 738	}
 739
 740	if (vi->mergeable_rx_bufs)
 741		skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
 
 742	else if (vi->big_packets)
 743		skb = receive_big(dev, vi, rq, buf, len);
 744	else
 745		skb = receive_small(dev, vi, rq, buf, len);
 746
 747	if (unlikely(!skb))
 748		return;
 749
 750	hdr = skb_vnet_hdr(skb);
 751
 752	u64_stats_update_begin(&stats->rx_syncp);
 753	stats->rx_bytes += skb->len;
 754	stats->rx_packets++;
 755	u64_stats_update_end(&stats->rx_syncp);
 756
 757	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
 758		skb->ip_summed = CHECKSUM_UNNECESSARY;
 759
 760	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
 761				  virtio_is_little_endian(vi->vdev))) {
 762		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
 763				     dev->name, hdr->hdr.gso_type,
 764				     hdr->hdr.gso_size);
 765		goto frame_err;
 766	}
 767
 
 768	skb->protocol = eth_type_trans(skb, dev);
 769	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
 770		 ntohs(skb->protocol), skb->len, skb->pkt_type);
 771
 772	napi_gro_receive(&rq->napi, skb);
 773	return;
 774
 775frame_err:
 776	dev->stats.rx_frame_errors++;
 777	dev_kfree_skb(skb);
 778}
 779
 
 
 
 
 
 780static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 781			     gfp_t gfp)
 782{
 783	struct sk_buff *skb;
 784	struct virtio_net_hdr_mrg_rxbuf *hdr;
 
 
 785	int err;
 786
 787	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
 788	if (unlikely(!skb))
 
 
 
 789		return -ENOMEM;
 790
 791	skb_put(skb, GOOD_PACKET_LEN);
 
 792
 793	hdr = skb_vnet_hdr(skb);
 794	sg_init_table(rq->sg, 2);
 795	sg_set_buf(rq->sg, hdr, vi->hdr_len);
 796	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
 797
 798	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
 799	if (err < 0)
 800		dev_kfree_skb(skb);
 801
 802	return err;
 803}
 804
 805static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
 806			   gfp_t gfp)
 807{
 808	struct page *first, *list = NULL;
 809	char *p;
 810	int i, err, offset;
 811
 812	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
 813
 814	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
 815	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
 816		first = get_a_page(rq, gfp);
 817		if (!first) {
 818			if (list)
 819				give_pages(rq, list);
 820			return -ENOMEM;
 821		}
 822		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
 823
 824		/* chain new page in list head to match sg */
 825		first->private = (unsigned long)list;
 826		list = first;
 827	}
 828
 829	first = get_a_page(rq, gfp);
 830	if (!first) {
 831		give_pages(rq, list);
 832		return -ENOMEM;
 833	}
 834	p = page_address(first);
 835
 836	/* rq->sg[0], rq->sg[1] share the same page */
  837	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
 838	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
 839
 840	/* rq->sg[1] for data packet, from offset */
 841	offset = sizeof(struct padded_vnet_hdr);
 842	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
 843
 844	/* chain first in list head */
 845	first->private = (unsigned long)list;
 846	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
 847				  first, gfp);
 848	if (err < 0)
 849		give_pages(rq, first);
 850
 851	return err;
 852}
 853
 854static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
 
 
 855{
 856	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 
 857	unsigned int len;
 858
 859	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
 860			GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
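     	/* Illustrative arithmetic (assuming 4 KiB pages and a 256-byte
     	 * MERGEABLE_BUFFER_ALIGN): with the EWMA at or below GOOD_PACKET_LEN
     	 * (1518), len = 12 + 1518 = 1530, which rounds up to 1536 below.
     	 */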
 861	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
 
 
 
 
 862}
 863
 864static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
 
 865{
 866	struct page_frag *alloc_frag = &rq->alloc_frag;
 
 
 
 
 
 867	char *buf;
 868	unsigned long ctx;
 869	int err;
 870	unsigned int len, hole;
 871
 872	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
 873	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
 
 
 
 
 
 
 874		return -ENOMEM;
 875
 876	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
 877	ctx = mergeable_buf_to_ctx(buf, len);
 878	get_page(alloc_frag->page);
 879	alloc_frag->offset += len;
 880	hole = alloc_frag->size - alloc_frag->offset;
 881	if (hole < len) {
 882		/* To avoid internal fragmentation, if there is very likely not
 883		 * enough space for another buffer, add the remaining space to
 884		 * the current buffer. This extra space is not included in
 885		 * the truesize stored in ctx.
 
 886		 */
 887		len += hole;
 
 888		alloc_frag->offset += hole;
 889	}
 890
 891	sg_init_one(rq->sg, buf, len);
 892	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
 893	if (err < 0)
 
 
 
 
 894		put_page(virt_to_head_page(buf));
 
 895
 896	return err;
 897}
 898
 899/*
 900 * Returns false if we couldn't fill entirely (OOM).
 901 *
 902 * Normally run in the receive path, but can also be run from ndo_open
 903 * before we're receiving packets, or from refill_work which is
 904 * careful to disable receiving (using napi_disable).
 905 */
 906static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
 907			  gfp_t gfp)
 908{
 909	int err;
 910	bool oom;
 911
 912	gfp |= __GFP_COLD;
 913	do {
 914		if (vi->mergeable_rx_bufs)
 915			err = add_recvbuf_mergeable(rq, gfp);
 916		else if (vi->big_packets)
 917			err = add_recvbuf_big(vi, rq, gfp);
 918		else
 919			err = add_recvbuf_small(vi, rq, gfp);
 920
 921		oom = err == -ENOMEM;
 922		if (err)
 923			break;
 924	} while (rq->vq->num_free);
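     	/* Notify the host once for the whole batch of buffers just added. */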
 925	virtqueue_kick(rq->vq);
 926	return !oom;
 927}
 928
 929static void skb_recv_done(struct virtqueue *rvq)
 930{
 931	struct virtnet_info *vi = rvq->vdev->priv;
 932	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
 933
  934	/* Schedule NAPI, suppress further interrupts if successful. */
 935	if (napi_schedule_prep(&rq->napi)) {
 936		virtqueue_disable_cb(rvq);
 937		__napi_schedule(&rq->napi);
 938	}
 939}
 940
 941static void virtnet_napi_enable(struct receive_queue *rq)
 942{
 943	napi_enable(&rq->napi);
 944
  945	/* If all buffers were filled by the other side before we enabled NAPI,
  946	 * we won't get another interrupt, so process any outstanding packets
  947	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
  948	 * We synchronize against interrupts via NAPI_STATE_SCHED. */
 949	if (napi_schedule_prep(&rq->napi)) {
 950		virtqueue_disable_cb(rq->vq);
 951		local_bh_disable();
 952		__napi_schedule(&rq->napi);
 953		local_bh_enable();
 954	}
 955}
 956
 957static void refill_work(struct work_struct *work)
 958{
 959	struct virtnet_info *vi =
 960		container_of(work, struct virtnet_info, refill.work);
 961	bool still_empty;
 962	int i;
 963
 964	for (i = 0; i < vi->curr_queue_pairs; i++) {
 965		struct receive_queue *rq = &vi->rq[i];
 966
 967		napi_disable(&rq->napi);
 968		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
 969		virtnet_napi_enable(rq);
 970
 971		/* In theory, this can happen: if we don't get any buffers in
 972		 * we will *never* try to fill again.
 973		 */
 974		if (still_empty)
 975			schedule_delayed_work(&vi->refill, HZ/2);
 976	}
 977}
 978
 979static int virtnet_receive(struct receive_queue *rq, int budget)
 980{
 981	struct virtnet_info *vi = rq->vq->vdev->priv;
 982	unsigned int len, received = 0;
 
 
 983	void *buf;
 
 
 
 
 984
 985	while (received < budget &&
 986	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
 987		receive_buf(vi, rq, buf, len);
 988		received++;
 989	}
 990
 991	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
 992		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
 993			schedule_delayed_work(&vi->refill, 0);
 994	}
 
 995
 996	return received;
 997}
 998
 999static int virtnet_poll(struct napi_struct *napi, int budget)
1000{
1001	struct receive_queue *rq =
1002		container_of(napi, struct receive_queue, napi);
1003	unsigned int r, received;
 
 
 
 
 
 
1004
1005	received = virtnet_receive(rq, budget);
 
 
 
 
1006
1007	/* Out of packets? */
1008	if (received < budget) {
1009		r = virtqueue_enable_cb_prepare(rq->vq);
1010		napi_complete_done(napi, received);
1011		if (unlikely(virtqueue_poll(rq->vq, r)) &&
1012		    napi_schedule_prep(napi)) {
1013			virtqueue_disable_cb(rq->vq);
1014			__napi_schedule(napi);
 
 
 
 
 
1015		}
 
1016	}
1017
1018	return received;
1019}
1020
1021#ifdef CONFIG_NET_RX_BUSY_POLL
1022/* must be called with local_bh_disable()d */
1023static int virtnet_busy_poll(struct napi_struct *napi)
1024{
1025	struct receive_queue *rq =
1026		container_of(napi, struct receive_queue, napi);
1027	struct virtnet_info *vi = rq->vq->vdev->priv;
1028	int r, received = 0, budget = 4;
 
 
 
1029
1030	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
1031		return LL_FLUSH_FAILED;
 
 
1032
1033	if (!napi_schedule_prep(napi))
1034		return LL_FLUSH_BUSY;
1035
1036	virtqueue_disable_cb(rq->vq);
1037
1038again:
1039	received += virtnet_receive(rq, budget);
1040
1041	r = virtqueue_enable_cb_prepare(rq->vq);
1042	clear_bit(NAPI_STATE_SCHED, &napi->state);
1043	if (unlikely(virtqueue_poll(rq->vq, r)) &&
1044	    napi_schedule_prep(napi)) {
1045		virtqueue_disable_cb(rq->vq);
1046		if (received < budget) {
1047			budget -= received;
1048			goto again;
1049		} else {
1050			__napi_schedule(napi);
1051		}
1052	}
1053
1054	return received;
 
 
1055}
1056#endif	/* CONFIG_NET_RX_BUSY_POLL */
1057
1058static int virtnet_open(struct net_device *dev)
1059{
1060	struct virtnet_info *vi = netdev_priv(dev);
1061	int i;
 
 
1062
1063	for (i = 0; i < vi->max_queue_pairs; i++) {
1064		if (i < vi->curr_queue_pairs)
 1065			/* Make sure we have some buffers: if OOM, use the workqueue. */
1066			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
1067				schedule_delayed_work(&vi->refill, 0);
1068		virtnet_napi_enable(&vi->rq[i]);
 
 
 
1069	}
1070
1071	return 0;
1072}
1073
1074static void free_old_xmit_skbs(struct send_queue *sq)
1075{
1076	struct sk_buff *skb;
1077	unsigned int len;
1078	struct virtnet_info *vi = sq->vq->vdev->priv;
1079	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
1080
1081	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
1082		pr_debug("Sent skb %p\n", skb);
 
 
1083
1084		u64_stats_update_begin(&stats->tx_syncp);
1085		stats->tx_bytes += skb->len;
1086		stats->tx_packets++;
1087		u64_stats_update_end(&stats->tx_syncp);
1088
1089		dev_kfree_skb_any(skb);
1090	}
 
 
1091}
1092
1093static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
1094{
1095	struct virtio_net_hdr_mrg_rxbuf *hdr;
1096	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
1097	struct virtnet_info *vi = sq->vq->vdev->priv;
1098	unsigned num_sg;
1099	unsigned hdr_len = vi->hdr_len;
1100	bool can_push;
1101
1102	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
1103
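     	/* The header can be pushed directly into the skb headroom when the
     	 * device accepts any descriptor layout and the headroom is writable
     	 * and suitably aligned; this saves an sg entry.
     	 */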
1104	can_push = vi->any_header_sg &&
1105		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
1106		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
1107	/* Even if we can, don't push here yet as this would skew
1108	 * csum_start offset below. */
1109	if (can_push)
1110		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
1111	else
1112		hdr = skb_vnet_hdr(skb);
1113
1114	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
1115				    virtio_is_little_endian(vi->vdev), false))
1116		BUG();
 
1117
1118	if (vi->mergeable_rx_bufs)
1119		hdr->num_buffers = 0;
1120
1121	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
1122	if (can_push) {
1123		__skb_push(skb, hdr_len);
1124		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
 
 
1125		/* Pull header back to avoid skew in tx bytes calculations. */
1126		__skb_pull(skb, hdr_len);
1127	} else {
1128		sg_set_buf(sq->sg, hdr, hdr_len);
1129		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
 
 
 
1130	}
1131	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
1132}
1133
1134static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1135{
1136	struct virtnet_info *vi = netdev_priv(dev);
1137	int qnum = skb_get_queue_mapping(skb);
1138	struct send_queue *sq = &vi->sq[qnum];
1139	int err;
1140	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1141	bool kick = !skb->xmit_more;
 
1142
1143	/* Free up any pending old buffers before queueing new ones. */
1144	free_old_xmit_skbs(sq);
1145
1146	/* timestamp packet in software */
1147	skb_tx_timestamp(skb);
1148
1149	/* Try to transmit */
1150	err = xmit_skb(sq, skb);
1151
1152	/* This should not happen! */
1153	if (unlikely(err)) {
1154		dev->stats.tx_fifo_errors++;
1155		if (net_ratelimit())
1156			dev_warn(&dev->dev,
1157				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
1158		dev->stats.tx_dropped++;
 
1159		dev_kfree_skb_any(skb);
1160		return NETDEV_TX_OK;
1161	}
1162
1163	/* Don't wait up for transmitted skbs to be freed. */
1164	skb_orphan(skb);
1165	nf_reset(skb);
 
 
1166
1167	/* If running out of space, stop queue to avoid getting packets that we
1168	 * are then unable to transmit.
1169	 * An alternative would be to force queuing layer to requeue the skb by
1170	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
1171	 * returned in a normal path of operation: it means that driver is not
1172	 * maintaining the TX queue stop/start state properly, and causes
1173	 * the stack to do a non-trivial amount of useless work.
1174	 * Since most packets only take 1 or 2 ring slots, stopping the queue
1175	 * early means 16 slots are typically wasted.
1176	 */
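     	/* Worst case, one skb needs 2 + MAX_SKB_FRAGS descriptors: the
     	 * virtio-net header, the linear data, and one per page fragment
     	 * (see xmit_skb()).
     	 */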
1177	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
1178		netif_stop_subqueue(dev, qnum);
1179		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1180			/* More just got used, free them then recheck. */
1181			free_old_xmit_skbs(sq);
1182			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1183				netif_start_subqueue(dev, qnum);
1184				virtqueue_disable_cb(sq->vq);
1185			}
1186		}
1187	}
1188
1189	if (kick || netif_xmit_stopped(txq))
1190		virtqueue_kick(sq->vq);
1191
1192	return NETDEV_TX_OK;
1193}
1194
1195/*
1196 * Send command via the control virtqueue and check status.  Commands
1197 * supported by the hypervisor, as indicated by feature bits, should
1198 * never fail unless improperly formatted.
1199 */
1200static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1201				 struct scatterlist *out)
1202{
1203	struct scatterlist *sgs[4], hdr, stat;
1204	unsigned out_num = 0, tmp;
 
1205
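     	/* sgs[] carries the command header, the optional command-specific
     	 * payload, and a status byte the device writes back; ctrl_status is
     	 * preset to ~0 so a missing response reads as failure.
     	 */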
1206	/* Caller should know better */
1207	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
1208
1209	vi->ctrl_status = ~0;
1210	vi->ctrl_hdr.class = class;
1211	vi->ctrl_hdr.cmd = cmd;
1212	/* Add header */
1213	sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
1214	sgs[out_num++] = &hdr;
1215
1216	if (out)
1217		sgs[out_num++] = out;
1218
1219	/* Add return status. */
1220	sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
1221	sgs[out_num] = &stat;
1222
1223	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1224	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
 
 
 
 
 
1225
1226	if (unlikely(!virtqueue_kick(vi->cvq)))
1227		return vi->ctrl_status == VIRTIO_NET_OK;
1228
 1229	/* Spin for a response; the kick causes an ioport write, trapping
1230	 * into the hypervisor, so the request should be handled immediately.
1231	 */
1232	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
1233	       !virtqueue_is_broken(vi->cvq))
1234		cpu_relax();
1235
1236	return vi->ctrl_status == VIRTIO_NET_OK;
1237}
1238
1239static int virtnet_set_mac_address(struct net_device *dev, void *p)
1240{
1241	struct virtnet_info *vi = netdev_priv(dev);
1242	struct virtio_device *vdev = vi->vdev;
1243	int ret;
1244	struct sockaddr *addr;
1245	struct scatterlist sg;
1246
1247	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
 
 
 
1248	if (!addr)
1249		return -ENOMEM;
1250	memcpy(addr, p, sizeof(*addr));
1251
1252	ret = eth_prepare_mac_addr_change(dev, addr);
1253	if (ret)
1254		goto out;
1255
1256	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1257		sg_init_one(&sg, addr->sa_data, dev->addr_len);
1258		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1259					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
1260			dev_warn(&vdev->dev,
1261				 "Failed to set mac address by vq command.\n");
1262			ret = -EINVAL;
1263			goto out;
1264		}
1265	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
1266		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1267		unsigned int i;
1268
1269		/* Naturally, this has an atomicity problem. */
1270		for (i = 0; i < dev->addr_len; i++)
1271			virtio_cwrite8(vdev,
1272				       offsetof(struct virtio_net_config, mac) +
1273				       i, addr->sa_data[i]);
1274	}
1275
1276	eth_commit_mac_addr_change(dev, p);
1277	ret = 0;
1278
1279out:
1280	kfree(addr);
1281	return ret;
1282}
1283
1284static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
1285					       struct rtnl_link_stats64 *tot)
1286{
1287	struct virtnet_info *vi = netdev_priv(dev);
1288	int cpu;
1289	unsigned int start;
 
1290
1291	for_each_possible_cpu(cpu) {
1292		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
1293		u64 tpackets, tbytes, rpackets, rbytes;
 
1294
1295		do {
1296			start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
1297			tpackets = stats->tx_packets;
1298			tbytes   = stats->tx_bytes;
1299		} while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
 
1300
1301		do {
1302			start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
1303			rpackets = stats->rx_packets;
1304			rbytes   = stats->rx_bytes;
1305		} while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
 
1306
1307		tot->rx_packets += rpackets;
1308		tot->tx_packets += tpackets;
1309		tot->rx_bytes   += rbytes;
1310		tot->tx_bytes   += tbytes;
 
 
1311	}
1312
1313	tot->tx_dropped = dev->stats.tx_dropped;
1314	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
1315	tot->rx_dropped = dev->stats.rx_dropped;
1316	tot->rx_length_errors = dev->stats.rx_length_errors;
1317	tot->rx_frame_errors = dev->stats.rx_frame_errors;
1318
1319	return tot;
1320}
1321
1322#ifdef CONFIG_NET_POLL_CONTROLLER
1323static void virtnet_netpoll(struct net_device *dev)
1324{
1325	struct virtnet_info *vi = netdev_priv(dev);
1326	int i;
1327
1328	for (i = 0; i < vi->curr_queue_pairs; i++)
1329		napi_schedule(&vi->rq[i].napi);
1330}
1331#endif
1332
1333static void virtnet_ack_link_announce(struct virtnet_info *vi)
1334{
1335	rtnl_lock();
1336	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
1337				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
1338		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1339	rtnl_unlock();
1340}
1341
1342static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1343{
1344	struct scatterlist sg;
1345	struct net_device *dev = vi->dev;
1346
1347	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1348		return 0;
1349
1350	vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1351	sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
1352
1353	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1354				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
1355		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
1356			 queue_pairs);
1357		return -EINVAL;
1358	} else {
1359		vi->curr_queue_pairs = queue_pairs;
 1360		/* virtnet_open() will refill when the device goes up. */
1361		if (dev->flags & IFF_UP)
1362			schedule_delayed_work(&vi->refill, 0);
1363	}
1364
1365	return 0;
1366}
1367
1368static int virtnet_close(struct net_device *dev)
1369{
1370	struct virtnet_info *vi = netdev_priv(dev);
1371	int i;
1372
 
 
1373	/* Make sure refill_work doesn't re-enable napi! */
1374	cancel_delayed_work_sync(&vi->refill);
1375
1376	for (i = 0; i < vi->max_queue_pairs; i++)
1377		napi_disable(&vi->rq[i].napi);
 
 
1378
1379	return 0;
1380}
1381
1382static void virtnet_set_rx_mode(struct net_device *dev)
1383{
1384	struct virtnet_info *vi = netdev_priv(dev);
1385	struct scatterlist sg[2];
1386	struct virtio_net_ctrl_mac *mac_data;
1387	struct netdev_hw_addr *ha;
1388	int uc_count;
1389	int mc_count;
1390	void *buf;
1391	int i;
1392
1393	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
1394	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1395		return;
1396
1397	vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
1398	vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1399
1400	sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
1401
1402	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1403				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
1404		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1405			 vi->ctrl_promisc ? "en" : "dis");
1406
1407	sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
1408
1409	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1410				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1411		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1412			 vi->ctrl_allmulti ? "en" : "dis");
1413
1414	uc_count = netdev_uc_count(dev);
1415	mc_count = netdev_mc_count(dev);
1416	/* MAC filter - use one buffer for both lists */
1417	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
1418		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
1419	mac_data = buf;
1420	if (!buf)
1421		return;
1422
1423	sg_init_table(sg, 2);
1424
1425	/* Store the unicast list and count in the front of the buffer */
1426	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
1427	i = 0;
1428	netdev_for_each_uc_addr(ha, dev)
1429		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1430
1431	sg_set_buf(&sg[0], mac_data,
1432		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
1433
1434	/* multicast list and count fill the end */
1435	mac_data = (void *)&mac_data->macs[uc_count][0];
1436
1437	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
1438	i = 0;
1439	netdev_for_each_mc_addr(ha, dev)
1440		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1441
1442	sg_set_buf(&sg[1], mac_data,
1443		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
1444
1445	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1446				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
1447		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
1448
1449	kfree(buf);
1450}
1451
1452static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1453				   __be16 proto, u16 vid)
1454{
1455	struct virtnet_info *vi = netdev_priv(dev);
1456	struct scatterlist sg;
1457
1458	vi->ctrl_vid = vid;
1459	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
1460
1461	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1462				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
1463		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
1464	return 0;
1465}
1466
1467static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1468				    __be16 proto, u16 vid)
1469{
1470	struct virtnet_info *vi = netdev_priv(dev);
1471	struct scatterlist sg;
1472
1473	vi->ctrl_vid = vid;
1474	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
1475
1476	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1477				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
1478		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
1479	return 0;
1480}
1481
1482static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
1483{
1484	int i;
1485
1486	if (vi->affinity_hint_set) {
1487		for (i = 0; i < vi->max_queue_pairs; i++) {
1488			virtqueue_set_affinity(vi->rq[i].vq, -1);
1489			virtqueue_set_affinity(vi->sq[i].vq, -1);
1490		}
1491
1492		vi->affinity_hint_set = false;
1493	}
1494}
1495
1496static void virtnet_set_affinity(struct virtnet_info *vi)
1497{
1498	int i;
1499	int cpu;
 
 
 
 
1500
 1501	/* In multiqueue mode, when the number of CPUs equals the number of
 1502	 * queue pairs, we make each queue pair private to one CPU by
 1503	 * setting the affinity hint, eliminating the contention.
1504	 */
1505	if (vi->curr_queue_pairs == 1 ||
1506	    vi->max_queue_pairs != num_online_cpus()) {
1507		virtnet_clean_affinity(vi, -1);
1508		return;
1509	}
1510
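     	/* One queue pair per online CPU: pin the pair's virtqueue interrupts
     	 * to that CPU and steer its transmits there via XPS.
     	 */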
1511	i = 0;
1512	for_each_online_cpu(cpu) {
1513		virtqueue_set_affinity(vi->rq[i].vq, cpu);
1514		virtqueue_set_affinity(vi->sq[i].vq, cpu);
1515		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
1516		i++;
1517	}
1518
1519	vi->affinity_hint_set = true;
 
1520}
1521
1522static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
1523{
1524	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1525						   node);
1526	virtnet_set_affinity(vi);
1527	return 0;
1528}
1529
1530static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
1531{
1532	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1533						   node_dead);
1534	virtnet_set_affinity(vi);
1535	return 0;
1536}
1537
1538static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
1539{
1540	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1541						   node);
1542
1543	virtnet_clean_affinity(vi, cpu);
1544	return 0;
1545}
1546
1547static enum cpuhp_state virtionet_online;
1548
1549static int virtnet_cpu_notif_add(struct virtnet_info *vi)
1550{
1551	int ret;
1552
1553	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
1554	if (ret)
1555		return ret;
1556	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1557					       &vi->node_dead);
1558	if (!ret)
1559		return ret;
1560	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1561	return ret;
1562}
1563
1564static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
1565{
1566	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1567	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1568					    &vi->node_dead);
1569}
1570
1571static void virtnet_get_ringparam(struct net_device *dev,
1572				struct ethtool_ringparam *ring)
1573{
1574	struct virtnet_info *vi = netdev_priv(dev);
1575
1576	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
1577	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
1578	ring->rx_pending = ring->rx_max_pending;
1579	ring->tx_pending = ring->tx_max_pending;
1580}
1581
1582
1583static void virtnet_get_drvinfo(struct net_device *dev,
1584				struct ethtool_drvinfo *info)
1585{
1586	struct virtnet_info *vi = netdev_priv(dev);
1587	struct virtio_device *vdev = vi->vdev;
1588
1589	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1590	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
1591	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
1592
1593}
1594
1595/* TODO: Eliminate OOO packets during switching */
1596static int virtnet_set_channels(struct net_device *dev,
1597				struct ethtool_channels *channels)
1598{
1599	struct virtnet_info *vi = netdev_priv(dev);
1600	u16 queue_pairs = channels->combined_count;
1601	int err;
1602
1603	/* We don't support separate rx/tx channels.
1604	 * We don't allow setting 'other' channels.
1605	 */
1606	if (channels->rx_count || channels->tx_count || channels->other_count)
1607		return -EINVAL;
1608
1609	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
1610		return -EINVAL;
1611
 1612	/* For now we don't support modifying channels while XDP is loaded.
 1613	 * Also, when XDP is loaded all RX queues have XDP programs, so we only
 1614	 * need to check a single RX queue.
1615	 */
1616	if (vi->rq[0].xdp_prog)
1617		return -EINVAL;
1618
1619	get_online_cpus();
1620	err = virtnet_set_queues(vi, queue_pairs);
1621	if (!err) {
1622		netif_set_real_num_tx_queues(dev, queue_pairs);
1623		netif_set_real_num_rx_queues(dev, queue_pairs);
 
 
 
1624
1625		virtnet_set_affinity(vi);
1626	}
1627	put_online_cpus();
1628
1629	return err;
1630}
1631
1632static void virtnet_get_channels(struct net_device *dev,
1633				 struct ethtool_channels *channels)
1634{
1635	struct virtnet_info *vi = netdev_priv(dev);
1636
1637	channels->combined_count = vi->curr_queue_pairs;
1638	channels->max_combined = vi->max_queue_pairs;
1639	channels->max_other = 0;
1640	channels->rx_count = 0;
1641	channels->tx_count = 0;
1642	channels->other_count = 0;
1643}
1644
1645/* Check if the user is trying to change anything besides speed/duplex */
1646static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd)
1647{
1648	struct ethtool_cmd diff1 = *cmd;
1649	struct ethtool_cmd diff2 = {};
1650
 1651	/* cmd is always set, so we need to clear it; validate the port type,
 1652	 * and since there is no autonegotiation we can ignore advertising.
1653	 */
1654	ethtool_cmd_speed_set(&diff1, 0);
1655	diff2.port = PORT_OTHER;
1656	diff1.advertising = 0;
1657	diff1.duplex = 0;
1658	diff1.cmd = 0;
 
1659
1660	return !memcmp(&diff1, &diff2, sizeof(diff1));
1661}
1662
1663static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1664{
1665	struct virtnet_info *vi = netdev_priv(dev);
1666	u32 speed;
1667
1668	speed = ethtool_cmd_speed(cmd);
1669	/* don't allow custom speed and duplex */
1670	if (!ethtool_validate_speed(speed) ||
1671	    !ethtool_validate_duplex(cmd->duplex) ||
1672	    !virtnet_validate_ethtool_cmd(cmd))
1673		return -EINVAL;
1674	vi->speed = speed;
1675	vi->duplex = cmd->duplex;
1676
1677	return 0;
1678}
1679
1680static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1681{
1682	struct virtnet_info *vi = netdev_priv(dev);
1683
1684	ethtool_cmd_speed_set(cmd, vi->speed);
1685	cmd->duplex = vi->duplex;
1686	cmd->port = PORT_OTHER;
1687
1688	return 0;
1689}
1690
1691static void virtnet_init_settings(struct net_device *dev)
1692{
1693	struct virtnet_info *vi = netdev_priv(dev);
1694
1695	vi->speed = SPEED_UNKNOWN;
1696	vi->duplex = DUPLEX_UNKNOWN;
1697}
1698
1699static const struct ethtool_ops virtnet_ethtool_ops = {
 
 
1700	.get_drvinfo = virtnet_get_drvinfo,
1701	.get_link = ethtool_op_get_link,
1702	.get_ringparam = virtnet_get_ringparam,
 
 
 
 
1703	.set_channels = virtnet_set_channels,
1704	.get_channels = virtnet_get_channels,
1705	.get_ts_info = ethtool_op_get_ts_info,
1706	.get_settings = virtnet_get_settings,
1707	.set_settings = virtnet_set_settings,
1708};
1709
1710static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
1711{
1712	unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
1713	struct virtnet_info *vi = netdev_priv(dev);
1714	struct bpf_prog *old_prog;
1715	u16 xdp_qp = 0, curr_qp;
1716	int i, err;
1717
1718	if (prog && prog->xdp_adjust_head) {
1719		netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
1720		return -EOPNOTSUPP;
1721	}
1722
1723	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1724	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1725	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
1726	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) {
1727		netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
1728		return -EOPNOTSUPP;
1729	}
1730
1731	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
1732		netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n");
1733		return -EINVAL;
1734	}
1735
1736	if (dev->mtu > max_sz) {
1737		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
 
1738		return -EINVAL;
1739	}
1740
1741	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
1742	if (prog)
1743		xdp_qp = nr_cpu_ids;
1744
1745	/* XDP requires extra queues for XDP_TX */
1746	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
1747		netdev_warn(dev, "request %i queues but max is %i\n",
1748			    curr_qp + xdp_qp, vi->max_queue_pairs);
1749		return -ENOMEM;
1750	}
1751
1752	err = virtnet_set_queues(vi, curr_qp + xdp_qp);
1753	if (err) {
1754		dev_warn(&dev->dev, "XDP Device queue allocation failure.\n");
1755		return err;
1756	}
1757
1758	if (prog) {
1759		prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
1760		if (IS_ERR(prog)) {
1761			virtnet_set_queues(vi, curr_qp);
1762			return PTR_ERR(prog);
1763		}
 
1764	}
1765
 
 
 
 
1766	vi->xdp_queue_pairs = xdp_qp;
1767	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
1768
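     	/* Attach the program to every RX queue under RTNL; bpf_prog_add()
     	 * above took the extra references the queues need, and any
     	 * previously attached program is released once replaced.
     	 */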
1769	for (i = 0; i < vi->max_queue_pairs; i++) {
1770		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
1771		rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
1772		if (old_prog)
1773			bpf_prog_put(old_prog);
 
 
 
 
 
1774	}
1775
1776	return 0;
1777}
1778
1779static bool virtnet_xdp_query(struct net_device *dev)
1780{
1781	struct virtnet_info *vi = netdev_priv(dev);
1782	int i;
 
 
1783
1784	for (i = 0; i < vi->max_queue_pairs; i++) {
1785		if (vi->rq[i].xdp_prog)
1786			return true;
 
 
 
1787	}
1788	return false;
 
 
1789}
1790
1791static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
1792{
1793	switch (xdp->command) {
1794	case XDP_SETUP_PROG:
1795		return virtnet_xdp_set(dev, xdp->prog);
1796	case XDP_QUERY_PROG:
1797		xdp->prog_attached = virtnet_xdp_query(dev);
1798		return 0;
1799	default:
1800		return -EINVAL;
1801	}
1802}
1803
1804static const struct net_device_ops virtnet_netdev = {
1805	.ndo_open            = virtnet_open,
1806	.ndo_stop   	     = virtnet_close,
1807	.ndo_start_xmit      = start_xmit,
1808	.ndo_validate_addr   = eth_validate_addr,
1809	.ndo_set_mac_address = virtnet_set_mac_address,
1810	.ndo_set_rx_mode     = virtnet_set_rx_mode,
1811	.ndo_get_stats64     = virtnet_stats,
1812	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
1813	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
1814#ifdef CONFIG_NET_POLL_CONTROLLER
1815	.ndo_poll_controller = virtnet_netpoll,
1816#endif
1817#ifdef CONFIG_NET_RX_BUSY_POLL
1818	.ndo_busy_poll		= virtnet_busy_poll,
1819#endif
1820	.ndo_xdp		= virtnet_xdp,
1821};
1822
1823static void virtnet_config_changed_work(struct work_struct *work)
1824{
1825	struct virtnet_info *vi =
1826		container_of(work, struct virtnet_info, config_work);
1827	u16 v;
1828
1829	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
1830				 struct virtio_net_config, status, &v) < 0)
1831		return;
1832
1833	if (v & VIRTIO_NET_S_ANNOUNCE) {
1834		netdev_notify_peers(vi->dev);
1835		virtnet_ack_link_announce(vi);
1836	}
1837
1838	/* Ignore unknown (future) status bits */
1839	v &= VIRTIO_NET_S_LINK_UP;
1840
1841	if (vi->status == v)
1842		return;
1843
1844	vi->status = v;
1845
1846	if (vi->status & VIRTIO_NET_S_LINK_UP) {
 
1847		netif_carrier_on(vi->dev);
1848		netif_tx_wake_all_queues(vi->dev);
1849	} else {
1850		netif_carrier_off(vi->dev);
1851		netif_tx_stop_all_queues(vi->dev);
1852	}
1853}
1854
1855static void virtnet_config_changed(struct virtio_device *vdev)
1856{
1857	struct virtnet_info *vi = vdev->priv;
1858
1859	schedule_work(&vi->config_work);
1860}
1861
1862static void virtnet_free_queues(struct virtnet_info *vi)
1863{
1864	int i;
1865
1866	for (i = 0; i < vi->max_queue_pairs; i++) {
1867		napi_hash_del(&vi->rq[i].napi);
1868		netif_napi_del(&vi->rq[i].napi);
1869	}
1870
1871	/* We called napi_hash_del() before netif_napi_del(),
1872	 * we need to respect an RCU grace period before freeing vi->rq
1873	 */
1874	synchronize_net();
1875
1876	kfree(vi->rq);
1877	kfree(vi->sq);
 
1878}
1879
1880static void free_receive_bufs(struct virtnet_info *vi)
1881{
1882	struct bpf_prog *old_prog;
1883	int i;
1884
1885	rtnl_lock();
1886	for (i = 0; i < vi->max_queue_pairs; i++) {
1887		while (vi->rq[i].pages)
1888			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
1889
1890		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
1891		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
1892		if (old_prog)
1893			bpf_prog_put(old_prog);
1894	}
 
 
 
 
 
 
1895	rtnl_unlock();
1896}
1897
1898static void free_receive_page_frags(struct virtnet_info *vi)
1899{
1900	int i;
1901	for (i = 0; i < vi->max_queue_pairs; i++)
1902		if (vi->rq[i].alloc_frag.page)
 
 
1903			put_page(vi->rq[i].alloc_frag.page);
 
1904}
1905
1906static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1907{
1908	/* For small receive mode always use kfree_skb variants */
1909	if (!vi->mergeable_rx_bufs)
1910		return false;
1911
1912	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1913		return false;
1914	else if (q < vi->curr_queue_pairs)
1915		return true;
1916	else
1917		return false;
1918}
1919
1920static void free_unused_bufs(struct virtnet_info *vi)
1921{
1922	void *buf;
1923	int i;
1924
1925	for (i = 0; i < vi->max_queue_pairs; i++) {
1926		struct virtqueue *vq = vi->sq[i].vq;
1927		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1928			if (!is_xdp_raw_buffer_queue(vi, i))
1929				dev_kfree_skb(buf);
1930			else
1931				put_page(virt_to_head_page(buf));
1932		}
1933	}
1934
1935	for (i = 0; i < vi->max_queue_pairs; i++) {
1936		struct virtqueue *vq = vi->rq[i].vq;
1937
1938		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1939			if (vi->mergeable_rx_bufs) {
1940				unsigned long ctx = (unsigned long)buf;
1941				void *base = mergeable_ctx_to_buf_address(ctx);
1942				put_page(virt_to_head_page(base));
1943			} else if (vi->big_packets) {
1944				give_pages(&vi->rq[i], buf);
1945			} else {
1946				dev_kfree_skb(buf);
1947			}
1948		}
1949	}
1950}
1951
1952static void virtnet_del_vqs(struct virtnet_info *vi)
1953{
1954	struct virtio_device *vdev = vi->vdev;
1955
1956	virtnet_clean_affinity(vi, -1);
1957
1958	vdev->config->del_vqs(vdev);
1959
1960	virtnet_free_queues(vi);
1961}
1962
1963static int virtnet_find_vqs(struct virtnet_info *vi)
1964{
1965	vq_callback_t **callbacks;
1966	struct virtqueue **vqs;
 
1967	int ret = -ENOMEM;
1968	int i, total_vqs;
1969	const char **names;
 
1970
1971	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
1972	 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
1973	 * possible control vq.
1974	 */
1975	total_vqs = vi->max_queue_pairs * 2 +
1976		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
1977
1978	/* Allocate space for find_vqs parameters */
1979	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
1980	if (!vqs)
1981		goto err_vq;
1982	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
1983	if (!callbacks)
1984		goto err_callback;
1985	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
1986	if (!names)
1987		goto err_names;
1988
1989	/* Parameters for control virtqueue, if any */
1990	if (vi->has_cvq) {
1991		callbacks[total_vqs - 1] = NULL;
1992		names[total_vqs - 1] = "control";
1993	}
1994
1995	/* Allocate/initialize parameters for send/receive virtqueues */
1996	for (i = 0; i < vi->max_queue_pairs; i++) {
1997		callbacks[rxq2vq(i)] = skb_recv_done;
1998		callbacks[txq2vq(i)] = skb_xmit_done;
1999		sprintf(vi->rq[i].name, "input.%d", i);
2000		sprintf(vi->sq[i].name, "output.%d", i);
2001		names[rxq2vq(i)] = vi->rq[i].name;
2002		names[txq2vq(i)] = vi->sq[i].name;
 
 
2003	}
2004
2005	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
2006					 names);
2007	if (ret)
2008		goto err_find;
2009
2010	if (vi->has_cvq) {
2011		vi->cvq = vqs[total_vqs - 1];
2012		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
2013			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2014	}
2015
2016	for (i = 0; i < vi->max_queue_pairs; i++) {
2017		vi->rq[i].vq = vqs[rxq2vq(i)];
 
2018		vi->sq[i].vq = vqs[txq2vq(i)];
2019	}
2020
2021	kfree(names);
2022	kfree(callbacks);
2023	kfree(vqs);
2024
2025	return 0;
2026
2027err_find:
 
 
2028	kfree(names);
2029err_names:
2030	kfree(callbacks);
2031err_callback:
2032	kfree(vqs);
2033err_vq:
2034	return ret;
2035}
2036
2037static int virtnet_alloc_queues(struct virtnet_info *vi)
2038{
2039	int i;
2040
2041	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
2042	if (!vi->sq)
2043		goto err_sq;
2044	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
2045	if (!vi->rq)
2046		goto err_rq;
2047
2048	INIT_DELAYED_WORK(&vi->refill, refill_work);
2049	for (i = 0; i < vi->max_queue_pairs; i++) {
2050		vi->rq[i].pages = NULL;
2051		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
2052			       napi_weight);
 
 
 
 
 
 
2053
2054		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
2055		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
2056		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
 
 
 
2057	}
2058
2059	return 0;
2060
2061err_rq:
2062	kfree(vi->sq);
2063err_sq:
 
 
2064	return -ENOMEM;
2065}
2066
2067static int init_vqs(struct virtnet_info *vi)
2068{
2069	int ret;
2070
2071	/* Allocate send & receive queues */
2072	ret = virtnet_alloc_queues(vi);
2073	if (ret)
2074		goto err;
2075
2076	ret = virtnet_find_vqs(vi);
2077	if (ret)
2078		goto err_free;
2079
2080	get_online_cpus();
 
 
2081	virtnet_set_affinity(vi);
2082	put_online_cpus();
2083
2084	return 0;
2085
2086err_free:
2087	virtnet_free_queues(vi);
2088err:
2089	return ret;
2090}
2091
2092#ifdef CONFIG_SYSFS
2093static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
2094		struct rx_queue_attribute *attribute, char *buf)
2095{
2096	struct virtnet_info *vi = netdev_priv(queue->dev);
2097	unsigned int queue_index = get_netdev_rx_queue_index(queue);
 
 
2098	struct ewma_pkt_len *avg;
2099
2100	BUG_ON(queue_index >= vi->max_queue_pairs);
2101	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
2102	return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
 
 
2103}
2104
2105static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
2106	__ATTR_RO(mergeable_rx_buffer_size);
2107
2108static struct attribute *virtio_net_mrg_rx_attrs[] = {
2109	&mergeable_rx_buffer_size_attribute.attr,
2110	NULL
2111};
2112
2113static const struct attribute_group virtio_net_mrg_rx_group = {
2114	.name = "virtio_net",
2115	.attrs = virtio_net_mrg_rx_attrs
2116};
2117#endif
2118
2119static bool virtnet_fail_on_feature(struct virtio_device *vdev,
2120				    unsigned int fbit,
2121				    const char *fname, const char *dname)
2122{
2123	if (!virtio_has_feature(vdev, fbit))
2124		return false;
2125
2126	dev_err(&vdev->dev, "device advertises feature %s but not %s",
2127		fname, dname);
2128
2129	return true;
2130}
2131
2132#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
2133	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
2134
2135static bool virtnet_validate_features(struct virtio_device *vdev)
2136{
2137	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
2138	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
2139			     "VIRTIO_NET_F_CTRL_VQ") ||
2140	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
2141			     "VIRTIO_NET_F_CTRL_VQ") ||
2142	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
2143			     "VIRTIO_NET_F_CTRL_VQ") ||
2144	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
2145	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
2146			     "VIRTIO_NET_F_CTRL_VQ"))) {
2147		return false;
2148	}
2149
2150	return true;
2151}
2152
2153#define MIN_MTU ETH_MIN_MTU
2154#define MAX_MTU ETH_MAX_MTU
2155
2156static int virtnet_probe(struct virtio_device *vdev)
2157{
2158	int i, err;
2159	struct net_device *dev;
2160	struct virtnet_info *vi;
2161	u16 max_queue_pairs;
2162	int mtu;
2163
2164	if (!vdev->config->get) {
2165		dev_err(&vdev->dev, "%s failure: config access disabled\n",
2166			__func__);
2167		return -EINVAL;
2168	}
2169
2170	if (!virtnet_validate_features(vdev))
2171		return -EINVAL;
2172
2173	/* Find if host supports multiqueue virtio_net device */
2174	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
2175				   struct virtio_net_config,
2176				   max_virtqueue_pairs, &max_queue_pairs);
2177
 2178	/* We need at least 2 queues */
2179	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
2180	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
2181	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2182		max_queue_pairs = 1;
2183
2184	/* Allocate ourselves a network device with room for our info */
2185	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
2186	if (!dev)
2187		return -ENOMEM;
2188
2189	/* Set up network device as normal. */
2190	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
 
2191	dev->netdev_ops = &virtnet_netdev;
2192	dev->features = NETIF_F_HIGHDMA;
2193
2194	dev->ethtool_ops = &virtnet_ethtool_ops;
2195	SET_NETDEV_DEV(dev, &vdev->dev);
2196
2197	/* Do we support "hardware" checksums? */
2198	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
2199		/* This opens up the world of extra features. */
2200		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2201		if (csum)
2202			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2203
2204		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
2205			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
2206				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
2207		}
2208		/* Individual feature bits: what can host handle? */
2209		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
2210			dev->hw_features |= NETIF_F_TSO;
2211		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
2212			dev->hw_features |= NETIF_F_TSO6;
2213		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
2214			dev->hw_features |= NETIF_F_TSO_ECN;
2215		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
2216			dev->hw_features |= NETIF_F_UFO;
2217
2218		dev->features |= NETIF_F_GSO_ROBUST;
2219
2220		if (gso)
2221			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
2222		/* (!csum && gso) case will be fixed by register_netdev() */
2223	}
2224	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
2225		dev->features |= NETIF_F_RXCSUM;
 
 
 
 
 
2226
2227	dev->vlan_features = dev->features;
 
2228
2229	/* MTU range: 68 - 65535 */
2230	dev->min_mtu = MIN_MTU;
2231	dev->max_mtu = MAX_MTU;
2232
2233	/* Configuration may specify what MAC to use.  Otherwise random. */
2234	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
 
 
2235		virtio_cread_bytes(vdev,
2236				   offsetof(struct virtio_net_config, mac),
2237				   dev->dev_addr, dev->addr_len);
2238	else
 
2239		eth_hw_addr_random(dev);
 
 
 
2240
2241	/* Set up our device-specific information */
2242	vi = netdev_priv(dev);
2243	vi->dev = dev;
2244	vi->vdev = vdev;
2245	vdev->priv = vi;
2246	vi->stats = alloc_percpu(struct virtnet_stats);
2247	err = -ENOMEM;
2248	if (vi->stats == NULL)
2249		goto free;
2250
2251	for_each_possible_cpu(i) {
2252		struct virtnet_stats *virtnet_stats;
2253		virtnet_stats = per_cpu_ptr(vi->stats, i);
2254		u64_stats_init(&virtnet_stats->tx_syncp);
2255		u64_stats_init(&virtnet_stats->rx_syncp);
 
2256	}
2257
2258	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
2259
2260	/* If we can receive ANY GSO packets, we must allocate large ones. */
2261	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2262	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2263	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
2264	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
2265		vi->big_packets = true;
2266
2267	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
2268		vi->mergeable_rx_bufs = true;
2269
2270	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
2271	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
 
 
2272		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2273	else
2274		vi->hdr_len = sizeof(struct virtio_net_hdr);
2275
2276	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
2277	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
2278		vi->any_header_sg = true;
2279
2280	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2281		vi->has_cvq = true;
2282
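	/* The device may advertise an MTU; ignore it (clear the feature
	 * bit) if it is below the stack's minimum, otherwise use it as
	 * both the default and the maximum MTU. */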
2283	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2284		mtu = virtio_cread16(vdev,
2285				     offsetof(struct virtio_net_config,
2286					      mtu));
2287		if (mtu < dev->min_mtu) {
2288			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
2289		} else {
2290			dev->mtu = mtu;
2291			dev->max_mtu = mtu;
2292		}
2293	}
2294
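	/* Reserve headroom so the virtio-net header can be placed
	 * directly in front of the packet data on transmit. */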
2295	if (vi->any_header_sg)
2296		dev->needed_headroom = vi->hdr_len;
2297
2298	/* Enable multiqueue by default */
2299	if (num_online_cpus() >= max_queue_pairs)
2300		vi->curr_queue_pairs = max_queue_pairs;
2301	else
2302		vi->curr_queue_pairs = num_online_cpus();
2303	vi->max_queue_pairs = max_queue_pairs;
2304
2305	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
2306	err = init_vqs(vi);
2307	if (err)
2308		goto free_stats;
2309
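	/* Expose the per-queue mergeable RX buffer size through sysfs. */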
2310#ifdef CONFIG_SYSFS
2311	if (vi->mergeable_rx_bufs)
2312		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
2313#endif
2314	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
2315	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
2316
2317	virtnet_init_settings(dev);
2318
2319	err = register_netdev(dev);
2320	if (err) {
2321		pr_debug("virtio_net: registering device failed\n");
2322		goto free_vqs;
2323	}
2324
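	/* From this point on the device may raise interrupts and be used
	 * by the rest of the driver. */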
2325	virtio_device_ready(vdev);
2326
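	/* Follow CPU hotplug events so queue affinity stays up to date. */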
2327	err = virtnet_cpu_notif_add(vi);
2328	if (err) {
2329		pr_debug("virtio_net: registering cpu notifier failed\n");
2330		goto free_unregister_netdev;
2331	}
2332
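	/* Tell the device how many queue pairs to actually use. */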
2333	rtnl_lock();
2334	virtnet_set_queues(vi, vi->curr_queue_pairs);
2335	rtnl_unlock();
2336
2337	/* Assume link up if device can't report link status,
2338	 * otherwise get link status from config. */
2339	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
2340		netif_carrier_off(dev);
2341		schedule_work(&vi->config_work);
2342	} else {
2343		vi->status = VIRTIO_NET_S_LINK_UP;
2344		netif_carrier_on(dev);
2345	}
2346
2347	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
2348		 dev->name, max_queue_pairs);
2349
2350	return 0;
2351
2352free_unregister_netdev:
2353	vi->vdev->config->reset(vdev);
2354
2355	unregister_netdev(dev);
2356free_vqs:
2357	cancel_delayed_work_sync(&vi->refill);
2358	free_receive_page_frags(vi);
2359	virtnet_del_vqs(vi);
2360free_stats:
2361	free_percpu(vi->stats);
2362free:
2363	free_netdev(dev);
2364	return err;
2365}
2366
2367static void remove_vq_common(struct virtnet_info *vi)
2368{
2369	vi->vdev->config->reset(vi->vdev);
2370
2371	/* Free unused buffers in both send and recv, if any. */
2372	free_unused_bufs(vi);
2373
2374	free_receive_bufs(vi);
2375
2376	free_receive_page_frags(vi);
2377
2378	virtnet_del_vqs(vi);
2379}
2380
2381static void virtnet_remove(struct virtio_device *vdev)
2382{
2383	struct virtnet_info *vi = vdev->priv;
2384
2385	virtnet_cpu_notif_remove(vi);
2386
2387	/* Make sure no work handler is accessing the device. */
2388	flush_work(&vi->config_work);
2389
2390	unregister_netdev(vi->dev);
2391
2392	remove_vq_common(vi);
2393
2394	free_percpu(vi->stats);
2395	free_netdev(vi->dev);
2396}
2397
2398#ifdef CONFIG_PM_SLEEP
2399static int virtnet_freeze(struct virtio_device *vdev)
2400{
2401	struct virtnet_info *vi = vdev->priv;
2402	int i;
2403
2404	virtnet_cpu_notif_remove(vi);
2405
2406	/* Make sure no work handler is accessing the device */
2407	flush_work(&vi->config_work);
2408
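	/* Quiesce the device: detach it from the stack, stop refills and
	 * NAPI, then tear down the virtqueues for suspend. */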
2409	netif_device_detach(vi->dev);
2410	cancel_delayed_work_sync(&vi->refill);
2411
2412	if (netif_running(vi->dev)) {
2413		for (i = 0; i < vi->max_queue_pairs; i++)
2414			napi_disable(&vi->rq[i].napi);
2415	}
2416
2417	remove_vq_common(vi);
2418
2419	return 0;
2420}
2421
2422static int virtnet_restore(struct virtio_device *vdev)
2423{
2424	struct virtnet_info *vi = vdev->priv;
2425	int err, i;
2426
2427	err = init_vqs(vi);
2428	if (err)
2429		return err;
2430
2431	virtio_device_ready(vdev);
2432
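	/* Repopulate the receive rings and restart NAPI now that the
	 * virtqueues have been recreated. */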
2433	if (netif_running(vi->dev)) {
2434		for (i = 0; i < vi->curr_queue_pairs; i++)
2435			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2436				schedule_delayed_work(&vi->refill, 0);
2437
2438		for (i = 0; i < vi->max_queue_pairs; i++)
2439			virtnet_napi_enable(&vi->rq[i]);
2440	}
2441
2442	netif_device_attach(vi->dev);
2443
2444	rtnl_lock();
2445	virtnet_set_queues(vi, vi->curr_queue_pairs);
2446	rtnl_unlock();
2447
2448	err = virtnet_cpu_notif_add(vi);
2449	if (err)
2450		return err;
2451
2452	return 0;
2453}
2454#endif
2455
2456static struct virtio_device_id id_table[] = {
2457	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
2458	{ 0 },
2459};
2460
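/* Feature bits the driver offers to both modern and legacy devices. */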
2461#define VIRTNET_FEATURES \
2462	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
2463	VIRTIO_NET_F_MAC, \
2464	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
2465	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
2466	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
2467	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
2468	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
2469	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
2470	VIRTIO_NET_F_CTRL_MAC_ADDR, \
2471	VIRTIO_NET_F_MTU
2472
2473static unsigned int features[] = {
2474	VIRTNET_FEATURES,
2475};
2476
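/* Legacy (pre virtio 1.0) devices are additionally offered the
 * deprecated GSO feature and ANY_LAYOUT. */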
2477static unsigned int features_legacy[] = {
2478	VIRTNET_FEATURES,
2479	VIRTIO_NET_F_GSO,
2480	VIRTIO_F_ANY_LAYOUT,
2481};
2482
2483static struct virtio_driver virtio_net_driver = {
2484	.feature_table = features,
2485	.feature_table_size = ARRAY_SIZE(features),
2486	.feature_table_legacy = features_legacy,
2487	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
2488	.driver.name =	KBUILD_MODNAME,
2489	.driver.owner =	THIS_MODULE,
2490	.id_table =	id_table,
2491	.probe =	virtnet_probe,
2492	.remove =	virtnet_remove,
2493	.config_changed = virtnet_config_changed,
2494#ifdef CONFIG_PM_SLEEP
2495	.freeze =	virtnet_freeze,
2496	.restore =	virtnet_restore,
2497#endif
2498};
2499
2500static __init int virtio_net_driver_init(void)
2501{
2502	int ret;
2503
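	/* Register the CPU hotplug states used to rebalance queue
	 * affinity before the driver can bind to any device. */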
2504	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
2505				      virtnet_cpu_online,
2506				      virtnet_cpu_down_prep);
2507	if (ret < 0)
2508		goto out;
2509	virtionet_online = ret;
2510	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
2511				      NULL, virtnet_cpu_dead);
2512	if (ret)
2513		goto err_dead;
2514
2515	ret = register_virtio_driver(&virtio_net_driver);
2516	if (ret)
2517		goto err_virtio;
2518	return 0;
2519err_virtio:
2520	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2521err_dead:
2522	cpuhp_remove_multi_state(virtionet_online);
2523out:
2524	return ret;
2525}
2526module_init(virtio_net_driver_init);
2527
2528static __exit void virtio_net_driver_exit(void)
2529{
2530	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2531	cpuhp_remove_multi_state(virtionet_online);
2532	unregister_virtio_driver(&virtio_net_driver);
2533}
2534module_exit(virtio_net_driver_exit);
2535
2536MODULE_DEVICE_TABLE(virtio, id_table);
2537MODULE_DESCRIPTION("Virtio network driver");
2538MODULE_LICENSE("GPL");