   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* A network driver using virtio.
   3 *
   4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
   5 */
   6//#define DEBUG
   7#include <linux/netdevice.h>
   8#include <linux/etherdevice.h>
   9#include <linux/ethtool.h>
  10#include <linux/module.h>
  11#include <linux/virtio.h>
  12#include <linux/virtio_net.h>
  13#include <linux/bpf.h>
  14#include <linux/bpf_trace.h>
  15#include <linux/scatterlist.h>
  16#include <linux/if_vlan.h>
  17#include <linux/slab.h>
  18#include <linux/cpu.h>
  19#include <linux/average.h>
  20#include <linux/filter.h>
  21#include <linux/kernel.h>
  22#include <linux/dim.h>
  23#include <net/route.h>
  24#include <net/xdp.h>
  25#include <net/net_failover.h>
  26#include <net/netdev_rx_queue.h>
  27
  28static int napi_weight = NAPI_POLL_WEIGHT;
  29module_param(napi_weight, int, 0444);
  30
  31static bool csum = true, gso = true, napi_tx = true;
  32module_param(csum, bool, 0444);
  33module_param(gso, bool, 0444);
  34module_param(napi_tx, bool, 0644);
  35
  36/* FIXME: MTU in config. */
  37#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
  38#define GOOD_COPY_LEN	128
  39
  40#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
  41
  42/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
  43#define VIRTIO_XDP_HEADROOM 256
  44
  45/* Separating two types of XDP xmit */
  46#define VIRTIO_XDP_TX		BIT(0)
  47#define VIRTIO_XDP_REDIR	BIT(1)
  48
  49#define VIRTIO_XDP_FLAG	BIT(0)
  50
  51/* RX packet size EWMA. The average packet size is used to determine the packet
  52 * buffer size when refilling RX rings. As the entire RX ring may be refilled
  53 * at once, the weight is chosen so that the EWMA will be insensitive to short-
  54 * term, transient changes in packet size.
  55 */
  56DECLARE_EWMA(pkt_len, 0, 64)
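
/* As a rough illustration of the EWMA above: DECLARE_EWMA(pkt_len, 0, 64)
 * generates ewma_pkt_len_init/add/read helpers using 0 fractional bits of
 * precision and a weight reciprocal of 64, so each sample updates the
 * average roughly as avg = (63 * avg + sample) / 64.  A single 64-byte
 * packet therefore shifts an average near 1500 bytes by only ~22 bytes,
 * which is the short-term insensitivity the comment above asks for.
 */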
  57
  58#define VIRTNET_DRIVER_VERSION "1.0.0"
  59
  60static const unsigned long guest_offloads[] = {
  61	VIRTIO_NET_F_GUEST_TSO4,
  62	VIRTIO_NET_F_GUEST_TSO6,
  63	VIRTIO_NET_F_GUEST_ECN,
  64	VIRTIO_NET_F_GUEST_UFO,
  65	VIRTIO_NET_F_GUEST_CSUM,
  66	VIRTIO_NET_F_GUEST_USO4,
  67	VIRTIO_NET_F_GUEST_USO6,
  68	VIRTIO_NET_F_GUEST_HDRLEN
  69};
  70
  71#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
  72				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
  73				(1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
  74				(1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
  75				(1ULL << VIRTIO_NET_F_GUEST_USO4) | \
  76				(1ULL << VIRTIO_NET_F_GUEST_USO6))
  77
  78struct virtnet_stat_desc {
  79	char desc[ETH_GSTRING_LEN];
  80	size_t offset;
  81};
  82
  83struct virtnet_sq_stats {
  84	struct u64_stats_sync syncp;
  85	u64_stats_t packets;
  86	u64_stats_t bytes;
  87	u64_stats_t xdp_tx;
  88	u64_stats_t xdp_tx_drops;
  89	u64_stats_t kicks;
  90	u64_stats_t tx_timeouts;
  91};
  92
  93struct virtnet_rq_stats {
  94	struct u64_stats_sync syncp;
  95	u64_stats_t packets;
  96	u64_stats_t bytes;
  97	u64_stats_t drops;
  98	u64_stats_t xdp_packets;
  99	u64_stats_t xdp_tx;
 100	u64_stats_t xdp_redirects;
 101	u64_stats_t xdp_drops;
 102	u64_stats_t kicks;
 103};
 104
 105#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
 106#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)
 107
 108static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
 109	{ "packets",		VIRTNET_SQ_STAT(packets) },
 110	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
 111	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
 112	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
 113	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
 114	{ "tx_timeouts",	VIRTNET_SQ_STAT(tx_timeouts) },
 115};
 116
 117static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 118	{ "packets",		VIRTNET_RQ_STAT(packets) },
 119	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
 120	{ "drops",		VIRTNET_RQ_STAT(drops) },
 121	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
 122	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
 123	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
 124	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
 125	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
 126};
 127
 128#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
 129#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)
 130
 131struct virtnet_interrupt_coalesce {
 132	u32 max_packets;
 133	u32 max_usecs;
 134};
 135
 136/* The dma information of pages allocated at a time. */
 137struct virtnet_rq_dma {
 138	dma_addr_t addr;
 139	u32 ref;
 140	u16 len;
 141	u16 need_sync;
 142};
 143
 144/* Internal representation of a send virtqueue */
 145struct send_queue {
 146	/* Virtqueue associated with this send queue */
 147	struct virtqueue *vq;
 148
 149	/* TX: fragments + linear part + virtio header */
 150	struct scatterlist sg[MAX_SKB_FRAGS + 2];
 151
 152	/* Name of the send queue: output.$index */
 153	char name[16];
 154
 155	struct virtnet_sq_stats stats;
 156
 157	struct virtnet_interrupt_coalesce intr_coal;
 158
 159	struct napi_struct napi;
 160
 161	/* Record whether sq is in reset state. */
 162	bool reset;
 163};
 164
 165/* Internal representation of a receive virtqueue */
 166struct receive_queue {
 167	/* Virtqueue associated with this receive_queue */
 168	struct virtqueue *vq;
 169
 170	struct napi_struct napi;
 171
 172	struct bpf_prog __rcu *xdp_prog;
 173
 174	struct virtnet_rq_stats stats;
 175
 176	/* The number of rx notifications */
 177	u16 calls;
 178
 179	/* Is dynamic interrupt moderation enabled? */
 180	bool dim_enabled;
 181
 182	/* Dynamic Interrupt Moderation */
 183	struct dim dim;
 184
 185	u32 packets_in_napi;
 186
 187	struct virtnet_interrupt_coalesce intr_coal;
 188
 189	/* Chain pages by the private ptr. */
 190	struct page *pages;
 191
 192	/* Average packet length for mergeable receive buffers. */
 193	struct ewma_pkt_len mrg_avg_pkt_len;
 194
 195	/* Page frag for packet buffer allocation. */
 196	struct page_frag alloc_frag;
 197
 198	/* RX: fragments + linear part + virtio header */
 199	struct scatterlist sg[MAX_SKB_FRAGS + 2];
 200
 201	/* Min single buffer size for mergeable buffers case. */
 202	unsigned int min_buf_len;
 203
 204	/* Name of this receive queue: input.$index */
 205	char name[16];
 206
 207	struct xdp_rxq_info xdp_rxq;
 208
 209	/* Record the last dma info to free after new pages are allocated. */
 210	struct virtnet_rq_dma *last_dma;
 211
 212	/* Do DMA mapping ourselves */
 213	bool do_dma;
 214};
 215
 216/* This structure can contain an rss message with maximum settings for the indirection table and key size.
 217 * Note that the default structure describing the RSS configuration, virtio_net_rss_config,
 218 * contains the same info but can't hold the table values.
 219 * In any case, the structure is passed to the virtio hw through sg_buf, split into parts,
 220 * because table sizes may differ according to the device configuration.
 221 */
 222#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
 223#define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
 224struct virtio_net_ctrl_rss {
 225	u32 hash_types;
 226	u16 indirection_table_mask;
 227	u16 unclassified_queue;
 228	u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
 229	u16 max_tx_vq;
 230	u8 hash_key_length;
 231	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
 232};
 233
 234/* Control VQ buffers: protected by the rtnl lock */
 235struct control_buf {
 236	struct virtio_net_ctrl_hdr hdr;
 237	virtio_net_ctrl_ack status;
 238	struct virtio_net_ctrl_mq mq;
 239	u8 promisc;
 240	u8 allmulti;
 241	__virtio16 vid;
 242	__virtio64 offloads;
 243	struct virtio_net_ctrl_rss rss;
 244	struct virtio_net_ctrl_coal_tx coal_tx;
 245	struct virtio_net_ctrl_coal_rx coal_rx;
 246	struct virtio_net_ctrl_coal_vq coal_vq;
 247};
 248
 249struct virtnet_info {
 250	struct virtio_device *vdev;
 251	struct virtqueue *cvq;
 252	struct net_device *dev;
 253	struct send_queue *sq;
 254	struct receive_queue *rq;
 255	unsigned int status;
 256
 257	/* Max # of queue pairs supported by the device */
 258	u16 max_queue_pairs;
 259
 260	/* # of queue pairs currently used by the driver */
 261	u16 curr_queue_pairs;
 262
 263	/* # of XDP queue pairs currently used by the driver */
 264	u16 xdp_queue_pairs;
 265
 266	/* xdp_queue_pairs may be 0 even when xdp is already loaded, so track enablement separately. */
 267	bool xdp_enabled;
 268
 269	/* I like... big packets and I cannot lie! */
 270	bool big_packets;
 271
 272	/* number of sg entries allocated for big packets */
 273	unsigned int big_packets_num_skbfrags;
 274
 275	/* Host will merge rx buffers for big packets (shake it! shake it!) */
 276	bool mergeable_rx_bufs;
 277
 278	/* Host supports rss and/or hash report */
 279	bool has_rss;
 280	bool has_rss_hash_report;
 281	u8 rss_key_size;
 282	u16 rss_indir_table_size;
 283	u32 rss_hash_types_supported;
 284	u32 rss_hash_types_saved;
 285
 286	/* Has control virtqueue */
 287	bool has_cvq;
 288
 289	/* Host can handle any s/g split between our header and packet data */
 290	bool any_header_sg;
 291
 292	/* Packet virtio header size */
 293	u8 hdr_len;
 294
 295	/* Work struct for delayed refilling if we run low on memory. */
 296	struct delayed_work refill;
 297
 298	/* Is delayed refill enabled? */
 299	bool refill_enabled;
 300
 301	/* The lock to synchronize the access to refill_enabled */
 302	spinlock_t refill_lock;
 303
 304	/* Work struct for config space updates */
 305	struct work_struct config_work;
 306
 307	/* Is the affinity hint set for virtqueues? */
 308	bool affinity_hint_set;
 309
 310	/* CPU hotplug instances for online & dead */
 311	struct hlist_node node;
 312	struct hlist_node node_dead;
 313
 314	struct control_buf *ctrl;
 315
 316	/* Ethtool settings */
 317	u8 duplex;
 318	u32 speed;
 319
 320	/* Is rx dynamic interrupt moderation enabled? */
 321	bool rx_dim_enabled;
 322
 323	/* Interrupt coalescing settings */
 324	struct virtnet_interrupt_coalesce intr_coal_tx;
 325	struct virtnet_interrupt_coalesce intr_coal_rx;
 326
 327	unsigned long guest_offloads;
 328	unsigned long guest_offloads_capable;
 329
 330	/* failover when STANDBY feature enabled */
 331	struct failover *failover;
 332};
 333
 334struct padded_vnet_hdr {
 335	struct virtio_net_hdr_v1_hash hdr;
 336	/*
 337	 * hdr is in a separate sg buffer, and data sg buffer shares same page
 338	 * with this header sg. This padding makes next sg 16 byte aligned
 339	 * after the header.
 340	 */
 341	char padding[12];
 342};
 343
 344struct virtio_net_common_hdr {
 345	union {
 346		struct virtio_net_hdr hdr;
 347		struct virtio_net_hdr_mrg_rxbuf	mrg_hdr;
 348		struct virtio_net_hdr_v1_hash hash_v1_hdr;
 349	};
 350};
 351
 352static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 353
 354static bool is_xdp_frame(void *ptr)
 355{
 356	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
 357}
 358
 359static void *xdp_to_ptr(struct xdp_frame *ptr)
 360{
 361	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
 362}
 363
 364static struct xdp_frame *ptr_to_xdp(void *ptr)
 365{
 366	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
 367}
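
/* A rough illustration of the tagging scheme above: the send virtqueue
 * carries completions for both sk_buffs and xdp_frames, so the token
 * stored with each buffer is either a plain skb pointer or an xdp_frame
 * pointer with VIRTIO_XDP_FLAG (bit 0) set.  The pointers involved are
 * word aligned, so bit 0 is otherwise always zero, e.g.:
 *
 *	virtqueue_add_outbuf(sq->vq, sq->sg, num, xdp_to_ptr(xdpf), GFP_ATOMIC);
 *	...
 *	ptr = virtqueue_get_buf(sq->vq, &len);
 *	if (is_xdp_frame(ptr))
 *		frame = ptr_to_xdp(ptr);
 */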
 368
 369/* Converting between virtqueue no. and kernel tx/rx queue no.
 370 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 371 */
 372static int vq2txq(struct virtqueue *vq)
 373{
 374	return (vq->index - 1) / 2;
 375}
 376
 377static int txq2vq(int txq)
 378{
 379	return txq * 2 + 1;
 380}
 381
 382static int vq2rxq(struct virtqueue *vq)
 383{
 384	return vq->index / 2;
 385}
 386
 387static int rxq2vq(int rxq)
 388{
 389	return rxq * 2;
 390}
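
/* Worked example of the mapping above for a device with two queue pairs:
 * virtqueue indices are 0:rx0, 1:tx0, 2:rx1, 3:tx1 and (if present) 4:cvq,
 * so rxq2vq(1) == 2, txq2vq(1) == 3, vq2rxq() of index 2 is 1 and
 * vq2txq() of index 3 is 1.
 */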
 391
 392static inline struct virtio_net_common_hdr *
 393skb_vnet_common_hdr(struct sk_buff *skb)
 394{
 395	return (struct virtio_net_common_hdr *)skb->cb;
 396}
 397
 398/*
 399 * private is used to chain pages for big packets, put the whole
 400 * most recent used list in the beginning for reuse
 401 */
 402static void give_pages(struct receive_queue *rq, struct page *page)
 403{
 404	struct page *end;
 405
 406	/* Find end of list, sew whole thing into vi->rq.pages. */
 407	for (end = page; end->private; end = (struct page *)end->private);
 408	end->private = (unsigned long)rq->pages;
 409	rq->pages = page;
 410}
 411
 412static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
 413{
 414	struct page *p = rq->pages;
 415
 416	if (p) {
 417		rq->pages = (struct page *)p->private;
 418		/* clear private here, it is used to chain pages */
 419		p->private = 0;
 420	} else
 421		p = alloc_page(gfp_mask);
 422	return p;
 423}
 424
 425static void virtnet_rq_free_buf(struct virtnet_info *vi,
 426				struct receive_queue *rq, void *buf)
 427{
 428	if (vi->mergeable_rx_bufs)
 429		put_page(virt_to_head_page(buf));
 430	else if (vi->big_packets)
 431		give_pages(rq, buf);
 432	else
 433		put_page(virt_to_head_page(buf));
 434}
 435
 436static void enable_delayed_refill(struct virtnet_info *vi)
 437{
 438	spin_lock_bh(&vi->refill_lock);
 439	vi->refill_enabled = true;
 440	spin_unlock_bh(&vi->refill_lock);
 441}
 442
 443static void disable_delayed_refill(struct virtnet_info *vi)
 444{
 445	spin_lock_bh(&vi->refill_lock);
 446	vi->refill_enabled = false;
 447	spin_unlock_bh(&vi->refill_lock);
 448}
 449
 450static void virtqueue_napi_schedule(struct napi_struct *napi,
 451				    struct virtqueue *vq)
 452{
 453	if (napi_schedule_prep(napi)) {
 454		virtqueue_disable_cb(vq);
 455		__napi_schedule(napi);
 456	}
 457}
 458
 459static bool virtqueue_napi_complete(struct napi_struct *napi,
 460				    struct virtqueue *vq, int processed)
 461{
 462	int opaque;
 463
 464	opaque = virtqueue_enable_cb_prepare(vq);
 465	if (napi_complete_done(napi, processed)) {
 466		if (unlikely(virtqueue_poll(vq, opaque)))
 467			virtqueue_napi_schedule(napi, vq);
 468		else
 469			return true;
 470	} else {
 471		virtqueue_disable_cb(vq);
 472	}
 473
 474	return false;
 475}
 476
 477static void skb_xmit_done(struct virtqueue *vq)
 478{
 479	struct virtnet_info *vi = vq->vdev->priv;
 480	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
 481
 482	/* Suppress further interrupts. */
 483	virtqueue_disable_cb(vq);
 484
 485	if (napi->weight)
 486		virtqueue_napi_schedule(napi, vq);
 487	else
 488		/* We were probably waiting for more output buffers. */
 489		netif_wake_subqueue(vi->dev, vq2txq(vq));
 490}
 491
 492#define MRG_CTX_HEADER_SHIFT 22
 493static void *mergeable_len_to_ctx(unsigned int truesize,
 494				  unsigned int headroom)
 495{
 496	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
 497}
 498
 499static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
 500{
 501	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
 502}
 503
 504static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
 505{
 506	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
 507}
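
/* A small worked example of the context encoding above: with
 * MRG_CTX_HEADER_SHIFT == 22, mergeable_len_to_ctx(1536, 256) packs
 * (256 << 22) | 1536 == 0x40000600 into the pointer-sized ctx, and the
 * helpers recover headroom 256 (ctx >> 22) and truesize 1536
 * (ctx & 0x3fffff).  The truesize must therefore fit in 22 bits.
 */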
 508
 509static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
 510					 unsigned int headroom,
 511					 unsigned int len)
 512{
 513	struct sk_buff *skb;
 514
 515	skb = build_skb(buf, buflen);
 516	if (unlikely(!skb))
 517		return NULL;
 518
 519	skb_reserve(skb, headroom);
 520	skb_put(skb, len);
 521
 522	return skb;
 523}
 524
 525/* Called from bottom half context */
 526static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 527				   struct receive_queue *rq,
 528				   struct page *page, unsigned int offset,
 529				   unsigned int len, unsigned int truesize,
 530				   unsigned int headroom)
 531{
 532	struct sk_buff *skb;
 533	struct virtio_net_common_hdr *hdr;
 534	unsigned int copy, hdr_len, hdr_padded_len;
 535	struct page *page_to_free = NULL;
 536	int tailroom, shinfo_size;
 537	char *p, *hdr_p, *buf;
 538
 539	p = page_address(page) + offset;
 540	hdr_p = p;
 541
 542	hdr_len = vi->hdr_len;
 543	if (vi->mergeable_rx_bufs)
 544		hdr_padded_len = hdr_len;
 545	else
 546		hdr_padded_len = sizeof(struct padded_vnet_hdr);
 547
 548	buf = p - headroom;
 549	len -= hdr_len;
 550	offset += hdr_padded_len;
 551	p += hdr_padded_len;
 552	tailroom = truesize - headroom - hdr_padded_len - len;
 553
 554	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 555
 556	/* copy small packet so we can reuse these pages */
 557	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
 558		skb = virtnet_build_skb(buf, truesize, p - buf, len);
 559		if (unlikely(!skb))
 560			return NULL;
 561
 562		page = (struct page *)page->private;
 563		if (page)
 564			give_pages(rq, page);
 565		goto ok;
 566	}
 567
 568	/* copy small packet so we can reuse these pages for small data */
 569	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
 570	if (unlikely(!skb))
 571		return NULL;
 572
 573	/* Copy all frame if it fits skb->head, otherwise
 574	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
 575	 */
 576	if (len <= skb_tailroom(skb))
 577		copy = len;
 578	else
 579		copy = ETH_HLEN;
 580	skb_put_data(skb, p, copy);
 581
 582	len -= copy;
 583	offset += copy;
 584
 585	if (vi->mergeable_rx_bufs) {
 586		if (len)
 587			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 588		else
 589			page_to_free = page;
 590		goto ok;
 591	}
 592
 593	/*
 594	 * Verify that we can indeed put this data into a skb.
 595	 * This is here to handle cases when the device erroneously
 596	 * tries to receive more than is possible. This is usually
 597	 * the case of a broken device.
 598	 */
 599	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
 600		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
 601		dev_kfree_skb(skb);
 602		return NULL;
 603	}
 604	BUG_ON(offset >= PAGE_SIZE);
 605	while (len) {
 606		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
 607		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
 608				frag_size, truesize);
 609		len -= frag_size;
 610		page = (struct page *)page->private;
 611		offset = 0;
 612	}
 613
 614	if (page)
 615		give_pages(rq, page);
 616
 617ok:
 618	hdr = skb_vnet_common_hdr(skb);
 619	memcpy(hdr, hdr_p, hdr_len);
 620	if (page_to_free)
 621		put_page(page_to_free);
 622
 623	return skb;
 624}
 625
 626static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
 627{
 628	struct page *page = virt_to_head_page(buf);
 629	struct virtnet_rq_dma *dma;
 630	void *head;
 631	int offset;
 632
 633	head = page_address(page);
 634
 635	dma = head;
 636
 637	--dma->ref;
 638
 639	if (dma->need_sync && len) {
 640		offset = buf - (head + sizeof(*dma));
 641
 642		virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
 643							offset, len,
 644							DMA_FROM_DEVICE);
 645	}
 646
 647	if (dma->ref)
 648		return;
 649
 650	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
 651					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 652	put_page(page);
 653}
 654
 655static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
 656{
 657	void *buf;
 658
 659	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
 660	if (buf && rq->do_dma)
 661		virtnet_rq_unmap(rq, buf, *len);
 662
 663	return buf;
 664}
 665
 666static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
 667{
 668	struct virtnet_rq_dma *dma;
 669	dma_addr_t addr;
 670	u32 offset;
 671	void *head;
 672
 673	if (!rq->do_dma) {
 674		sg_init_one(rq->sg, buf, len);
 675		return;
 676	}
 677
 678	head = page_address(rq->alloc_frag.page);
 679
 680	offset = buf - head;
 681
 682	dma = head;
 683
 684	addr = dma->addr - sizeof(*dma) + offset;
 685
 686	sg_init_table(rq->sg, 1);
 687	rq->sg[0].dma_address = addr;
 688	rq->sg[0].length = len;
 689}
 690
 691static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 692{
 693	struct page_frag *alloc_frag = &rq->alloc_frag;
 694	struct virtnet_rq_dma *dma;
 695	void *buf, *head;
 696	dma_addr_t addr;
 697
 698	if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
 699		return NULL;
 700
 701	head = page_address(alloc_frag->page);
 702
 703	if (rq->do_dma) {
 704		dma = head;
 705
 706		/* new pages */
 707		if (!alloc_frag->offset) {
 708			if (rq->last_dma) {
 709				/* Now that the new page is allocated, the last dma
 710				 * will not be used, so it can be unmapped
 711				 * if its ref is 0.
 712				 */
 713				virtnet_rq_unmap(rq, rq->last_dma, 0);
 714				rq->last_dma = NULL;
 715			}
 716
 717			dma->len = alloc_frag->size - sizeof(*dma);
 718
 719			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
 720							      dma->len, DMA_FROM_DEVICE, 0);
 721			if (virtqueue_dma_mapping_error(rq->vq, addr))
 722				return NULL;
 723
 724			dma->addr = addr;
 725			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
 726
 727			/* Add a reference to dma to prevent the entire dma from
 728			 * being released during error handling. This reference
 729			 * will be freed after the pages are no longer used.
 730			 */
 731			get_page(alloc_frag->page);
 732			dma->ref = 1;
 733			alloc_frag->offset = sizeof(*dma);
 734
 735			rq->last_dma = dma;
 736		}
 737
 738		++dma->ref;
 739	}
 740
 741	buf = head + alloc_frag->offset;
 742
 743	get_page(alloc_frag->page);
 744	alloc_frag->offset += size;
 745
 746	return buf;
 747}
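
/* A rough sketch of the page-frag layout used above when rq->do_dma is
 * set: a struct virtnet_rq_dma sits at the start of each page-frag page
 * and describes the single DMA mapping covering the rest of the page,
 * while dma->ref roughly tracks how many buffers carved out of it are
 * still outstanding:
 *
 *	page: [ virtnet_rq_dma | buf0 | buf1 | ... unused ]
 *	                         alloc_frag->offset advances to the right
 *
 * Each returned buffer also holds a page reference; virtnet_rq_unmap()
 * drops dma->ref and unmaps the page once the count reaches zero.
 */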
 748
 749static void virtnet_rq_set_premapped(struct virtnet_info *vi)
 750{
 751	int i;
 752
 753	/* disable for big mode */
 754	if (!vi->mergeable_rx_bufs && vi->big_packets)
 755		return;
 756
 757	for (i = 0; i < vi->max_queue_pairs; i++) {
 758		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
 759			continue;
 760
 761		vi->rq[i].do_dma = true;
 762	}
 763}
 764
 765static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
 766{
 767	struct virtnet_info *vi = vq->vdev->priv;
 768	struct receive_queue *rq;
 769	int i = vq2rxq(vq);
 770
 771	rq = &vi->rq[i];
 772
 773	if (rq->do_dma)
 774		virtnet_rq_unmap(rq, buf, 0);
 775
 776	virtnet_rq_free_buf(vi, rq, buf);
 777}
 778
 779static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 780{
 781	unsigned int len;
 782	unsigned int packets = 0;
 783	unsigned int bytes = 0;
 784	void *ptr;
 785
 786	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
 787		if (likely(!is_xdp_frame(ptr))) {
 788			struct sk_buff *skb = ptr;
 789
 790			pr_debug("Sent skb %p\n", skb);
 791
 792			bytes += skb->len;
 793			napi_consume_skb(skb, in_napi);
 794		} else {
 795			struct xdp_frame *frame = ptr_to_xdp(ptr);
 796
 797			bytes += xdp_get_frame_len(frame);
 798			xdp_return_frame(frame);
 799		}
 800		packets++;
 801	}
 802
 803	/* Avoid overhead when no packets have been processed;
 804	 * this happens when called speculatively from start_xmit.
 805	 */
 806	if (!packets)
 807		return;
 808
 809	u64_stats_update_begin(&sq->stats.syncp);
 810	u64_stats_add(&sq->stats.bytes, bytes);
 811	u64_stats_add(&sq->stats.packets, packets);
 812	u64_stats_update_end(&sq->stats.syncp);
 813}
 814
 815static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
 816{
 817	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
 818		return false;
 819	else if (q < vi->curr_queue_pairs)
 820		return true;
 821	else
 822		return false;
 823}
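
/* Put differently: the XDP-only send queues (if any) occupy the last
 * vi->xdp_queue_pairs slots of the currently used range, and this helper
 * simply checks whether queue q falls inside that reserved range.
 */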
 824
 825static void check_sq_full_and_disable(struct virtnet_info *vi,
 826				      struct net_device *dev,
 827				      struct send_queue *sq)
 828{
 829	bool use_napi = sq->napi.weight;
 830	int qnum;
 831
 832	qnum = sq - vi->sq;
 833
 834	/* If running out of space, stop queue to avoid getting packets that we
 835	 * are then unable to transmit.
 836	 * An alternative would be to force queuing layer to requeue the skb by
 837	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
 838	 * returned in a normal path of operation: it means that driver is not
 839	 * maintaining the TX queue stop/start state properly, and causes
 840	 * the stack to do a non-trivial amount of useless work.
 841	 * Since most packets only take 1 or 2 ring slots, stopping the queue
 842	 * early means 16 slots are typically wasted.
 843	 */
 844	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
 845		netif_stop_subqueue(dev, qnum);
 846		if (use_napi) {
 847			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
 848				virtqueue_napi_schedule(&sq->napi, sq->vq);
 849		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 850			/* More just got used, free them then recheck. */
 851			free_old_xmit_skbs(sq, false);
 852			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 853				netif_start_subqueue(dev, qnum);
 854				virtqueue_disable_cb(sq->vq);
 855			}
 856		}
 857	}
 858}
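
/* The "2 + MAX_SKB_FRAGS" threshold above is the worst-case descriptor
 * count for a single skb: up to MAX_SKB_FRAGS fragments plus the linear
 * part plus the virtio header, matching the sq->sg size declared earlier.
 */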
 859
 860static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 861				   struct send_queue *sq,
 862				   struct xdp_frame *xdpf)
 863{
 864	struct virtio_net_hdr_mrg_rxbuf *hdr;
 865	struct skb_shared_info *shinfo;
 866	u8 nr_frags = 0;
 867	int err, i;
 868
 869	if (unlikely(xdpf->headroom < vi->hdr_len))
 870		return -EOVERFLOW;
 871
 872	if (unlikely(xdp_frame_has_frags(xdpf))) {
 873		shinfo = xdp_get_shared_info_from_frame(xdpf);
 874		nr_frags = shinfo->nr_frags;
 875	}
 876
 877	/* In the wrapping function virtnet_xdp_xmit(), we need to free
 878	 * up the pending old buffers, where we need to calculate the
 879	 * position of skb_shared_info in xdp_get_frame_len() and
 880	 * xdp_return_frame(), both of which rely on xdpf->data and
 881	 * xdpf->headroom. Therefore, we need to update the value of
 882	 * headroom synchronously here.
 883	 */
 884	xdpf->headroom -= vi->hdr_len;
 885	xdpf->data -= vi->hdr_len;
 886	/* Zero header and leave csum up to XDP layers */
 887	hdr = xdpf->data;
 888	memset(hdr, 0, vi->hdr_len);
 889	xdpf->len   += vi->hdr_len;
 890
 891	sg_init_table(sq->sg, nr_frags + 1);
 892	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
 893	for (i = 0; i < nr_frags; i++) {
 894		skb_frag_t *frag = &shinfo->frags[i];
 895
 896		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
 897			    skb_frag_size(frag), skb_frag_off(frag));
 898	}
 899
 900	err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
 901				   xdp_to_ptr(xdpf), GFP_ATOMIC);
 902	if (unlikely(err))
 903		return -ENOSPC; /* Caller handle free/refcnt */
 904
 905	return 0;
 906}
 907
 908/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
 909 * the current cpu, so it does not need to be locked.
 910 *
 911 * Here we use a macro instead of inline functions because we have to deal with
 912 * three issues at the same time: 1. the choice of sq; 2. deciding whether to
 913 * lock/unlock the txq; 3. making sparse happy. It is difficult for two inline
 914 * functions to solve all three problems at the same time.
 915 */
 916#define virtnet_xdp_get_sq(vi) ({                                       \
 917	int cpu = smp_processor_id();                                   \
 918	struct netdev_queue *txq;                                       \
 919	typeof(vi) v = (vi);                                            \
 920	unsigned int qp;                                                \
 921									\
 922	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
 923		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
 924		qp += cpu;                                              \
 925		txq = netdev_get_tx_queue(v->dev, qp);                  \
 926		__netif_tx_acquire(txq);                                \
 927	} else {                                                        \
 928		qp = cpu % v->curr_queue_pairs;                         \
 929		txq = netdev_get_tx_queue(v->dev, qp);                  \
 930		__netif_tx_lock(txq, cpu);                              \
 931	}                                                               \
 932	v->sq + qp;                                                     \
 933})
 934
 935#define virtnet_xdp_put_sq(vi, q) {                                     \
 936	struct netdev_queue *txq;                                       \
 937	typeof(vi) v = (vi);                                            \
 938									\
 939	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
 940	if (v->curr_queue_pairs > nr_cpu_ids)                           \
 941		__netif_tx_release(txq);                                \
 942	else                                                            \
 943		__netif_tx_unlock(txq);                                 \
 944}
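
/* Typical usage of the pair above, as a minimal sketch mirroring
 * virtnet_xdp_xmit() below:
 *
 *	sq = virtnet_xdp_get_sq(vi);
 *	... add xdp frames to sq->vq, optionally kick ...
 *	virtnet_xdp_put_sq(vi, sq);
 *
 * With more queue pairs than CPUs each CPU owns a dedicated XDP send
 * queue and only __netif_tx_acquire() is needed; otherwise queues are
 * shared across CPUs and the txq lock must be taken.
 */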
 945
 946static int virtnet_xdp_xmit(struct net_device *dev,
 947			    int n, struct xdp_frame **frames, u32 flags)
 948{
 949	struct virtnet_info *vi = netdev_priv(dev);
 950	struct receive_queue *rq = vi->rq;
 951	struct bpf_prog *xdp_prog;
 952	struct send_queue *sq;
 953	unsigned int len;
 954	int packets = 0;
 955	int bytes = 0;
 956	int nxmit = 0;
 957	int kicks = 0;
 958	void *ptr;
 959	int ret;
 960	int i;
 961
 962	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
 963	 * indicates that XDP resources have been successfully allocated.
 964	 */
 965	xdp_prog = rcu_access_pointer(rq->xdp_prog);
 966	if (!xdp_prog)
 967		return -ENXIO;
 968
 969	sq = virtnet_xdp_get_sq(vi);
 970
 971	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
 972		ret = -EINVAL;
 973		goto out;
 974	}
 975
 976	/* Free up any pending old buffers before queueing new ones. */
 977	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
 978		if (likely(is_xdp_frame(ptr))) {
 979			struct xdp_frame *frame = ptr_to_xdp(ptr);
 980
 981			bytes += xdp_get_frame_len(frame);
 982			xdp_return_frame(frame);
 983		} else {
 984			struct sk_buff *skb = ptr;
 985
 986			bytes += skb->len;
 987			napi_consume_skb(skb, false);
 988		}
 989		packets++;
 990	}
 991
 992	for (i = 0; i < n; i++) {
 993		struct xdp_frame *xdpf = frames[i];
 994
 995		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
 996			break;
 997		nxmit++;
 998	}
 999	ret = nxmit;
1000
1001	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1002		check_sq_full_and_disable(vi, dev, sq);
1003
1004	if (flags & XDP_XMIT_FLUSH) {
1005		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1006			kicks = 1;
1007	}
1008out:
1009	u64_stats_update_begin(&sq->stats.syncp);
1010	u64_stats_add(&sq->stats.bytes, bytes);
1011	u64_stats_add(&sq->stats.packets, packets);
1012	u64_stats_add(&sq->stats.xdp_tx, n);
1013	u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
1014	u64_stats_add(&sq->stats.kicks, kicks);
1015	u64_stats_update_end(&sq->stats.syncp);
1016
1017	virtnet_xdp_put_sq(vi, sq);
1018	return ret;
1019}
1020
1021static void put_xdp_frags(struct xdp_buff *xdp)
1022{
1023	struct skb_shared_info *shinfo;
1024	struct page *xdp_page;
1025	int i;
1026
1027	if (xdp_buff_has_frags(xdp)) {
1028		shinfo = xdp_get_shared_info_from_buff(xdp);
1029		for (i = 0; i < shinfo->nr_frags; i++) {
1030			xdp_page = skb_frag_page(&shinfo->frags[i]);
1031			put_page(xdp_page);
1032		}
1033	}
1034}
1035
1036static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
1037			       struct net_device *dev,
1038			       unsigned int *xdp_xmit,
1039			       struct virtnet_rq_stats *stats)
1040{
1041	struct xdp_frame *xdpf;
1042	int err;
1043	u32 act;
1044
1045	act = bpf_prog_run_xdp(xdp_prog, xdp);
1046	u64_stats_inc(&stats->xdp_packets);
1047
1048	switch (act) {
1049	case XDP_PASS:
1050		return act;
1051
1052	case XDP_TX:
1053		u64_stats_inc(&stats->xdp_tx);
1054		xdpf = xdp_convert_buff_to_frame(xdp);
1055		if (unlikely(!xdpf)) {
1056			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1057			return XDP_DROP;
1058		}
1059
1060		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1061		if (unlikely(!err)) {
1062			xdp_return_frame_rx_napi(xdpf);
1063		} else if (unlikely(err < 0)) {
1064			trace_xdp_exception(dev, xdp_prog, act);
1065			return XDP_DROP;
1066		}
1067		*xdp_xmit |= VIRTIO_XDP_TX;
1068		return act;
1069
1070	case XDP_REDIRECT:
1071		u64_stats_inc(&stats->xdp_redirects);
1072		err = xdp_do_redirect(dev, xdp, xdp_prog);
1073		if (err)
1074			return XDP_DROP;
1075
1076		*xdp_xmit |= VIRTIO_XDP_REDIR;
1077		return act;
1078
1079	default:
1080		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
1081		fallthrough;
1082	case XDP_ABORTED:
1083		trace_xdp_exception(dev, xdp_prog, act);
1084		fallthrough;
1085	case XDP_DROP:
1086		return XDP_DROP;
1087	}
1088}
1089
1090static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1091{
1092	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
1093}
1094
1095/* We copy the packet for XDP in the following cases:
1096 *
1097 * 1) Packet is scattered across multiple rx buffers.
1098 * 2) Headroom space is insufficient.
1099 *
1100 * This is inefficient but it's a temporary condition that
1101 * we hit right after XDP is enabled and until queue is refilled
1102 * with large buffers with sufficient headroom - so it should affect
1103 * at most queue size packets.
1104 * Afterwards, the conditions to enable
1105 * XDP should preclude the underlying device from sending packets
1106 * across multiple buffers (num_buf > 1), and we make sure buffers
1107 * have enough headroom.
1108 */
1109static struct page *xdp_linearize_page(struct receive_queue *rq,
1110				       int *num_buf,
1111				       struct page *p,
1112				       int offset,
1113				       int page_off,
1114				       unsigned int *len)
1115{
1116	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1117	struct page *page;
1118
1119	if (page_off + *len + tailroom > PAGE_SIZE)
1120		return NULL;
1121
1122	page = alloc_page(GFP_ATOMIC);
1123	if (!page)
1124		return NULL;
1125
1126	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
1127	page_off += *len;
1128
1129	while (--*num_buf) {
1130		unsigned int buflen;
1131		void *buf;
1132		int off;
1133
1134		buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1135		if (unlikely(!buf))
1136			goto err_buf;
1137
1138		p = virt_to_head_page(buf);
1139		off = buf - page_address(p);
1140
1141		/* guard against a misconfigured or uncooperative backend that
1142		 * is sending packets larger than the MTU.
1143		 */
1144		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
1145			put_page(p);
1146			goto err_buf;
1147		}
1148
1149		memcpy(page_address(page) + page_off,
1150		       page_address(p) + off, buflen);
1151		page_off += buflen;
1152		put_page(p);
1153	}
1154
1155	/* Headroom does not contribute to packet length */
1156	*len = page_off - VIRTIO_XDP_HEADROOM;
1157	return page;
1158err_buf:
1159	__free_pages(page, 0);
1160	return NULL;
1161}
1162
1163static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
1164					       unsigned int xdp_headroom,
1165					       void *buf,
1166					       unsigned int len)
1167{
1168	unsigned int header_offset;
1169	unsigned int headroom;
1170	unsigned int buflen;
1171	struct sk_buff *skb;
1172
1173	header_offset = VIRTNET_RX_PAD + xdp_headroom;
1174	headroom = vi->hdr_len + header_offset;
1175	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1176		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1177
1178	skb = virtnet_build_skb(buf, buflen, headroom, len);
1179	if (unlikely(!skb))
1180		return NULL;
1181
1182	buf += header_offset;
1183	memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
1184
1185	return skb;
1186}
1187
1188static struct sk_buff *receive_small_xdp(struct net_device *dev,
1189					 struct virtnet_info *vi,
1190					 struct receive_queue *rq,
1191					 struct bpf_prog *xdp_prog,
1192					 void *buf,
1193					 unsigned int xdp_headroom,
1194					 unsigned int len,
1195					 unsigned int *xdp_xmit,
1196					 struct virtnet_rq_stats *stats)
1197{
1198	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
1199	unsigned int headroom = vi->hdr_len + header_offset;
1200	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1201	struct page *page = virt_to_head_page(buf);
1202	struct page *xdp_page;
1203	unsigned int buflen;
1204	struct xdp_buff xdp;
1205	struct sk_buff *skb;
1206	unsigned int metasize = 0;
1207	u32 act;
1208
1209	if (unlikely(hdr->hdr.gso_type))
1210		goto err_xdp;
1211
1212	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1213		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1214
1215	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
1216		int offset = buf - page_address(page) + header_offset;
1217		unsigned int tlen = len + vi->hdr_len;
1218		int num_buf = 1;
1219
1220		xdp_headroom = virtnet_get_headroom(vi);
1221		header_offset = VIRTNET_RX_PAD + xdp_headroom;
1222		headroom = vi->hdr_len + header_offset;
1223		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1224			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1225		xdp_page = xdp_linearize_page(rq, &num_buf, page,
1226					      offset, header_offset,
1227					      &tlen);
1228		if (!xdp_page)
1229			goto err_xdp;
1230
1231		buf = page_address(xdp_page);
1232		put_page(page);
1233		page = xdp_page;
1234	}
1235
1236	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1237	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1238			 xdp_headroom, len, true);
1239
1240	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1241
1242	switch (act) {
1243	case XDP_PASS:
1244		/* Recalculate length in case bpf program changed it */
1245		len = xdp.data_end - xdp.data;
1246		metasize = xdp.data - xdp.data_meta;
1247		break;
1248
1249	case XDP_TX:
1250	case XDP_REDIRECT:
1251		goto xdp_xmit;
1252
1253	default:
1254		goto err_xdp;
1255	}
1256
1257	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
1258	if (unlikely(!skb))
1259		goto err;
1260
1261	if (metasize)
1262		skb_metadata_set(skb, metasize);
1263
1264	return skb;
1265
1266err_xdp:
1267	u64_stats_inc(&stats->xdp_drops);
1268err:
1269	u64_stats_inc(&stats->drops);
1270	put_page(page);
1271xdp_xmit:
1272	return NULL;
1273}
1274
1275static struct sk_buff *receive_small(struct net_device *dev,
1276				     struct virtnet_info *vi,
1277				     struct receive_queue *rq,
1278				     void *buf, void *ctx,
1279				     unsigned int len,
1280				     unsigned int *xdp_xmit,
1281				     struct virtnet_rq_stats *stats)
1282{
1283	unsigned int xdp_headroom = (unsigned long)ctx;
1284	struct page *page = virt_to_head_page(buf);
1285	struct sk_buff *skb;
1286
1287	len -= vi->hdr_len;
1288	u64_stats_add(&stats->bytes, len);
1289
1290	if (unlikely(len > GOOD_PACKET_LEN)) {
1291		pr_debug("%s: rx error: len %u exceeds max size %d\n",
1292			 dev->name, len, GOOD_PACKET_LEN);
1293		DEV_STATS_INC(dev, rx_length_errors);
1294		goto err;
1295	}
1296
1297	if (unlikely(vi->xdp_enabled)) {
1298		struct bpf_prog *xdp_prog;
1299
1300		rcu_read_lock();
1301		xdp_prog = rcu_dereference(rq->xdp_prog);
1302		if (xdp_prog) {
1303			skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
1304						xdp_headroom, len, xdp_xmit,
1305						stats);
1306			rcu_read_unlock();
1307			return skb;
1308		}
1309		rcu_read_unlock();
1310	}
1311
1312	skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
1313	if (likely(skb))
1314		return skb;
1315
1316err:
1317	u64_stats_inc(&stats->drops);
1318	put_page(page);
1319	return NULL;
1320}
1321
1322static struct sk_buff *receive_big(struct net_device *dev,
1323				   struct virtnet_info *vi,
1324				   struct receive_queue *rq,
1325				   void *buf,
1326				   unsigned int len,
1327				   struct virtnet_rq_stats *stats)
1328{
1329	struct page *page = buf;
1330	struct sk_buff *skb =
1331		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
1332
1333	u64_stats_add(&stats->bytes, len - vi->hdr_len);
1334	if (unlikely(!skb))
1335		goto err;
1336
1337	return skb;
1338
1339err:
1340	u64_stats_inc(&stats->drops);
1341	give_pages(rq, page);
1342	return NULL;
1343}
1344
1345static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
1346			       struct net_device *dev,
1347			       struct virtnet_rq_stats *stats)
1348{
1349	struct page *page;
1350	void *buf;
1351	int len;
1352
1353	while (num_buf-- > 1) {
1354		buf = virtnet_rq_get_buf(rq, &len, NULL);
1355		if (unlikely(!buf)) {
1356			pr_debug("%s: rx error: %d buffers missing\n",
1357				 dev->name, num_buf);
1358			DEV_STATS_INC(dev, rx_length_errors);
1359			break;
1360		}
1361		u64_stats_add(&stats->bytes, len);
1362		page = virt_to_head_page(buf);
1363		put_page(page);
1364	}
1365}
1366
1367/* Why not use xdp_build_skb_from_frame() ?
1368 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
1369 * virtio-net there are 2 points that do not match its requirements:
1370 *  1. The size of the prefilled buffer is not fixed before xdp is set.
1371 *  2. xdp_build_skb_from_frame() does extra checks that we don't need,
1372 *     like eth_type_trans() (which virtio-net does in receive_buf()).
1373 */
1374static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
1375					       struct virtnet_info *vi,
1376					       struct xdp_buff *xdp,
1377					       unsigned int xdp_frags_truesz)
1378{
1379	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
1380	unsigned int headroom, data_len;
1381	struct sk_buff *skb;
1382	int metasize;
1383	u8 nr_frags;
1384
1385	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
1386		pr_debug("Error building skb as missing reserved tailroom for xdp");
1387		return NULL;
1388	}
1389
1390	if (unlikely(xdp_buff_has_frags(xdp)))
1391		nr_frags = sinfo->nr_frags;
1392
1393	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
1394	if (unlikely(!skb))
1395		return NULL;
1396
1397	headroom = xdp->data - xdp->data_hard_start;
1398	data_len = xdp->data_end - xdp->data;
1399	skb_reserve(skb, headroom);
1400	__skb_put(skb, data_len);
1401
1402	metasize = xdp->data - xdp->data_meta;
1403	metasize = metasize > 0 ? metasize : 0;
1404	if (metasize)
1405		skb_metadata_set(skb, metasize);
1406
1407	if (unlikely(xdp_buff_has_frags(xdp)))
1408		xdp_update_skb_shared_info(skb, nr_frags,
1409					   sinfo->xdp_frags_size,
1410					   xdp_frags_truesz,
1411					   xdp_buff_is_frag_pfmemalloc(xdp));
1412
1413	return skb;
1414}
1415
1416/* TODO: build xdp in big mode */
1417static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
1418				      struct virtnet_info *vi,
1419				      struct receive_queue *rq,
1420				      struct xdp_buff *xdp,
1421				      void *buf,
1422				      unsigned int len,
1423				      unsigned int frame_sz,
1424				      int *num_buf,
1425				      unsigned int *xdp_frags_truesize,
1426				      struct virtnet_rq_stats *stats)
1427{
1428	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1429	unsigned int headroom, tailroom, room;
1430	unsigned int truesize, cur_frag_size;
1431	struct skb_shared_info *shinfo;
1432	unsigned int xdp_frags_truesz = 0;
1433	struct page *page;
1434	skb_frag_t *frag;
1435	int offset;
1436	void *ctx;
1437
1438	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
1439	xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
1440			 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
1441
1442	if (!*num_buf)
1443		return 0;
1444
1445	if (*num_buf > 1) {
1446		/* If we want to build a multi-buffer xdp_buff, we need
1447		 * to set the XDP_FLAGS_HAS_FRAG bit in the
1448		 * xdp_buff flags.
1449		 */
1450		if (!xdp_buff_has_frags(xdp))
1451			xdp_buff_set_frags_flag(xdp);
1452
1453		shinfo = xdp_get_shared_info_from_buff(xdp);
1454		shinfo->nr_frags = 0;
1455		shinfo->xdp_frags_size = 0;
1456	}
1457
1458	if (*num_buf > MAX_SKB_FRAGS + 1)
1459		return -EINVAL;
1460
1461	while (--*num_buf > 0) {
1462		buf = virtnet_rq_get_buf(rq, &len, &ctx);
1463		if (unlikely(!buf)) {
1464			pr_debug("%s: rx error: %d buffers out of %d missing\n",
1465				 dev->name, *num_buf,
1466				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
1467			DEV_STATS_INC(dev, rx_length_errors);
1468			goto err;
1469		}
1470
1471		u64_stats_add(&stats->bytes, len);
1472		page = virt_to_head_page(buf);
1473		offset = buf - page_address(page);
1474
1475		truesize = mergeable_ctx_to_truesize(ctx);
1476		headroom = mergeable_ctx_to_headroom(ctx);
1477		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1478		room = SKB_DATA_ALIGN(headroom + tailroom);
1479
1480		cur_frag_size = truesize;
1481		xdp_frags_truesz += cur_frag_size;
1482		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
1483			put_page(page);
1484			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1485				 dev->name, len, (unsigned long)(truesize - room));
1486			DEV_STATS_INC(dev, rx_length_errors);
1487			goto err;
1488		}
1489
1490		frag = &shinfo->frags[shinfo->nr_frags++];
1491		skb_frag_fill_page_desc(frag, page, offset, len);
1492		if (page_is_pfmemalloc(page))
1493			xdp_buff_set_frag_pfmemalloc(xdp);
1494
1495		shinfo->xdp_frags_size += len;
1496	}
1497
1498	*xdp_frags_truesize = xdp_frags_truesz;
1499	return 0;
1500
1501err:
1502	put_xdp_frags(xdp);
1503	return -EINVAL;
1504}
1505
1506static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
1507				   struct receive_queue *rq,
1508				   struct bpf_prog *xdp_prog,
1509				   void *ctx,
1510				   unsigned int *frame_sz,
1511				   int *num_buf,
1512				   struct page **page,
1513				   int offset,
1514				   unsigned int *len,
1515				   struct virtio_net_hdr_mrg_rxbuf *hdr)
1516{
1517	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1518	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1519	struct page *xdp_page;
1520	unsigned int xdp_room;
1521
1522	/* Transient failure which in theory could occur if
1523	 * in-flight packets from before XDP was enabled reach
1524	 * the receive path after XDP is loaded.
1525	 */
1526	if (unlikely(hdr->hdr.gso_type))
1527		return NULL;
1528
1529	/* The XDP core assumes the frag size is PAGE_SIZE, but buffers
1530	 * with headroom may add a hole in truesize, which
1531	 * makes their length exceed PAGE_SIZE. So we disabled the
1532	 * hole mechanism for xdp. See add_recvbuf_mergeable().
1533	 */
1534	*frame_sz = truesize;
1535
1536	if (likely(headroom >= virtnet_get_headroom(vi) &&
1537		   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
1538		return page_address(*page) + offset;
1539	}
1540
1541	/* This happens when headroom is not enough because
1542	 * the buffer was prefilled before XDP is set.
1543	 * This should only happen for the first several packets.
1544	 * In fact, vq reset can be used here to help us clean up
1545	 * the prefilled buffers, but many existing devices do not
1546	 * support it, and we don't want to bother users who are
1547	 * using xdp normally.
1548	 */
1549	if (!xdp_prog->aux->xdp_has_frags) {
1550		/* linearize data for XDP */
1551		xdp_page = xdp_linearize_page(rq, num_buf,
1552					      *page, offset,
1553					      VIRTIO_XDP_HEADROOM,
1554					      len);
1555		if (!xdp_page)
1556			return NULL;
1557	} else {
1558		xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1559					  sizeof(struct skb_shared_info));
1560		if (*len + xdp_room > PAGE_SIZE)
1561			return NULL;
1562
1563		xdp_page = alloc_page(GFP_ATOMIC);
1564		if (!xdp_page)
1565			return NULL;
1566
1567		memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1568		       page_address(*page) + offset, *len);
1569	}
1570
1571	*frame_sz = PAGE_SIZE;
1572
1573	put_page(*page);
1574
1575	*page = xdp_page;
1576
1577	return page_address(*page) + VIRTIO_XDP_HEADROOM;
1578}
1579
1580static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1581					     struct virtnet_info *vi,
1582					     struct receive_queue *rq,
1583					     struct bpf_prog *xdp_prog,
1584					     void *buf,
1585					     void *ctx,
1586					     unsigned int len,
1587					     unsigned int *xdp_xmit,
1588					     struct virtnet_rq_stats *stats)
1589{
1590	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1591	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1592	struct page *page = virt_to_head_page(buf);
1593	int offset = buf - page_address(page);
1594	unsigned int xdp_frags_truesz = 0;
1595	struct sk_buff *head_skb;
1596	unsigned int frame_sz;
1597	struct xdp_buff xdp;
1598	void *data;
1599	u32 act;
1600	int err;
1601
1602	data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1603				     offset, &len, hdr);
1604	if (unlikely(!data))
1605		goto err_xdp;
1606
1607	err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1608					 &num_buf, &xdp_frags_truesz, stats);
1609	if (unlikely(err))
1610		goto err_xdp;
1611
1612	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1613
1614	switch (act) {
1615	case XDP_PASS:
1616		head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1617		if (unlikely(!head_skb))
1618			break;
1619		return head_skb;
1620
1621	case XDP_TX:
1622	case XDP_REDIRECT:
1623		return NULL;
1624
1625	default:
1626		break;
1627	}
1628
1629	put_xdp_frags(&xdp);
1630
1631err_xdp:
1632	put_page(page);
1633	mergeable_buf_free(rq, num_buf, dev, stats);
1634
1635	u64_stats_inc(&stats->xdp_drops);
1636	u64_stats_inc(&stats->drops);
1637	return NULL;
1638}
1639
1640static struct sk_buff *receive_mergeable(struct net_device *dev,
1641					 struct virtnet_info *vi,
1642					 struct receive_queue *rq,
1643					 void *buf,
1644					 void *ctx,
1645					 unsigned int len,
1646					 unsigned int *xdp_xmit,
1647					 struct virtnet_rq_stats *stats)
1648{
1649	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1650	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1651	struct page *page = virt_to_head_page(buf);
1652	int offset = buf - page_address(page);
1653	struct sk_buff *head_skb, *curr_skb;
1654	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1655	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1656	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1657	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1658
1659	head_skb = NULL;
1660	u64_stats_add(&stats->bytes, len - vi->hdr_len);
1661
1662	if (unlikely(len > truesize - room)) {
1663		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1664			 dev->name, len, (unsigned long)(truesize - room));
1665		DEV_STATS_INC(dev, rx_length_errors);
1666		goto err_skb;
1667	}
1668
1669	if (unlikely(vi->xdp_enabled)) {
1670		struct bpf_prog *xdp_prog;
1671
1672		rcu_read_lock();
1673		xdp_prog = rcu_dereference(rq->xdp_prog);
1674		if (xdp_prog) {
1675			head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1676							 len, xdp_xmit, stats);
1677			rcu_read_unlock();
1678			return head_skb;
1679		}
1680		rcu_read_unlock();
1681	}
1682
1683	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1684	curr_skb = head_skb;
1685
1686	if (unlikely(!curr_skb))
1687		goto err_skb;
1688	while (--num_buf) {
1689		int num_skb_frags;
1690
1691		buf = virtnet_rq_get_buf(rq, &len, &ctx);
1692		if (unlikely(!buf)) {
1693			pr_debug("%s: rx error: %d buffers out of %d missing\n",
1694				 dev->name, num_buf,
1695				 virtio16_to_cpu(vi->vdev,
1696						 hdr->num_buffers));
1697			DEV_STATS_INC(dev, rx_length_errors);
1698			goto err_buf;
1699		}
1700
1701		u64_stats_add(&stats->bytes, len);
1702		page = virt_to_head_page(buf);
1703
1704		truesize = mergeable_ctx_to_truesize(ctx);
1705		headroom = mergeable_ctx_to_headroom(ctx);
1706		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1707		room = SKB_DATA_ALIGN(headroom + tailroom);
1708		if (unlikely(len > truesize - room)) {
1709			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1710				 dev->name, len, (unsigned long)(truesize - room));
1711			DEV_STATS_INC(dev, rx_length_errors);
1712			goto err_skb;
1713		}
1714
1715		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
1716		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1717			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
1718
1719			if (unlikely(!nskb))
1720				goto err_skb;
1721			if (curr_skb == head_skb)
1722				skb_shinfo(curr_skb)->frag_list = nskb;
1723			else
1724				curr_skb->next = nskb;
1725			curr_skb = nskb;
1726			head_skb->truesize += nskb->truesize;
1727			num_skb_frags = 0;
1728		}
1729		if (curr_skb != head_skb) {
1730			head_skb->data_len += len;
1731			head_skb->len += len;
1732			head_skb->truesize += truesize;
1733		}
1734		offset = buf - page_address(page);
1735		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1736			put_page(page);
1737			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1738					     len, truesize);
1739		} else {
1740			skb_add_rx_frag(curr_skb, num_skb_frags, page,
1741					offset, len, truesize);
1742		}
1743	}
1744
1745	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
1746	return head_skb;
1747
1748err_skb:
1749	put_page(page);
1750	mergeable_buf_free(rq, num_buf, dev, stats);
1751
1752err_buf:
1753	u64_stats_inc(&stats->drops);
1754	dev_kfree_skb(head_skb);
1755	return NULL;
1756}
1757
1758static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
1759				struct sk_buff *skb)
1760{
1761	enum pkt_hash_types rss_hash_type;
1762
1763	if (!hdr_hash || !skb)
1764		return;
1765
1766	switch (__le16_to_cpu(hdr_hash->hash_report)) {
1767	case VIRTIO_NET_HASH_REPORT_TCPv4:
1768	case VIRTIO_NET_HASH_REPORT_UDPv4:
1769	case VIRTIO_NET_HASH_REPORT_TCPv6:
1770	case VIRTIO_NET_HASH_REPORT_UDPv6:
1771	case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
1772	case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
1773		rss_hash_type = PKT_HASH_TYPE_L4;
1774		break;
1775	case VIRTIO_NET_HASH_REPORT_IPv4:
1776	case VIRTIO_NET_HASH_REPORT_IPv6:
1777	case VIRTIO_NET_HASH_REPORT_IPv6_EX:
1778		rss_hash_type = PKT_HASH_TYPE_L3;
1779		break;
1780	case VIRTIO_NET_HASH_REPORT_NONE:
1781	default:
1782		rss_hash_type = PKT_HASH_TYPE_NONE;
1783	}
1784	skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
1785}
1786
1787static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1788			void *buf, unsigned int len, void **ctx,
1789			unsigned int *xdp_xmit,
1790			struct virtnet_rq_stats *stats)
1791{
1792	struct net_device *dev = vi->dev;
1793	struct sk_buff *skb;
1794	struct virtio_net_common_hdr *hdr;
1795
1796	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1797		pr_debug("%s: short packet %i\n", dev->name, len);
1798		DEV_STATS_INC(dev, rx_length_errors);
1799		virtnet_rq_free_buf(vi, rq, buf);
1800		return;
1801	}
1802
1803	if (vi->mergeable_rx_bufs)
1804		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1805					stats);
1806	else if (vi->big_packets)
1807		skb = receive_big(dev, vi, rq, buf, len, stats);
1808	else
1809		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1810
1811	if (unlikely(!skb))
1812		return;
1813
1814	hdr = skb_vnet_common_hdr(skb);
1815	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1816		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
1817
1818	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
1819		skb->ip_summed = CHECKSUM_UNNECESSARY;
1820
1821	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1822				  virtio_is_little_endian(vi->vdev))) {
1823		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1824				     dev->name, hdr->hdr.gso_type,
1825				     hdr->hdr.gso_size);
1826		goto frame_err;
1827	}
1828
1829	skb_record_rx_queue(skb, vq2rxq(rq->vq));
1830	skb->protocol = eth_type_trans(skb, dev);
1831	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1832		 ntohs(skb->protocol), skb->len, skb->pkt_type);
1833
1834	napi_gro_receive(&rq->napi, skb);
1835	return;
1836
1837frame_err:
1838	DEV_STATS_INC(dev, rx_frame_errors);
1839	dev_kfree_skb(skb);
1840}
1841
1842/* Unlike mergeable buffers, all buffers are allocated to the
1843 * same size, except for the headroom. For this reason we do
1844 * not need to use mergeable_len_to_ctx here - it is enough
1845 * to store the headroom as the context ignoring the truesize.
1846 */
1847static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1848			     gfp_t gfp)
1849{
1850	char *buf;
1851	unsigned int xdp_headroom = virtnet_get_headroom(vi);
1852	void *ctx = (void *)(unsigned long)xdp_headroom;
1853	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
1854	int err;
1855
1856	len = SKB_DATA_ALIGN(len) +
1857	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1858
1859	buf = virtnet_rq_alloc(rq, len, gfp);
1860	if (unlikely(!buf))
1861		return -ENOMEM;
1862
1863	virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
1864			       vi->hdr_len + GOOD_PACKET_LEN);
1865
1866	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1867	if (err < 0) {
1868		if (rq->do_dma)
1869			virtnet_rq_unmap(rq, buf, 0);
1870		put_page(virt_to_head_page(buf));
1871	}
1872
1873	return err;
1874}
1875
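/* Post a "big packets" receive buffer: a chain of pages linked through
 * page->private, with the virtio-net header carried in its own sg entry.
 */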
1876static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1877			   gfp_t gfp)
1878{
1879	struct page *first, *list = NULL;
1880	char *p;
1881	int i, err, offset;
1882
1883	sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
1884
1885	/* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
1886	for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
1887		first = get_a_page(rq, gfp);
1888		if (!first) {
1889			if (list)
1890				give_pages(rq, list);
1891			return -ENOMEM;
1892		}
1893		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
1894
1895		/* chain new page in list head to match sg */
1896		first->private = (unsigned long)list;
1897		list = first;
1898	}
1899
1900	first = get_a_page(rq, gfp);
1901	if (!first) {
1902		give_pages(rq, list);
1903		return -ENOMEM;
1904	}
1905	p = page_address(first);
1906
1907	/* rq->sg[0], rq->sg[1] share the same page */
1908	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
1909	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
1910
1911	/* rq->sg[1] for data packet, from offset */
1912	offset = sizeof(struct padded_vnet_hdr);
1913	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
1914
1915	/* chain first in list head */
1916	first->private = (unsigned long)list;
1917	err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
1918				  first, gfp);
1919	if (err < 0)
1920		give_pages(rq, first);
1921
1922	return err;
1923}
1924
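/* Choose the buffer length for a mergeable receive buffer from the EWMA
 * packet size, clamped to [min_buf_len, PAGE_SIZE - hdr_len]. With XDP
 * headroom reserved (room != 0) the rest of the page is used instead.
 */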
1925static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
1926					  struct ewma_pkt_len *avg_pkt_len,
1927					  unsigned int room)
1928{
1929	struct virtnet_info *vi = rq->vq->vdev->priv;
1930	const size_t hdr_len = vi->hdr_len;
1931	unsigned int len;
1932
1933	if (room)
1934		return PAGE_SIZE - room;
1935
1936	len = hdr_len +	clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1937				rq->min_buf_len, PAGE_SIZE - hdr_len);
1938
1939	return ALIGN(len, L1_CACHE_BYTES);
1940}
1941
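/* Post a mergeable receive buffer carved out of the per-queue page frag,
 * reserving XDP headroom and tailroom when an XDP program is attached.
 */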
1942static int add_recvbuf_mergeable(struct virtnet_info *vi,
1943				 struct receive_queue *rq, gfp_t gfp)
1944{
1945	struct page_frag *alloc_frag = &rq->alloc_frag;
1946	unsigned int headroom = virtnet_get_headroom(vi);
1947	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1948	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1949	unsigned int len, hole;
1950	void *ctx;
1951	char *buf;
1952	int err;
1953
1954	/* Extra tailroom is needed to satisfy XDP's assumption. This
1955	 * means rx frag coalescing won't work, but since we've
1956	 * disabled GSO for XDP it won't be a big issue.
1957	 */
1958	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1959
1960	buf = virtnet_rq_alloc(rq, len + room, gfp);
1961	if (unlikely(!buf))
1962		return -ENOMEM;
1963
1964	buf += headroom; /* advance address leaving hole at front of pkt */
1965	hole = alloc_frag->size - alloc_frag->offset;
1966	if (hole < len + room) {
1967		/* To avoid internal fragmentation, if there is very likely not
1968		 * enough space for another buffer, add the remaining space to
1969		 * the current buffer.
1970		 * XDP core assumes that frame_size of xdp_buff and the length
1971		 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
1972		 */
1973		if (!headroom)
1974			len += hole;
1975		alloc_frag->offset += hole;
1976	}
1977
1978	virtnet_rq_init_one_sg(rq, buf, len);
1979
1980	ctx = mergeable_len_to_ctx(len + room, headroom);
1981	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1982	if (err < 0) {
1983		if (rq->do_dma)
1984			virtnet_rq_unmap(rq, buf, 0);
1985		put_page(virt_to_head_page(buf));
1986	}
1987
1988	return err;
1989}
1990
1991/*
1992 * Returns false if we couldn't fill entirely (OOM).
1993 *
1994 * Normally run in the receive path, but can also be run from ndo_open
1995 * before we're receiving packets, or from refill_work which is
1996 * careful to disable receiving (using napi_disable).
1997 */
1998static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1999			  gfp_t gfp)
2000{
2001	int err;
2002	bool oom;
2003
2004	do {
2005		if (vi->mergeable_rx_bufs)
2006			err = add_recvbuf_mergeable(vi, rq, gfp);
2007		else if (vi->big_packets)
2008			err = add_recvbuf_big(vi, rq, gfp);
2009		else
2010			err = add_recvbuf_small(vi, rq, gfp);
2011
2012		oom = err == -ENOMEM;
2013		if (err)
2014			break;
2015	} while (rq->vq->num_free);
2016	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2017		unsigned long flags;
2018
2019		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2020		u64_stats_inc(&rq->stats.kicks);
2021		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2022	}
2023
2024	return !oom;
2025}
2026
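/* Receive virtqueue callback: count the interrupt and schedule NAPI. */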
2027static void skb_recv_done(struct virtqueue *rvq)
2028{
2029	struct virtnet_info *vi = rvq->vdev->priv;
2030	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2031
2032	rq->calls++;
2033	virtqueue_napi_schedule(&rq->napi, rvq);
2034}
2035
2036static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
2037{
2038	napi_enable(napi);
2039
2040	/* If all buffers were filled by the other side before we enabled NAPI,
2041	 * we won't get another interrupt, so process any outstanding packets now.
2042	 * Call local_bh_enable after to trigger softIRQ processing.
2043	 */
2044	local_bh_disable();
2045	virtqueue_napi_schedule(napi, vq);
2046	local_bh_enable();
2047}
2048
2049static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2050				   struct virtqueue *vq,
2051				   struct napi_struct *napi)
2052{
2053	if (!napi->weight)
2054		return;
2055
2056	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2057	 * enable the feature if this is likely affine with the transmit path.
2058	 */
2059	if (!vi->affinity_hint_set) {
2060		napi->weight = 0;
2061		return;
2062	}
2063
2064	return virtnet_napi_enable(vq, napi);
2065}
2066
2067static void virtnet_napi_tx_disable(struct napi_struct *napi)
2068{
2069	if (napi->weight)
2070		napi_disable(napi);
2071}
2072
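/* Deferred ring refill: with NAPI disabled on each receive queue, retry
 * try_fill_recv() with GFP_KERNEL and reschedule if the ring is still empty.
 */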
2073static void refill_work(struct work_struct *work)
2074{
2075	struct virtnet_info *vi =
2076		container_of(work, struct virtnet_info, refill.work);
2077	bool still_empty;
2078	int i;
2079
2080	for (i = 0; i < vi->curr_queue_pairs; i++) {
2081		struct receive_queue *rq = &vi->rq[i];
2082
2083		napi_disable(&rq->napi);
2084		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2085		virtnet_napi_enable(rq->vq, &rq->napi);
2086
2087		/* In theory, this can happen: if we don't get any buffers in
2088		 * we will *never* try to fill again.
2089		 */
2090		if (still_empty)
2091			schedule_delayed_work(&vi->refill, HZ/2);
2092	}
2093}
2094
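/* NAPI receive loop: pull up to @budget completed buffers off the ring,
 * top the ring up if it is running low and fold the local stat counters
 * into the per-queue stats. Returns the number of packets processed.
 */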
2095static int virtnet_receive(struct receive_queue *rq, int budget,
2096			   unsigned int *xdp_xmit)
2097{
2098	struct virtnet_info *vi = rq->vq->vdev->priv;
2099	struct virtnet_rq_stats stats = {};
2100	unsigned int len;
2101	int packets = 0;
2102	void *buf;
2103	int i;
2104
2105	if (!vi->big_packets || vi->mergeable_rx_bufs) {
2106		void *ctx;
2107
2108		while (packets < budget &&
2109		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2110			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
2111			packets++;
2112		}
2113	} else {
2114		while (packets < budget &&
2115		       (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
2116			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
2117			packets++;
2118		}
2119	}
2120
2121	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
2122		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2123			spin_lock(&vi->refill_lock);
2124			if (vi->refill_enabled)
2125				schedule_delayed_work(&vi->refill, 0);
2126			spin_unlock(&vi->refill_lock);
2127		}
2128	}
2129
2130	u64_stats_set(&stats.packets, packets);
2131	u64_stats_update_begin(&rq->stats.syncp);
2132	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
2133		size_t offset = virtnet_rq_stats_desc[i].offset;
2134		u64_stats_t *item, *src;
2135
2136		item = (u64_stats_t *)((u8 *)&rq->stats + offset);
2137		src = (u64_stats_t *)((u8 *)&stats + offset);
2138		u64_stats_add(item, u64_stats_read(src));
2139	}
2140	u64_stats_update_end(&rq->stats.syncp);
2141
2142	return packets;
2143}
2144
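/* Opportunistically reclaim completed transmit buffers from the paired
 * send queue while polling the receive queue.
 */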
2145static void virtnet_poll_cleantx(struct receive_queue *rq)
2146{
2147	struct virtnet_info *vi = rq->vq->vdev->priv;
2148	unsigned int index = vq2rxq(rq->vq);
2149	struct send_queue *sq = &vi->sq[index];
2150	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
2151
2152	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2153		return;
2154
2155	if (__netif_tx_trylock(txq)) {
2156		if (sq->reset) {
2157			__netif_tx_unlock(txq);
2158			return;
2159		}
2160
2161		do {
2162			virtqueue_disable_cb(sq->vq);
2163			free_old_xmit_skbs(sq, true);
2164		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2165
2166		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2167			netif_tx_wake_queue(txq);
2168
2169		__netif_tx_unlock(txq);
2170	}
2171}
2172
2173static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
2174{
2175	struct dim_sample cur_sample = {};
2176
2177	if (!rq->packets_in_napi)
2178		return;
2179
2180	u64_stats_update_begin(&rq->stats.syncp);
2181	dim_update_sample(rq->calls,
2182			  u64_stats_read(&rq->stats.packets),
2183			  u64_stats_read(&rq->stats.bytes),
2184			  &cur_sample);
2185	u64_stats_update_end(&rq->stats.syncp);
2186
2187	net_dim(&rq->dim, cur_sample);
2188	rq->packets_in_napi = 0;
2189}
2190
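/* RX NAPI poll handler: clean the paired TX queue, receive packets, flush
 * any XDP redirects, kick the XDP transmit queue if needed and update
 * receive interrupt moderation once NAPI completes.
 */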
2191static int virtnet_poll(struct napi_struct *napi, int budget)
2192{
2193	struct receive_queue *rq =
2194		container_of(napi, struct receive_queue, napi);
2195	struct virtnet_info *vi = rq->vq->vdev->priv;
2196	struct send_queue *sq;
2197	unsigned int received;
2198	unsigned int xdp_xmit = 0;
2199	bool napi_complete;
2200
2201	virtnet_poll_cleantx(rq);
2202
2203	received = virtnet_receive(rq, budget, &xdp_xmit);
2204	rq->packets_in_napi += received;
2205
2206	if (xdp_xmit & VIRTIO_XDP_REDIR)
2207		xdp_do_flush();
2208
2209	/* Out of packets? */
2210	if (received < budget) {
2211		napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
2212		if (napi_complete && rq->dim_enabled)
2213			virtnet_rx_dim_update(vi, rq);
2214	}
2215
2216	if (xdp_xmit & VIRTIO_XDP_TX) {
2217		sq = virtnet_xdp_get_sq(vi);
2218		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2219			u64_stats_update_begin(&sq->stats.syncp);
2220			u64_stats_inc(&sq->stats.kicks);
2221			u64_stats_update_end(&sq->stats.syncp);
2222		}
2223		virtnet_xdp_put_sq(vi, sq);
2224	}
2225
2226	return received;
2227}
2228
2229static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
2230{
2231	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
2232	napi_disable(&vi->rq[qp_index].napi);
2233	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2234}
2235
2236static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
2237{
2238	struct net_device *dev = vi->dev;
2239	int err;
2240
2241	err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
2242			       vi->rq[qp_index].napi.napi_id);
2243	if (err < 0)
2244		return err;
2245
2246	err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
2247					 MEM_TYPE_PAGE_SHARED, NULL);
2248	if (err < 0)
2249		goto err_xdp_reg_mem_model;
2250
2251	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
2252	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
2253
2254	return 0;
2255
2256err_xdp_reg_mem_model:
2257	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2258	return err;
2259}
2260
2261static int virtnet_open(struct net_device *dev)
2262{
2263	struct virtnet_info *vi = netdev_priv(dev);
2264	int i, err;
2265
2266	enable_delayed_refill(vi);
2267
2268	for (i = 0; i < vi->max_queue_pairs; i++) {
2269		if (i < vi->curr_queue_pairs)
2270			/* Make sure we have some buffers: if oom use wq. */
2271			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2272				schedule_delayed_work(&vi->refill, 0);
2273
2274		err = virtnet_enable_queue_pair(vi, i);
2275		if (err < 0)
2276			goto err_enable_qp;
2277	}
2278
2279	return 0;
2280
2281err_enable_qp:
2282	disable_delayed_refill(vi);
2283	cancel_delayed_work_sync(&vi->refill);
2284
2285	for (i--; i >= 0; i--) {
2286		virtnet_disable_queue_pair(vi, i);
2287		cancel_work_sync(&vi->rq[i].dim.work);
2288	}
2289
2290	return err;
2291}
2292
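/* TX NAPI poll handler: reclaim completed skbs, wake the queue when enough
 * descriptors are free and re-arm the virtqueue callback.
 */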
2293static int virtnet_poll_tx(struct napi_struct *napi, int budget)
2294{
2295	struct send_queue *sq = container_of(napi, struct send_queue, napi);
2296	struct virtnet_info *vi = sq->vq->vdev->priv;
2297	unsigned int index = vq2txq(sq->vq);
2298	struct netdev_queue *txq;
2299	int opaque;
2300	bool done;
2301
2302	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2303		/* We don't need to enable cb for XDP */
2304		napi_complete_done(napi, 0);
2305		return 0;
2306	}
2307
2308	txq = netdev_get_tx_queue(vi->dev, index);
2309	__netif_tx_lock(txq, raw_smp_processor_id());
2310	virtqueue_disable_cb(sq->vq);
2311	free_old_xmit_skbs(sq, true);
2312
2313	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2314		netif_tx_wake_queue(txq);
2315
2316	opaque = virtqueue_enable_cb_prepare(sq->vq);
2317
2318	done = napi_complete_done(napi, 0);
2319
2320	if (!done)
2321		virtqueue_disable_cb(sq->vq);
2322
2323	__netif_tx_unlock(txq);
2324
2325	if (done) {
2326		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
2327			if (napi_schedule_prep(napi)) {
2328				__netif_tx_lock(txq, raw_smp_processor_id());
2329				virtqueue_disable_cb(sq->vq);
2330				__netif_tx_unlock(txq);
2331				__napi_schedule(napi);
2332			}
2333		}
2334	}
2335
2336	return 0;
2337}
2338
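/* Build the scatterlist for one skb, prepending the virtio-net header
 * in-line when the device allows it, and add it to the send virtqueue.
 */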
2339static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2340{
2341	struct virtio_net_hdr_mrg_rxbuf *hdr;
2342	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
2343	struct virtnet_info *vi = sq->vq->vdev->priv;
2344	int num_sg;
2345	unsigned hdr_len = vi->hdr_len;
2346	bool can_push;
2347
2348	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2349
2350	can_push = vi->any_header_sg &&
2351		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
2352		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
2353	/* Even if we can, don't push here yet as this would skew
2354	 * csum_start offset below. */
2355	if (can_push)
2356		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
2357	else
2358		hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
2359
2360	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
2361				    virtio_is_little_endian(vi->vdev), false,
2362				    0))
2363		return -EPROTO;
2364
2365	if (vi->mergeable_rx_bufs)
2366		hdr->num_buffers = 0;
2367
2368	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2369	if (can_push) {
2370		__skb_push(skb, hdr_len);
2371		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2372		if (unlikely(num_sg < 0))
2373			return num_sg;
2374		/* Pull header back to avoid skew in tx bytes calculations. */
2375		__skb_pull(skb, hdr_len);
2376	} else {
2377		sg_set_buf(sq->sg, hdr, hdr_len);
2378		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2379		if (unlikely(num_sg < 0))
2380			return num_sg;
2381		num_sg++;
2382	}
2383	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
2384}
2385
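/* ndo_start_xmit: reclaim old buffers, queue the skb on the per-queue
 * virtqueue and kick the device unless more packets are pending.
 */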
2386static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
2387{
2388	struct virtnet_info *vi = netdev_priv(dev);
2389	int qnum = skb_get_queue_mapping(skb);
2390	struct send_queue *sq = &vi->sq[qnum];
2391	int err;
2392	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
2393	bool kick = !netdev_xmit_more();
2394	bool use_napi = sq->napi.weight;
 
2395
2396	/* Free up any pending old buffers before queueing new ones. */
2397	do {
2398		if (use_napi)
2399			virtqueue_disable_cb(sq->vq);
2400
2401		free_old_xmit_skbs(sq, false);
2402
2403	} while (use_napi && kick &&
2404	       unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2405
2406	/* timestamp packet in software */
2407	skb_tx_timestamp(skb);
2408
2409	/* Try to transmit */
2410	err = xmit_skb(sq, skb);
2411
2412	/* This should not happen! */
2413	if (unlikely(err)) {
2414		DEV_STATS_INC(dev, tx_fifo_errors);
2415		if (net_ratelimit())
2416			dev_warn(&dev->dev,
2417				 "Unexpected TXQ (%d) queue failure: %d\n",
2418				 qnum, err);
2419		DEV_STATS_INC(dev, tx_dropped);
2420		dev_kfree_skb_any(skb);
2421		return NETDEV_TX_OK;
2422	}
2423
2424	/* Don't wait up for transmitted skbs to be freed. */
2425	if (!use_napi) {
2426		skb_orphan(skb);
2427		nf_reset_ct(skb);
2428	}
2429
2430	check_sq_full_and_disable(vi, dev, sq);
2431
2432	if (kick || netif_xmit_stopped(txq)) {
2433		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2434			u64_stats_update_begin(&sq->stats.syncp);
2435			u64_stats_inc(&sq->stats.kicks);
2436			u64_stats_update_end(&sq->stats.syncp);
2437		}
2438	}
2439
2440	return NETDEV_TX_OK;
2441}
2442
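/* Resize the receive ring to ring_num entries, pausing NAPI around the
 * virtqueue_resize() and refilling the ring afterwards.
 */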
2443static int virtnet_rx_resize(struct virtnet_info *vi,
2444			     struct receive_queue *rq, u32 ring_num)
2445{
2446	bool running = netif_running(vi->dev);
2447	int err, qindex;
2448
2449	qindex = rq - vi->rq;
2450
2451	if (running) {
2452		napi_disable(&rq->napi);
2453		cancel_work_sync(&rq->dim.work);
2454	}
 
2455
2456	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
2457	if (err)
2458		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
2459
2460	if (!try_fill_recv(vi, rq, GFP_KERNEL))
2461		schedule_delayed_work(&vi->refill, 0);
2462
2463	if (running)
2464		virtnet_napi_enable(rq->vq, &rq->napi);
2465	return err;
2466}
2467
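/* Resize the transmit ring to ring_num entries. The queue is stopped and
 * sq->reset is set so that the RX-side TX cleaning leaves it alone while
 * the virtqueue is being resized.
 */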
2468static int virtnet_tx_resize(struct virtnet_info *vi,
2469			     struct send_queue *sq, u32 ring_num)
2470{
2471	bool running = netif_running(vi->dev);
2472	struct netdev_queue *txq;
2473	int err, qindex;
2474
2475	qindex = sq - vi->sq;
2476
2477	if (running)
2478		virtnet_napi_tx_disable(&sq->napi);
2479
2480	txq = netdev_get_tx_queue(vi->dev, qindex);
2481
2482	/* 1. wait for all xmit to complete
2483	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
2484	 */
2485	__netif_tx_lock_bh(txq);
2486
2487	/* Prevent rx poll from accessing sq. */
2488	sq->reset = true;
2489
2490	/* Prevent the upper layer from trying to send packets. */
2491	netif_stop_subqueue(vi->dev, qindex);
2492
2493	__netif_tx_unlock_bh(txq);
 
2494
2495	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2496	if (err)
2497		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
2498
2499	__netif_tx_lock_bh(txq);
2500	sq->reset = false;
2501	netif_tx_wake_queue(txq);
2502	__netif_tx_unlock_bh(txq);
2503
2504	if (running)
2505		virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2506	return err;
2507}
2508
2509/*
2510 * Send command via the control virtqueue and check status.  Commands
2511 * supported by the hypervisor, as indicated by feature bits, should
2512 * never fail unless improperly formatted.
2513 */
2514static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2515				 struct scatterlist *out)
2516{
2517	struct scatterlist *sgs[4], hdr, stat;
2518	unsigned out_num = 0, tmp;
2519	int ret;
2520
2521	/* Caller should know better */
2522	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
2523
 
2524	vi->ctrl->status = ~0;
2525	vi->ctrl->hdr.class = class;
2526	vi->ctrl->hdr.cmd = cmd;
2527	/* Add header */
2528	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2529	sgs[out_num++] = &hdr;
2530
2531	if (out)
2532		sgs[out_num++] = out;
2533
2534	/* Add return status. */
2535	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2536	sgs[out_num] = &stat;
2537
2538	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
2539	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
2540	if (ret < 0) {
2541		dev_warn(&vi->vdev->dev,
2542			 "Failed to add sgs for command vq: %d\n", ret);
 
2543		return false;
2544	}
2545
2546	if (unlikely(!virtqueue_kick(vi->cvq)))
2547		return vi->ctrl->status == VIRTIO_NET_OK;
2548
2549	/* Spin for a response, the kick causes an ioport write, trapping
2550	 * into the hypervisor, so the request should be handled immediately.
2551	 */
2552	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2553	       !virtqueue_is_broken(vi->cvq))
2554		cpu_relax();
 
2555
2556	return vi->ctrl->status == VIRTIO_NET_OK;
2557}
2558
2559static int virtnet_set_mac_address(struct net_device *dev, void *p)
2560{
2561	struct virtnet_info *vi = netdev_priv(dev);
2562	struct virtio_device *vdev = vi->vdev;
2563	int ret;
2564	struct sockaddr *addr;
2565	struct scatterlist sg;
2566
2567	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2568		return -EOPNOTSUPP;
2569
2570	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
2571	if (!addr)
2572		return -ENOMEM;
2573
2574	ret = eth_prepare_mac_addr_change(dev, addr);
2575	if (ret)
2576		goto out;
2577
2578	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
2579		sg_init_one(&sg, addr->sa_data, dev->addr_len);
2580		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2581					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
2582			dev_warn(&vdev->dev,
2583				 "Failed to set mac address by vq command.\n");
2584			ret = -EINVAL;
2585			goto out;
2586		}
2587	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
2588		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2589		unsigned int i;
2590
2591		/* Naturally, this has an atomicity problem. */
2592		for (i = 0; i < dev->addr_len; i++)
2593			virtio_cwrite8(vdev,
2594				       offsetof(struct virtio_net_config, mac) +
2595				       i, addr->sa_data[i]);
2596	}
2597
2598	eth_commit_mac_addr_change(dev, p);
2599	ret = 0;
2600
2601out:
2602	kfree(addr);
2603	return ret;
2604}
2605
2606static void virtnet_stats(struct net_device *dev,
2607			  struct rtnl_link_stats64 *tot)
2608{
2609	struct virtnet_info *vi = netdev_priv(dev);
2610	unsigned int start;
2611	int i;
2612
2613	for (i = 0; i < vi->max_queue_pairs; i++) {
2614		u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
2615		struct receive_queue *rq = &vi->rq[i];
2616		struct send_queue *sq = &vi->sq[i];
2617
2618		do {
2619			start = u64_stats_fetch_begin(&sq->stats.syncp);
2620			tpackets = u64_stats_read(&sq->stats.packets);
2621			tbytes   = u64_stats_read(&sq->stats.bytes);
2622			terrors  = u64_stats_read(&sq->stats.tx_timeouts);
2623		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
2624
2625		do {
2626			start = u64_stats_fetch_begin(&rq->stats.syncp);
2627			rpackets = u64_stats_read(&rq->stats.packets);
2628			rbytes   = u64_stats_read(&rq->stats.bytes);
2629			rdrops   = u64_stats_read(&rq->stats.drops);
2630		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
2631
2632		tot->rx_packets += rpackets;
2633		tot->tx_packets += tpackets;
2634		tot->rx_bytes   += rbytes;
2635		tot->tx_bytes   += tbytes;
2636		tot->rx_dropped += rdrops;
2637		tot->tx_errors  += terrors;
2638	}
2639
2640	tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
2641	tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
2642	tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
2643	tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
2644}
2645
2646static void virtnet_ack_link_announce(struct virtnet_info *vi)
2647{
2648	rtnl_lock();
2649	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2650				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
2651		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2652	rtnl_unlock();
2653}
2654
2655static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2656{
2657	struct scatterlist sg;
 
2658	struct net_device *dev = vi->dev;
 
2659
2660	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2661		return 0;
2662
2663	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
2664	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
2665
2666	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2667				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2668		dev_warn(&dev->dev, "Failed to set num of queue pairs to %d\n",
2669			 queue_pairs);
2670		return -EINVAL;
2671	} else {
2672		vi->curr_queue_pairs = queue_pairs;
2673		/* virtnet_open() will refill when device is going to up. */
2674		if (dev->flags & IFF_UP)
2675			schedule_delayed_work(&vi->refill, 0);
2676	}
2677
2678	return 0;
2679}
2680
2681static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2682{
2683	int err;
2684
2685	rtnl_lock();
2686	err = _virtnet_set_queues(vi, queue_pairs);
2687	rtnl_unlock();
2688	return err;
2689}
2690
2691static int virtnet_close(struct net_device *dev)
2692{
2693	struct virtnet_info *vi = netdev_priv(dev);
2694	int i;
2695
2696	/* Make sure NAPI doesn't schedule refill work */
2697	disable_delayed_refill(vi);
2698	/* Make sure refill_work doesn't re-enable napi! */
2699	cancel_delayed_work_sync(&vi->refill);
2700
2701	for (i = 0; i < vi->max_queue_pairs; i++) {
2702		virtnet_disable_queue_pair(vi, i);
2703		cancel_work_sync(&vi->rq[i].dim.work);
2704	}
2705
2706	return 0;
2707}
2708
2709static void virtnet_set_rx_mode(struct net_device *dev)
2710{
2711	struct virtnet_info *vi = netdev_priv(dev);
2712	struct scatterlist sg[2];
2713	struct virtio_net_ctrl_mac *mac_data;
2714	struct netdev_hw_addr *ha;
2715	int uc_count;
2716	int mc_count;
2717	void *buf;
2718	int i;
2719
2720	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
2721	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
2722		return;
2723
2724	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
2725	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2726
2727	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
 
2728
2729	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2730				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
2731		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
2732			 vi->ctrl->promisc ? "en" : "dis");
2733
2734	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
 
2735
2736	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2737				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2738		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
2739			 vi->ctrl->allmulti ? "en" : "dis");
2740
2741	uc_count = netdev_uc_count(dev);
2742	mc_count = netdev_mc_count(dev);
2743	/* MAC filter - use one buffer for both lists */
2744	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2745		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
2746	mac_data = buf;
2747	if (!buf)
2748		return;
 
2749
2750	sg_init_table(sg, 2);
2751
2752	/* Store the unicast list and count in the front of the buffer */
2753	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2754	i = 0;
2755	netdev_for_each_uc_addr(ha, dev)
2756		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2757
2758	sg_set_buf(&sg[0], mac_data,
2759		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2760
2761	/* multicast list and count fill the end */
2762	mac_data = (void *)&mac_data->macs[uc_count][0];
2763
2764	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2765	i = 0;
2766	netdev_for_each_mc_addr(ha, dev)
2767		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2768
2769	sg_set_buf(&sg[1], mac_data,
2770		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2771
2772	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2773				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
2774		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2775
2776	kfree(buf);
2777}
2778
2779static int virtnet_vlan_rx_add_vid(struct net_device *dev,
2780				   __be16 proto, u16 vid)
2781{
2782	struct virtnet_info *vi = netdev_priv(dev);
 
2783	struct scatterlist sg;
2784
2785	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2786	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2787
2788	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2789				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
2790		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
2791	return 0;
2792}
2793
2794static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
2795				    __be16 proto, u16 vid)
2796{
2797	struct virtnet_info *vi = netdev_priv(dev);
 
2798	struct scatterlist sg;
2799
2800	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2801	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2802
2803	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2804				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
2805		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
2806	return 0;
2807}
2808
2809static void virtnet_clean_affinity(struct virtnet_info *vi)
2810{
2811	int i;
2812
2813	if (vi->affinity_hint_set) {
2814		for (i = 0; i < vi->max_queue_pairs; i++) {
2815			virtqueue_set_affinity(vi->rq[i].vq, NULL);
2816			virtqueue_set_affinity(vi->sq[i].vq, NULL);
2817		}
2818
2819		vi->affinity_hint_set = false;
2820	}
2821}
2822
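/* Spread the online CPUs across the active queue pairs and program both
 * the virtqueue affinity hints and XPS accordingly.
 */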
2823static void virtnet_set_affinity(struct virtnet_info *vi)
2824{
2825	cpumask_var_t mask;
2826	int stragglers;
2827	int group_size;
2828	int i, j, cpu;
2829	int num_cpu;
2830	int stride;
2831
2832	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2833		virtnet_clean_affinity(vi);
2834		return;
2835	}
2836
2837	num_cpu = num_online_cpus();
2838	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2839	stragglers = num_cpu >= vi->curr_queue_pairs ?
2840			num_cpu % vi->curr_queue_pairs :
2841			0;
2842	cpu = cpumask_first(cpu_online_mask);
2843
2844	for (i = 0; i < vi->curr_queue_pairs; i++) {
2845		group_size = stride + (i < stragglers ? 1 : 0);
2846
2847		for (j = 0; j < group_size; j++) {
2848			cpumask_set_cpu(cpu, mask);
2849			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
2850						nr_cpu_ids, false);
2851		}
2852		virtqueue_set_affinity(vi->rq[i].vq, mask);
2853		virtqueue_set_affinity(vi->sq[i].vq, mask);
2854		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2855		cpumask_clear(mask);
2856	}
2857
2858	vi->affinity_hint_set = true;
2859	free_cpumask_var(mask);
2860}
2861
2862static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
2863{
2864	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2865						   node);
2866	virtnet_set_affinity(vi);
2867	return 0;
2868}
2869
2870static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
2871{
2872	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2873						   node_dead);
2874	virtnet_set_affinity(vi);
2875	return 0;
2876}
2877
2878static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
2879{
2880	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2881						   node);
2882
2883	virtnet_clean_affinity(vi);
2884	return 0;
2885}
2886
2887static enum cpuhp_state virtionet_online;
2888
2889static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2890{
2891	int ret;
2892
2893	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2894	if (ret)
2895		return ret;
2896	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2897					       &vi->node_dead);
2898	if (!ret)
2899		return ret;
2900	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2901	return ret;
2902}
2903
2904static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2905{
2906	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2907	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2908					    &vi->node_dead);
2909}
2910
2911static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2912					 u16 vqn, u32 max_usecs, u32 max_packets)
2913{
 
2914	struct scatterlist sgs;
2915
2916	vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
2917	vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
2918	vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
2919	sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
2920
2921	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
2922				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
2923				  &sgs))
2924		return -EINVAL;
2925
2926	return 0;
2927}
2928
2929static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2930					    u16 queue, u32 max_usecs,
2931					    u32 max_packets)
2932{
2933	int err;
2934
2935	err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
2936					    max_usecs, max_packets);
2937	if (err)
2938		return err;
2939
2940	vi->rq[queue].intr_coal.max_usecs = max_usecs;
2941	vi->rq[queue].intr_coal.max_packets = max_packets;
2942
2943	return 0;
2944}
2945
2946static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2947					    u16 queue, u32 max_usecs,
2948					    u32 max_packets)
2949{
2950	int err;
2951
2952	err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
2953					    max_usecs, max_packets);
2954	if (err)
2955		return err;
2956
2957	vi->sq[queue].intr_coal.max_usecs = max_usecs;
2958	vi->sq[queue].intr_coal.max_packets = max_packets;
2959
2960	return 0;
2961}
2962
2963static void virtnet_get_ringparam(struct net_device *dev,
2964				  struct ethtool_ringparam *ring,
2965				  struct kernel_ethtool_ringparam *kernel_ring,
2966				  struct netlink_ext_ack *extack)
2967{
2968	struct virtnet_info *vi = netdev_priv(dev);
2969
2970	ring->rx_max_pending = vi->rq[0].vq->num_max;
2971	ring->tx_max_pending = vi->sq[0].vq->num_max;
2972	ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2973	ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2974}
2975
2976static int virtnet_set_ringparam(struct net_device *dev,
2977				 struct ethtool_ringparam *ring,
2978				 struct kernel_ethtool_ringparam *kernel_ring,
2979				 struct netlink_ext_ack *extack)
2980{
2981	struct virtnet_info *vi = netdev_priv(dev);
2982	u32 rx_pending, tx_pending;
2983	struct receive_queue *rq;
2984	struct send_queue *sq;
2985	int i, err;
2986
2987	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2988		return -EINVAL;
2989
2990	rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2991	tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2992
2993	if (ring->rx_pending == rx_pending &&
2994	    ring->tx_pending == tx_pending)
2995		return 0;
2996
2997	if (ring->rx_pending > vi->rq[0].vq->num_max)
2998		return -EINVAL;
2999
3000	if (ring->tx_pending > vi->sq[0].vq->num_max)
3001		return -EINVAL;
3002
3003	for (i = 0; i < vi->max_queue_pairs; i++) {
3004		rq = vi->rq + i;
3005		sq = vi->sq + i;
3006
3007		if (ring->tx_pending != tx_pending) {
3008			err = virtnet_tx_resize(vi, sq, ring->tx_pending);
3009			if (err)
3010				return err;
3011
3012			/* Upon disabling and re-enabling a transmit virtqueue, the device must
3013			 * set the coalescing parameters of the virtqueue to those configured
3014			 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
3015			 * did not set any TX coalescing parameters, to 0.
3016			 */
3017			err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
3018							       vi->intr_coal_tx.max_usecs,
3019							       vi->intr_coal_tx.max_packets);
3020			if (err)
3021				return err;
3022		}
3023
3024		if (ring->rx_pending != rx_pending) {
3025			err = virtnet_rx_resize(vi, rq, ring->rx_pending);
3026			if (err)
3027				return err;
3028
3029			/* The reason is the same as for the transmit virtqueue reset */
 
3030			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
3031							       vi->intr_coal_rx.max_usecs,
3032							       vi->intr_coal_rx.max_packets);
3033			if (err)
 
3034				return err;
3035		}
3036	}
3037
3038	return 0;
3039}
3040
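/* Push the current RSS/hash configuration (header fields, indirection
 * table, max_tx_vq and hash key) to the device over the control virtqueue.
 */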
3041static bool virtnet_commit_rss_command(struct virtnet_info *vi)
3042{
3043	struct net_device *dev = vi->dev;
3044	struct scatterlist sgs[4];
3045	unsigned int sg_buf_size;
3046
3047	/* prepare sgs */
3048	sg_init_table(sgs, 4);
3049
3050	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
3051	sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
3052
3053	sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
3054	sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
3055
3056	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
3057			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
3058	sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
3059
3060	sg_buf_size = vi->rss_key_size;
3061	sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
3062
3063	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
3064				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
3065				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
3066		dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
3067		return false;
3068	}
3069	return true;
3070}
3071
3072static void virtnet_init_default_rss(struct virtnet_info *vi)
3073{
3074	u32 indir_val = 0;
3075	int i = 0;
3076
3077	vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
3078	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
3079	vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
3080						? vi->rss_indir_table_size - 1 : 0;
3081	vi->ctrl->rss.unclassified_queue = 0;
3082
3083	for (; i < vi->rss_indir_table_size; ++i) {
3084		indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
3085		vi->ctrl->rss.indirection_table[i] = indir_val;
3086	}
3087
3088	vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
3089	vi->ctrl->rss.hash_key_length = vi->rss_key_size;
3090
3091	netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
3092}
3093
3094static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
3095{
3096	info->data = 0;
3097	switch (info->flow_type) {
3098	case TCP_V4_FLOW:
3099		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
3100			info->data = RXH_IP_SRC | RXH_IP_DST |
3101						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3102		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3103			info->data = RXH_IP_SRC | RXH_IP_DST;
3104		}
3105		break;
3106	case TCP_V6_FLOW:
3107		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
3108			info->data = RXH_IP_SRC | RXH_IP_DST |
3109						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3110		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3111			info->data = RXH_IP_SRC | RXH_IP_DST;
3112		}
3113		break;
3114	case UDP_V4_FLOW:
3115		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
3116			info->data = RXH_IP_SRC | RXH_IP_DST |
3117						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3118		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3119			info->data = RXH_IP_SRC | RXH_IP_DST;
3120		}
3121		break;
3122	case UDP_V6_FLOW:
3123		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
3124			info->data = RXH_IP_SRC | RXH_IP_DST |
3125						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3126		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3127			info->data = RXH_IP_SRC | RXH_IP_DST;
3128		}
3129		break;
3130	case IPV4_FLOW:
3131		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
3132			info->data = RXH_IP_SRC | RXH_IP_DST;
3133
3134		break;
3135	case IPV6_FLOW:
3136		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
3137			info->data = RXH_IP_SRC | RXH_IP_DST;
3138
3139		break;
3140	default:
3141		info->data = 0;
3142		break;
3143	}
3144}
3145
3146static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
3147{
3148	u32 new_hashtypes = vi->rss_hash_types_saved;
3149	bool is_disable = info->data & RXH_DISCARD;
3150	bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
3151
3152	/* supports only 'sd', 'sdfn' and 'r' */
3153	if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
3154		return false;
3155
3156	switch (info->flow_type) {
3157	case TCP_V4_FLOW:
3158		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
3159		if (!is_disable)
3160			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3161				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
3162		break;
3163	case UDP_V4_FLOW:
3164		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
3165		if (!is_disable)
3166			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3167				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
3168		break;
3169	case IPV4_FLOW:
3170		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3171		if (!is_disable)
3172			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3173		break;
3174	case TCP_V6_FLOW:
3175		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
3176		if (!is_disable)
3177			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3178				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
3179		break;
3180	case UDP_V6_FLOW:
3181		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
3182		if (!is_disable)
3183			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3184				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
3185		break;
3186	case IPV6_FLOW:
3187		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3188		if (!is_disable)
3189			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3190		break;
3191	default:
3192		/* unsupported flow */
3193		return false;
3194	}
3195
3196	/* if unsupported hashtype was set */
3197	if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
3198		return false;
3199
3200	if (new_hashtypes != vi->rss_hash_types_saved) {
3201		vi->rss_hash_types_saved = new_hashtypes;
3202		vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3203		if (vi->dev->features & NETIF_F_RXHASH)
3204			return virtnet_commit_rss_command(vi);
3205	}
3206
3207	return true;
3208}
3209
3210static void virtnet_get_drvinfo(struct net_device *dev,
3211				struct ethtool_drvinfo *info)
3212{
3213	struct virtnet_info *vi = netdev_priv(dev);
3214	struct virtio_device *vdev = vi->vdev;
3215
3216	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
3217	strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
3218	strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
3219
3220}
3221
3222/* TODO: Eliminate OOO packets during switching */
3223static int virtnet_set_channels(struct net_device *dev,
3224				struct ethtool_channels *channels)
3225{
3226	struct virtnet_info *vi = netdev_priv(dev);
3227	u16 queue_pairs = channels->combined_count;
3228	int err;
3229
3230	/* We don't support separate rx/tx channels.
3231	 * We don't allow setting 'other' channels.
3232	 */
3233	if (channels->rx_count || channels->tx_count || channels->other_count)
3234		return -EINVAL;
3235
3236	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
3237		return -EINVAL;
3238
3239	/* For now we don't support modifying channels while XDP is loaded;
3240	 * also, when XDP is loaded all RX queues have XDP programs, so we only
3241	 * need to check a single RX queue.
3242	 */
3243	if (vi->rq[0].xdp_prog)
3244		return -EINVAL;
3245
3246	cpus_read_lock();
3247	err = _virtnet_set_queues(vi, queue_pairs);
3248	if (err) {
3249		cpus_read_unlock();
3250		goto err;
3251	}
3252	virtnet_set_affinity(vi);
3253	cpus_read_unlock();
3254
3255	netif_set_real_num_tx_queues(dev, queue_pairs);
3256	netif_set_real_num_rx_queues(dev, queue_pairs);
3257 err:
3258	return err;
3259}
3260
3261static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3262{
3263	struct virtnet_info *vi = netdev_priv(dev);
3264	unsigned int i, j;
3265	u8 *p = data;
3266
3267	switch (stringset) {
3268	case ETH_SS_STATS:
3269		for (i = 0; i < vi->curr_queue_pairs; i++) {
3270			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
3271				ethtool_sprintf(&p, "rx_queue_%u_%s", i,
3272						virtnet_rq_stats_desc[j].desc);
3273		}
3274
3275		for (i = 0; i < vi->curr_queue_pairs; i++) {
3276			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
3277				ethtool_sprintf(&p, "tx_queue_%u_%s", i,
3278						virtnet_sq_stats_desc[j].desc);
3279		}
3280		break;
3281	}
3282}
3283
3284static int virtnet_get_sset_count(struct net_device *dev, int sset)
3285{
3286	struct virtnet_info *vi = netdev_priv(dev);
3287
3288	switch (sset) {
3289	case ETH_SS_STATS:
3290		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
3291					       VIRTNET_SQ_STATS_LEN);
3292	default:
3293		return -EOPNOTSUPP;
3294	}
3295}
3296
3297static void virtnet_get_ethtool_stats(struct net_device *dev,
3298				      struct ethtool_stats *stats, u64 *data)
3299{
3300	struct virtnet_info *vi = netdev_priv(dev);
3301	unsigned int idx = 0, start, i, j;
 
3302	const u8 *stats_base;
3303	const u64_stats_t *p;
3304	size_t offset;
3305
3306	for (i = 0; i < vi->curr_queue_pairs; i++) {
3307		struct receive_queue *rq = &vi->rq[i];
 
3308
3309		stats_base = (const u8 *)&rq->stats;
3310		do {
3311			start = u64_stats_fetch_begin(&rq->stats.syncp);
3312			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
3313				offset = virtnet_rq_stats_desc[j].offset;
3314				p = (const u64_stats_t *)(stats_base + offset);
3315				data[idx + j] = u64_stats_read(p);
3316			}
3317		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3318		idx += VIRTNET_RQ_STATS_LEN;
3319	}
3320
3321	for (i = 0; i < vi->curr_queue_pairs; i++) {
3322		struct send_queue *sq = &vi->sq[i];
3323
3324		stats_base = (const u8 *)&sq->stats;
3325		do {
3326			start = u64_stats_fetch_begin(&sq->stats.syncp);
3327			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
3328				offset = virtnet_sq_stats_desc[j].offset;
3329				p = (const u64_stats_t *)(stats_base + offset);
3330				data[idx + j] = u64_stats_read(p);
3331			}
3332		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3333		idx += VIRTNET_SQ_STATS_LEN;
3334	}
3335}
3336
3337static void virtnet_get_channels(struct net_device *dev,
3338				 struct ethtool_channels *channels)
3339{
3340	struct virtnet_info *vi = netdev_priv(dev);
3341
3342	channels->combined_count = vi->curr_queue_pairs;
3343	channels->max_combined = vi->max_queue_pairs;
3344	channels->max_other = 0;
3345	channels->rx_count = 0;
3346	channels->tx_count = 0;
3347	channels->other_count = 0;
3348}
3349
3350static int virtnet_set_link_ksettings(struct net_device *dev,
3351				      const struct ethtool_link_ksettings *cmd)
3352{
3353	struct virtnet_info *vi = netdev_priv(dev);
3354
3355	return ethtool_virtdev_set_link_ksettings(dev, cmd,
3356						  &vi->speed, &vi->duplex);
3357}
3358
3359static int virtnet_get_link_ksettings(struct net_device *dev,
3360				      struct ethtool_link_ksettings *cmd)
3361{
3362	struct virtnet_info *vi = netdev_priv(dev);
3363
3364	cmd->base.speed = vi->speed;
3365	cmd->base.duplex = vi->duplex;
3366	cmd->base.port = PORT_OTHER;
3367
3368	return 0;
3369}
3370
3371static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
3372					  struct ethtool_coalesce *ec)
3373{
 
3374	struct scatterlist sgs_tx;
3375	int i;
3376
3377	vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
3378	vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
3379	sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
3380
3381	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3382				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
3383				  &sgs_tx))
3384		return -EINVAL;
3385
3386	vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
3387	vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
3388	for (i = 0; i < vi->max_queue_pairs; i++) {
3389		vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3390		vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3391	}
3392
3393	return 0;
3394}
3395
3396static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
3397					  struct ethtool_coalesce *ec)
3398{
 
3399	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
3400	struct scatterlist sgs_rx;
3401	int i;
3402
3403	if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3404		return -EOPNOTSUPP;
3405
3406	if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
3407			       ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
3408		return -EINVAL;
3409
3410	if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
3411		vi->rx_dim_enabled = true;
3412		for (i = 0; i < vi->max_queue_pairs; i++)
3413			vi->rq[i].dim_enabled = true;
3414		return 0;
3415	}
3416
3417	if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
3418		vi->rx_dim_enabled = false;
3419		for (i = 0; i < vi->max_queue_pairs; i++)
3420			vi->rq[i].dim_enabled = false;
3421	}
3422
3423	/* Since the per-queue coalescing params can be set,
3424	 * we need to apply the new global params even if they
3425	 * are not updated.
3426	 */
3427	vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
3428	vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
3429	sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
3430
3431	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3432				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
3433				  &sgs_rx))
3434		return -EINVAL;
3435
3436	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
3437	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
3438	for (i = 0; i < vi->max_queue_pairs; i++) {
 
3439		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3440		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
 
3441	}
3442
3443	return 0;
3444}
3445
3446static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
3447				       struct ethtool_coalesce *ec)
3448{
3449	int err;
3450
3451	err = virtnet_send_tx_notf_coal_cmds(vi, ec);
3452	if (err)
3453		return err;
3454
3455	err = virtnet_send_rx_notf_coal_cmds(vi, ec);
3456	if (err)
3457		return err;
3458
3459	return 0;
3460}
3461
3462static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
3463					     struct ethtool_coalesce *ec,
3464					     u16 queue)
3465{
3466	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
3467	bool cur_rx_dim = vi->rq[queue].dim_enabled;
3468	u32 max_usecs, max_packets;
 
3469	int err;
3470
3471	max_usecs = vi->rq[queue].intr_coal.max_usecs;
3472	max_packets = vi->rq[queue].intr_coal.max_packets;
3473
3474	if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
3475			       ec->rx_max_coalesced_frames != max_packets))
3476		return -EINVAL;
 
3477
3478	if (rx_ctrl_dim_on && !cur_rx_dim) {
3479		vi->rq[queue].dim_enabled = true;
 
3480		return 0;
3481	}
3482
3483	if (!rx_ctrl_dim_on && cur_rx_dim)
3484		vi->rq[queue].dim_enabled = false;
3485
3486	/* If no params are updated, userspace ethtool will
3487	 * reject the modification.
3488	 */
3489	err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
3490					       ec->rx_coalesce_usecs,
3491					       ec->rx_max_coalesced_frames);
3492	if (err)
3493		return err;
3494
3495	return 0;
3496}
3497
3498static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
3499					  struct ethtool_coalesce *ec,
3500					  u16 queue)
3501{
3502	int err;
3503
3504	err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
3505	if (err)
3506		return err;
3507
3508	err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
3509					       ec->tx_coalesce_usecs,
3510					       ec->tx_max_coalesced_frames);
3511	if (err)
3512		return err;
3513
3514	return 0;
3515}
3516
3517static void virtnet_rx_dim_work(struct work_struct *work)
3518{
3519	struct dim *dim = container_of(work, struct dim, work);
3520	struct receive_queue *rq = container_of(dim,
3521			struct receive_queue, dim);
3522	struct virtnet_info *vi = rq->vq->vdev->priv;
3523	struct net_device *dev = vi->dev;
3524	struct dim_cq_moder update_moder;
3525	int i, qnum, err;
3526
3527	if (!rtnl_trylock())
3528		return;
3529
3530	/* Each rxq's work is queued by "net_dim()->schedule_work()"
3531	 * in response to NAPI traffic changes. Note that dim->profile_ix
3532	 * for each rxq is updated prior to the queuing action.
3533	 * So we only need to traverse and update profiles for all rxqs
3534	 * in the work which is holding rtnl_lock.
3535	 */
3536	for (i = 0; i < vi->curr_queue_pairs; i++) {
3537		rq = &vi->rq[i];
3538		dim = &rq->dim;
3539		qnum = rq - vi->rq;
3540
3541		if (!rq->dim_enabled)
3542			continue;
3543
3544		update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
3545		if (update_moder.usec != rq->intr_coal.max_usecs ||
3546		    update_moder.pkts != rq->intr_coal.max_packets) {
3547			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
3548							       update_moder.usec,
3549							       update_moder.pkts);
3550			if (err)
3551				pr_debug("%s: Failed to send dim parameters on rxq%d\n",
3552					 dev->name, qnum);
3553			dim->state = DIM_START_MEASURE;
3554		}
3555	}
3556
3557	rtnl_unlock();
3558}
3559
3560static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
3561{
3562	/* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
3563	 * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
3564	 */
3565	if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
3566		return -EOPNOTSUPP;
3567
3568	if (ec->tx_max_coalesced_frames > 1 ||
3569	    ec->rx_max_coalesced_frames != 1)
3570		return -EINVAL;
3571
3572	return 0;
3573}
3574
3575static int virtnet_should_update_vq_weight(int dev_flags, int weight,
3576					   int vq_weight, bool *should_update)
3577{
3578	if (weight ^ vq_weight) {
3579		if (dev_flags & IFF_UP)
3580			return -EBUSY;
3581		*should_update = true;
3582	}
3583
3584	return 0;
3585}
3586
3587static int virtnet_set_coalesce(struct net_device *dev,
3588				struct ethtool_coalesce *ec,
3589				struct kernel_ethtool_coalesce *kernel_coal,
3590				struct netlink_ext_ack *extack)
3591{
3592	struct virtnet_info *vi = netdev_priv(dev);
3593	int ret, queue_number, napi_weight;
3594	bool update_napi = false;
3595
3596	/* Can't change NAPI weight if the link is up */
3597	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3598	for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
3599		ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3600						      vi->sq[queue_number].napi.weight,
3601						      &update_napi);
3602		if (ret)
3603			return ret;
3604
3605		if (update_napi) {
3606			/* All queues in [queue_number, vi->max_queue_pairs) will be updated
3607			 * for the sake of simplicity, even though only some of them may need it.
3608			 */
3609			break;
3610		}
3611	}
3612
3613	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
3614		ret = virtnet_send_notf_coal_cmds(vi, ec);
3615	else
3616		ret = virtnet_coal_params_supported(ec);
3617
3618	if (ret)
3619		return ret;
3620
3621	if (update_napi) {
3622		for (; queue_number < vi->max_queue_pairs; queue_number++)
3623			vi->sq[queue_number].napi.weight = napi_weight;
3624	}
3625
3626	return ret;
3627}
3628
3629static int virtnet_get_coalesce(struct net_device *dev,
3630				struct ethtool_coalesce *ec,
3631				struct kernel_ethtool_coalesce *kernel_coal,
3632				struct netlink_ext_ack *extack)
3633{
3634	struct virtnet_info *vi = netdev_priv(dev);
3635
3636	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
3637		ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
3638		ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
3639		ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
3640		ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
3641		ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
3642	} else {
3643		ec->rx_max_coalesced_frames = 1;
3644
3645		if (vi->sq[0].napi.weight)
3646			ec->tx_max_coalesced_frames = 1;
3647	}
3648
3649	return 0;
3650}
3651
3652static int virtnet_set_per_queue_coalesce(struct net_device *dev,
3653					  u32 queue,
3654					  struct ethtool_coalesce *ec)
3655{
3656	struct virtnet_info *vi = netdev_priv(dev);
3657	int ret, napi_weight;
3658	bool update_napi = false;
3659
3660	if (queue >= vi->max_queue_pairs)
3661		return -EINVAL;
3662
3663	/* Can't change NAPI weight if the link is up */
3664	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3665	ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3666					      vi->sq[queue].napi.weight,
3667					      &update_napi);
3668	if (ret)
3669		return ret;
3670
3671	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3672		ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
3673	else
3674		ret = virtnet_coal_params_supported(ec);
3675
3676	if (ret)
3677		return ret;
3678
3679	if (update_napi)
3680		vi->sq[queue].napi.weight = napi_weight;
3681
3682	return 0;
3683}
3684
3685static int virtnet_get_per_queue_coalesce(struct net_device *dev,
3686					  u32 queue,
3687					  struct ethtool_coalesce *ec)
3688{
3689	struct virtnet_info *vi = netdev_priv(dev);
3690
3691	if (queue >= vi->max_queue_pairs)
3692		return -EINVAL;
3693
3694	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
 
3695		ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
3696		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
3697		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
3698		ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
3699		ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
 
3700	} else {
3701		ec->rx_max_coalesced_frames = 1;
3702
3703		if (vi->sq[queue].napi.weight)
3704			ec->tx_max_coalesced_frames = 1;
3705	}
3706
3707	return 0;
3708}
3709
3710static void virtnet_init_settings(struct net_device *dev)
3711{
3712	struct virtnet_info *vi = netdev_priv(dev);
3713
3714	vi->speed = SPEED_UNKNOWN;
3715	vi->duplex = DUPLEX_UNKNOWN;
3716}
3717
3718static void virtnet_update_settings(struct virtnet_info *vi)
3719{
3720	u32 speed;
3721	u8 duplex;
3722
3723	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3724		return;
3725
3726	virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3727
3728	if (ethtool_validate_speed(speed))
3729		vi->speed = speed;
3730
3731	virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3732
3733	if (ethtool_validate_duplex(duplex))
3734		vi->duplex = duplex;
3735}
3736
3737static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
3738{
3739	return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
3740}
3741
3742static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
3743{
3744	return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
3745}
3746
3747static int virtnet_get_rxfh(struct net_device *dev,
3748			    struct ethtool_rxfh_param *rxfh)
3749{
3750	struct virtnet_info *vi = netdev_priv(dev);
3751	int i;
3752
3753	if (rxfh->indir) {
3754		for (i = 0; i < vi->rss_indir_table_size; ++i)
3755			rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
3756	}
3757
3758	if (rxfh->key)
3759		memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
3760
3761	rxfh->hfunc = ETH_RSS_HASH_TOP;
3762
3763	return 0;
3764}
3765
3766static int virtnet_set_rxfh(struct net_device *dev,
3767			    struct ethtool_rxfh_param *rxfh,
3768			    struct netlink_ext_ack *extack)
3769{
3770	struct virtnet_info *vi = netdev_priv(dev);
 
3771	int i;
3772
3773	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
3774	    rxfh->hfunc != ETH_RSS_HASH_TOP)
3775		return -EOPNOTSUPP;
3776
3777	if (rxfh->indir) {
3778		for (i = 0; i < vi->rss_indir_table_size; ++i)
3779			vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
3780	}
3781	if (rxfh->key)
3782		memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
3783
3784	virtnet_commit_rss_command(vi);
 
3785
3786	return 0;
3787}
3788
3789static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
3790{
3791	struct virtnet_info *vi = netdev_priv(dev);
3792	int rc = 0;
3793
3794	switch (info->cmd) {
3795	case ETHTOOL_GRXRINGS:
3796		info->data = vi->curr_queue_pairs;
3797		break;
3798	case ETHTOOL_GRXFH:
3799		virtnet_get_hashflow(vi, info);
3800		break;
3801	default:
3802		rc = -EOPNOTSUPP;
3803	}
3804
3805	return rc;
3806}
3807
3808static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3809{
3810	struct virtnet_info *vi = netdev_priv(dev);
3811	int rc = 0;
3812
3813	switch (info->cmd) {
3814	case ETHTOOL_SRXFH:
3815		if (!virtnet_set_hashflow(vi, info))
3816			rc = -EINVAL;
3817
3818		break;
3819	default:
3820		rc = -EOPNOTSUPP;
3821	}
3822
3823	return rc;
3824}
3825
3826static const struct ethtool_ops virtnet_ethtool_ops = {
3827	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
3828		ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
3829	.get_drvinfo = virtnet_get_drvinfo,
3830	.get_link = ethtool_op_get_link,
3831	.get_ringparam = virtnet_get_ringparam,
3832	.set_ringparam = virtnet_set_ringparam,
3833	.get_strings = virtnet_get_strings,
3834	.get_sset_count = virtnet_get_sset_count,
3835	.get_ethtool_stats = virtnet_get_ethtool_stats,
3836	.set_channels = virtnet_set_channels,
3837	.get_channels = virtnet_get_channels,
3838	.get_ts_info = ethtool_op_get_ts_info,
3839	.get_link_ksettings = virtnet_get_link_ksettings,
3840	.set_link_ksettings = virtnet_set_link_ksettings,
3841	.set_coalesce = virtnet_set_coalesce,
3842	.get_coalesce = virtnet_get_coalesce,
3843	.set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
3844	.get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
3845	.get_rxfh_key_size = virtnet_get_rxfh_key_size,
3846	.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
3847	.get_rxfh = virtnet_get_rxfh,
3848	.set_rxfh = virtnet_set_rxfh,
3849	.get_rxnfc = virtnet_get_rxnfc,
3850	.set_rxnfc = virtnet_set_rxnfc,
3851};
3852
3853static void virtnet_freeze_down(struct virtio_device *vdev)
3854{
3855	struct virtnet_info *vi = vdev->priv;
3856
3857	/* Make sure no work handler is accessing the device */
3858	flush_work(&vi->config_work);
 
 
3859
3860	netif_tx_lock_bh(vi->dev);
3861	netif_device_detach(vi->dev);
3862	netif_tx_unlock_bh(vi->dev);
3863	if (netif_running(vi->dev))
3864		virtnet_close(vi->dev);
3865}
3866
3867static int init_vqs(struct virtnet_info *vi);
3868
3869static int virtnet_restore_up(struct virtio_device *vdev)
3870{
3871	struct virtnet_info *vi = vdev->priv;
3872	int err;
3873
3874	err = init_vqs(vi);
3875	if (err)
3876		return err;
3877
3878	virtio_device_ready(vdev);
3879
3880	enable_delayed_refill(vi);
 
3881
3882	if (netif_running(vi->dev)) {
3883		err = virtnet_open(vi->dev);
3884		if (err)
3885			return err;
3886	}
3887
3888	netif_tx_lock_bh(vi->dev);
3889	netif_device_attach(vi->dev);
3890	netif_tx_unlock_bh(vi->dev);
3891	return err;
3892}
3893
3894static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
3895{
 
3896	struct scatterlist sg;
3897	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3898
3899	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3900
3901	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
3902				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
3903		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
3904		return -EINVAL;
3905	}
3906
3907	return 0;
3908}
3909
3910static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
3911{
3912	u64 offloads = 0;
3913
3914	if (!vi->guest_offloads)
3915		return 0;
3916
3917	return virtnet_set_guest_offloads(vi, offloads);
3918}
3919
3920static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
3921{
3922	u64 offloads = vi->guest_offloads;
3923
3924	if (!vi->guest_offloads)
3925		return 0;
3926
3927	return virtnet_set_guest_offloads(vi, offloads);
3928}
3929
3930static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
3931			   struct netlink_ext_ack *extack)
3932{
3933	unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
3934					   sizeof(struct skb_shared_info));
3935	unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
3936	struct virtnet_info *vi = netdev_priv(dev);
3937	struct bpf_prog *old_prog;
3938	u16 xdp_qp = 0, curr_qp;
3939	int i, err;
3940
3941	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
3942	    && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3943	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3944	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
3945		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
3946		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
3947		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
3948		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
3949		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
3950		return -EOPNOTSUPP;
3951	}
3952
3953	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
3954		NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
3955		return -EINVAL;
3956	}
3957
3958	if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
3959		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
3960		netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
3961		return -EINVAL;
3962	}
3963
3964	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3965	if (prog)
3966		xdp_qp = nr_cpu_ids;
3967
3968	/* XDP requires extra queues for XDP_TX */
3969	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
3970		netdev_warn_once(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
3971				 curr_qp + xdp_qp, vi->max_queue_pairs);
3972		xdp_qp = 0;
3973	}
3974
3975	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
3976	if (!prog && !old_prog)
3977		return 0;
3978
3979	if (prog)
3980		bpf_prog_add(prog, vi->max_queue_pairs - 1);
3981
3982	/* Make sure NAPI is not using any XDP TX queues for RX. */
3983	if (netif_running(dev)) {
3984		for (i = 0; i < vi->max_queue_pairs; i++) {
3985			napi_disable(&vi->rq[i].napi);
3986			virtnet_napi_tx_disable(&vi->sq[i].napi);
3987		}
3988	}
3989
3990	if (!prog) {
3991		for (i = 0; i < vi->max_queue_pairs; i++) {
3992			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3993			if (i == 0)
3994				virtnet_restore_guest_offloads(vi);
3995		}
3996		synchronize_net();
3997	}
3998
3999	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
4000	if (err)
4001		goto err;
4002	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
4003	vi->xdp_queue_pairs = xdp_qp;
4004
4005	if (prog) {
4006		vi->xdp_enabled = true;
4007		for (i = 0; i < vi->max_queue_pairs; i++) {
4008			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
4009			if (i == 0 && !old_prog)
4010				virtnet_clear_guest_offloads(vi);
4011		}
4012		if (!old_prog)
4013			xdp_features_set_redirect_target(dev, true);
4014	} else {
4015		xdp_features_clear_redirect_target(dev);
4016		vi->xdp_enabled = false;
4017	}
4018
4019	for (i = 0; i < vi->max_queue_pairs; i++) {
4020		if (old_prog)
4021			bpf_prog_put(old_prog);
4022		if (netif_running(dev)) {
4023			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
4024			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
4025					       &vi->sq[i].napi);
4026		}
4027	}
4028
4029	return 0;
4030
4031err:
4032	if (!prog) {
4033		virtnet_clear_guest_offloads(vi);
4034		for (i = 0; i < vi->max_queue_pairs; i++)
4035			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
4036	}
4037
4038	if (netif_running(dev)) {
4039		for (i = 0; i < vi->max_queue_pairs; i++) {
4040			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
4041			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
4042					       &vi->sq[i].napi);
4043		}
4044	}
4045	if (prog)
4046		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
4047	return err;
4048}
4049
4050static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4051{
4052	switch (xdp->command) {
4053	case XDP_SETUP_PROG:
4054		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
 
 
4055	default:
4056		return -EINVAL;
4057	}
4058}
4059
4060static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
4061				      size_t len)
4062{
4063	struct virtnet_info *vi = netdev_priv(dev);
4064	int ret;
4065
4066	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
4067		return -EOPNOTSUPP;
4068
4069	ret = snprintf(buf, len, "sby");
4070	if (ret >= len)
4071		return -EOPNOTSUPP;
4072
4073	return 0;
4074}
4075
4076static int virtnet_set_features(struct net_device *dev,
4077				netdev_features_t features)
4078{
4079	struct virtnet_info *vi = netdev_priv(dev);
4080	u64 offloads;
4081	int err;
4082
4083	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
4084		if (vi->xdp_enabled)
4085			return -EBUSY;
4086
4087		if (features & NETIF_F_GRO_HW)
4088			offloads = vi->guest_offloads_capable;
4089		else
4090			offloads = vi->guest_offloads_capable &
4091				   ~GUEST_OFFLOAD_GRO_HW_MASK;
4092
4093		err = virtnet_set_guest_offloads(vi, offloads);
4094		if (err)
4095			return err;
4096		vi->guest_offloads = offloads;
4097	}
4098
4099	if ((dev->features ^ features) & NETIF_F_RXHASH) {
4100		if (features & NETIF_F_RXHASH)
4101			vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
4102		else
4103			vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
4104
4105		if (!virtnet_commit_rss_command(vi))
4106			return -EINVAL;
4107	}
4108
4109	return 0;
4110}
4111
4112static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
4113{
4114	struct virtnet_info *priv = netdev_priv(dev);
4115	struct send_queue *sq = &priv->sq[txqueue];
4116	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
4117
4118	u64_stats_update_begin(&sq->stats.syncp);
4119	u64_stats_inc(&sq->stats.tx_timeouts);
4120	u64_stats_update_end(&sq->stats.syncp);
4121
4122	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
4123		   txqueue, sq->name, sq->vq->index, sq->vq->name,
4124		   jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
4125}
4126
4127static const struct net_device_ops virtnet_netdev = {
4128	.ndo_open            = virtnet_open,
4129	.ndo_stop   	     = virtnet_close,
4130	.ndo_start_xmit      = start_xmit,
4131	.ndo_validate_addr   = eth_validate_addr,
4132	.ndo_set_mac_address = virtnet_set_mac_address,
4133	.ndo_set_rx_mode     = virtnet_set_rx_mode,
4134	.ndo_get_stats64     = virtnet_stats,
4135	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
4136	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
4137	.ndo_bpf		= virtnet_xdp,
4138	.ndo_xdp_xmit		= virtnet_xdp_xmit,
 
4139	.ndo_features_check	= passthru_features_check,
4140	.ndo_get_phys_port_name	= virtnet_get_phys_port_name,
4141	.ndo_set_features	= virtnet_set_features,
4142	.ndo_tx_timeout		= virtnet_tx_timeout,
4143};
4144
4145static void virtnet_config_changed_work(struct work_struct *work)
4146{
4147	struct virtnet_info *vi =
4148		container_of(work, struct virtnet_info, config_work);
4149	u16 v;
4150
4151	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
4152				 struct virtio_net_config, status, &v) < 0)
4153		return;
4154
4155	if (v & VIRTIO_NET_S_ANNOUNCE) {
4156		netdev_notify_peers(vi->dev);
4157		virtnet_ack_link_announce(vi);
4158	}
4159
4160	/* Ignore unknown (future) status bits */
4161	v &= VIRTIO_NET_S_LINK_UP;
4162
4163	if (vi->status == v)
4164		return;
4165
4166	vi->status = v;
4167
4168	if (vi->status & VIRTIO_NET_S_LINK_UP) {
4169		virtnet_update_settings(vi);
4170		netif_carrier_on(vi->dev);
4171		netif_tx_wake_all_queues(vi->dev);
4172	} else {
4173		netif_carrier_off(vi->dev);
4174		netif_tx_stop_all_queues(vi->dev);
4175	}
4176}
4177
4178static void virtnet_config_changed(struct virtio_device *vdev)
4179{
4180	struct virtnet_info *vi = vdev->priv;
4181
4182	schedule_work(&vi->config_work);
4183}
4184
4185static void virtnet_free_queues(struct virtnet_info *vi)
4186{
4187	int i;
4188
4189	for (i = 0; i < vi->max_queue_pairs; i++) {
4190		__netif_napi_del(&vi->rq[i].napi);
4191		__netif_napi_del(&vi->sq[i].napi);
4192	}
4193
4194	/* We called __netif_napi_del(), so we must respect an RCU
4195	 * grace period before freeing vi->rq.
4196	 */
4197	synchronize_net();
4198
4199	kfree(vi->rq);
4200	kfree(vi->sq);
4201	kfree(vi->ctrl);
4202}
4203
4204static void _free_receive_bufs(struct virtnet_info *vi)
4205{
4206	struct bpf_prog *old_prog;
4207	int i;
4208
4209	for (i = 0; i < vi->max_queue_pairs; i++) {
4210		while (vi->rq[i].pages)
4211			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
4212
4213		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
4214		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
4215		if (old_prog)
4216			bpf_prog_put(old_prog);
4217	}
4218}
4219
4220static void free_receive_bufs(struct virtnet_info *vi)
4221{
4222	rtnl_lock();
4223	_free_receive_bufs(vi);
4224	rtnl_unlock();
4225}
4226
4227static void free_receive_page_frags(struct virtnet_info *vi)
4228{
4229	int i;
4230	for (i = 0; i < vi->max_queue_pairs; i++)
4231		if (vi->rq[i].alloc_frag.page) {
4232			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
4233				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
4234			put_page(vi->rq[i].alloc_frag.page);
4235		}
4236}
4237
4238static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
4239{
4240	if (!is_xdp_frame(buf))
4241		dev_kfree_skb(buf);
4242	else
4243		xdp_return_frame(ptr_to_xdp(buf));
4244}
4245
4246static void free_unused_bufs(struct virtnet_info *vi)
4247{
4248	void *buf;
4249	int i;
4250
4251	for (i = 0; i < vi->max_queue_pairs; i++) {
4252		struct virtqueue *vq = vi->sq[i].vq;
4253		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4254			virtnet_sq_free_unused_buf(vq, buf);
4255		cond_resched();
4256	}
4257
4258	for (i = 0; i < vi->max_queue_pairs; i++) {
4259		struct virtqueue *vq = vi->rq[i].vq;
4260
4261		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4262			virtnet_rq_unmap_free_buf(vq, buf);
4263		cond_resched();
4264	}
4265}
4266
4267static void virtnet_del_vqs(struct virtnet_info *vi)
4268{
4269	struct virtio_device *vdev = vi->vdev;
4270
4271	virtnet_clean_affinity(vi);
4272
4273	vdev->config->del_vqs(vdev);
4274
4275	virtnet_free_queues(vi);
4276}
4277
4278/* How large should a single buffer be so a queue full of these can fit at
4279 * least one full packet?
4280 * Logic below assumes the mergeable buffer header is used.
4281 */
4282static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
4283{
4284	const unsigned int hdr_len = vi->hdr_len;
4285	unsigned int rq_size = virtqueue_get_vring_size(vq);
4286	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
4287	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
4288	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
4289
4290	return max(max(min_buf_len, hdr_len) - hdr_len,
4291		   (unsigned int)GOOD_PACKET_LEN);
4292}
4293
4294static int virtnet_find_vqs(struct virtnet_info *vi)
4295{
4296	vq_callback_t **callbacks;
4297	struct virtqueue **vqs;
4298	const char **names;
4299	int ret = -ENOMEM;
4300	int total_vqs;
4301	bool *ctx;
4302	u16 i;
4303
4304	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
4305	 * up to N-1 additional RX/TX queue pairs used in multiqueue mode,
4306	 * followed by an optional control vq.
4307	 */
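	/* For example (illustrative numbers): with max_queue_pairs = 4 and a
	 * control vq, total_vqs = 9; rxq2vq()/txq2vq() place receive queues at
	 * the even indices 0, 2, 4, 6, transmit queues at the odd indices
	 * 1, 3, 5, 7, and the control vq occupies the last slot (index 8).
	 */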
4308	total_vqs = vi->max_queue_pairs * 2 +
4309		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
4310
4311	/* Allocate space for find_vqs parameters */
4312	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
4313	if (!vqs)
4314		goto err_vq;
4315	callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
4316	if (!callbacks)
4317		goto err_callback;
4318	names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
4319	if (!names)
4320		goto err_names;
4321	if (!vi->big_packets || vi->mergeable_rx_bufs) {
4322		ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
4323		if (!ctx)
4324			goto err_ctx;
4325	} else {
4326		ctx = NULL;
4327	}
4328
4329	/* Parameters for control virtqueue, if any */
4330	if (vi->has_cvq) {
4331		callbacks[total_vqs - 1] = NULL;
4332		names[total_vqs - 1] = "control";
4333	}
4334
4335	/* Allocate/initialize parameters for send/receive virtqueues */
4336	for (i = 0; i < vi->max_queue_pairs; i++) {
4337		callbacks[rxq2vq(i)] = skb_recv_done;
4338		callbacks[txq2vq(i)] = skb_xmit_done;
4339		sprintf(vi->rq[i].name, "input.%u", i);
4340		sprintf(vi->sq[i].name, "output.%u", i);
4341		names[rxq2vq(i)] = vi->rq[i].name;
4342		names[txq2vq(i)] = vi->sq[i].name;
4343		if (ctx)
4344			ctx[rxq2vq(i)] = true;
4345	}
4346
4347	ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
4348				  names, ctx, NULL);
4349	if (ret)
4350		goto err_find;
4351
4352	if (vi->has_cvq) {
4353		vi->cvq = vqs[total_vqs - 1];
4354		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
4355			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4356	}
4357
4358	for (i = 0; i < vi->max_queue_pairs; i++) {
4359		vi->rq[i].vq = vqs[rxq2vq(i)];
4360		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
4361		vi->sq[i].vq = vqs[txq2vq(i)];
4362	}
4363
4364	/* Success: ret == 0.  Fall through to free the temporary setup arrays. */
4365
4366
4367err_find:
4368	kfree(ctx);
4369err_ctx:
4370	kfree(names);
4371err_names:
4372	kfree(callbacks);
4373err_callback:
4374	kfree(vqs);
4375err_vq:
4376	return ret;
4377}
4378
4379static int virtnet_alloc_queues(struct virtnet_info *vi)
4380{
4381	int i;
4382
4383	if (vi->has_cvq) {
4384		vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
4385		if (!vi->ctrl)
4386			goto err_ctrl;
4387	} else {
4388		vi->ctrl = NULL;
4389	}
4390	vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
4391	if (!vi->sq)
4392		goto err_sq;
4393	vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
4394	if (!vi->rq)
4395		goto err_rq;
4396
4397	INIT_DELAYED_WORK(&vi->refill, refill_work);
4398	for (i = 0; i < vi->max_queue_pairs; i++) {
4399		vi->rq[i].pages = NULL;
4400		netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
4401				      napi_weight);
4402		netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
4403					 virtnet_poll_tx,
4404					 napi_tx ? napi_weight : 0);
4405
4406		INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
4407		vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4408
4409		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
4410		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
4411		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
4412
4413		u64_stats_init(&vi->rq[i].stats.syncp);
4414		u64_stats_init(&vi->sq[i].stats.syncp);
 
4415	}
4416
4417	return 0;
4418
4419err_rq:
4420	kfree(vi->sq);
4421err_sq:
4422	kfree(vi->ctrl);
4423err_ctrl:
4424	return -ENOMEM;
4425}
4426
4427static int init_vqs(struct virtnet_info *vi)
4428{
4429	int ret;
4430
4431	/* Allocate send & receive queues */
4432	ret = virtnet_alloc_queues(vi);
4433	if (ret)
4434		goto err;
4435
4436	ret = virtnet_find_vqs(vi);
4437	if (ret)
4438		goto err_free;
4439
4440	virtnet_rq_set_premapped(vi);
4441
4442	cpus_read_lock();
4443	virtnet_set_affinity(vi);
4444	cpus_read_unlock();
4445
4446	return 0;
4447
4448err_free:
4449	virtnet_free_queues(vi);
4450err:
4451	return ret;
4452}
4453
4454#ifdef CONFIG_SYSFS
4455static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
4456		char *buf)
4457{
4458	struct virtnet_info *vi = netdev_priv(queue->dev);
4459	unsigned int queue_index = get_netdev_rx_queue_index(queue);
4460	unsigned int headroom = virtnet_get_headroom(vi);
4461	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
4462	struct ewma_pkt_len *avg;
4463
4464	BUG_ON(queue_index >= vi->max_queue_pairs);
4465	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
4466	return sprintf(buf, "%u\n",
4467		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
4468				       SKB_DATA_ALIGN(headroom + tailroom)));
4469}
4470
4471static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
4472	__ATTR_RO(mergeable_rx_buffer_size);
4473
4474static struct attribute *virtio_net_mrg_rx_attrs[] = {
4475	&mergeable_rx_buffer_size_attribute.attr,
4476	NULL
4477};
4478
4479static const struct attribute_group virtio_net_mrg_rx_group = {
4480	.name = "virtio_net",
4481	.attrs = virtio_net_mrg_rx_attrs
4482};
4483#endif
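
/*
 * Usage sketch (hedged; "eth0" is only an example interface name): with
 * mergeable RX buffers the attribute above is expected to show up per RX
 * queue under sysfs, e.g.
 *
 *   cat /sys/class/net/eth0/queues/rx-0/virtio_net/mergeable_rx_buffer_size
 *
 * reporting the EWMA-derived buffer size currently used when refilling that
 * ring.
 */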
4484
4485static bool virtnet_fail_on_feature(struct virtio_device *vdev,
4486				    unsigned int fbit,
4487				    const char *fname, const char *dname)
4488{
4489	if (!virtio_has_feature(vdev, fbit))
4490		return false;
4491
4492	dev_err(&vdev->dev, "device advertises feature %s but not %s",
4493		fname, dname);
4494
4495	return true;
4496}
4497
4498#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
4499	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
4500
4501static bool virtnet_validate_features(struct virtio_device *vdev)
4502{
4503	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
4504	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
4505			     "VIRTIO_NET_F_CTRL_VQ") ||
4506	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
4507			     "VIRTIO_NET_F_CTRL_VQ") ||
4508	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
4509			     "VIRTIO_NET_F_CTRL_VQ") ||
4510	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
4511	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
4512			     "VIRTIO_NET_F_CTRL_VQ") ||
4513	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
4514			     "VIRTIO_NET_F_CTRL_VQ") ||
4515	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
4516			     "VIRTIO_NET_F_CTRL_VQ") ||
4517	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
4518			     "VIRTIO_NET_F_CTRL_VQ") ||
4519	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
4520			     "VIRTIO_NET_F_CTRL_VQ"))) {
4521		return false;
4522	}
4523
4524	return true;
4525}
4526
4527#define MIN_MTU ETH_MIN_MTU
4528#define MAX_MTU ETH_MAX_MTU
4529
4530static int virtnet_validate(struct virtio_device *vdev)
4531{
4532	if (!vdev->config->get) {
4533		dev_err(&vdev->dev, "%s failure: config access disabled\n",
4534			__func__);
4535		return -EINVAL;
4536	}
4537
4538	if (!virtnet_validate_features(vdev))
4539		return -EINVAL;
4540
4541	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4542		int mtu = virtio_cread16(vdev,
4543					 offsetof(struct virtio_net_config,
4544						  mtu));
4545		if (mtu < MIN_MTU)
4546			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
4547	}
4548
4549	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
4550	    !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4551		dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
4552		__virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
4553	}
4554
4555	return 0;
4556}
4557
4558static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
4559{
4560	return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4561		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
4562		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
4563		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4564		(virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
4565		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
4566}
4567
4568static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
4569{
4570	bool guest_gso = virtnet_check_guest_gso(vi);
4571
4572	/* If the device can receive ANY guest GSO packets, regardless of mtu,
4573	 * allocate buffers of maximum size; otherwise limit them to
4574	 * mtu-sized packets only.
4575	 */
4576	if (mtu > ETH_DATA_LEN || guest_gso) {
4577		vi->big_packets = true;
4578		vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
4579	}
4580}
4581
4582static int virtnet_probe(struct virtio_device *vdev)
4583{
4584	int i, err = -ENOMEM;
4585	struct net_device *dev;
4586	struct virtnet_info *vi;
4587	u16 max_queue_pairs;
4588	int mtu = 0;
4589
4590	/* Find if host supports multiqueue/rss virtio_net device */
4591	max_queue_pairs = 1;
4592	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4593		max_queue_pairs =
4594		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
4595
4596	/* We need at least 2 queues */
4597	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
4598	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
4599	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4600		max_queue_pairs = 1;
4601
4602	/* Allocate ourselves a network device with room for our info */
4603	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
4604	if (!dev)
4605		return -ENOMEM;
4606
4607	/* Set up network device as normal. */
4608	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
4609			   IFF_TX_SKB_NO_LINEAR;
4610	dev->netdev_ops = &virtnet_netdev;
 
4611	dev->features = NETIF_F_HIGHDMA;
4612
4613	dev->ethtool_ops = &virtnet_ethtool_ops;
4614	SET_NETDEV_DEV(dev, &vdev->dev);
4615
4616	/* Do we support "hardware" checksums? */
4617	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
4618		/* This opens up the world of extra features. */
4619		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4620		if (csum)
4621			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4622
4623		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
4624			dev->hw_features |= NETIF_F_TSO
4625				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
4626		}
4627		/* Individual feature bits: what can host handle? */
4628		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
4629			dev->hw_features |= NETIF_F_TSO;
4630		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
4631			dev->hw_features |= NETIF_F_TSO6;
4632		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
4633			dev->hw_features |= NETIF_F_TSO_ECN;
4634		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
4635			dev->hw_features |= NETIF_F_GSO_UDP_L4;
4636
4637		dev->features |= NETIF_F_GSO_ROBUST;
4638
4639		if (gso)
4640			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
4641		/* (!csum && gso) case will be fixed by register_netdev() */
4642	}
4643	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
4644		dev->features |= NETIF_F_RXCSUM;
4645	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4646	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
4647		dev->features |= NETIF_F_GRO_HW;
4648	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
4649		dev->hw_features |= NETIF_F_GRO_HW;
4650
4651	dev->vlan_features = dev->features;
4652	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
 
4653
4654	/* MTU range: 68 - 65535 */
4655	dev->min_mtu = MIN_MTU;
4656	dev->max_mtu = MAX_MTU;
4657
4658	/* Configuration may specify what MAC to use.  Otherwise random. */
4659	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4660		u8 addr[ETH_ALEN];
4661
4662		virtio_cread_bytes(vdev,
4663				   offsetof(struct virtio_net_config, mac),
4664				   addr, ETH_ALEN);
4665		eth_hw_addr_set(dev, addr);
4666	} else {
4667		eth_hw_addr_random(dev);
4668		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
4669			 dev->dev_addr);
4670	}
4671
4672	/* Set up our device-specific information */
4673	vi = netdev_priv(dev);
4674	vi->dev = dev;
4675	vi->vdev = vdev;
4676	vdev->priv = vi;
4677
4678	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
 
4679	spin_lock_init(&vi->refill_lock);
4680
4681	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
4682		vi->mergeable_rx_bufs = true;
4683		dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
4684	}
4685
4686	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
4687		vi->has_rss_hash_report = true;
4688
4689	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4690		vi->has_rss = true;
4691
4692	if (vi->has_rss || vi->has_rss_hash_report) {
4693		vi->rss_indir_table_size =
4694			virtio_cread16(vdev, offsetof(struct virtio_net_config,
4695				rss_max_indirection_table_length));
4696		vi->rss_key_size =
4697			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
4698
4699		vi->rss_hash_types_supported =
4700		    virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
4701		vi->rss_hash_types_supported &=
4702				~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
4703				  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
4704				  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
4705
4706		dev->hw_features |= NETIF_F_RXHASH;
 
4707	}
4708
4709	if (vi->has_rss_hash_report)
4710		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
4711	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
4712		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4713		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
4714	else
4715		vi->hdr_len = sizeof(struct virtio_net_hdr);
4716
4717	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
4718	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4719		vi->any_header_sg = true;
4720
4721	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4722		vi->has_cvq = true;
4723
 
 
4724	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4725		mtu = virtio_cread16(vdev,
4726				     offsetof(struct virtio_net_config,
4727					      mtu));
4728		if (mtu < dev->min_mtu) {
4729			/* Should never trigger: MTU was previously validated
4730			 * in virtnet_validate.
4731			 */
4732			dev_err(&vdev->dev,
4733				"device MTU appears to have changed it is now %d < %d",
4734				mtu, dev->min_mtu);
4735			err = -EINVAL;
4736			goto free;
4737		}
4738
4739		dev->mtu = mtu;
4740		dev->max_mtu = mtu;
4741	}
4742
4743	virtnet_set_big_packets(vi, mtu);
4744
4745	if (vi->any_header_sg)
4746		dev->needed_headroom = vi->hdr_len;
4747
4748	/* Enable multiqueue by default */
4749	if (num_online_cpus() >= max_queue_pairs)
4750		vi->curr_queue_pairs = max_queue_pairs;
4751	else
4752		vi->curr_queue_pairs = num_online_cpus();
4753	vi->max_queue_pairs = max_queue_pairs;
4754
4755	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
4756	err = init_vqs(vi);
4757	if (err)
4758		goto free;
4759
4760	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4761		vi->intr_coal_rx.max_usecs = 0;
4762		vi->intr_coal_tx.max_usecs = 0;
4763		vi->intr_coal_rx.max_packets = 0;
4764
4765		/* Keep the default values of the coalescing parameters
4766		 * aligned with the default napi_tx state.
4767		 */
4768		if (vi->sq[0].napi.weight)
4769			vi->intr_coal_tx.max_packets = 1;
4770		else
4771			vi->intr_coal_tx.max_packets = 0;
4772	}
4773
4774	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
4775		/* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
4776		for (i = 0; i < vi->max_queue_pairs; i++)
4777			if (vi->sq[i].napi.weight)
4778				vi->sq[i].intr_coal.max_packets = 1;
4779	}
4780
4781#ifdef CONFIG_SYSFS
4782	if (vi->mergeable_rx_bufs)
4783		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
4784#endif
4785	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
4786	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
4787
4788	virtnet_init_settings(dev);
4789
4790	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4791		vi->failover = net_failover_create(vi->dev);
4792		if (IS_ERR(vi->failover)) {
4793			err = PTR_ERR(vi->failover);
4794			goto free_vqs;
4795		}
4796	}
4797
4798	if (vi->has_rss || vi->has_rss_hash_report)
4799		virtnet_init_default_rss(vi);
4800
 
 
4801	/* serialize netdev register + virtio_device_ready() with ndo_open() */
4802	rtnl_lock();
4803
4804	err = register_netdevice(dev);
4805	if (err) {
4806		pr_debug("virtio_net: registering device failed\n");
4807		rtnl_unlock();
4808		goto free_failover;
4809	}
4810
 
 
 
4811	virtio_device_ready(vdev);
4812
4813	_virtnet_set_queues(vi, vi->curr_queue_pairs);
4814
4815	/* A random MAC address has been assigned, notify the device.
4816	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
4817	 * because many devices work fine without being given a MAC explicitly.
4818	 */
4819	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
4820	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
4821		struct scatterlist sg;
4822
4823		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
4824		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
4825					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
4826			pr_debug("virtio_net: setting MAC address failed\n");
4827			rtnl_unlock();
4828			err = -EINVAL;
4829			goto free_unregister_netdev;
4830		}
4831	}
4832
4833	rtnl_unlock();
 
 
 
4834
4835	err = virtnet_cpu_notif_add(vi);
4836	if (err) {
4837		pr_debug("virtio_net: registering cpu notifier failed\n");
4838		goto free_unregister_netdev;
4839	}
4840
4841	/* Assume link up if device can't report link status,
4842	 * otherwise get link status from config. */
4843	netif_carrier_off(dev);
4844	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
4845		schedule_work(&vi->config_work);
4846	} else {
4847		vi->status = VIRTIO_NET_S_LINK_UP;
4848		virtnet_update_settings(vi);
4849		netif_carrier_on(dev);
4850	}
4851
4852	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
4853		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
4854			set_bit(guest_offloads[i], &vi->guest_offloads);
4855	vi->guest_offloads_capable = vi->guest_offloads;
4856
4857	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
4858		 dev->name, max_queue_pairs);
4859
4860	return 0;
4861
4862free_unregister_netdev:
4863	unregister_netdev(dev);
4864free_failover:
4865	net_failover_destroy(vi->failover);
4866free_vqs:
4867	virtio_reset_device(vdev);
4868	cancel_delayed_work_sync(&vi->refill);
4869	free_receive_page_frags(vi);
4870	virtnet_del_vqs(vi);
4871free:
4872	free_netdev(dev);
4873	return err;
4874}
4875
4876static void remove_vq_common(struct virtnet_info *vi)
4877{
 
 
4878	virtio_reset_device(vi->vdev);
4879
4880	/* Free unused buffers in both send and recv, if any. */
4881	free_unused_bufs(vi);
4882
4883	free_receive_bufs(vi);
4884
4885	free_receive_page_frags(vi);
4886
4887	virtnet_del_vqs(vi);
4888}
4889
4890static void virtnet_remove(struct virtio_device *vdev)
4891{
4892	struct virtnet_info *vi = vdev->priv;
4893
4894	virtnet_cpu_notif_remove(vi);
4895
4896	/* Make sure no work handler is accessing the device. */
4897	flush_work(&vi->config_work);
4898
4899	unregister_netdev(vi->dev);
4900
4901	net_failover_destroy(vi->failover);
4902
4903	remove_vq_common(vi);
4904
 
 
4905	free_netdev(vi->dev);
4906}
4907
4908static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
4909{
4910	struct virtnet_info *vi = vdev->priv;
4911
4912	virtnet_cpu_notif_remove(vi);
4913	virtnet_freeze_down(vdev);
4914	remove_vq_common(vi);
4915
4916	return 0;
4917}
4918
4919static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
4920{
4921	struct virtnet_info *vi = vdev->priv;
4922	int err;
4923
4924	err = virtnet_restore_up(vdev);
4925	if (err)
4926		return err;
4927	virtnet_set_queues(vi, vi->curr_queue_pairs);
4928
4929	err = virtnet_cpu_notif_add(vi);
4930	if (err) {
4931		virtnet_freeze_down(vdev);
4932		remove_vq_common(vi);
4933		return err;
4934	}
4935
4936	return 0;
4937}
4938
4939static struct virtio_device_id id_table[] = {
4940	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
4941	{ 0 },
4942};
4943
4944#define VIRTNET_FEATURES \
4945	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
4946	VIRTIO_NET_F_MAC, \
4947	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
4948	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
4949	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
4950	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
4951	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
4952	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
4953	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
4954	VIRTIO_NET_F_CTRL_MAC_ADDR, \
4955	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
4956	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
4957	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
4958	VIRTIO_NET_F_VQ_NOTF_COAL, \
4959	VIRTIO_NET_F_GUEST_HDRLEN
4960
4961static unsigned int features[] = {
4962	VIRTNET_FEATURES,
4963};
4964
4965static unsigned int features_legacy[] = {
4966	VIRTNET_FEATURES,
4967	VIRTIO_NET_F_GSO,
4968	VIRTIO_F_ANY_LAYOUT,
4969};
4970
4971static struct virtio_driver virtio_net_driver = {
4972	.feature_table = features,
4973	.feature_table_size = ARRAY_SIZE(features),
4974	.feature_table_legacy = features_legacy,
4975	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
4976	.driver.name =	KBUILD_MODNAME,
4977	.driver.owner =	THIS_MODULE,
4978	.id_table =	id_table,
4979	.validate =	virtnet_validate,
4980	.probe =	virtnet_probe,
4981	.remove =	virtnet_remove,
4982	.config_changed = virtnet_config_changed,
4983#ifdef CONFIG_PM_SLEEP
4984	.freeze =	virtnet_freeze,
4985	.restore =	virtnet_restore,
4986#endif
4987};
4988
4989static __init int virtio_net_driver_init(void)
4990{
4991	int ret;
4992
4993	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
4994				      virtnet_cpu_online,
4995				      virtnet_cpu_down_prep);
4996	if (ret < 0)
4997		goto out;
4998	virtionet_online = ret;
4999	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
5000				      NULL, virtnet_cpu_dead);
5001	if (ret)
5002		goto err_dead;
5003	ret = register_virtio_driver(&virtio_net_driver);
5004	if (ret)
5005		goto err_virtio;
5006	return 0;
5007err_virtio:
5008	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
5009err_dead:
5010	cpuhp_remove_multi_state(virtionet_online);
5011out:
5012	return ret;
5013}
5014module_init(virtio_net_driver_init);
5015
5016static __exit void virtio_net_driver_exit(void)
5017{
5018	unregister_virtio_driver(&virtio_net_driver);
5019	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
5020	cpuhp_remove_multi_state(virtionet_online);
5021}
5022module_exit(virtio_net_driver_exit);
5023
5024MODULE_DEVICE_TABLE(virtio, id_table);
5025MODULE_DESCRIPTION("Virtio network driver");
5026MODULE_LICENSE("GPL");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* A network driver using virtio.
   3 *
   4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
   5 */
   6//#define DEBUG
   7#include <linux/netdevice.h>
   8#include <linux/etherdevice.h>
   9#include <linux/ethtool.h>
  10#include <linux/module.h>
  11#include <linux/virtio.h>
  12#include <linux/virtio_net.h>
  13#include <linux/bpf.h>
  14#include <linux/bpf_trace.h>
  15#include <linux/scatterlist.h>
  16#include <linux/if_vlan.h>
  17#include <linux/slab.h>
  18#include <linux/cpu.h>
  19#include <linux/average.h>
  20#include <linux/filter.h>
  21#include <linux/kernel.h>
  22#include <linux/dim.h>
  23#include <net/route.h>
  24#include <net/xdp.h>
  25#include <net/net_failover.h>
  26#include <net/netdev_rx_queue.h>
  27#include <net/netdev_queues.h>
  28#include <net/xdp_sock_drv.h>
  29
  30static int napi_weight = NAPI_POLL_WEIGHT;
  31module_param(napi_weight, int, 0444);
  32
  33static bool csum = true, gso = true, napi_tx = true;
  34module_param(csum, bool, 0444);
  35module_param(gso, bool, 0444);
  36module_param(napi_tx, bool, 0644);
  37
  38/* FIXME: MTU in config. */
  39#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
  40#define GOOD_COPY_LEN	128
  41
  42#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
  43
 
 
 
  44/* Separating two types of XDP xmit */
  45#define VIRTIO_XDP_TX		BIT(0)
  46#define VIRTIO_XDP_REDIR	BIT(1)
  47
 
 
  48/* RX packet size EWMA. The average packet size is used to determine the packet
  49 * buffer size when refilling RX rings. As the entire RX ring may be refilled
  50 * at once, the weight is chosen so that the EWMA will be insensitive to short-
  51 * term, transient changes in packet size.
  52 */
  53DECLARE_EWMA(pkt_len, 0, 64)
  54
  55#define VIRTNET_DRIVER_VERSION "1.0.0"
  56
  57static const unsigned long guest_offloads[] = {
  58	VIRTIO_NET_F_GUEST_TSO4,
  59	VIRTIO_NET_F_GUEST_TSO6,
  60	VIRTIO_NET_F_GUEST_ECN,
  61	VIRTIO_NET_F_GUEST_UFO,
  62	VIRTIO_NET_F_GUEST_CSUM,
  63	VIRTIO_NET_F_GUEST_USO4,
  64	VIRTIO_NET_F_GUEST_USO6,
  65	VIRTIO_NET_F_GUEST_HDRLEN
  66};
  67
  68#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
  69				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
  70				(1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
  71				(1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
  72				(1ULL << VIRTIO_NET_F_GUEST_USO4) | \
  73				(1ULL << VIRTIO_NET_F_GUEST_USO6))
  74
  75struct virtnet_stat_desc {
  76	char desc[ETH_GSTRING_LEN];
  77	size_t offset;
  78	size_t qstat_offset;
  79};
  80
  81struct virtnet_sq_free_stats {
  82	u64 packets;
  83	u64 bytes;
  84	u64 napi_packets;
  85	u64 napi_bytes;
  86	u64 xsk;
  87};
  88
  89struct virtnet_sq_stats {
  90	struct u64_stats_sync syncp;
  91	u64_stats_t packets;
  92	u64_stats_t bytes;
  93	u64_stats_t xdp_tx;
  94	u64_stats_t xdp_tx_drops;
  95	u64_stats_t kicks;
  96	u64_stats_t tx_timeouts;
  97	u64_stats_t stop;
  98	u64_stats_t wake;
  99};
 100
 101struct virtnet_rq_stats {
 102	struct u64_stats_sync syncp;
 103	u64_stats_t packets;
 104	u64_stats_t bytes;
 105	u64_stats_t drops;
 106	u64_stats_t xdp_packets;
 107	u64_stats_t xdp_tx;
 108	u64_stats_t xdp_redirects;
 109	u64_stats_t xdp_drops;
 110	u64_stats_t kicks;
 111};
 112
 113#define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
 114#define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
 115
 116#define VIRTNET_SQ_STAT_QSTAT(name, m)				\
 117	{							\
 118		name,						\
 119		offsetof(struct virtnet_sq_stats, m),		\
 120		offsetof(struct netdev_queue_stats_tx, m),	\
 121	}
 122
 123#define VIRTNET_RQ_STAT_QSTAT(name, m)				\
 124	{							\
 125		name,						\
 126		offsetof(struct virtnet_rq_stats, m),		\
 127		offsetof(struct netdev_queue_stats_rx, m),	\
 128	}
 129
 130static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
 131	VIRTNET_SQ_STAT("xdp_tx",       xdp_tx),
 132	VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
 133	VIRTNET_SQ_STAT("kicks",        kicks),
 134	VIRTNET_SQ_STAT("tx_timeouts",  tx_timeouts),
 
 
 135};
 136
 137static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
 138	VIRTNET_RQ_STAT("drops",         drops),
 139	VIRTNET_RQ_STAT("xdp_packets",   xdp_packets),
 140	VIRTNET_RQ_STAT("xdp_tx",        xdp_tx),
 141	VIRTNET_RQ_STAT("xdp_redirects", xdp_redirects),
 142	VIRTNET_RQ_STAT("xdp_drops",     xdp_drops),
 143	VIRTNET_RQ_STAT("kicks",         kicks),
 144};
 145
 146static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
 147	VIRTNET_SQ_STAT_QSTAT("packets", packets),
 148	VIRTNET_SQ_STAT_QSTAT("bytes",   bytes),
 149	VIRTNET_SQ_STAT_QSTAT("stop",	 stop),
 150	VIRTNET_SQ_STAT_QSTAT("wake",	 wake),
 151};
 152
 153static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
 154	VIRTNET_RQ_STAT_QSTAT("packets", packets),
 155	VIRTNET_RQ_STAT_QSTAT("bytes",   bytes),
 156};
 157
 158#define VIRTNET_STATS_DESC_CQ(name) \
 159	{#name, offsetof(struct virtio_net_stats_cvq, name), -1}
 160
 161#define VIRTNET_STATS_DESC_RX(class, name) \
 162	{#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
 163
 164#define VIRTNET_STATS_DESC_TX(class, name) \
 165	{#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
 166
 167
 168static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
 169	VIRTNET_STATS_DESC_CQ(command_num),
 170	VIRTNET_STATS_DESC_CQ(ok_num),
 171};
 172
 173static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
 174	VIRTNET_STATS_DESC_RX(basic, packets),
 175	VIRTNET_STATS_DESC_RX(basic, bytes),
 176
 177	VIRTNET_STATS_DESC_RX(basic, notifications),
 178	VIRTNET_STATS_DESC_RX(basic, interrupts),
 179};
 180
 181static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
 182	VIRTNET_STATS_DESC_TX(basic, packets),
 183	VIRTNET_STATS_DESC_TX(basic, bytes),
 184
 185	VIRTNET_STATS_DESC_TX(basic, notifications),
 186	VIRTNET_STATS_DESC_TX(basic, interrupts),
 187};
 188
 189static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
 190	VIRTNET_STATS_DESC_RX(csum, needs_csum),
 191};
 192
 193static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
 194	VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
 195	VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
 196};
 197
 198static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
 199	VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
 200};
 201
 202static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
 203	VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
 204};
 205
 206#define VIRTNET_STATS_DESC_RX_QSTAT(class, name, qstat_field)			\
 207	{									\
 208		#name,								\
 209		offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name),	\
 210		offsetof(struct netdev_queue_stats_rx, qstat_field),		\
 211	}
 212
 213#define VIRTNET_STATS_DESC_TX_QSTAT(class, name, qstat_field)			\
 214	{									\
 215		#name,								\
 216		offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name),	\
 217		offsetof(struct netdev_queue_stats_tx, qstat_field),		\
 218	}
 219
 220static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc_qstat[] = {
 221	VIRTNET_STATS_DESC_RX_QSTAT(basic, drops,         hw_drops),
 222	VIRTNET_STATS_DESC_RX_QSTAT(basic, drop_overruns, hw_drop_overruns),
 223};
 224
 225static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc_qstat[] = {
 226	VIRTNET_STATS_DESC_TX_QSTAT(basic, drops,          hw_drops),
 227	VIRTNET_STATS_DESC_TX_QSTAT(basic, drop_malformed, hw_drop_errors),
 228};
 229
 230static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc_qstat[] = {
 231	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary),
 232	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none,  csum_none),
 233	VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad,   csum_bad),
 234};
 235
 236static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc_qstat[] = {
 237	VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none,  csum_none),
 238	VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum),
 239};
 240
 241static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc_qstat[] = {
 242	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets,           hw_gro_packets),
 243	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes,             hw_gro_bytes),
 244	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets),
 245	VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced,   hw_gro_wire_bytes),
 246};
 247
 248static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc_qstat[] = {
 249	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets,        hw_gso_packets),
 250	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes,          hw_gso_bytes),
 251	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments,       hw_gso_wire_packets),
 252	VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes),
 253};
 254
 255static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc_qstat[] = {
 256	VIRTNET_STATS_DESC_RX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
 257};
 258
 259static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
 260	VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
 261};
 262
 263#define VIRTNET_Q_TYPE_RX 0
 264#define VIRTNET_Q_TYPE_TX 1
 265#define VIRTNET_Q_TYPE_CQ 2
 266
 267struct virtnet_interrupt_coalesce {
 268	u32 max_packets;
 269	u32 max_usecs;
 270};
 271
 272/* The dma information of pages allocated at a time. */
 273struct virtnet_rq_dma {
 274	dma_addr_t addr;
 275	u32 ref;
 276	u16 len;
 277	u16 need_sync;
 278};
 279
 280/* Internal representation of a send virtqueue */
 281struct send_queue {
 282	/* Virtqueue associated with this send _queue */
 283	struct virtqueue *vq;
 284
 285	/* TX: fragments + linear part + virtio header */
 286	struct scatterlist sg[MAX_SKB_FRAGS + 2];
 287
 288	/* Name of the send queue: output.$index */
 289	char name[16];
 290
 291	struct virtnet_sq_stats stats;
 292
 293	struct virtnet_interrupt_coalesce intr_coal;
 294
 295	struct napi_struct napi;
 296
 297	/* Record whether sq is in reset state. */
 298	bool reset;
 299
 300	struct xsk_buff_pool *xsk_pool;
 301
 302	dma_addr_t xsk_hdr_dma_addr;
 303};
 304
 305/* Internal representation of a receive virtqueue */
 306struct receive_queue {
 307	/* Virtqueue associated with this receive_queue */
 308	struct virtqueue *vq;
 309
 310	struct napi_struct napi;
 311
 312	struct bpf_prog __rcu *xdp_prog;
 313
 314	struct virtnet_rq_stats stats;
 315
 316	/* The number of rx notifications */
 317	u16 calls;
 318
 319	/* Is dynamic interrupt moderation enabled? */
 320	bool dim_enabled;
 321
 322	/* Used to protect dim_enabled and inter_coal */
 323	struct mutex dim_lock;
 324
 325	/* Dynamic Interrupt Moderation */
 326	struct dim dim;
 327
 328	u32 packets_in_napi;
 329
 330	struct virtnet_interrupt_coalesce intr_coal;
 331
 332	/* Chain pages by the private ptr. */
 333	struct page *pages;
 334
 335	/* Average packet length for mergeable receive buffers. */
 336	struct ewma_pkt_len mrg_avg_pkt_len;
 337
 338	/* Page frag for packet buffer allocation. */
 339	struct page_frag alloc_frag;
 340
 341	/* RX: fragments + linear part + virtio header */
 342	struct scatterlist sg[MAX_SKB_FRAGS + 2];
 343
 344	/* Min single buffer size for mergeable buffers case. */
 345	unsigned int min_buf_len;
 346
 347	/* Name of this receive queue: input.$index */
 348	char name[16];
 349
 350	struct xdp_rxq_info xdp_rxq;
 351
 352	/* Record the last dma info to free after new pages is allocated. */
 353	struct virtnet_rq_dma *last_dma;
 354
 355	struct xsk_buff_pool *xsk_pool;
 356
 357	/* xdp rxq used by xsk */
 358	struct xdp_rxq_info xsk_rxq_info;
 359
 360	struct xdp_buff **xsk_buffs;
 361};
 362
 363/* This structure can contain rss message with maximum settings for indirection table and keysize
 364 * Note, that default structure that describes RSS configuration virtio_net_rss_config
 365 * contains same info but can't handle table values.
 366 * In any case, structure would be passed to virtio hw through sg_buf split by parts
 367 * because table sizes may be differ according to the device configuration.
 368 */
 369#define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
 
 370struct virtio_net_ctrl_rss {
 371	u32 hash_types;
 372	u16 indirection_table_mask;
 373	u16 unclassified_queue;
 374	u16 hash_cfg_reserved; /* for HASH_CONFIG (see virtio_net_hash_config for details) */
 375	u16 max_tx_vq;
 376	u8 hash_key_length;
 377	u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
 378
 379	u16 *indirection_table;
 380};
 381
 382/* Control VQ buffers: protected by the rtnl lock */
 383struct control_buf {
 384	struct virtio_net_ctrl_hdr hdr;
 385	virtio_net_ctrl_ack status;
 386};
 387
 388struct virtnet_info {
 389	struct virtio_device *vdev;
 390	struct virtqueue *cvq;
 391	struct net_device *dev;
 392	struct send_queue *sq;
 393	struct receive_queue *rq;
 394	unsigned int status;
 395
 396	/* Max # of queue pairs supported by the device */
 397	u16 max_queue_pairs;
 398
 399	/* # of queue pairs currently used by the driver */
 400	u16 curr_queue_pairs;
 401
 402	/* # of XDP queue pairs currently used by the driver */
 403	u16 xdp_queue_pairs;
 404
 405	/* xdp_queue_pairs may be 0 even when XDP is loaded, so track the XDP state with this flag. */
 406	bool xdp_enabled;
 407
 408	/* I like... big packets and I cannot lie! */
 409	bool big_packets;
 410
 411	/* number of sg entries allocated for big packets */
 412	unsigned int big_packets_num_skbfrags;
 413
 414	/* Host will merge rx buffers for big packets (shake it! shake it!) */
 415	bool mergeable_rx_bufs;
 416
 417	/* Host supports rss and/or hash report */
 418	bool has_rss;
 419	bool has_rss_hash_report;
 420	u8 rss_key_size;
 421	u16 rss_indir_table_size;
 422	u32 rss_hash_types_supported;
 423	u32 rss_hash_types_saved;
 424	struct virtio_net_ctrl_rss rss;
 425
 426	/* Has control virtqueue */
 427	bool has_cvq;
 428
 429	/* Lock to protect the control VQ */
 430	struct mutex cvq_lock;
 431
 432	/* Host can handle any s/g split between our header and packet data */
 433	bool any_header_sg;
 434
 435	/* Packet virtio header size */
 436	u8 hdr_len;
 437
 438	/* Work struct for delayed refilling if we run low on memory. */
 439	struct delayed_work refill;
 440
 441	/* Is delayed refill enabled? */
 442	bool refill_enabled;
 443
 444	/* The lock to synchronize the access to refill_enabled */
 445	spinlock_t refill_lock;
 446
 447	/* Work struct for config space updates */
 448	struct work_struct config_work;
 449
 450	/* Work struct for setting rx mode */
 451	struct work_struct rx_mode_work;
 452
 453	/* OK to queue work setting RX mode? */
 454	bool rx_mode_work_enabled;
 455
 456	/* Is the affinity hint set for the virtqueues? */
 457	bool affinity_hint_set;
 458
 459	/* CPU hotplug instances for online & dead */
 460	struct hlist_node node;
 461	struct hlist_node node_dead;
 462
 463	struct control_buf *ctrl;
 464
 465	/* Ethtool settings */
 466	u8 duplex;
 467	u32 speed;
 468
 469	/* Is rx dynamic interrupt moderation enabled? */
 470	bool rx_dim_enabled;
 471
 472	/* Interrupt coalescing settings */
 473	struct virtnet_interrupt_coalesce intr_coal_tx;
 474	struct virtnet_interrupt_coalesce intr_coal_rx;
 475
 476	unsigned long guest_offloads;
 477	unsigned long guest_offloads_capable;
 478
 479	/* failover when STANDBY feature enabled */
 480	struct failover *failover;
 481
 482	u64 device_stats_cap;
 483};
 484
 485struct padded_vnet_hdr {
 486	struct virtio_net_hdr_v1_hash hdr;
 487	/*
 488	 * hdr is in a separate sg buffer, and the data sg buffer shares the
 489	 * same page with this header sg. This padding makes the next sg
 490	 * 16-byte aligned after the header.
 491	 */
 492	char padding[12];
 493};
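/* For reference (an illustrative note, assuming virtio_net_hdr_v1_hash is the
 * 20-byte header defined by the virtio spec): 20 bytes of header plus 12 bytes
 * of padding give a 32-byte header area, so the data sg that follows starts
 * 16-byte aligned, as the comment inside the struct requires.
 */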
 494
 495struct virtio_net_common_hdr {
 496	union {
 497		struct virtio_net_hdr hdr;
 498		struct virtio_net_hdr_mrg_rxbuf	mrg_hdr;
 499		struct virtio_net_hdr_v1_hash hash_v1_hdr;
 500	};
 501};
 502
 503static struct virtio_net_common_hdr xsk_hdr;
 504
 505static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 506static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
 507static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
 508			       struct net_device *dev,
 509			       unsigned int *xdp_xmit,
 510			       struct virtnet_rq_stats *stats);
 511static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
 512				 struct sk_buff *skb, u8 flags);
 513static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
 514					       struct sk_buff *curr_skb,
 515					       struct page *page, void *buf,
 516					       int len, int truesize);
 517static void virtnet_xsk_completed(struct send_queue *sq, int num);
 518
 519enum virtnet_xmit_type {
 520	VIRTNET_XMIT_TYPE_SKB,
 521	VIRTNET_XMIT_TYPE_SKB_ORPHAN,
 522	VIRTNET_XMIT_TYPE_XDP,
 523	VIRTNET_XMIT_TYPE_XSK,
 524};
 525
 526static int rss_indirection_table_alloc(struct virtio_net_ctrl_rss *rss, u16 indir_table_size)
 527{
 528	if (!indir_table_size) {
 529		rss->indirection_table = NULL;
 530		return 0;
 531	}
 532
 533	rss->indirection_table = kmalloc_array(indir_table_size, sizeof(u16), GFP_KERNEL);
 534	if (!rss->indirection_table)
 535		return -ENOMEM;
 536
 537	return 0;
 538}
 539
 540static void rss_indirection_table_free(struct virtio_net_ctrl_rss *rss)
 541{
 542	kfree(rss->indirection_table);
 543}
 544
 545/* We use the last two bits of the pointer to distinguish the xmit type. */
 546#define VIRTNET_XMIT_TYPE_MASK (BIT(0) | BIT(1))
 547
 548#define VIRTIO_XSK_FLAG_OFFSET 2
 549
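/* Illustrative sketch of the packing scheme implemented below (not extra
 * driver logic): an skb or xdp_frame pointer is stored as
 * (unsigned long)ptr | type, relying on the pointer being at least 4-byte
 * aligned so the low two bits are free for the xmit type.  XSK completions
 * store no pointer at all; the descriptor length is encoded as
 * (len << VIRTIO_XSK_FLAG_OFFSET) | VIRTNET_XMIT_TYPE_XSK and recovered by
 * virtnet_ptr_to_xsk_buff_len() below.
 */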
 550static enum virtnet_xmit_type virtnet_xmit_ptr_unpack(void **ptr)
 551{
 552	unsigned long p = (unsigned long)*ptr;
 553
 554	*ptr = (void *)(p & ~VIRTNET_XMIT_TYPE_MASK);
 555
 556	return p & VIRTNET_XMIT_TYPE_MASK;
 557}
 558
 559static void *virtnet_xmit_ptr_pack(void *ptr, enum virtnet_xmit_type type)
 560{
 561	return (void *)((unsigned long)ptr | type);
 562}
 563
 564static int virtnet_add_outbuf(struct send_queue *sq, int num, void *data,
 565			      enum virtnet_xmit_type type)
 566{
 567	return virtqueue_add_outbuf(sq->vq, sq->sg, num,
 568				    virtnet_xmit_ptr_pack(data, type),
 569				    GFP_ATOMIC);
 570}
 571
 572static u32 virtnet_ptr_to_xsk_buff_len(void *ptr)
 573{
 574	return ((unsigned long)ptr) >> VIRTIO_XSK_FLAG_OFFSET;
 575}
 576
 577static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
 578{
 579	sg_dma_address(sg) = addr;
 580	sg_dma_len(sg) = len;
 581}
 582
 583static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
 584			    bool in_napi, struct virtnet_sq_free_stats *stats)
 585{
 586	struct xdp_frame *frame;
 587	struct sk_buff *skb;
 588	unsigned int len;
 589	void *ptr;
 590
 591	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
 592		switch (virtnet_xmit_ptr_unpack(&ptr)) {
 593		case VIRTNET_XMIT_TYPE_SKB:
 594			skb = ptr;
 595
 596			pr_debug("Sent skb %p\n", skb);
 597			stats->napi_packets++;
 598			stats->napi_bytes += skb->len;
 599			napi_consume_skb(skb, in_napi);
 600			break;
 601
 602		case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
 603			skb = ptr;
 604
 605			stats->packets++;
 606			stats->bytes += skb->len;
 607			napi_consume_skb(skb, in_napi);
 608			break;
 609
 610		case VIRTNET_XMIT_TYPE_XDP:
 611			frame = ptr;
 612
 613			stats->packets++;
 614			stats->bytes += xdp_get_frame_len(frame);
 615			xdp_return_frame(frame);
 616			break;
 617
 618		case VIRTNET_XMIT_TYPE_XSK:
 619			stats->bytes += virtnet_ptr_to_xsk_buff_len(ptr);
 620			stats->xsk++;
 621			break;
 622		}
 623	}
 624	netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
 625}
 626
 627static void virtnet_free_old_xmit(struct send_queue *sq,
 628				  struct netdev_queue *txq,
 629				  bool in_napi,
 630				  struct virtnet_sq_free_stats *stats)
 631{
 632	__free_old_xmit(sq, txq, in_napi, stats);
 633
 634	if (stats->xsk)
 635		virtnet_xsk_completed(sq, stats->xsk);
 636}
 637
 638/* Converting between virtqueue no. and kernel tx/rx queue no.
 639 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 640 */
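/* Worked example (illustrative): virtqueue index 5 is tx queue
 * (5 - 1) / 2 = 2, and tx queue 2 maps back to virtqueue 2 * 2 + 1 = 5;
 * rx queue 2 uses virtqueue 2 * 2 = 4.
 */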
 641static int vq2txq(struct virtqueue *vq)
 642{
 643	return (vq->index - 1) / 2;
 644}
 645
 646static int txq2vq(int txq)
 647{
 648	return txq * 2 + 1;
 649}
 650
 651static int vq2rxq(struct virtqueue *vq)
 652{
 653	return vq->index / 2;
 654}
 655
 656static int rxq2vq(int rxq)
 657{
 658	return rxq * 2;
 659}
 660
 661static int vq_type(struct virtnet_info *vi, int qid)
 662{
 663	if (qid == vi->max_queue_pairs * 2)
 664		return VIRTNET_Q_TYPE_CQ;
 665
 666	if (qid % 2)
 667		return VIRTNET_Q_TYPE_TX;
 668
 669	return VIRTNET_Q_TYPE_RX;
 670}
 671
 672static inline struct virtio_net_common_hdr *
 673skb_vnet_common_hdr(struct sk_buff *skb)
 674{
 675	return (struct virtio_net_common_hdr *)skb->cb;
 676}
 677
 678/*
 679 * page->private is used to chain pages for big packets; put the whole
 680 * most recently used list at the front of rq->pages for reuse.
 681 */
 682static void give_pages(struct receive_queue *rq, struct page *page)
 683{
 684	struct page *end;
 685
 686	/* Find end of list, sew whole thing into vi->rq.pages. */
 687	for (end = page; end->private; end = (struct page *)end->private);
 688	end->private = (unsigned long)rq->pages;
 689	rq->pages = page;
 690}
 691
 692static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
 693{
 694	struct page *p = rq->pages;
 695
 696	if (p) {
 697		rq->pages = (struct page *)p->private;
 698		/* clear private here, it is used to chain pages */
 699		p->private = 0;
 700	} else
 701		p = alloc_page(gfp_mask);
 702	return p;
 703}
 704
 705static void virtnet_rq_free_buf(struct virtnet_info *vi,
 706				struct receive_queue *rq, void *buf)
 707{
 708	if (vi->mergeable_rx_bufs)
 709		put_page(virt_to_head_page(buf));
 710	else if (vi->big_packets)
 711		give_pages(rq, buf);
 712	else
 713		put_page(virt_to_head_page(buf));
 714}
 715
 716static void enable_delayed_refill(struct virtnet_info *vi)
 717{
 718	spin_lock_bh(&vi->refill_lock);
 719	vi->refill_enabled = true;
 720	spin_unlock_bh(&vi->refill_lock);
 721}
 722
 723static void disable_delayed_refill(struct virtnet_info *vi)
 724{
 725	spin_lock_bh(&vi->refill_lock);
 726	vi->refill_enabled = false;
 727	spin_unlock_bh(&vi->refill_lock);
 728}
 729
 730static void enable_rx_mode_work(struct virtnet_info *vi)
 731{
 732	rtnl_lock();
 733	vi->rx_mode_work_enabled = true;
 734	rtnl_unlock();
 735}
 736
 737static void disable_rx_mode_work(struct virtnet_info *vi)
 738{
 739	rtnl_lock();
 740	vi->rx_mode_work_enabled = false;
 741	rtnl_unlock();
 742}
 743
 744static void virtqueue_napi_schedule(struct napi_struct *napi,
 745				    struct virtqueue *vq)
 746{
 747	if (napi_schedule_prep(napi)) {
 748		virtqueue_disable_cb(vq);
 749		__napi_schedule(napi);
 750	}
 751}
 752
 753static bool virtqueue_napi_complete(struct napi_struct *napi,
 754				    struct virtqueue *vq, int processed)
 755{
 756	int opaque;
 757
 758	opaque = virtqueue_enable_cb_prepare(vq);
 759	if (napi_complete_done(napi, processed)) {
 760		if (unlikely(virtqueue_poll(vq, opaque)))
 761			virtqueue_napi_schedule(napi, vq);
 762		else
 763			return true;
 764	} else {
 765		virtqueue_disable_cb(vq);
 766	}
 767
 768	return false;
 769}
 770
 771static void skb_xmit_done(struct virtqueue *vq)
 772{
 773	struct virtnet_info *vi = vq->vdev->priv;
 774	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
 775
 776	/* Suppress further interrupts. */
 777	virtqueue_disable_cb(vq);
 778
 779	if (napi->weight)
 780		virtqueue_napi_schedule(napi, vq);
 781	else
 782		/* We were probably waiting for more output buffers. */
 783		netif_wake_subqueue(vi->dev, vq2txq(vq));
 784}
 785
 786#define MRG_CTX_HEADER_SHIFT 22
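/* The mergeable receive context packs two values into one pointer-sized
 * word: truesize in the low MRG_CTX_HEADER_SHIFT bits and headroom above
 * them.  For example (illustrative numbers only), truesize 1536 with
 * headroom 256 is encoded as (256 << 22) | 1536 and decoded by the two
 * helpers below.
 */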
 787static void *mergeable_len_to_ctx(unsigned int truesize,
 788				  unsigned int headroom)
 789{
 790	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
 791}
 792
 793static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
 794{
 795	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
 796}
 797
 798static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
 799{
 800	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
 801}
 802
 803static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
 804					 unsigned int headroom,
 805					 unsigned int len)
 806{
 807	struct sk_buff *skb;
 808
 809	skb = build_skb(buf, buflen);
 810	if (unlikely(!skb))
 811		return NULL;
 812
 813	skb_reserve(skb, headroom);
 814	skb_put(skb, len);
 815
 816	return skb;
 817}
 818
 819/* Called from bottom half context */
 820static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 821				   struct receive_queue *rq,
 822				   struct page *page, unsigned int offset,
 823				   unsigned int len, unsigned int truesize,
 824				   unsigned int headroom)
 825{
 826	struct sk_buff *skb;
 827	struct virtio_net_common_hdr *hdr;
 828	unsigned int copy, hdr_len, hdr_padded_len;
 829	struct page *page_to_free = NULL;
 830	int tailroom, shinfo_size;
 831	char *p, *hdr_p, *buf;
 832
 833	p = page_address(page) + offset;
 834	hdr_p = p;
 835
 836	hdr_len = vi->hdr_len;
 837	if (vi->mergeable_rx_bufs)
 838		hdr_padded_len = hdr_len;
 839	else
 840		hdr_padded_len = sizeof(struct padded_vnet_hdr);
 841
 842	buf = p - headroom;
 843	len -= hdr_len;
 844	offset += hdr_padded_len;
 845	p += hdr_padded_len;
 846	tailroom = truesize - headroom  - hdr_padded_len - len;
 847
 848	shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 849
 850	if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
 851		skb = virtnet_build_skb(buf, truesize, p - buf, len);
 852		if (unlikely(!skb))
 853			return NULL;
 854
 855		page = (struct page *)page->private;
 856		if (page)
 857			give_pages(rq, page);
 858		goto ok;
 859	}
 860
 861	/* copy small packet so we can reuse these pages for small data */
 862	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
 863	if (unlikely(!skb))
 864		return NULL;
 865
 866	/* Copy the whole frame if it fits in skb->head; otherwise
 867	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
 868	 */
 869	if (len <= skb_tailroom(skb))
 870		copy = len;
 871	else
 872		copy = ETH_HLEN;
 873	skb_put_data(skb, p, copy);
 874
 875	len -= copy;
 876	offset += copy;
 877
 878	if (vi->mergeable_rx_bufs) {
 879		if (len)
 880			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 881		else
 882			page_to_free = page;
 883		goto ok;
 884	}
 885
 886	/*
 887	 * Verify that we can indeed put this data into a skb.
 888	 * This is here to handle cases when the device erroneously
 889	 * tries to receive more than is possible. This is usually
 890	 * the case of a broken device.
 891	 */
 892	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
 893		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
 894		dev_kfree_skb(skb);
 895		return NULL;
 896	}
 897	BUG_ON(offset >= PAGE_SIZE);
 898	while (len) {
 899		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
 900		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
 901				frag_size, truesize);
 902		len -= frag_size;
 903		page = (struct page *)page->private;
 904		offset = 0;
 905	}
 906
 907	if (page)
 908		give_pages(rq, page);
 909
 910ok:
 911	hdr = skb_vnet_common_hdr(skb);
 912	memcpy(hdr, hdr_p, hdr_len);
 913	if (page_to_free)
 914		put_page(page_to_free);
 915
 916	return skb;
 917}
 918
 919static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
 920{
 921	struct virtnet_info *vi = rq->vq->vdev->priv;
 922	struct page *page = virt_to_head_page(buf);
 923	struct virtnet_rq_dma *dma;
 924	void *head;
 925	int offset;
 926
 927	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
 928
 929	head = page_address(page);
 930
 931	dma = head;
 932
 933	--dma->ref;
 934
 935	if (dma->need_sync && len) {
 936		offset = buf - (head + sizeof(*dma));
 937
 938		virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
 939							offset, len,
 940							DMA_FROM_DEVICE);
 941	}
 942
 943	if (dma->ref)
 944		return;
 945
 946	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
 947					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 948	put_page(page);
 949}
 950
 951static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
 952{
 953	struct virtnet_info *vi = rq->vq->vdev->priv;
 954	void *buf;
 955
 956	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
 957
 958	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
 959	if (buf)
 960		virtnet_rq_unmap(rq, buf, *len);
 961
 962	return buf;
 963}
 964
 965static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
 966{
 967	struct virtnet_info *vi = rq->vq->vdev->priv;
 968	struct virtnet_rq_dma *dma;
 969	dma_addr_t addr;
 970	u32 offset;
 971	void *head;
 972
 973	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
 974
 975	head = page_address(rq->alloc_frag.page);
 976
 977	offset = buf - head;
 978
 979	dma = head;
 980
 981	addr = dma->addr - sizeof(*dma) + offset;
 982
 983	sg_init_table(rq->sg, 1);
 984	sg_fill_dma(rq->sg, addr, len);
 985}
 986
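/* A sketch of the buffer layout that virtnet_rq_alloc() below implements:
 * each page_frag page starts with a struct virtnet_rq_dma recording the DMA
 * mapping of the rest of the page; receive buffers are carved out after it,
 * and every carved buffer holds a reference in dma->ref, so the mapping is
 * only torn down in virtnet_rq_unmap() once the last buffer is returned.
 */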
 987static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
 988{
 989	struct page_frag *alloc_frag = &rq->alloc_frag;
 990	struct virtnet_info *vi = rq->vq->vdev->priv;
 991	struct virtnet_rq_dma *dma;
 992	void *buf, *head;
 993	dma_addr_t addr;
 994
 995	BUG_ON(vi->big_packets && !vi->mergeable_rx_bufs);
 996
 997	head = page_address(alloc_frag->page);
 998
 999	dma = head;
1000
1001	/* new pages */
1002	if (!alloc_frag->offset) {
1003		if (rq->last_dma) {
1004			/* Now that the new page is allocated, the last dma
1005			 * will no longer be used, so it can be unmapped
1006			 * once its ref drops to 0.
1007			 */
1008			virtnet_rq_unmap(rq, rq->last_dma, 0);
1009			rq->last_dma = NULL;
1010		}
1011
1012		dma->len = alloc_frag->size - sizeof(*dma);
1013
1014		addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
1015						      dma->len, DMA_FROM_DEVICE, 0);
1016		if (virtqueue_dma_mapping_error(rq->vq, addr))
1017			return NULL;
1018
1019		dma->addr = addr;
1020		dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
1021
1022		/* Add a reference to dma to prevent the entire dma from
1023		 * being released during error handling. This reference
1024		 * will be freed after the pages are no longer used.
1025		 */
1026		get_page(alloc_frag->page);
1027		dma->ref = 1;
1028		alloc_frag->offset = sizeof(*dma);
1029
1030		rq->last_dma = dma;
1031	}
1032
1033	++dma->ref;
1034
1035	buf = head + alloc_frag->offset;
1036
1037	get_page(alloc_frag->page);
1038	alloc_frag->offset += size;
1039
1040	return buf;
1041}
1042
1043static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
1044{
1045	struct virtnet_info *vi = vq->vdev->priv;
1046	struct receive_queue *rq;
1047	int i = vq2rxq(vq);
1048
1049	rq = &vi->rq[i];
1050
1051	if (rq->xsk_pool) {
1052		xsk_buff_free((struct xdp_buff *)buf);
1053		return;
1054	}
1055
1056	if (!vi->big_packets || vi->mergeable_rx_bufs)
1057		virtnet_rq_unmap(rq, buf, 0);
1058
1059	virtnet_rq_free_buf(vi, rq, buf);
1060}
1061
1062static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
1063			  bool in_napi)
1064{
1065	struct virtnet_sq_free_stats stats = {0};
1066
1067	virtnet_free_old_xmit(sq, txq, in_napi, &stats);
1068
1069	/* Avoid the overhead when no packets have been processed;
1070	 * this happens when called speculatively from start_xmit.
1071	 */
1072	if (!stats.packets && !stats.napi_packets)
1073		return;
1074
1075	u64_stats_update_begin(&sq->stats.syncp);
1076	u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes);
1077	u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets);
1078	u64_stats_update_end(&sq->stats.syncp);
1079}
1080
1081static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1082{
1083	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1084		return false;
1085	else if (q < vi->curr_queue_pairs)
1086		return true;
1087	else
1088		return false;
1089}
1090
1091static void check_sq_full_and_disable(struct virtnet_info *vi,
1092				      struct net_device *dev,
1093				      struct send_queue *sq)
1094{
1095	bool use_napi = sq->napi.weight;
1096	int qnum;
1097
1098	qnum = sq - vi->sq;
1099
1100	/* If running out of space, stop queue to avoid getting packets that we
1101	 * are then unable to transmit.
1102	 * An alternative would be to force queuing layer to requeue the skb by
1103	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
1104	 * returned in a normal path of operation: it means that driver is not
1105	 * maintaining the TX queue stop/start state properly, and causes
1106	 * the stack to do a non-trivial amount of useless work.
1107	 * Since most packets only take 1 or 2 ring slots, stopping the queue
1108	 * early means 16 slots are typically wasted.
1109	 */
1110	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
1111		struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1112
1113		netif_tx_stop_queue(txq);
1114		u64_stats_update_begin(&sq->stats.syncp);
1115		u64_stats_inc(&sq->stats.stop);
1116		u64_stats_update_end(&sq->stats.syncp);
1117		if (use_napi) {
1118			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
1119				virtqueue_napi_schedule(&sq->napi, sq->vq);
1120		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1121			/* More just got used, free them then recheck. */
1122			free_old_xmit(sq, txq, false);
1123			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1124				netif_start_subqueue(dev, qnum);
1125				u64_stats_update_begin(&sq->stats.syncp);
1126				u64_stats_inc(&sq->stats.wake);
1127				u64_stats_update_end(&sq->stats.syncp);
1128				virtqueue_disable_cb(sq->vq);
1129			}
1130		}
1131	}
1132}
1133
1134static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
1135				   struct receive_queue *rq, void *buf, u32 len)
1136{
1137	struct xdp_buff *xdp;
1138	u32 bufsize;
1139
1140	xdp = (struct xdp_buff *)buf;
1141
1142	bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len;
1143
1144	if (unlikely(len > bufsize)) {
1145		pr_debug("%s: rx error: len %u exceeds truesize %u\n",
1146			 vi->dev->name, len, bufsize);
1147		DEV_STATS_INC(vi->dev, rx_length_errors);
1148		xsk_buff_free(xdp);
1149		return NULL;
1150	}
1151
1152	xsk_buff_set_size(xdp, len);
1153	xsk_buff_dma_sync_for_cpu(xdp);
1154
1155	return xdp;
1156}
1157
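/* Note on the XSK receive path below: the frame is always copied out of the
 * umem buffer into a freshly allocated skb and the xdp_buff is freed right
 * away, so the pool buffer can be recycled immediately instead of being
 * pinned by the skb.
 */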
1158static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
1159					 struct xdp_buff *xdp)
1160{
1161	unsigned int metasize = xdp->data - xdp->data_meta;
1162	struct sk_buff *skb;
1163	unsigned int size;
1164
1165	size = xdp->data_end - xdp->data_hard_start;
1166	skb = napi_alloc_skb(&rq->napi, size);
1167	if (unlikely(!skb)) {
1168		xsk_buff_free(xdp);
1169		return NULL;
1170	}
1171
1172	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
1173
1174	size = xdp->data_end - xdp->data_meta;
1175	memcpy(__skb_put(skb, size), xdp->data_meta, size);
1176
1177	if (metasize) {
1178		__skb_pull(skb, metasize);
1179		skb_metadata_set(skb, metasize);
1180	}
1181
1182	xsk_buff_free(xdp);
1183
1184	return skb;
1185}
1186
1187static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
1188						 struct receive_queue *rq, struct xdp_buff *xdp,
1189						 unsigned int *xdp_xmit,
1190						 struct virtnet_rq_stats *stats)
1191{
1192	struct bpf_prog *prog;
1193	u32 ret;
1194
1195	ret = XDP_PASS;
1196	rcu_read_lock();
1197	prog = rcu_dereference(rq->xdp_prog);
1198	if (prog)
1199		ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
1200	rcu_read_unlock();
1201
1202	switch (ret) {
1203	case XDP_PASS:
1204		return xsk_construct_skb(rq, xdp);
1205
1206	case XDP_TX:
1207	case XDP_REDIRECT:
1208		return NULL;
1209
1210	default:
1211		/* drop packet */
1212		xsk_buff_free(xdp);
1213		u64_stats_inc(&stats->drops);
1214		return NULL;
1215	}
1216}
1217
1218static void xsk_drop_follow_bufs(struct net_device *dev,
1219				 struct receive_queue *rq,
1220				 u32 num_buf,
1221				 struct virtnet_rq_stats *stats)
1222{
1223	struct xdp_buff *xdp;
1224	u32 len;
1225
1226	while (num_buf-- > 1) {
1227		xdp = virtqueue_get_buf(rq->vq, &len);
1228		if (unlikely(!xdp)) {
1229			pr_debug("%s: rx error: %d buffers missing\n",
1230				 dev->name, num_buf);
1231			DEV_STATS_INC(dev, rx_length_errors);
1232			break;
1233		}
1234		u64_stats_add(&stats->bytes, len);
1235		xsk_buff_free(xdp);
1236	}
1237}
1238
1239static int xsk_append_merge_buffer(struct virtnet_info *vi,
1240				   struct receive_queue *rq,
1241				   struct sk_buff *head_skb,
1242				   u32 num_buf,
1243				   struct virtio_net_hdr_mrg_rxbuf *hdr,
1244				   struct virtnet_rq_stats *stats)
1245{
1246	struct sk_buff *curr_skb;
1247	struct xdp_buff *xdp;
1248	u32 len, truesize;
1249	struct page *page;
1250	void *buf;
1251
1252	curr_skb = head_skb;
1253
1254	while (--num_buf) {
1255		buf = virtqueue_get_buf(rq->vq, &len);
1256		if (unlikely(!buf)) {
1257			pr_debug("%s: rx error: %d buffers out of %d missing\n",
1258				 vi->dev->name, num_buf,
1259				 virtio16_to_cpu(vi->vdev,
1260						 hdr->num_buffers));
1261			DEV_STATS_INC(vi->dev, rx_length_errors);
1262			return -EINVAL;
1263		}
1264
1265		u64_stats_add(&stats->bytes, len);
1266
1267		xdp = buf_to_xdp(vi, rq, buf, len);
1268		if (!xdp)
1269			goto err;
1270
1271		buf = napi_alloc_frag(len);
1272		if (!buf) {
1273			xsk_buff_free(xdp);
1274			goto err;
1275		}
1276
1277		memcpy(buf, xdp->data - vi->hdr_len, len);
1278
1279		xsk_buff_free(xdp);
1280
1281		page = virt_to_page(buf);
1282
1283		truesize = len;
1284
1285		curr_skb  = virtnet_skb_append_frag(head_skb, curr_skb, page,
1286						    buf, len, truesize);
1287		if (!curr_skb) {
1288			put_page(page);
1289			goto err;
1290		}
1291	}
1292
1293	return 0;
1294
1295err:
1296	xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
1297	return -EINVAL;
1298}
1299
1300static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
1301						 struct receive_queue *rq, struct xdp_buff *xdp,
1302						 unsigned int *xdp_xmit,
1303						 struct virtnet_rq_stats *stats)
1304{
1305	struct virtio_net_hdr_mrg_rxbuf *hdr;
1306	struct bpf_prog *prog;
1307	struct sk_buff *skb;
1308	u32 ret, num_buf;
1309
1310	hdr = xdp->data - vi->hdr_len;
1311	num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1312
1313	ret = XDP_PASS;
1314	rcu_read_lock();
1315	prog = rcu_dereference(rq->xdp_prog);
1316	/* TODO: support multi buffer. */
1317	if (prog && num_buf == 1)
1318		ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
1319	rcu_read_unlock();
1320
1321	switch (ret) {
1322	case XDP_PASS:
1323		skb = xsk_construct_skb(rq, xdp);
1324		if (!skb)
1325			goto drop_bufs;
1326
1327		if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
1328			dev_kfree_skb(skb);
1329			goto drop;
1330		}
1331
1332		return skb;
1333
1334	case XDP_TX:
1335	case XDP_REDIRECT:
1336		return NULL;
1337
1338	default:
1339		/* drop packet */
1340		xsk_buff_free(xdp);
1341	}
1342
1343drop_bufs:
1344	xsk_drop_follow_bufs(dev, rq, num_buf, stats);
1345
1346drop:
1347	u64_stats_inc(&stats->drops);
1348	return NULL;
1349}
1350
1351static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
1352				    void *buf, u32 len,
1353				    unsigned int *xdp_xmit,
1354				    struct virtnet_rq_stats *stats)
1355{
1356	struct net_device *dev = vi->dev;
1357	struct sk_buff *skb = NULL;
1358	struct xdp_buff *xdp;
1359	u8 flags;
1360
1361	len -= vi->hdr_len;
1362
1363	u64_stats_add(&stats->bytes, len);
1364
1365	xdp = buf_to_xdp(vi, rq, buf, len);
1366	if (!xdp)
1367		return;
1368
1369	if (unlikely(len < ETH_HLEN)) {
1370		pr_debug("%s: short packet %i\n", dev->name, len);
1371		DEV_STATS_INC(dev, rx_length_errors);
1372		xsk_buff_free(xdp);
1373		return;
1374	}
1375
1376	flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags;
1377
1378	if (!vi->mergeable_rx_bufs)
1379		skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
1380	else
1381		skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);
1382
1383	if (skb)
1384		virtnet_receive_done(vi, rq, skb, flags);
1385}
1386
1387static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
1388				   struct xsk_buff_pool *pool, gfp_t gfp)
1389{
1390	struct xdp_buff **xsk_buffs;
1391	dma_addr_t addr;
1392	int err = 0;
1393	u32 len, i;
1394	int num;
1395
1396	xsk_buffs = rq->xsk_buffs;
1397
1398	num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
1399	if (!num)
1400		return -ENOMEM;
1401
1402	len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
1403
1404	for (i = 0; i < num; ++i) {
1405		/* Use part of XDP_PACKET_HEADROOM as the virtnet hdr space.
1406		 * We assume XDP_PACKET_HEADROOM is larger than hdr->len.
1407		 * (see function virtnet_xsk_pool_enable)
1408		 */
1409		addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
1410
1411		sg_init_table(rq->sg, 1);
1412		sg_fill_dma(rq->sg, addr, len);
1413
1414		err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1,
1415						    xsk_buffs[i], NULL, gfp);
1416		if (err)
1417			goto err;
1418	}
1419
1420	return num;
1421
1422err:
1423	for (; i < num; ++i)
1424		xsk_buff_free(xsk_buffs[i]);
1425
1426	return err;
1427}
1428
1429static void *virtnet_xsk_to_ptr(u32 len)
1430{
1431	unsigned long p;
1432
1433	p = len << VIRTIO_XSK_FLAG_OFFSET;
1434
1435	return virtnet_xmit_ptr_pack((void *)p, VIRTNET_XMIT_TYPE_XSK);
1436}
1437
1438static int virtnet_xsk_xmit_one(struct send_queue *sq,
1439				struct xsk_buff_pool *pool,
1440				struct xdp_desc *desc)
1441{
1442	struct virtnet_info *vi;
1443	dma_addr_t addr;
1444
1445	vi = sq->vq->vdev->priv;
1446
1447	addr = xsk_buff_raw_get_dma(pool, desc->addr);
1448	xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);
1449
1450	sg_init_table(sq->sg, 2);
1451	sg_fill_dma(sq->sg, sq->xsk_hdr_dma_addr, vi->hdr_len);
1452	sg_fill_dma(sq->sg + 1, addr, desc->len);
1453
1454	return virtqueue_add_outbuf_premapped(sq->vq, sq->sg, 2,
1455					      virtnet_xsk_to_ptr(desc->len),
1456					      GFP_ATOMIC);
1457}
1458
1459static int virtnet_xsk_xmit_batch(struct send_queue *sq,
1460				  struct xsk_buff_pool *pool,
1461				  unsigned int budget,
1462				  u64 *kicks)
1463{
1464	struct xdp_desc *descs = pool->tx_descs;
1465	bool kick = false;
1466	u32 nb_pkts, i;
1467	int err;
1468
1469	budget = min_t(u32, budget, sq->vq->num_free);
1470
1471	nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
1472	if (!nb_pkts)
1473		return 0;
1474
1475	for (i = 0; i < nb_pkts; i++) {
1476		err = virtnet_xsk_xmit_one(sq, pool, &descs[i]);
1477		if (unlikely(err)) {
1478			xsk_tx_completed(sq->xsk_pool, nb_pkts - i);
1479			break;
1480		}
1481
1482		kick = true;
1483	}
1484
1485	if (kick && virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1486		(*kicks)++;
1487
1488	return i;
1489}
1490
1491static bool virtnet_xsk_xmit(struct send_queue *sq, struct xsk_buff_pool *pool,
1492			     int budget)
1493{
1494	struct virtnet_info *vi = sq->vq->vdev->priv;
1495	struct virtnet_sq_free_stats stats = {};
1496	struct net_device *dev = vi->dev;
1497	u64 kicks = 0;
1498	int sent;
1499
1500	/* Avoid waking up napi needlessly, so call __free_old_xmit() instead
1501	 * of free_old_xmit().
1502	 */
1503	__free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), true, &stats);
1504
1505	if (stats.xsk)
1506		xsk_tx_completed(sq->xsk_pool, stats.xsk);
1507
1508	sent = virtnet_xsk_xmit_batch(sq, pool, budget, &kicks);
1509
1510	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1511		check_sq_full_and_disable(vi, vi->dev, sq);
1512
1513	if (sent) {
1514		struct netdev_queue *txq;
1515
1516		txq = netdev_get_tx_queue(vi->dev, sq - vi->sq);
1517		txq_trans_cond_update(txq);
1518	}
1519
1520	u64_stats_update_begin(&sq->stats.syncp);
1521	u64_stats_add(&sq->stats.packets, stats.packets);
1522	u64_stats_add(&sq->stats.bytes,   stats.bytes);
1523	u64_stats_add(&sq->stats.kicks,   kicks);
1524	u64_stats_add(&sq->stats.xdp_tx,  sent);
1525	u64_stats_update_end(&sq->stats.syncp);
1526
1527	if (xsk_uses_need_wakeup(pool))
1528		xsk_set_tx_need_wakeup(pool);
1529
1530	return sent;
1531}
1532
1533static void xsk_wakeup(struct send_queue *sq)
1534{
1535	if (napi_if_scheduled_mark_missed(&sq->napi))
1536		return;
1537
1538	local_bh_disable();
1539	virtqueue_napi_schedule(&sq->napi, sq->vq);
1540	local_bh_enable();
1541}
1542
1543static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
1544{
1545	struct virtnet_info *vi = netdev_priv(dev);
1546	struct send_queue *sq;
1547
1548	if (!netif_running(dev))
1549		return -ENETDOWN;
1550
1551	if (qid >= vi->curr_queue_pairs)
1552		return -EINVAL;
1553
1554	sq = &vi->sq[qid];
1555
1556	xsk_wakeup(sq);
1557	return 0;
1558}
1559
1560static void virtnet_xsk_completed(struct send_queue *sq, int num)
1561{
1562	xsk_tx_completed(sq->xsk_pool, num);
1563
1564	/* Whether this is called from rx poll, start_xmit or xdp xmit, we
1565	 * should wake up the tx napi to consume the xsk tx queue, because the
1566	 * tx interrupt may not be triggered.
1567	 */
1568	xsk_wakeup(sq);
1569}
1570
1571static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
1572				   struct send_queue *sq,
1573				   struct xdp_frame *xdpf)
1574{
1575	struct virtio_net_hdr_mrg_rxbuf *hdr;
1576	struct skb_shared_info *shinfo;
1577	u8 nr_frags = 0;
1578	int err, i;
1579
1580	if (unlikely(xdpf->headroom < vi->hdr_len))
1581		return -EOVERFLOW;
1582
1583	if (unlikely(xdp_frame_has_frags(xdpf))) {
1584		shinfo = xdp_get_shared_info_from_frame(xdpf);
1585		nr_frags = shinfo->nr_frags;
1586	}
1587
1588	/* In the wrapping function virtnet_xdp_xmit(), we need to free
1589	 * up the pending old buffers, which requires calculating the
1590	 * position of skb_shared_info in xdp_get_frame_len() and
1591	 * xdp_return_frame(); both rely on xdpf->data and
1592	 * xdpf->headroom. Therefore, we need to update the value of
1593	 * headroom synchronously here.
1594	 */
1595	xdpf->headroom -= vi->hdr_len;
1596	xdpf->data -= vi->hdr_len;
1597	/* Zero header and leave csum up to XDP layers */
1598	hdr = xdpf->data;
1599	memset(hdr, 0, vi->hdr_len);
1600	xdpf->len   += vi->hdr_len;
1601
1602	sg_init_table(sq->sg, nr_frags + 1);
1603	sg_set_buf(sq->sg, xdpf->data, xdpf->len);
1604	for (i = 0; i < nr_frags; i++) {
1605		skb_frag_t *frag = &shinfo->frags[i];
1606
1607		sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
1608			    skb_frag_size(frag), skb_frag_off(frag));
1609	}
1610
1611	err = virtnet_add_outbuf(sq, nr_frags + 1, xdpf, VIRTNET_XMIT_TYPE_XDP);
1612	if (unlikely(err))
1613		return -ENOSPC; /* Caller handle free/refcnt */
1614
1615	return 0;
1616}
1617
1618/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1619 * the current cpu, so it does not need to be locked.
1620 *
1621 * Here we use a macro instead of inline functions because we have to deal with
1622 * three issues at the same time: 1. the choice of sq, 2. deciding on and doing the
1623 * lock/unlock of txq, and 3. keeping sparse happy. It is difficult for two inline
1624 * functions to solve all three problems at the same time.
1625 */
1626#define virtnet_xdp_get_sq(vi) ({                                       \
1627	int cpu = smp_processor_id();                                   \
1628	struct netdev_queue *txq;                                       \
1629	typeof(vi) v = (vi);                                            \
1630	unsigned int qp;                                                \
1631									\
1632	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
1633		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
1634		qp += cpu;                                              \
1635		txq = netdev_get_tx_queue(v->dev, qp);                  \
1636		__netif_tx_acquire(txq);                                \
1637	} else {                                                        \
1638		qp = cpu % v->curr_queue_pairs;                         \
1639		txq = netdev_get_tx_queue(v->dev, qp);                  \
1640		__netif_tx_lock(txq, cpu);                              \
1641	}                                                               \
1642	v->sq + qp;                                                     \
1643})
1644
1645#define virtnet_xdp_put_sq(vi, q) {                                     \
1646	struct netdev_queue *txq;                                       \
1647	typeof(vi) v = (vi);                                            \
1648									\
1649	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
1650	if (v->curr_queue_pairs > nr_cpu_ids)                           \
1651		__netif_tx_release(txq);                                \
1652	else                                                            \
1653		__netif_tx_unlock(txq);                                 \
1654}
1655
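/* Usage sketch: every virtnet_xdp_get_sq() must be paired with a
 * virtnet_xdp_put_sq() on the same sq, e.g.
 *
 *	sq = virtnet_xdp_get_sq(vi);
 *	...queue frames on sq...
 *	virtnet_xdp_put_sq(vi, sq);
 *
 * as done in virtnet_xdp_xmit() below.
 */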
1656static int virtnet_xdp_xmit(struct net_device *dev,
1657			    int n, struct xdp_frame **frames, u32 flags)
1658{
1659	struct virtnet_info *vi = netdev_priv(dev);
1660	struct virtnet_sq_free_stats stats = {0};
1661	struct receive_queue *rq = vi->rq;
1662	struct bpf_prog *xdp_prog;
1663	struct send_queue *sq;
1664	int nxmit = 0;
1665	int kicks = 0;
1666	int ret;
1667	int i;
1668
1669	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
1670	 * indicates that XDP resources have been successfully allocated.
1671	 */
1672	xdp_prog = rcu_access_pointer(rq->xdp_prog);
1673	if (!xdp_prog)
1674		return -ENXIO;
1675
1676	sq = virtnet_xdp_get_sq(vi);
1677
1678	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
1679		ret = -EINVAL;
1680		goto out;
1681	}
1682
1683	/* Free up any pending old buffers before queueing new ones. */
1684	virtnet_free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
1685			      false, &stats);
1686
1687	for (i = 0; i < n; i++) {
1688		struct xdp_frame *xdpf = frames[i];
1689
1690		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
1691			break;
1692		nxmit++;
1693	}
1694	ret = nxmit;
1695
1696	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1697		check_sq_full_and_disable(vi, dev, sq);
1698
1699	if (flags & XDP_XMIT_FLUSH) {
1700		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1701			kicks = 1;
1702	}
1703out:
1704	u64_stats_update_begin(&sq->stats.syncp);
1705	u64_stats_add(&sq->stats.bytes, stats.bytes);
1706	u64_stats_add(&sq->stats.packets, stats.packets);
1707	u64_stats_add(&sq->stats.xdp_tx, n);
1708	u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
1709	u64_stats_add(&sq->stats.kicks, kicks);
1710	u64_stats_update_end(&sq->stats.syncp);
1711
1712	virtnet_xdp_put_sq(vi, sq);
1713	return ret;
1714}
1715
1716static void put_xdp_frags(struct xdp_buff *xdp)
1717{
1718	struct skb_shared_info *shinfo;
1719	struct page *xdp_page;
1720	int i;
1721
1722	if (xdp_buff_has_frags(xdp)) {
1723		shinfo = xdp_get_shared_info_from_buff(xdp);
1724		for (i = 0; i < shinfo->nr_frags; i++) {
1725			xdp_page = skb_frag_page(&shinfo->frags[i]);
1726			put_page(xdp_page);
1727		}
1728	}
1729}
1730
1731static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
1732			       struct net_device *dev,
1733			       unsigned int *xdp_xmit,
1734			       struct virtnet_rq_stats *stats)
1735{
1736	struct xdp_frame *xdpf;
1737	int err;
1738	u32 act;
1739
1740	act = bpf_prog_run_xdp(xdp_prog, xdp);
1741	u64_stats_inc(&stats->xdp_packets);
1742
1743	switch (act) {
1744	case XDP_PASS:
1745		return act;
1746
1747	case XDP_TX:
1748		u64_stats_inc(&stats->xdp_tx);
1749		xdpf = xdp_convert_buff_to_frame(xdp);
1750		if (unlikely(!xdpf)) {
1751			netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1752			return XDP_DROP;
1753		}
1754
1755		err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1756		if (unlikely(!err)) {
1757			xdp_return_frame_rx_napi(xdpf);
1758		} else if (unlikely(err < 0)) {
1759			trace_xdp_exception(dev, xdp_prog, act);
1760			return XDP_DROP;
1761		}
1762		*xdp_xmit |= VIRTIO_XDP_TX;
1763		return act;
1764
1765	case XDP_REDIRECT:
1766		u64_stats_inc(&stats->xdp_redirects);
1767		err = xdp_do_redirect(dev, xdp, xdp_prog);
1768		if (err)
1769			return XDP_DROP;
1770
1771		*xdp_xmit |= VIRTIO_XDP_REDIR;
1772		return act;
1773
1774	default:
1775		bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
1776		fallthrough;
1777	case XDP_ABORTED:
1778		trace_xdp_exception(dev, xdp_prog, act);
1779		fallthrough;
1780	case XDP_DROP:
1781		return XDP_DROP;
1782	}
1783}
1784
1785static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1786{
1787	return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
1788}
1789
1790/* We copy the packet for XDP in the following cases:
1791 *
1792 * 1) Packet is scattered across multiple rx buffers.
1793 * 2) Headroom space is insufficient.
1794 *
1795 * This is inefficient, but it is a temporary condition that
1796 * we hit right after XDP is enabled and until the queue is refilled
1797 * with large buffers that have sufficient headroom - so it should affect
1798 * at most a queue's worth of packets.
1799 * Afterwards, the conditions to enable
1800 * XDP should preclude the underlying device from sending packets
1801 * across multiple buffers (num_buf > 1), and we make sure buffers
1802 * have enough headroom.
1803 */
1804static struct page *xdp_linearize_page(struct receive_queue *rq,
1805				       int *num_buf,
1806				       struct page *p,
1807				       int offset,
1808				       int page_off,
1809				       unsigned int *len)
1810{
1811	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1812	struct page *page;
1813
1814	if (page_off + *len + tailroom > PAGE_SIZE)
1815		return NULL;
1816
1817	page = alloc_page(GFP_ATOMIC);
1818	if (!page)
1819		return NULL;
1820
1821	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
1822	page_off += *len;
1823
1824	while (--*num_buf) {
1825		unsigned int buflen;
1826		void *buf;
1827		int off;
1828
1829		buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1830		if (unlikely(!buf))
1831			goto err_buf;
1832
1833		p = virt_to_head_page(buf);
1834		off = buf - page_address(p);
1835
1836		/* guard against a misconfigured or uncooperative backend that
1837		 * is sending packets larger than the MTU.
1838		 */
1839		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
1840			put_page(p);
1841			goto err_buf;
1842		}
1843
1844		memcpy(page_address(page) + page_off,
1845		       page_address(p) + off, buflen);
1846		page_off += buflen;
1847		put_page(p);
1848	}
1849
1850	/* Headroom does not contribute to packet length */
1851	*len = page_off - XDP_PACKET_HEADROOM;
1852	return page;
1853err_buf:
1854	__free_pages(page, 0);
1855	return NULL;
1856}
1857
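/* Small-buffer layout assumed by the two receive_small helpers below
 * (an illustrative sketch of what the code computes):
 *
 *	| VIRTNET_RX_PAD | xdp_headroom | virtio-net hdr | packet data |
 *
 * header_offset = VIRTNET_RX_PAD + xdp_headroom, and the skb data starts
 * right after the virtio-net header.
 */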
1858static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
1859					       unsigned int xdp_headroom,
1860					       void *buf,
1861					       unsigned int len)
1862{
1863	unsigned int header_offset;
1864	unsigned int headroom;
1865	unsigned int buflen;
1866	struct sk_buff *skb;
1867
1868	header_offset = VIRTNET_RX_PAD + xdp_headroom;
1869	headroom = vi->hdr_len + header_offset;
1870	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1871		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1872
1873	skb = virtnet_build_skb(buf, buflen, headroom, len);
1874	if (unlikely(!skb))
1875		return NULL;
1876
1877	buf += header_offset;
1878	memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
1879
1880	return skb;
1881}
1882
1883static struct sk_buff *receive_small_xdp(struct net_device *dev,
1884					 struct virtnet_info *vi,
1885					 struct receive_queue *rq,
1886					 struct bpf_prog *xdp_prog,
1887					 void *buf,
1888					 unsigned int xdp_headroom,
1889					 unsigned int len,
1890					 unsigned int *xdp_xmit,
1891					 struct virtnet_rq_stats *stats)
1892{
1893	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
1894	unsigned int headroom = vi->hdr_len + header_offset;
1895	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1896	struct page *page = virt_to_head_page(buf);
1897	struct page *xdp_page;
1898	unsigned int buflen;
1899	struct xdp_buff xdp;
1900	struct sk_buff *skb;
1901	unsigned int metasize = 0;
1902	u32 act;
1903
1904	if (unlikely(hdr->hdr.gso_type))
1905		goto err_xdp;
1906
1907	/* Partially checksummed packets must be dropped. */
1908	if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
1909		goto err_xdp;
1910
1911	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1912		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1913
1914	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
1915		int offset = buf - page_address(page) + header_offset;
1916		unsigned int tlen = len + vi->hdr_len;
1917		int num_buf = 1;
1918
1919		xdp_headroom = virtnet_get_headroom(vi);
1920		header_offset = VIRTNET_RX_PAD + xdp_headroom;
1921		headroom = vi->hdr_len + header_offset;
1922		buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1923			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1924		xdp_page = xdp_linearize_page(rq, &num_buf, page,
1925					      offset, header_offset,
1926					      &tlen);
1927		if (!xdp_page)
1928			goto err_xdp;
1929
1930		buf = page_address(xdp_page);
1931		put_page(page);
1932		page = xdp_page;
1933	}
1934
1935	xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1936	xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1937			 xdp_headroom, len, true);
1938
1939	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1940
1941	switch (act) {
1942	case XDP_PASS:
1943		/* Recalculate length in case bpf program changed it */
1944		len = xdp.data_end - xdp.data;
1945		metasize = xdp.data - xdp.data_meta;
1946		break;
1947
1948	case XDP_TX:
1949	case XDP_REDIRECT:
1950		goto xdp_xmit;
1951
1952	default:
1953		goto err_xdp;
1954	}
1955
1956	skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
1957	if (unlikely(!skb))
1958		goto err;
1959
1960	if (metasize)
1961		skb_metadata_set(skb, metasize);
1962
1963	return skb;
1964
1965err_xdp:
1966	u64_stats_inc(&stats->xdp_drops);
1967err:
1968	u64_stats_inc(&stats->drops);
1969	put_page(page);
1970xdp_xmit:
1971	return NULL;
1972}
1973
1974static struct sk_buff *receive_small(struct net_device *dev,
1975				     struct virtnet_info *vi,
1976				     struct receive_queue *rq,
1977				     void *buf, void *ctx,
1978				     unsigned int len,
1979				     unsigned int *xdp_xmit,
1980				     struct virtnet_rq_stats *stats)
1981{
1982	unsigned int xdp_headroom = (unsigned long)ctx;
1983	struct page *page = virt_to_head_page(buf);
1984	struct sk_buff *skb;
1985
1986	/* We passed the address of the virtnet header to the virtio core,
1987	 * so move back over the padding to the start of the buffer.
1988	 */
1989	buf -= VIRTNET_RX_PAD + xdp_headroom;
1990
1991	len -= vi->hdr_len;
1992	u64_stats_add(&stats->bytes, len);
1993
1994	if (unlikely(len > GOOD_PACKET_LEN)) {
1995		pr_debug("%s: rx error: len %u exceeds max size %d\n",
1996			 dev->name, len, GOOD_PACKET_LEN);
1997		DEV_STATS_INC(dev, rx_length_errors);
1998		goto err;
1999	}
2000
2001	if (unlikely(vi->xdp_enabled)) {
2002		struct bpf_prog *xdp_prog;
2003
2004		rcu_read_lock();
2005		xdp_prog = rcu_dereference(rq->xdp_prog);
2006		if (xdp_prog) {
2007			skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
2008						xdp_headroom, len, xdp_xmit,
2009						stats);
2010			rcu_read_unlock();
2011			return skb;
2012		}
2013		rcu_read_unlock();
2014	}
2015
2016	skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
2017	if (likely(skb))
2018		return skb;
2019
2020err:
2021	u64_stats_inc(&stats->drops);
2022	put_page(page);
2023	return NULL;
2024}
2025
2026static struct sk_buff *receive_big(struct net_device *dev,
2027				   struct virtnet_info *vi,
2028				   struct receive_queue *rq,
2029				   void *buf,
2030				   unsigned int len,
2031				   struct virtnet_rq_stats *stats)
2032{
2033	struct page *page = buf;
2034	struct sk_buff *skb =
2035		page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
2036
2037	u64_stats_add(&stats->bytes, len - vi->hdr_len);
2038	if (unlikely(!skb))
2039		goto err;
2040
2041	return skb;
2042
2043err:
2044	u64_stats_inc(&stats->drops);
2045	give_pages(rq, page);
2046	return NULL;
2047}
2048
2049static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
2050			       struct net_device *dev,
2051			       struct virtnet_rq_stats *stats)
2052{
2053	struct page *page;
2054	void *buf;
2055	int len;
2056
2057	while (num_buf-- > 1) {
2058		buf = virtnet_rq_get_buf(rq, &len, NULL);
2059		if (unlikely(!buf)) {
2060			pr_debug("%s: rx error: %d buffers missing\n",
2061				 dev->name, num_buf);
2062			DEV_STATS_INC(dev, rx_length_errors);
2063			break;
2064		}
2065		u64_stats_add(&stats->bytes, len);
2066		page = virt_to_head_page(buf);
2067		put_page(page);
2068	}
2069}
2070
2071/* Why not use xdp_build_skb_from_frame()?
2072 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
2073 * virtio-net there are 2 points that do not match its requirements:
2074 *  1. The size of the prefilled buffer is not fixed before xdp is set.
2075 *  2. xdp_build_skb_from_frame() does more checks that we don't need,
2076 *     like eth_type_trans() (which virtio-net does in receive_buf()).
2077 */
2078static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
2079					       struct virtnet_info *vi,
2080					       struct xdp_buff *xdp,
2081					       unsigned int xdp_frags_truesz)
2082{
2083	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2084	unsigned int headroom, data_len;
2085	struct sk_buff *skb;
2086	int metasize;
2087	u8 nr_frags;
2088
2089	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
2090		pr_debug("Error building skb as missing reserved tailroom for xdp");
2091		return NULL;
2092	}
2093
2094	if (unlikely(xdp_buff_has_frags(xdp)))
2095		nr_frags = sinfo->nr_frags;
2096
2097	skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
2098	if (unlikely(!skb))
2099		return NULL;
2100
2101	headroom = xdp->data - xdp->data_hard_start;
2102	data_len = xdp->data_end - xdp->data;
2103	skb_reserve(skb, headroom);
2104	__skb_put(skb, data_len);
2105
2106	metasize = xdp->data - xdp->data_meta;
2107	metasize = metasize > 0 ? metasize : 0;
2108	if (metasize)
2109		skb_metadata_set(skb, metasize);
2110
2111	if (unlikely(xdp_buff_has_frags(xdp)))
2112		xdp_update_skb_shared_info(skb, nr_frags,
2113					   sinfo->xdp_frags_size,
2114					   xdp_frags_truesz,
2115					   xdp_buff_is_frag_pfmemalloc(xdp));
2116
2117	return skb;
2118}
2119
2120/* TODO: build xdp in big mode */
2121static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
2122				      struct virtnet_info *vi,
2123				      struct receive_queue *rq,
2124				      struct xdp_buff *xdp,
2125				      void *buf,
2126				      unsigned int len,
2127				      unsigned int frame_sz,
2128				      int *num_buf,
2129				      unsigned int *xdp_frags_truesize,
2130				      struct virtnet_rq_stats *stats)
2131{
2132	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2133	unsigned int headroom, tailroom, room;
2134	unsigned int truesize, cur_frag_size;
2135	struct skb_shared_info *shinfo;
2136	unsigned int xdp_frags_truesz = 0;
2137	struct page *page;
2138	skb_frag_t *frag;
2139	int offset;
2140	void *ctx;
2141
2142	xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
2143	xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM,
2144			 XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
2145
2146	if (!*num_buf)
2147		return 0;
2148
2149	if (*num_buf > 1) {
2150		/* If we want to build a multi-buffer xdp_buff, we need
2151		 * to set the XDP_FLAGS_HAS_FRAG bit in the flags of the
2152		 * xdp_buff.
2153		 */
2154		if (!xdp_buff_has_frags(xdp))
2155			xdp_buff_set_frags_flag(xdp);
2156
2157		shinfo = xdp_get_shared_info_from_buff(xdp);
2158		shinfo->nr_frags = 0;
2159		shinfo->xdp_frags_size = 0;
2160	}
2161
2162	if (*num_buf > MAX_SKB_FRAGS + 1)
2163		return -EINVAL;
2164
2165	while (--*num_buf > 0) {
2166		buf = virtnet_rq_get_buf(rq, &len, &ctx);
2167		if (unlikely(!buf)) {
2168			pr_debug("%s: rx error: %d buffers out of %d missing\n",
2169				 dev->name, *num_buf,
2170				 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
2171			DEV_STATS_INC(dev, rx_length_errors);
2172			goto err;
2173		}
2174
2175		u64_stats_add(&stats->bytes, len);
2176		page = virt_to_head_page(buf);
2177		offset = buf - page_address(page);
2178
2179		truesize = mergeable_ctx_to_truesize(ctx);
2180		headroom = mergeable_ctx_to_headroom(ctx);
2181		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2182		room = SKB_DATA_ALIGN(headroom + tailroom);
2183
2184		cur_frag_size = truesize;
2185		xdp_frags_truesz += cur_frag_size;
2186		if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
2187			put_page(page);
2188			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
2189				 dev->name, len, (unsigned long)(truesize - room));
2190			DEV_STATS_INC(dev, rx_length_errors);
2191			goto err;
2192		}
2193
2194		frag = &shinfo->frags[shinfo->nr_frags++];
2195		skb_frag_fill_page_desc(frag, page, offset, len);
2196		if (page_is_pfmemalloc(page))
2197			xdp_buff_set_frag_pfmemalloc(xdp);
2198
2199		shinfo->xdp_frags_size += len;
2200	}
2201
2202	*xdp_frags_truesize = xdp_frags_truesz;
2203	return 0;
2204
2205err:
2206	put_xdp_frags(xdp);
2207	return -EINVAL;
2208}
2209
2210static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
2211				   struct receive_queue *rq,
2212				   struct bpf_prog *xdp_prog,
2213				   void *ctx,
2214				   unsigned int *frame_sz,
2215				   int *num_buf,
2216				   struct page **page,
2217				   int offset,
2218				   unsigned int *len,
2219				   struct virtio_net_hdr_mrg_rxbuf *hdr)
2220{
2221	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
2222	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
2223	struct page *xdp_page;
2224	unsigned int xdp_room;
2225
2226	/* Transient failure which in theory could occur if
2227	 * in-flight packets from before XDP was enabled reach
2228	 * the receive path after XDP is loaded.
2229	 */
2230	if (unlikely(hdr->hdr.gso_type))
2231		return NULL;
2232
2233	/* Partially checksummed packets must be dropped. */
2234	if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
2235		return NULL;
2236
2237	/* The XDP core currently assumes the frag size is PAGE_SIZE, but
2238	 * buffers with headroom may add a hole to truesize, which can
2239	 * make their length exceed PAGE_SIZE. So we disable the
2240	 * hole mechanism for xdp. See add_recvbuf_mergeable().
2241	 */
2242	*frame_sz = truesize;
2243
2244	if (likely(headroom >= virtnet_get_headroom(vi) &&
2245		   (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
2246		return page_address(*page) + offset;
2247	}
2248
2249	/* This happens when there is not enough headroom because
2250	 * the buffer was prefilled before XDP was set.
2251	 * This should only happen for the first several packets.
2252	 * In fact, a vq reset could be used here to help us clean up
2253	 * the prefilled buffers, but many existing devices do not
2254	 * support it, and we don't want to bother users who are
2255	 * using xdp normally.
2256	 */
2257	if (!xdp_prog->aux->xdp_has_frags) {
2258		/* linearize data for XDP */
2259		xdp_page = xdp_linearize_page(rq, num_buf,
2260					      *page, offset,
2261					      XDP_PACKET_HEADROOM,
2262					      len);
2263		if (!xdp_page)
2264			return NULL;
2265	} else {
2266		xdp_room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
2267					  sizeof(struct skb_shared_info));
2268		if (*len + xdp_room > PAGE_SIZE)
2269			return NULL;
2270
2271		xdp_page = alloc_page(GFP_ATOMIC);
2272		if (!xdp_page)
2273			return NULL;
2274
2275		memcpy(page_address(xdp_page) + XDP_PACKET_HEADROOM,
2276		       page_address(*page) + offset, *len);
2277	}
2278
2279	*frame_sz = PAGE_SIZE;
2280
2281	put_page(*page);
2282
2283	*page = xdp_page;
2284
2285	return page_address(*page) + XDP_PACKET_HEADROOM;
2286}
2287
2288static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
2289					     struct virtnet_info *vi,
2290					     struct receive_queue *rq,
2291					     struct bpf_prog *xdp_prog,
2292					     void *buf,
2293					     void *ctx,
2294					     unsigned int len,
2295					     unsigned int *xdp_xmit,
2296					     struct virtnet_rq_stats *stats)
2297{
2298	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2299	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
2300	struct page *page = virt_to_head_page(buf);
2301	int offset = buf - page_address(page);
2302	unsigned int xdp_frags_truesz = 0;
2303	struct sk_buff *head_skb;
2304	unsigned int frame_sz;
2305	struct xdp_buff xdp;
2306	void *data;
2307	u32 act;
2308	int err;
2309
2310	data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
2311				     offset, &len, hdr);
2312	if (unlikely(!data))
2313		goto err_xdp;
2314
2315	err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
2316					 &num_buf, &xdp_frags_truesz, stats);
2317	if (unlikely(err))
2318		goto err_xdp;
2319
2320	act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
2321
2322	switch (act) {
2323	case XDP_PASS:
2324		head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
2325		if (unlikely(!head_skb))
2326			break;
2327		return head_skb;
2328
2329	case XDP_TX:
2330	case XDP_REDIRECT:
2331		return NULL;
2332
2333	default:
2334		break;
2335	}
2336
2337	put_xdp_frags(&xdp);
2338
2339err_xdp:
2340	put_page(page);
2341	mergeable_buf_free(rq, num_buf, dev, stats);
2342
2343	u64_stats_inc(&stats->xdp_drops);
2344	u64_stats_inc(&stats->drops);
2345	return NULL;
2346}
2347
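/* Editor's note (not part of the original source): when the current skb
 * runs out of frag slots (MAX_SKB_FRAGS), a zero-length skb is allocated
 * and chained - via frag_list for the first overflow, via ->next after
 * that - and later buffers are attached to it. Buffers that are
 * contiguous within the same page are coalesced rather than consuming a
 * new frag slot.
 */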
2348static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
2349					       struct sk_buff *curr_skb,
2350					       struct page *page, void *buf,
2351					       int len, int truesize)
2352{
2353	int num_skb_frags;
2354	int offset;
2355
2356	num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
2357	if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
2358		struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
2359
2360		if (unlikely(!nskb))
2361			return NULL;
2362
2363		if (curr_skb == head_skb)
2364			skb_shinfo(curr_skb)->frag_list = nskb;
2365		else
2366			curr_skb->next = nskb;
2367		curr_skb = nskb;
2368		head_skb->truesize += nskb->truesize;
2369		num_skb_frags = 0;
2370	}
2371
2372	if (curr_skb != head_skb) {
2373		head_skb->data_len += len;
2374		head_skb->len += len;
2375		head_skb->truesize += truesize;
2376	}
2377
2378	offset = buf - page_address(page);
2379	if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
2380		put_page(page);
2381		skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
2382				     len, truesize);
2383	} else {
2384		skb_add_rx_frag(curr_skb, num_skb_frags, page,
2385				offset, len, truesize);
2386	}
2387
2388	return curr_skb;
2389}
2390
2391static struct sk_buff *receive_mergeable(struct net_device *dev,
2392					 struct virtnet_info *vi,
2393					 struct receive_queue *rq,
2394					 void *buf,
2395					 void *ctx,
2396					 unsigned int len,
2397					 unsigned int *xdp_xmit,
2398					 struct virtnet_rq_stats *stats)
2399{
2400	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
2401	int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
2402	struct page *page = virt_to_head_page(buf);
2403	int offset = buf - page_address(page);
2404	struct sk_buff *head_skb, *curr_skb;
2405	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
2406	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
2407	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2408	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
2409
2410	head_skb = NULL;
2411	u64_stats_add(&stats->bytes, len - vi->hdr_len);
2412
2413	if (unlikely(len > truesize - room)) {
2414		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
2415			 dev->name, len, (unsigned long)(truesize - room));
2416		DEV_STATS_INC(dev, rx_length_errors);
2417		goto err_skb;
2418	}
2419
2420	if (unlikely(vi->xdp_enabled)) {
2421		struct bpf_prog *xdp_prog;
2422
2423		rcu_read_lock();
2424		xdp_prog = rcu_dereference(rq->xdp_prog);
2425		if (xdp_prog) {
2426			head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
2427							 len, xdp_xmit, stats);
2428			rcu_read_unlock();
2429			return head_skb;
2430		}
2431		rcu_read_unlock();
2432	}
2433
2434	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
2435	curr_skb = head_skb;
2436
2437	if (unlikely(!curr_skb))
2438		goto err_skb;
2439	while (--num_buf) {
2440		buf = virtnet_rq_get_buf(rq, &len, &ctx);
2441		if (unlikely(!buf)) {
2442			pr_debug("%s: rx error: %d buffers out of %d missing\n",
2443				 dev->name, num_buf,
2444				 virtio16_to_cpu(vi->vdev,
2445						 hdr->num_buffers));
2446			DEV_STATS_INC(dev, rx_length_errors);
2447			goto err_buf;
2448		}
2449
2450		u64_stats_add(&stats->bytes, len);
2451		page = virt_to_head_page(buf);
2452
2453		truesize = mergeable_ctx_to_truesize(ctx);
2454		headroom = mergeable_ctx_to_headroom(ctx);
2455		tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2456		room = SKB_DATA_ALIGN(headroom + tailroom);
2457		if (unlikely(len > truesize - room)) {
2458			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
2459				 dev->name, len, (unsigned long)(truesize - room));
2460			DEV_STATS_INC(dev, rx_length_errors);
2461			goto err_skb;
2462		}
2463
2464		curr_skb  = virtnet_skb_append_frag(head_skb, curr_skb, page,
2465						    buf, len, truesize);
2466		if (!curr_skb)
2467			goto err_skb;
2468	}
2469
2470	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
2471	return head_skb;
2472
2473err_skb:
2474	put_page(page);
2475	mergeable_buf_free(rq, num_buf, dev, stats);
2476
2477err_buf:
2478	u64_stats_inc(&stats->drops);
2479	dev_kfree_skb(head_skb);
2480	return NULL;
2481}
2482
2483static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
2484				struct sk_buff *skb)
2485{
2486	enum pkt_hash_types rss_hash_type;
2487
2488	if (!hdr_hash || !skb)
2489		return;
2490
2491	switch (__le16_to_cpu(hdr_hash->hash_report)) {
2492	case VIRTIO_NET_HASH_REPORT_TCPv4:
2493	case VIRTIO_NET_HASH_REPORT_UDPv4:
2494	case VIRTIO_NET_HASH_REPORT_TCPv6:
2495	case VIRTIO_NET_HASH_REPORT_UDPv6:
2496	case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
2497	case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
2498		rss_hash_type = PKT_HASH_TYPE_L4;
2499		break;
2500	case VIRTIO_NET_HASH_REPORT_IPv4:
2501	case VIRTIO_NET_HASH_REPORT_IPv6:
2502	case VIRTIO_NET_HASH_REPORT_IPv6_EX:
2503		rss_hash_type = PKT_HASH_TYPE_L3;
2504		break;
2505	case VIRTIO_NET_HASH_REPORT_NONE:
2506	default:
2507		rss_hash_type = PKT_HASH_TYPE_NONE;
2508	}
2509	skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
2510}
2511
2512static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
2513				 struct sk_buff *skb, u8 flags)
2514{
2515	struct virtio_net_common_hdr *hdr;
2516	struct net_device *dev = vi->dev;
2517
2518	hdr = skb_vnet_common_hdr(skb);
2519	if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
2520		virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
2521
2522	if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
2523		skb->ip_summed = CHECKSUM_UNNECESSARY;
2524
2525	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
2526				  virtio_is_little_endian(vi->vdev))) {
2527		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
2528				     dev->name, hdr->hdr.gso_type,
2529				     hdr->hdr.gso_size);
2530		goto frame_err;
2531	}
2532
2533	skb_record_rx_queue(skb, vq2rxq(rq->vq));
2534	skb->protocol = eth_type_trans(skb, dev);
2535	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
2536		 ntohs(skb->protocol), skb->len, skb->pkt_type);
2537
2538	napi_gro_receive(&rq->napi, skb);
2539	return;
2540
2541frame_err:
2542	DEV_STATS_INC(dev, rx_frame_errors);
2543	dev_kfree_skb(skb);
2544}
2545
2546static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
2547			void *buf, unsigned int len, void **ctx,
2548			unsigned int *xdp_xmit,
2549			struct virtnet_rq_stats *stats)
2550{
2551	struct net_device *dev = vi->dev;
2552	struct sk_buff *skb;
2553	u8 flags;
2554
2555	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
2556		pr_debug("%s: short packet %i\n", dev->name, len);
2557		DEV_STATS_INC(dev, rx_length_errors);
2558		virtnet_rq_free_buf(vi, rq, buf);
2559		return;
2560	}
2561
2562	/* 1. Save the flags early, as the XDP program might overwrite them.
2563	 * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
2564	 * stay valid after XDP processing.
2565	 * 2. XDP doesn't work with partially checksummed packets (refer to
2566	 * virtnet_xdp_set()), so packets marked as
2567	 * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
2568	 */
2569	flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
2570
2571	if (vi->mergeable_rx_bufs)
2572		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
2573					stats);
2574	else if (vi->big_packets)
2575		skb = receive_big(dev, vi, rq, buf, len, stats);
2576	else
2577		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
2578
2579	if (unlikely(!skb))
2580		return;
2581
2582	virtnet_receive_done(vi, rq, skb, flags);
2583}
2584
2585/* Unlike mergeable buffers, all buffers are allocated to the
2586 * same size, except for the headroom. For this reason we do
2587 * not need to use mergeable_len_to_ctx here - it is enough
2588 * to store the headroom as the context, ignoring the truesize.
2589 */
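/* Editor's note (not part of the original source): the posted buffer is
 * laid out as [VIRTNET_RX_PAD][xdp headroom][virtio header + packet of up
 * to GOOD_PACKET_LEN bytes]; the allocation below is additionally rounded
 * up to leave room for an skb_shared_info at the end of the frag.
 */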
2590static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
2591			     gfp_t gfp)
2592{
2593	char *buf;
2594	unsigned int xdp_headroom = virtnet_get_headroom(vi);
2595	void *ctx = (void *)(unsigned long)xdp_headroom;
2596	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
2597	int err;
2598
2599	len = SKB_DATA_ALIGN(len) +
2600	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2601
2602	if (unlikely(!skb_page_frag_refill(len, &rq->alloc_frag, gfp)))
2603		return -ENOMEM;
2604
2605	buf = virtnet_rq_alloc(rq, len, gfp);
2606	if (unlikely(!buf))
2607		return -ENOMEM;
2608
2609	buf += VIRTNET_RX_PAD + xdp_headroom;
2610
2611	virtnet_rq_init_one_sg(rq, buf, vi->hdr_len + GOOD_PACKET_LEN);
2612
2613	err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
2614	if (err < 0) {
2615		virtnet_rq_unmap(rq, buf, 0);
2616		put_page(virt_to_head_page(buf));
2617	}
2618
2619	return err;
2620}
2621
2622static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
2623			   gfp_t gfp)
2624{
2625	struct page *first, *list = NULL;
2626	char *p;
2627	int i, err, offset;
2628
2629	sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
2630
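	/* Editor's note (not part of the original source): the scatterlist
	 * built below is sg[0] = virtio header, sg[1] = remainder of the
	 * first page after the padded header, and sg[2..] = one full page
	 * each; the pages are chained through page->private so the whole
	 * chain can be returned with give_pages() on error.
	 */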
2631	/* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
2632	for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
2633		first = get_a_page(rq, gfp);
2634		if (!first) {
2635			if (list)
2636				give_pages(rq, list);
2637			return -ENOMEM;
2638		}
2639		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
2640
2641		/* chain new page in list head to match sg */
2642		first->private = (unsigned long)list;
2643		list = first;
2644	}
2645
2646	first = get_a_page(rq, gfp);
2647	if (!first) {
2648		give_pages(rq, list);
2649		return -ENOMEM;
2650	}
2651	p = page_address(first);
2652
2653	/* rq->sg[0], rq->sg[1] share the same page */
2654	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
2655	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
2656
2657	/* rq->sg[1] for data packet, from offset */
2658	offset = sizeof(struct padded_vnet_hdr);
2659	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
2660
2661	/* chain first in list head */
2662	first->private = (unsigned long)list;
2663	err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
2664				  first, gfp);
2665	if (err < 0)
2666		give_pages(rq, first);
2667
2668	return err;
2669}
2670
2671static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
2672					  struct ewma_pkt_len *avg_pkt_len,
2673					  unsigned int room)
2674{
2675	struct virtnet_info *vi = rq->vq->vdev->priv;
2676	const size_t hdr_len = vi->hdr_len;
2677	unsigned int len;
2678
2679	if (room)
2680		return PAGE_SIZE - room;
2681
2682	len = hdr_len +	clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
2683				rq->min_buf_len, PAGE_SIZE - hdr_len);
2684
2685	return ALIGN(len, L1_CACHE_BYTES);
2686}
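/* Editor's note on the sizing above (not part of the original source):
 * without XDP headroom (room == 0) the buffer length tracks the EWMA of
 * recent packet sizes, clamped to [rq->min_buf_len, PAGE_SIZE - hdr_len]
 * and rounded up to L1_CACHE_BYTES. For example, assuming the 12-byte
 * mergeable header and an average packet of ~1500 bytes on a 64-byte
 * cache-line system, 12 + 1500 = 1512 is rounded up to 1536 bytes per
 * posted buffer. With XDP headroom, a full PAGE_SIZE - room is used.
 */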
2687
2688static int add_recvbuf_mergeable(struct virtnet_info *vi,
2689				 struct receive_queue *rq, gfp_t gfp)
2690{
2691	struct page_frag *alloc_frag = &rq->alloc_frag;
2692	unsigned int headroom = virtnet_get_headroom(vi);
2693	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2694	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
2695	unsigned int len, hole;
2696	void *ctx;
2697	char *buf;
2698	int err;
2699
2700	/* Extra tailroom is needed to satisfy XDP's assumption. This
2701	 * means rx frag coalescing won't work, but since we've
2702	 * disabled GSO for XDP, it won't be a big issue.
2703	 */
2704	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
2705
2706	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
2707		return -ENOMEM;
2708
2709	if (!alloc_frag->offset && len + room + sizeof(struct virtnet_rq_dma) > alloc_frag->size)
2710		len -= sizeof(struct virtnet_rq_dma);
2711
2712	buf = virtnet_rq_alloc(rq, len + room, gfp);
2713	if (unlikely(!buf))
2714		return -ENOMEM;
2715
2716	buf += headroom; /* advance address leaving hole at front of pkt */
2717	hole = alloc_frag->size - alloc_frag->offset;
2718	if (hole < len + room) {
2719		/* To avoid internal fragmentation, if there is very likely not
2720		 * enough space for another buffer, add the remaining space to
2721		 * the current buffer.
2722		 * XDP core assumes that frame_size of xdp_buff and the length
2723		 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
2724		 */
2725		if (!headroom)
2726			len += hole;
2727		alloc_frag->offset += hole;
2728	}
2729
2730	virtnet_rq_init_one_sg(rq, buf, len);
2731
2732	ctx = mergeable_len_to_ctx(len + room, headroom);
2733	err = virtqueue_add_inbuf_premapped(rq->vq, rq->sg, 1, buf, ctx, gfp);
2734	if (err < 0) {
2735		virtnet_rq_unmap(rq, buf, 0);
2736		put_page(virt_to_head_page(buf));
2737	}
2738
2739	return err;
2740}
2741
2742/*
2743 * Returns false if we couldn't fill entirely (OOM).
2744 *
2745 * Normally run in the receive path, but can also be run from ndo_open
2746 * before we're receiving packets, or from refill_work which is
2747 * careful to disable receiving (using napi_disable).
2748 */
2749static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
2750			  gfp_t gfp)
2751{
2752	int err;
2753
2754	if (rq->xsk_pool) {
2755		err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp);
2756		goto kick;
2757	}
2758
2759	do {
2760		if (vi->mergeable_rx_bufs)
2761			err = add_recvbuf_mergeable(vi, rq, gfp);
2762		else if (vi->big_packets)
2763			err = add_recvbuf_big(vi, rq, gfp);
2764		else
2765			err = add_recvbuf_small(vi, rq, gfp);
2766
2767		if (err)
2768			break;
2769	} while (rq->vq->num_free);
2770
2771kick:
2772	if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2773		unsigned long flags;
2774
2775		flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2776		u64_stats_inc(&rq->stats.kicks);
2777		u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2778	}
2779
2780	return err != -ENOMEM;
2781}
2782
2783static void skb_recv_done(struct virtqueue *rvq)
2784{
2785	struct virtnet_info *vi = rvq->vdev->priv;
2786	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2787
2788	rq->calls++;
2789	virtqueue_napi_schedule(&rq->napi, rvq);
2790}
2791
2792static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
2793{
2794	napi_enable(napi);
2795
2796	/* If all buffers were filled by the other side before we enabled napi,
2797	 * we won't get another interrupt, so process any outstanding packets
2798	 * now. Call local_bh_enable after to trigger softIRQ processing.
2799	 */
2800	local_bh_disable();
2801	virtqueue_napi_schedule(napi, vq);
2802	local_bh_enable();
2803}
2804
2805static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2806				   struct virtqueue *vq,
2807				   struct napi_struct *napi)
2808{
2809	if (!napi->weight)
2810		return;
2811
2812	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2813	 * enable the feature if this is likely affine with the transmit path.
2814	 */
2815	if (!vi->affinity_hint_set) {
2816		napi->weight = 0;
2817		return;
2818	}
2819
2820	return virtnet_napi_enable(vq, napi);
2821}
2822
2823static void virtnet_napi_tx_disable(struct napi_struct *napi)
2824{
2825	if (napi->weight)
2826		napi_disable(napi);
2827}
2828
2829static void refill_work(struct work_struct *work)
2830{
2831	struct virtnet_info *vi =
2832		container_of(work, struct virtnet_info, refill.work);
2833	bool still_empty;
2834	int i;
2835
2836	for (i = 0; i < vi->curr_queue_pairs; i++) {
2837		struct receive_queue *rq = &vi->rq[i];
2838
2839		napi_disable(&rq->napi);
2840		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2841		virtnet_napi_enable(rq->vq, &rq->napi);
2842
2843		/* In theory, this can happen: if we don't get any buffers in
2844		 * we will *never* try to fill again.
2845		 */
2846		if (still_empty)
2847			schedule_delayed_work(&vi->refill, HZ/2);
2848	}
2849}
2850
2851static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
2852				    struct receive_queue *rq,
2853				    int budget,
2854				    unsigned int *xdp_xmit,
2855				    struct virtnet_rq_stats *stats)
2856{
2857	unsigned int len;
2858	int packets = 0;
2859	void *buf;
2860
2861	while (packets < budget) {
2862		buf = virtqueue_get_buf(rq->vq, &len);
2863		if (!buf)
2864			break;
2865
2866		virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats);
2867		packets++;
2868	}
2869
2870	return packets;
2871}
2872
2873static int virtnet_receive_packets(struct virtnet_info *vi,
2874				   struct receive_queue *rq,
2875				   int budget,
2876				   unsigned int *xdp_xmit,
2877				   struct virtnet_rq_stats *stats)
2878{
2879	unsigned int len;
2880	int packets = 0;
2881	void *buf;
2882
2883	if (!vi->big_packets || vi->mergeable_rx_bufs) {
2884		void *ctx;
2885		while (packets < budget &&
2886		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2887			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats);
2888			packets++;
2889		}
2890	} else {
2891		while (packets < budget &&
2892		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
2893			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
2894			packets++;
2895		}
2896	}
2897
2898	return packets;
2899}
2900
2901static int virtnet_receive(struct receive_queue *rq, int budget,
2902			   unsigned int *xdp_xmit)
2903{
2904	struct virtnet_info *vi = rq->vq->vdev->priv;
2905	struct virtnet_rq_stats stats = {};
2906	int i, packets;
2907
2908	if (rq->xsk_pool)
2909		packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats);
2910	else
2911		packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
2912
2913	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
2914		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2915			spin_lock(&vi->refill_lock);
2916			if (vi->refill_enabled)
2917				schedule_delayed_work(&vi->refill, 0);
2918			spin_unlock(&vi->refill_lock);
2919		}
2920	}
2921
2922	u64_stats_set(&stats.packets, packets);
2923	u64_stats_update_begin(&rq->stats.syncp);
2924	for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
2925		size_t offset = virtnet_rq_stats_desc[i].offset;
2926		u64_stats_t *item, *src;
2927
2928		item = (u64_stats_t *)((u8 *)&rq->stats + offset);
2929		src = (u64_stats_t *)((u8 *)&stats + offset);
2930		u64_stats_add(item, u64_stats_read(src));
2931	}
2932
2933	u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets));
2934	u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes));
2935
2936	u64_stats_update_end(&rq->stats.syncp);
2937
2938	return packets;
2939}
2940
2941static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
2942{
2943	struct virtnet_info *vi = rq->vq->vdev->priv;
2944	unsigned int index = vq2rxq(rq->vq);
2945	struct send_queue *sq = &vi->sq[index];
2946	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
2947
2948	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2949		return;
2950
2951	if (__netif_tx_trylock(txq)) {
2952		if (sq->reset) {
2953			__netif_tx_unlock(txq);
2954			return;
2955		}
2956
2957		do {
2958			virtqueue_disable_cb(sq->vq);
2959			free_old_xmit(sq, txq, !!budget);
2960		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2961
2962		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
2963			if (netif_tx_queue_stopped(txq)) {
2964				u64_stats_update_begin(&sq->stats.syncp);
2965				u64_stats_inc(&sq->stats.wake);
2966				u64_stats_update_end(&sq->stats.syncp);
2967			}
2968			netif_tx_wake_queue(txq);
2969		}
2970
2971		__netif_tx_unlock(txq);
2972	}
2973}
2974
2975static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
2976{
2977	struct dim_sample cur_sample = {};
2978
2979	if (!rq->packets_in_napi)
2980		return;
2981
2982	/* No protection is needed when fetching stats, since the fetcher and
2983	 * updater of the stats run in the same context.
2984	 */
2985	dim_update_sample(rq->calls,
2986			  u64_stats_read(&rq->stats.packets),
2987			  u64_stats_read(&rq->stats.bytes),
2988			  &cur_sample);
2989
2990	net_dim(&rq->dim, &cur_sample);
2991	rq->packets_in_napi = 0;
2992}
2993
2994static int virtnet_poll(struct napi_struct *napi, int budget)
2995{
2996	struct receive_queue *rq =
2997		container_of(napi, struct receive_queue, napi);
2998	struct virtnet_info *vi = rq->vq->vdev->priv;
2999	struct send_queue *sq;
3000	unsigned int received;
3001	unsigned int xdp_xmit = 0;
3002	bool napi_complete;
3003
3004	virtnet_poll_cleantx(rq, budget);
3005
3006	received = virtnet_receive(rq, budget, &xdp_xmit);
3007	rq->packets_in_napi += received;
3008
3009	if (xdp_xmit & VIRTIO_XDP_REDIR)
3010		xdp_do_flush();
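	/* Editor's note (not part of the original source): redirected frames
	 * are flushed once per poll here; XDP_TX frames are kicked further
	 * below, after the napi completion handling.
	 */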
3011
3012	/* Out of packets? */
3013	if (received < budget) {
3014		napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
3015		/* Intentionally not taking dim_lock here. This may result in a
3016		 * spurious net_dim call. But if that happens virtnet_rx_dim_work
3017		 * will not act on the scheduled work.
3018		 */
3019		if (napi_complete && rq->dim_enabled)
3020			virtnet_rx_dim_update(vi, rq);
3021	}
3022
3023	if (xdp_xmit & VIRTIO_XDP_TX) {
3024		sq = virtnet_xdp_get_sq(vi);
3025		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
3026			u64_stats_update_begin(&sq->stats.syncp);
3027			u64_stats_inc(&sq->stats.kicks);
3028			u64_stats_update_end(&sq->stats.syncp);
3029		}
3030		virtnet_xdp_put_sq(vi, sq);
3031	}
3032
3033	return received;
3034}
3035
3036static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
3037{
3038	virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
3039	napi_disable(&vi->rq[qp_index].napi);
3040	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
3041}
3042
3043static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
3044{
3045	struct net_device *dev = vi->dev;
3046	int err;
3047
3048	err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
3049			       vi->rq[qp_index].napi.napi_id);
3050	if (err < 0)
3051		return err;
3052
3053	err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
3054					 MEM_TYPE_PAGE_SHARED, NULL);
3055	if (err < 0)
3056		goto err_xdp_reg_mem_model;
3057
3058	virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
3059	virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
3060
3061	return 0;
3062
3063err_xdp_reg_mem_model:
3064	xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
3065	return err;
3066}
3067
3068static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim)
3069{
3070	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3071		return;
3072	net_dim_work_cancel(dim);
3073}
3074
3075static void virtnet_update_settings(struct virtnet_info *vi)
3076{
3077	u32 speed;
3078	u8 duplex;
3079
3080	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3081		return;
3082
3083	virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3084
3085	if (ethtool_validate_speed(speed))
3086		vi->speed = speed;
3087
3088	virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3089
3090	if (ethtool_validate_duplex(duplex))
3091		vi->duplex = duplex;
3092}
3093
3094static int virtnet_open(struct net_device *dev)
3095{
3096	struct virtnet_info *vi = netdev_priv(dev);
3097	int i, err;
3098
3099	enable_delayed_refill(vi);
3100
3101	for (i = 0; i < vi->max_queue_pairs; i++) {
3102		if (i < vi->curr_queue_pairs)
3103			/* Make sure we have some buffers: if oom use wq. */
3104			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
3105				schedule_delayed_work(&vi->refill, 0);
3106
3107		err = virtnet_enable_queue_pair(vi, i);
3108		if (err < 0)
3109			goto err_enable_qp;
3110	}
3111
3112	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
3113		if (vi->status & VIRTIO_NET_S_LINK_UP)
3114			netif_carrier_on(vi->dev);
3115		virtio_config_driver_enable(vi->vdev);
3116	} else {
3117		vi->status = VIRTIO_NET_S_LINK_UP;
3118		netif_carrier_on(dev);
3119	}
3120
3121	return 0;
3122
3123err_enable_qp:
3124	disable_delayed_refill(vi);
3125	cancel_delayed_work_sync(&vi->refill);
3126
3127	for (i--; i >= 0; i--) {
3128		virtnet_disable_queue_pair(vi, i);
3129		virtnet_cancel_dim(vi, &vi->rq[i].dim);
3130	}
3131
3132	return err;
3133}
3134
3135static int virtnet_poll_tx(struct napi_struct *napi, int budget)
3136{
3137	struct send_queue *sq = container_of(napi, struct send_queue, napi);
3138	struct virtnet_info *vi = sq->vq->vdev->priv;
3139	unsigned int index = vq2txq(sq->vq);
3140	struct netdev_queue *txq;
3141	int opaque, xsk_done = 0;
3142	bool done;
3143
3144	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
3145		/* We don't need to enable cb for XDP */
3146		napi_complete_done(napi, 0);
3147		return 0;
3148	}
3149
3150	txq = netdev_get_tx_queue(vi->dev, index);
3151	__netif_tx_lock(txq, raw_smp_processor_id());
3152	virtqueue_disable_cb(sq->vq);
3153
3154	if (sq->xsk_pool)
3155		xsk_done = virtnet_xsk_xmit(sq, sq->xsk_pool, budget);
3156	else
3157		free_old_xmit(sq, txq, !!budget);
3158
3159	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
3160		if (netif_tx_queue_stopped(txq)) {
3161			u64_stats_update_begin(&sq->stats.syncp);
3162			u64_stats_inc(&sq->stats.wake);
3163			u64_stats_update_end(&sq->stats.syncp);
3164		}
3165		netif_tx_wake_queue(txq);
3166	}
3167
3168	if (xsk_done >= budget) {
3169		__netif_tx_unlock(txq);
3170		return budget;
3171	}
3172
3173	opaque = virtqueue_enable_cb_prepare(sq->vq);
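	/* Editor's note (not part of the original source): callbacks are
	 * re-armed before napi is completed; if completions raced in while
	 * they were off (virtqueue_poll() returns true below), napi is
	 * rescheduled with callbacks disabled again so no event is lost.
	 */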
3174
3175	done = napi_complete_done(napi, 0);
3176
3177	if (!done)
3178		virtqueue_disable_cb(sq->vq);
3179
3180	__netif_tx_unlock(txq);
3181
3182	if (done) {
3183		if (unlikely(virtqueue_poll(sq->vq, opaque))) {
3184			if (napi_schedule_prep(napi)) {
3185				__netif_tx_lock(txq, raw_smp_processor_id());
3186				virtqueue_disable_cb(sq->vq);
3187				__netif_tx_unlock(txq);
3188				__napi_schedule(napi);
3189			}
3190		}
3191	}
3192
3193	return 0;
3194}
3195
3196static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
3197{
3198	struct virtio_net_hdr_mrg_rxbuf *hdr;
3199	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
3200	struct virtnet_info *vi = sq->vq->vdev->priv;
3201	int num_sg;
3202	unsigned hdr_len = vi->hdr_len;
3203	bool can_push;
3204
3205	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
3206
3207	can_push = vi->any_header_sg &&
3208		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
3209		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
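	/* Editor's note (not part of the original source): can_push means the
	 * virtio header can be prepended in the skb headroom itself (device
	 * accepts any header layout, data is suitably aligned, the header is
	 * not cloned and there is enough headroom), saving an sg entry.
	 */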
3210	/* Even if we can, don't push here yet as this would skew
3211	 * csum_start offset below. */
3212	if (can_push)
3213		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
3214	else
3215		hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
3216
3217	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
3218				    virtio_is_little_endian(vi->vdev), false,
3219				    0))
3220		return -EPROTO;
3221
3222	if (vi->mergeable_rx_bufs)
3223		hdr->num_buffers = 0;
3224
3225	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
3226	if (can_push) {
3227		__skb_push(skb, hdr_len);
3228		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
3229		if (unlikely(num_sg < 0))
3230			return num_sg;
3231		/* Pull header back to avoid skew in tx bytes calculations. */
3232		__skb_pull(skb, hdr_len);
3233	} else {
3234		sg_set_buf(sq->sg, hdr, hdr_len);
3235		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
3236		if (unlikely(num_sg < 0))
3237			return num_sg;
3238		num_sg++;
3239	}
3240
3241	return virtnet_add_outbuf(sq, num_sg, skb,
3242				  orphan ? VIRTNET_XMIT_TYPE_SKB_ORPHAN : VIRTNET_XMIT_TYPE_SKB);
3243}
3244
3245static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
3246{
3247	struct virtnet_info *vi = netdev_priv(dev);
3248	int qnum = skb_get_queue_mapping(skb);
3249	struct send_queue *sq = &vi->sq[qnum];
3250	int err;
3251	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
3252	bool xmit_more = netdev_xmit_more();
3253	bool use_napi = sq->napi.weight;
3254	bool kick;
3255
3256	/* Free up any pending old buffers before queueing new ones. */
3257	do {
3258		if (use_napi)
3259			virtqueue_disable_cb(sq->vq);
3260
3261		free_old_xmit(sq, txq, false);
3262
3263	} while (use_napi && !xmit_more &&
3264	       unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
3265
3266	/* timestamp packet in software */
3267	skb_tx_timestamp(skb);
3268
3269	/* Try to transmit */
3270	err = xmit_skb(sq, skb, !use_napi);
3271
3272	/* This should not happen! */
3273	if (unlikely(err)) {
3274		DEV_STATS_INC(dev, tx_fifo_errors);
3275		if (net_ratelimit())
3276			dev_warn(&dev->dev,
3277				 "Unexpected TXQ (%d) queue failure: %d\n",
3278				 qnum, err);
3279		DEV_STATS_INC(dev, tx_dropped);
3280		dev_kfree_skb_any(skb);
3281		return NETDEV_TX_OK;
3282	}
3283
3284	/* Don't wait up for transmitted skbs to be freed. */
3285	if (!use_napi) {
3286		skb_orphan(skb);
3287		nf_reset_ct(skb);
3288	}
3289
3290	check_sq_full_and_disable(vi, dev, sq);
3291
3292	kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) :
3293			  !xmit_more || netif_xmit_stopped(txq);
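	/* Editor's note (not part of the original source): with tx napi the
	 * BQL helper decides whether a notification is needed; without napi
	 * we kick unless more packets are pending (xmit_more) and the queue
	 * is still running.
	 */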
3294	if (kick) {
3295		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
3296			u64_stats_update_begin(&sq->stats.syncp);
3297			u64_stats_inc(&sq->stats.kicks);
3298			u64_stats_update_end(&sq->stats.syncp);
3299		}
3300	}
3301
3302	return NETDEV_TX_OK;
3303}
3304
3305static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
3306{
3307	bool running = netif_running(vi->dev);
3308
3309	if (running) {
3310		napi_disable(&rq->napi);
3311		virtnet_cancel_dim(vi, &rq->dim);
3312	}
3313}
3314
3315static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
3316{
3317	bool running = netif_running(vi->dev);
3318
3319	if (!try_fill_recv(vi, rq, GFP_KERNEL))
3320		schedule_delayed_work(&vi->refill, 0);
3321
3322	if (running)
3323		virtnet_napi_enable(rq->vq, &rq->napi);
3324}
3325
3326static int virtnet_rx_resize(struct virtnet_info *vi,
3327			     struct receive_queue *rq, u32 ring_num)
3328{
3329	int err, qindex;
3330
3331	qindex = rq - vi->rq;
3332
3333	virtnet_rx_pause(vi, rq);
3334
3335	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf, NULL);
3336	if (err)
3337		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
3338
3339	virtnet_rx_resume(vi, rq);
3340	return err;
3341}
3342
3343static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
3344{
3345	bool running = netif_running(vi->dev);
3346	struct netdev_queue *txq;
3347	int qindex;
3348
3349	qindex = sq - vi->sq;
3350
3351	if (running)
3352		virtnet_napi_tx_disable(&sq->napi);
3353
3354	txq = netdev_get_tx_queue(vi->dev, qindex);
3355
3356	/* 1. wait for all in-flight xmit to complete
3357	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
3358	 */
3359	__netif_tx_lock_bh(txq);
3360
3361	/* Prevent rx poll from accessing sq. */
3362	sq->reset = true;
3363
3364	/* Prevent the upper layer from trying to send packets. */
3365	netif_stop_subqueue(vi->dev, qindex);
3366
3367	__netif_tx_unlock_bh(txq);
3368}
3369
3370static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
3371{
3372	bool running = netif_running(vi->dev);
3373	struct netdev_queue *txq;
3374	int qindex;
3375
3376	qindex = sq - vi->sq;
3377
3378	txq = netdev_get_tx_queue(vi->dev, qindex);
3379
3380	__netif_tx_lock_bh(txq);
3381	sq->reset = false;
3382	netif_tx_wake_queue(txq);
3383	__netif_tx_unlock_bh(txq);
3384
3385	if (running)
3386		virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
3387}
3388
3389static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
3390			     u32 ring_num)
3391{
3392	int qindex, err;
3393
3394	qindex = sq - vi->sq;
3395
3396	virtnet_tx_pause(vi, sq);
3397
3398	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf,
3399			       virtnet_sq_free_unused_buf_done);
3400	if (err)
3401		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
3402
3403	virtnet_tx_resume(vi, sq);
3404
3405	return err;
3406}
3407
3408/*
3409 * Send command via the control virtqueue and check status.  Commands
3410 * supported by the hypervisor, as indicated by feature bits, should
3411 * never fail unless improperly formatted.
3412 */
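/* Editor's note (not part of the original source): the buffers placed on
 * the control virtqueue are, in order: the {class, cmd} header, the
 * optional command-specific out data, a one-byte status written back by
 * the device, and an optional device-written reply buffer.
 */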
3413static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd,
3414				       struct scatterlist *out,
3415				       struct scatterlist *in)
3416{
3417	struct scatterlist *sgs[5], hdr, stat;
3418	u32 out_num = 0, tmp, in_num = 0;
3419	bool ok;
3420	int ret;
3421
3422	/* Caller should know better */
3423	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
3424
3425	mutex_lock(&vi->cvq_lock);
3426	vi->ctrl->status = ~0;
3427	vi->ctrl->hdr.class = class;
3428	vi->ctrl->hdr.cmd = cmd;
3429	/* Add header */
3430	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
3431	sgs[out_num++] = &hdr;
3432
3433	if (out)
3434		sgs[out_num++] = out;
3435
3436	/* Add return status. */
3437	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
3438	sgs[out_num + in_num++] = &stat;
3439
3440	if (in)
3441		sgs[out_num + in_num++] = in;
3442
3443	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
3444	ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC);
3445	if (ret < 0) {
3446		dev_warn(&vi->vdev->dev,
3447			 "Failed to add sgs for command vq: %d.\n", ret);
3448		mutex_unlock(&vi->cvq_lock);
3449		return false;
3450	}
3451
3452	if (unlikely(!virtqueue_kick(vi->cvq)))
3453		goto unlock;
3454
3455	/* Spin for a response; the kick causes an ioport write, trapping
3456	 * into the hypervisor, so the request should be handled immediately.
3457	 */
3458	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
3459	       !virtqueue_is_broken(vi->cvq)) {
3460		cond_resched();
3461		cpu_relax();
3462	}
3463
3464unlock:
3465	ok = vi->ctrl->status == VIRTIO_NET_OK;
3466	mutex_unlock(&vi->cvq_lock);
3467	return ok;
3468}
3469
3470static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
3471				 struct scatterlist *out)
3472{
3473	return virtnet_send_command_reply(vi, class, cmd, out, NULL);
3474}
3475
3476static int virtnet_set_mac_address(struct net_device *dev, void *p)
3477{
3478	struct virtnet_info *vi = netdev_priv(dev);
3479	struct virtio_device *vdev = vi->vdev;
3480	int ret;
3481	struct sockaddr *addr;
3482	struct scatterlist sg;
3483
3484	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
3485		return -EOPNOTSUPP;
3486
3487	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
3488	if (!addr)
3489		return -ENOMEM;
3490
3491	ret = eth_prepare_mac_addr_change(dev, addr);
3492	if (ret)
3493		goto out;
3494
3495	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
3496		sg_init_one(&sg, addr->sa_data, dev->addr_len);
3497		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
3498					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
3499			dev_warn(&vdev->dev,
3500				 "Failed to set mac address by vq command.\n");
3501			ret = -EINVAL;
3502			goto out;
3503		}
3504	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
3505		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3506		unsigned int i;
3507
3508		/* Naturally, this has an atomicity problem. */
3509		for (i = 0; i < dev->addr_len; i++)
3510			virtio_cwrite8(vdev,
3511				       offsetof(struct virtio_net_config, mac) +
3512				       i, addr->sa_data[i]);
3513	}
3514
3515	eth_commit_mac_addr_change(dev, p);
3516	ret = 0;
3517
3518out:
3519	kfree(addr);
3520	return ret;
3521}
3522
3523static void virtnet_stats(struct net_device *dev,
3524			  struct rtnl_link_stats64 *tot)
3525{
3526	struct virtnet_info *vi = netdev_priv(dev);
3527	unsigned int start;
3528	int i;
3529
3530	for (i = 0; i < vi->max_queue_pairs; i++) {
3531		u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
3532		struct receive_queue *rq = &vi->rq[i];
3533		struct send_queue *sq = &vi->sq[i];
3534
3535		do {
3536			start = u64_stats_fetch_begin(&sq->stats.syncp);
3537			tpackets = u64_stats_read(&sq->stats.packets);
3538			tbytes   = u64_stats_read(&sq->stats.bytes);
3539			terrors  = u64_stats_read(&sq->stats.tx_timeouts);
3540		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3541
3542		do {
3543			start = u64_stats_fetch_begin(&rq->stats.syncp);
3544			rpackets = u64_stats_read(&rq->stats.packets);
3545			rbytes   = u64_stats_read(&rq->stats.bytes);
3546			rdrops   = u64_stats_read(&rq->stats.drops);
3547		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3548
3549		tot->rx_packets += rpackets;
3550		tot->tx_packets += tpackets;
3551		tot->rx_bytes   += rbytes;
3552		tot->tx_bytes   += tbytes;
3553		tot->rx_dropped += rdrops;
3554		tot->tx_errors  += terrors;
3555	}
3556
3557	tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
3558	tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
3559	tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
3560	tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
3561}
3562
3563static void virtnet_ack_link_announce(struct virtnet_info *vi)
3564{
3565	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
3566				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
3567		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
3568}
3569
3570static bool virtnet_commit_rss_command(struct virtnet_info *vi);
3571
3572static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pairs)
3573{
3574	u32 indir_val = 0;
3575	int i = 0;
3576
3577	for (; i < vi->rss_indir_table_size; ++i) {
3578		indir_val = ethtool_rxfh_indir_default(i, queue_pairs);
3579		vi->rss.indirection_table[i] = indir_val;
3580	}
3581	vi->rss.max_tx_vq = queue_pairs;
3582}
3583
3584static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
3585{
3586	struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
3587	struct virtio_net_ctrl_rss old_rss;
3588	struct net_device *dev = vi->dev;
3589	struct scatterlist sg;
3590
3591	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
3592		return 0;
3593
3594	/* First check if we need to update rss. Do the update only if both
3595	 * (1) rss is enabled and (2) there is no user configuration.
3596	 *
3597	 * During rss command processing, the device updates queue_pairs using rss.max_tx_vq.
3598	 * That is, the device updates queue_pairs together with rss, so we can skip the separate
3599	 * queue_pairs update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly.
3600	 */
3601	if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
3602		memcpy(&old_rss, &vi->rss, sizeof(old_rss));
3603		if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) {
3604			vi->rss.indirection_table = old_rss.indirection_table;
3605			return -ENOMEM;
3606		}
3607
3608		virtnet_rss_update_by_qpairs(vi, queue_pairs);
3609
3610		if (!virtnet_commit_rss_command(vi)) {
3611			/* restore ctrl_rss if commit_rss_command failed */
3612			rss_indirection_table_free(&vi->rss);
3613			memcpy(&vi->rss, &old_rss, sizeof(old_rss));
3614
3615			dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n",
3616				 queue_pairs);
3617			return -EINVAL;
3618		}
3619		rss_indirection_table_free(&old_rss);
3620		goto succ;
3621	}
3622
3623	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
3624	if (!mq)
3625		return -ENOMEM;
3626
3627	mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
3628	sg_init_one(&sg, mq, sizeof(*mq));
3629
3630	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
3631				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
3632		dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
3633			 queue_pairs);
3634		return -EINVAL;
3635	}
3636succ:
3637	vi->curr_queue_pairs = queue_pairs;
3638	/* virtnet_open() will refill when the device is brought up. */
3639	if (dev->flags & IFF_UP)
3640		schedule_delayed_work(&vi->refill, 0);
3641
3642	return 0;
3643}
3644
3645static int virtnet_close(struct net_device *dev)
3646{
3647	struct virtnet_info *vi = netdev_priv(dev);
3648	int i;
3649
3650	/* Make sure NAPI doesn't schedule refill work */
3651	disable_delayed_refill(vi);
3652	/* Make sure refill_work doesn't re-enable napi! */
3653	cancel_delayed_work_sync(&vi->refill);
3654	/* Prevent the config change callback from changing carrier
3655	 * after close
3656	 */
3657	virtio_config_driver_disable(vi->vdev);
3658	/* Stop getting status/speed updates: we don't care until next
3659	 * open
3660	 */
3661	cancel_work_sync(&vi->config_work);
3662
3663	for (i = 0; i < vi->max_queue_pairs; i++) {
3664		virtnet_disable_queue_pair(vi, i);
3665		virtnet_cancel_dim(vi, &vi->rq[i].dim);
3666	}
3667
3668	netif_carrier_off(dev);
3669
3670	return 0;
3671}
3672
3673static void virtnet_rx_mode_work(struct work_struct *work)
3674{
3675	struct virtnet_info *vi =
3676		container_of(work, struct virtnet_info, rx_mode_work);
3677	u8 *promisc_allmulti  __free(kfree) = NULL;
3678	struct net_device *dev = vi->dev;
3679	struct scatterlist sg[2];
3680	struct virtio_net_ctrl_mac *mac_data;
3681	struct netdev_hw_addr *ha;
3682	int uc_count;
3683	int mc_count;
3684	void *buf;
3685	int i;
3686
3687	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
3688	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
3689		return;
3690
3691	promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_KERNEL);
3692	if (!promisc_allmulti) {
3693		dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
3694		return;
3695	}
3696
3697	rtnl_lock();
3698
3699	*promisc_allmulti = !!(dev->flags & IFF_PROMISC);
3700	sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
3701
3702	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
3703				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
3704		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
3705			 *promisc_allmulti ? "en" : "dis");
3706
3707	*promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
3708	sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
3709
3710	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
3711				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
3712		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
3713			 *promisc_allmulti ? "en" : "dis");
3714
3715	netif_addr_lock_bh(dev);
3716
3717	uc_count = netdev_uc_count(dev);
3718	mc_count = netdev_mc_count(dev);
3719	/* MAC filter - use one buffer for both lists */
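	/* Editor's note (not part of the original source): the single buffer
	 * holds two virtio_net_ctrl_mac tables back to back - a unicast
	 * entry count followed by the unicast MACs, then a multicast entry
	 * count followed by the multicast MACs - handed to the device as two
	 * scatterlist entries.
	 */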
3720	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
3721		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
3722	mac_data = buf;
3723	if (!buf) {
3724		netif_addr_unlock_bh(dev);
3725		rtnl_unlock();
3726		return;
3727	}
3728
3729	sg_init_table(sg, 2);
3730
3731	/* Store the unicast list and count in the front of the buffer */
3732	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
3733	i = 0;
3734	netdev_for_each_uc_addr(ha, dev)
3735		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
3736
3737	sg_set_buf(&sg[0], mac_data,
3738		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
3739
3740	/* multicast list and count fill the end */
3741	mac_data = (void *)&mac_data->macs[uc_count][0];
3742
3743	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
3744	i = 0;
3745	netdev_for_each_mc_addr(ha, dev)
3746		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
3747
3748	netif_addr_unlock_bh(dev);
3749
3750	sg_set_buf(&sg[1], mac_data,
3751		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
3752
3753	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
3754				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
3755		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
3756
3757	rtnl_unlock();
3758
3759	kfree(buf);
3760}
3761
3762static void virtnet_set_rx_mode(struct net_device *dev)
3763{
3764	struct virtnet_info *vi = netdev_priv(dev);
3765
3766	if (vi->rx_mode_work_enabled)
3767		schedule_work(&vi->rx_mode_work);
3768}
3769
3770static int virtnet_vlan_rx_add_vid(struct net_device *dev,
3771				   __be16 proto, u16 vid)
3772{
3773	struct virtnet_info *vi = netdev_priv(dev);
3774	__virtio16 *_vid __free(kfree) = NULL;
3775	struct scatterlist sg;
3776
3777	_vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
3778	if (!_vid)
3779		return -ENOMEM;
3780
3781	*_vid = cpu_to_virtio16(vi->vdev, vid);
3782	sg_init_one(&sg, _vid, sizeof(*_vid));
3783
3784	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
3785				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
3786		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
3787	return 0;
3788}
3789
3790static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
3791				    __be16 proto, u16 vid)
3792{
3793	struct virtnet_info *vi = netdev_priv(dev);
3794	__virtio16 *_vid __free(kfree) = NULL;
3795	struct scatterlist sg;
3796
3797	_vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
3798	if (!_vid)
3799		return -ENOMEM;
3800
3801	*_vid = cpu_to_virtio16(vi->vdev, vid);
3802	sg_init_one(&sg, _vid, sizeof(*_vid));
3803
3804	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
3805				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
3806		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
3807	return 0;
3808}
3809
3810static void virtnet_clean_affinity(struct virtnet_info *vi)
3811{
3812	int i;
3813
3814	if (vi->affinity_hint_set) {
3815		for (i = 0; i < vi->max_queue_pairs; i++) {
3816			virtqueue_set_affinity(vi->rq[i].vq, NULL);
3817			virtqueue_set_affinity(vi->sq[i].vq, NULL);
3818		}
3819
3820		vi->affinity_hint_set = false;
3821	}
3822}
3823
3824static void virtnet_set_affinity(struct virtnet_info *vi)
3825{
3826	cpumask_var_t mask;
3827	int stragglers;
3828	int group_size;
3829	int i, j, cpu;
3830	int num_cpu;
3831	int stride;
3832
3833	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3834		virtnet_clean_affinity(vi);
3835		return;
3836	}
3837
3838	num_cpu = num_online_cpus();
3839	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
3840	stragglers = num_cpu >= vi->curr_queue_pairs ?
3841			num_cpu % vi->curr_queue_pairs :
3842			0;
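	/* Editor's note (not part of the original source): e.g. with 8 online
	 * CPUs and 3 queue pairs, stride = 2 and stragglers = 2, so the
	 * queues are assigned CPU groups of sizes 3, 3 and 2.
	 */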
3843	cpu = cpumask_first(cpu_online_mask);
3844
3845	for (i = 0; i < vi->curr_queue_pairs; i++) {
3846		group_size = stride + (i < stragglers ? 1 : 0);
3847
3848		for (j = 0; j < group_size; j++) {
3849			cpumask_set_cpu(cpu, mask);
3850			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
3851						nr_cpu_ids, false);
3852		}
3853		virtqueue_set_affinity(vi->rq[i].vq, mask);
3854		virtqueue_set_affinity(vi->sq[i].vq, mask);
3855		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
3856		cpumask_clear(mask);
3857	}
3858
3859	vi->affinity_hint_set = true;
3860	free_cpumask_var(mask);
3861}
3862
3863static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
3864{
3865	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3866						   node);
3867	virtnet_set_affinity(vi);
3868	return 0;
3869}
3870
3871static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
3872{
3873	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3874						   node_dead);
3875	virtnet_set_affinity(vi);
3876	return 0;
3877}
3878
3879static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
3880{
3881	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3882						   node);
3883
3884	virtnet_clean_affinity(vi);
3885	return 0;
3886}
3887
3888static enum cpuhp_state virtionet_online;
3889
3890static int virtnet_cpu_notif_add(struct virtnet_info *vi)
3891{
3892	int ret;
3893
3894	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
3895	if (ret)
3896		return ret;
3897	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
3898					       &vi->node_dead);
3899	if (!ret)
3900		return ret;
3901	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
3902	return ret;
3903}
3904
3905static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
3906{
3907	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
3908	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
3909					    &vi->node_dead);
3910}
3911
3912static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3913					 u16 vqn, u32 max_usecs, u32 max_packets)
3914{
3915	struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
3916	struct scatterlist sgs;
3917
3918	coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
3919	if (!coal_vq)
3920		return -ENOMEM;
3921
3922	coal_vq->vqn = cpu_to_le16(vqn);
3923	coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
3924	coal_vq->coal.max_packets = cpu_to_le32(max_packets);
3925	sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
3926
3927	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3928				  VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
3929				  &sgs))
3930		return -EINVAL;
3931
3932	return 0;
3933}
3934
3935static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3936					    u16 queue, u32 max_usecs,
3937					    u32 max_packets)
3938{
3939	int err;
3940
3941	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3942		return -EOPNOTSUPP;
3943
3944	err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
3945					    max_usecs, max_packets);
3946	if (err)
3947		return err;
3948
3949	vi->rq[queue].intr_coal.max_usecs = max_usecs;
3950	vi->rq[queue].intr_coal.max_packets = max_packets;
3951
3952	return 0;
3953}
3954
3955static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3956					    u16 queue, u32 max_usecs,
3957					    u32 max_packets)
3958{
3959	int err;
3960
3961	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3962		return -EOPNOTSUPP;
3963
3964	err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
3965					    max_usecs, max_packets);
3966	if (err)
3967		return err;
3968
3969	vi->sq[queue].intr_coal.max_usecs = max_usecs;
3970	vi->sq[queue].intr_coal.max_packets = max_packets;
3971
3972	return 0;
3973}
3974
3975static void virtnet_get_ringparam(struct net_device *dev,
3976				  struct ethtool_ringparam *ring,
3977				  struct kernel_ethtool_ringparam *kernel_ring,
3978				  struct netlink_ext_ack *extack)
3979{
3980	struct virtnet_info *vi = netdev_priv(dev);
3981
3982	ring->rx_max_pending = vi->rq[0].vq->num_max;
3983	ring->tx_max_pending = vi->sq[0].vq->num_max;
3984	ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
3985	ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
3986}
3987
3988static int virtnet_set_ringparam(struct net_device *dev,
3989				 struct ethtool_ringparam *ring,
3990				 struct kernel_ethtool_ringparam *kernel_ring,
3991				 struct netlink_ext_ack *extack)
3992{
3993	struct virtnet_info *vi = netdev_priv(dev);
3994	u32 rx_pending, tx_pending;
3995	struct receive_queue *rq;
3996	struct send_queue *sq;
3997	int i, err;
3998
3999	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
4000		return -EINVAL;
4001
4002	rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
4003	tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
4004
4005	if (ring->rx_pending == rx_pending &&
4006	    ring->tx_pending == tx_pending)
4007		return 0;
4008
4009	if (ring->rx_pending > vi->rq[0].vq->num_max)
4010		return -EINVAL;
4011
4012	if (ring->tx_pending > vi->sq[0].vq->num_max)
4013		return -EINVAL;
4014
4015	for (i = 0; i < vi->max_queue_pairs; i++) {
4016		rq = vi->rq + i;
4017		sq = vi->sq + i;
4018
4019		if (ring->tx_pending != tx_pending) {
4020			err = virtnet_tx_resize(vi, sq, ring->tx_pending);
4021			if (err)
4022				return err;
4023
4024			/* Upon disabling and re-enabling a transmit virtqueue, the device must
4025			 * set the coalescing parameters of the virtqueue to those configured
4026			 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
4027			 * did not set any TX coalescing parameters, to 0.
4028			 */
4029			err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
4030							       vi->intr_coal_tx.max_usecs,
4031							       vi->intr_coal_tx.max_packets);
4032
4033			/* Don't break the tx resize action if the vq coalescing is not
4034			 * supported. The same is true for rx resize below.
4035			 */
4036			if (err && err != -EOPNOTSUPP)
4037				return err;
4038		}
4039
4040		if (ring->rx_pending != rx_pending) {
4041			err = virtnet_rx_resize(vi, rq, ring->rx_pending);
4042			if (err)
4043				return err;
4044
4045			/* The reason is the same as for the transmit virtqueue reset */
4046			mutex_lock(&vi->rq[i].dim_lock);
4047			err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
4048							       vi->intr_coal_rx.max_usecs,
4049							       vi->intr_coal_rx.max_packets);
4050			mutex_unlock(&vi->rq[i].dim_lock);
4051			if (err && err != -EOPNOTSUPP)
4052				return err;
4053		}
4054	}
4055
4056	return 0;
4057}
4058
4059static bool virtnet_commit_rss_command(struct virtnet_info *vi)
4060{
4061	struct net_device *dev = vi->dev;
4062	struct scatterlist sgs[4];
4063	unsigned int sg_buf_size;
4064
4065	/* prepare sgs */
4066	sg_init_table(sgs, 4);
4067
4068	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, hash_cfg_reserved);
4069	sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
4070
4071	if (vi->has_rss) {
4072		sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size;
4073		sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
4074	} else {
4075		sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t));
4076	}
4077
4078	sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
4079			- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
4080	sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
4081
4082	sg_buf_size = vi->rss_key_size;
4083	sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
4084
4085	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
4086				  vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
4087				  : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
4088		goto err;
4089
4090	return true;
4091
4092err:
4093	dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
4094	return false;
4095
4096}
4097
4098static void virtnet_init_default_rss(struct virtnet_info *vi)
4099{
4100	vi->rss.hash_types = vi->rss_hash_types_supported;
4101	vi->rss_hash_types_saved = vi->rss_hash_types_supported;
4102	vi->rss.indirection_table_mask = vi->rss_indir_table_size
4103						? vi->rss_indir_table_size - 1 : 0;
4104	vi->rss.unclassified_queue = 0;
4105
4106	virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs);
4107
4108	vi->rss.hash_key_length = vi->rss_key_size;
4109
4110	netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
4111}
4112
4113static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
4114{
4115	info->data = 0;
4116	switch (info->flow_type) {
4117	case TCP_V4_FLOW:
4118		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
4119			info->data = RXH_IP_SRC | RXH_IP_DST |
4120						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4121		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
4122			info->data = RXH_IP_SRC | RXH_IP_DST;
4123		}
4124		break;
4125	case TCP_V6_FLOW:
4126		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
4127			info->data = RXH_IP_SRC | RXH_IP_DST |
4128						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4129		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
4130			info->data = RXH_IP_SRC | RXH_IP_DST;
4131		}
4132		break;
4133	case UDP_V4_FLOW:
4134		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
4135			info->data = RXH_IP_SRC | RXH_IP_DST |
4136						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4137		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
4138			info->data = RXH_IP_SRC | RXH_IP_DST;
4139		}
4140		break;
4141	case UDP_V6_FLOW:
4142		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
4143			info->data = RXH_IP_SRC | RXH_IP_DST |
4144						 RXH_L4_B_0_1 | RXH_L4_B_2_3;
4145		} else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
4146			info->data = RXH_IP_SRC | RXH_IP_DST;
4147		}
4148		break;
4149	case IPV4_FLOW:
4150		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
4151			info->data = RXH_IP_SRC | RXH_IP_DST;
4152
4153		break;
4154	case IPV6_FLOW:
4155		if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
4156			info->data = RXH_IP_SRC | RXH_IP_DST;
4157
4158		break;
4159	default:
4160		info->data = 0;
4161		break;
4162	}
4163}
4164
4165static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
4166{
4167	u32 new_hashtypes = vi->rss_hash_types_saved;
4168	bool is_disable = info->data & RXH_DISCARD;
4169	bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
4170
4171	/* supports only 'sd', 'sdfn' and 'r' */
4172	if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
4173		return false;
4174
4175	switch (info->flow_type) {
4176	case TCP_V4_FLOW:
4177		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
4178		if (!is_disable)
4179			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
4180				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
4181		break;
4182	case UDP_V4_FLOW:
4183		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
4184		if (!is_disable)
4185			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
4186				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
4187		break;
4188	case IPV4_FLOW:
4189		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
4190		if (!is_disable)
4191			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
4192		break;
4193	case TCP_V6_FLOW:
4194		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
4195		if (!is_disable)
4196			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
4197				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
4198		break;
4199	case UDP_V6_FLOW:
4200		new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
4201		if (!is_disable)
4202			new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
4203				| (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
4204		break;
4205	case IPV6_FLOW:
4206		new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
4207		if (!is_disable)
4208			new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
4209		break;
4210	default:
4211		/* unsupported flow */
4212		return false;
4213	}
4214
4215	/* if unsupported hashtype was set */
4216	if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
4217		return false;
4218
4219	if (new_hashtypes != vi->rss_hash_types_saved) {
4220		vi->rss_hash_types_saved = new_hashtypes;
4221		vi->rss.hash_types = vi->rss_hash_types_saved;
4222		if (vi->dev->features & NETIF_F_RXHASH)
4223			return virtnet_commit_rss_command(vi);
4224	}
4225
4226	return true;
4227}
4228
4229static void virtnet_get_drvinfo(struct net_device *dev,
4230				struct ethtool_drvinfo *info)
4231{
4232	struct virtnet_info *vi = netdev_priv(dev);
4233	struct virtio_device *vdev = vi->vdev;
4234
4235	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
4236	strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
4237	strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
4238
4239}
4240
4241/* TODO: Eliminate OOO packets during switching */
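/* One combined ethtool channel maps to one virtio rx/tx queue pair, so
 * e.g. "ethtool -L eth0 combined 4" asks the device for 4 queue pairs
 * (sent via VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET in virtnet_set_queues()).
 */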
4242static int virtnet_set_channels(struct net_device *dev,
4243				struct ethtool_channels *channels)
4244{
4245	struct virtnet_info *vi = netdev_priv(dev);
4246	u16 queue_pairs = channels->combined_count;
4247	int err;
4248
4249	/* We don't support separate rx/tx channels.
4250	 * We don't allow setting 'other' channels.
4251	 */
4252	if (channels->rx_count || channels->tx_count || channels->other_count)
4253		return -EINVAL;
4254
4255	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
4256		return -EINVAL;
4257
4258	/* For now we don't support modifying channels while XDP is loaded.
4259	 * Also, when XDP is loaded all RX queues have XDP programs so we only
4260	 * need to check a single RX queue.
4261	 */
4262	if (vi->rq[0].xdp_prog)
4263		return -EINVAL;
4264
4265	cpus_read_lock();
4266	err = virtnet_set_queues(vi, queue_pairs);
4267	if (err) {
4268		cpus_read_unlock();
4269		goto err;
4270	}
4271	virtnet_set_affinity(vi);
4272	cpus_read_unlock();
4273
4274	netif_set_real_num_tx_queues(dev, queue_pairs);
4275	netif_set_real_num_rx_queues(dev, queue_pairs);
4276 err:
4277	return err;
4278}
4279
4280static void virtnet_stats_sprintf(u8 **p, const char *fmt, const char *noq_fmt,
4281				  int num, int qid, const struct virtnet_stat_desc *desc)
4282{
4283	int i;
4284
4285	if (qid < 0) {
4286		for (i = 0; i < num; ++i)
4287			ethtool_sprintf(p, noq_fmt, desc[i].desc);
4288	} else {
4289		for (i = 0; i < num; ++i)
4290			ethtool_sprintf(p, fmt, qid, desc[i].desc);
4291	}
4292}
4293
4294/* qid == -1: generate the rx/tx total field names (no queue index) */
4295static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
4296{
4297	const struct virtnet_stat_desc *desc;
4298	const char *fmt, *noq_fmt;
4299	u8 *p = *data;
4300	u32 num;
4301
4302	if (type == VIRTNET_Q_TYPE_CQ && qid >= 0) {
4303		noq_fmt = "cq_hw_%s";
4304
4305		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
4306			desc = &virtnet_stats_cvq_desc[0];
4307			num = ARRAY_SIZE(virtnet_stats_cvq_desc);
4308
4309			virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc);
4310		}
4311	}
4312
4313	if (type == VIRTNET_Q_TYPE_RX) {
4314		fmt = "rx%u_%s";
4315		noq_fmt = "rx_%s";
4316
4317		desc = &virtnet_rq_stats_desc[0];
4318		num = ARRAY_SIZE(virtnet_rq_stats_desc);
4319
4320		virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4321
4322		fmt = "rx%u_hw_%s";
4323		noq_fmt = "rx_hw_%s";
4324
4325		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4326			desc = &virtnet_stats_rx_basic_desc[0];
4327			num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4328
4329			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4330		}
4331
4332		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4333			desc = &virtnet_stats_rx_csum_desc[0];
4334			num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4335
4336			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4337		}
4338
4339		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4340			desc = &virtnet_stats_rx_speed_desc[0];
4341			num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4342
4343			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4344		}
4345	}
4346
4347	if (type == VIRTNET_Q_TYPE_TX) {
4348		fmt = "tx%u_%s";
4349		noq_fmt = "tx_%s";
4350
4351		desc = &virtnet_sq_stats_desc[0];
4352		num = ARRAY_SIZE(virtnet_sq_stats_desc);
4353
4354		virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4355
4356		fmt = "tx%u_hw_%s";
4357		noq_fmt = "tx_hw_%s";
4358
4359		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4360			desc = &virtnet_stats_tx_basic_desc[0];
4361			num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4362
4363			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4364		}
4365
4366		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4367			desc = &virtnet_stats_tx_gso_desc[0];
4368			num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4369
4370			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4371		}
4372
4373		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4374			desc = &virtnet_stats_tx_speed_desc[0];
4375			num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4376
4377			virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
4378		}
4379	}
4380
4381	*data = p;
4382}
4383
4384struct virtnet_stats_ctx {
4385	/* The stats are written to qstats or ethtool -S */
4386	bool to_qstat;
4387
4388	/* Used to calculate the offset inside the output buffer. */
4389	u32 desc_num[3];
4390
4391	/* The actual supported stat types. */
4392	u64 bitmap[3];
4393
4394	/* Used to calculate the reply buffer size. */
4395	u32 size[3];
4396
4397	/* Record the output buffer. */
4398	u64 *data;
4399};
4400
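/* Layout of the ethtool -S output buffer described by the ctx (when
 * to_qstat is false):
 *
 *   [rx totals][tx totals][cq hw stats][rx0]...[rxN-1][tx0]...[txN-1]
 *
 * i.e. the per-type totals come first, then the control queue block,
 * then one block per rx queue followed by one block per tx queue.
 * desc_num[] holds the number of u64 fields per queue of each type and
 * size[] the size of the device reply needed for that type.
 */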
4401static void virtnet_stats_ctx_init(struct virtnet_info *vi,
4402				   struct virtnet_stats_ctx *ctx,
4403				   u64 *data, bool to_qstat)
4404{
4405	u32 queue_type;
4406
4407	ctx->data = data;
4408	ctx->to_qstat = to_qstat;
4409
4410	if (to_qstat) {
4411		ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
4412		ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
4413
4414		queue_type = VIRTNET_Q_TYPE_RX;
4415
4416		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4417			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
4418			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
4419			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_basic);
4420		}
4421
4422		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4423			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
4424			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
4425			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_csum);
4426		}
4427
4428		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
4429			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_GSO;
4430			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
4431			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_gso);
4432		}
4433
4434		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4435			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
4436			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
4437			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_speed);
4438		}
4439
4440		queue_type = VIRTNET_Q_TYPE_TX;
4441
4442		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4443			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
4444			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
4445			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_basic);
4446		}
4447
4448		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
4449			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_CSUM;
4450			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
4451			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_csum);
4452		}
4453
4454		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4455			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_GSO;
4456			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
4457			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_gso);
4458		}
4459
4460		if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4461			ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
4462			ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
4463			ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_speed);
4464		}
4465
4466		return;
4467	}
4468
4469	ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc);
4470	ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc);
4471
4472	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
4473		queue_type = VIRTNET_Q_TYPE_CQ;
4474
4475		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_CVQ;
4476		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc);
4477		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_cvq);
4478	}
4479
4480	queue_type = VIRTNET_Q_TYPE_RX;
4481
4482	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4483		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
4484		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4485		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_basic);
4486	}
4487
4488	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4489		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
4490		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4491		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_csum);
4492	}
4493
4494	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4495		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
4496		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4497		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_speed);
4498	}
4499
4500	queue_type = VIRTNET_Q_TYPE_TX;
4501
4502	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4503		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
4504		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4505		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_basic);
4506	}
4507
4508	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4509		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_GSO;
4510		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4511		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_gso);
4512	}
4513
4514	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4515		ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
4516		ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4517		ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_speed);
4518	}
4519}
4520
4521/* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
4522 * @sum: the position to store the sum values
4523 * @num: number of fields per queue
4524 * @q_value: pointer to the fields of the first queue
4525 * @q_num: number of queues
4526 */
4527static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num)
4528{
4529	u32 step = num;
4530	int i, j;
4531	u64 *p;
4532
4533	for (i = 0; i < num; ++i) {
4534		p = sum + i;
4535		*p = 0;
4536
4537		for (j = 0; j < q_num; ++j)
4538			*p += *(q_value + i + j * step);
4539	}
4540}
4541
4542static void virtnet_fill_total_fields(struct virtnet_info *vi,
4543				      struct virtnet_stats_ctx *ctx)
4544{
4545	u64 *data, *first_rx_q, *first_tx_q;
4546	u32 num_cq, num_rx, num_tx;
4547
4548	num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
4549	num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
4550	num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
4551
4552	first_rx_q = ctx->data + num_rx + num_tx + num_cq;
4553	first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx;
4554
4555	data = ctx->data;
4556
4557	stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs);
4558
4559	data = ctx->data + num_rx;
4560
4561	stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs);
4562}
4563
4564static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid,
4565				     struct virtnet_stats_ctx *ctx,
4566				     const u8 *base, bool drv_stats, u8 reply_type)
4567{
4568	const struct virtnet_stat_desc *desc;
4569	const u64_stats_t *v_stat;
4570	u64 offset, bitmap;
4571	const __le64 *v;
4572	u32 queue_type;
4573	int i, num;
4574
4575	queue_type = vq_type(vi, qid);
4576	bitmap = ctx->bitmap[queue_type];
4577
4578	if (drv_stats) {
4579		if (queue_type == VIRTNET_Q_TYPE_RX) {
4580			desc = &virtnet_rq_stats_desc_qstat[0];
4581			num = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
4582		} else {
4583			desc = &virtnet_sq_stats_desc_qstat[0];
4584			num = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
4585		}
4586
4587		for (i = 0; i < num; ++i) {
4588			offset = desc[i].qstat_offset / sizeof(*ctx->data);
4589			v_stat = (const u64_stats_t *)(base + desc[i].offset);
4590			ctx->data[offset] = u64_stats_read(v_stat);
4591		}
4592		return;
4593	}
4594
4595	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4596		desc = &virtnet_stats_rx_basic_desc_qstat[0];
4597		num = ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
4598		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
4599			goto found;
4600	}
4601
4602	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4603		desc = &virtnet_stats_rx_csum_desc_qstat[0];
4604		num = ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
4605		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
4606			goto found;
4607	}
4608
4609	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
4610		desc = &virtnet_stats_rx_gso_desc_qstat[0];
4611		num = ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
4612		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO)
4613			goto found;
4614	}
4615
4616	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4617		desc = &virtnet_stats_rx_speed_desc_qstat[0];
4618		num = ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
4619		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
4620			goto found;
4621	}
4622
4623	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4624		desc = &virtnet_stats_tx_basic_desc_qstat[0];
4625		num = ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
4626		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
4627			goto found;
4628	}
4629
4630	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
4631		desc = &virtnet_stats_tx_csum_desc_qstat[0];
4632		num = ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
4633		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM)
4634			goto found;
4635	}
4636
4637	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4638		desc = &virtnet_stats_tx_gso_desc_qstat[0];
4639		num = ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
4640		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
4641			goto found;
4642	}
4643
4644	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4645		desc = &virtnet_stats_tx_speed_desc_qstat[0];
4646		num = ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
4647		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
4648			goto found;
4649	}
4650
4651	return;
4652
4653found:
4654	for (i = 0; i < num; ++i) {
4655		offset = desc[i].qstat_offset / sizeof(*ctx->data);
4656		v = (const __le64 *)(base + desc[i].offset);
4657		ctx->data[offset] = le64_to_cpu(*v);
4658	}
4659}
4660
4661/* virtnet_fill_stats - copy the stats to qstats or ethtool -S
4662 * The stats source is the device or the driver.
4663 *
4664 * @vi: virtio net info
4665 * @qid: the vq id
4666 * @ctx: stats ctx (initialized by virtnet_stats_ctx_init())
4667 * @base: pointer to the device reply or the driver stats structure.
4668 * @drv_stats: designate the base type (device reply, driver stats)
4669 * @reply_type: the type of the device reply (if drv_stats is true, this must be zero)
4670 */
4671static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
4672			       struct virtnet_stats_ctx *ctx,
4673			       const u8 *base, bool drv_stats, u8 reply_type)
4674{
4675	u32 queue_type, num_rx, num_tx, num_cq;
4676	const struct virtnet_stat_desc *desc;
4677	const u64_stats_t *v_stat;
4678	u64 offset, bitmap;
4679	const __le64 *v;
4680	int i, num;
4681
4682	if (ctx->to_qstat)
4683		return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type);
4684
4685	num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
4686	num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
4687	num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
4688
4689	queue_type = vq_type(vi, qid);
4690	bitmap = ctx->bitmap[queue_type];
4691
4692	/* skip the rx/tx total fields at the head of the output buffer */
4693	offset = num_rx + num_tx;
4694
4695	if (queue_type == VIRTNET_Q_TYPE_TX) {
4696		offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
4697
4698		num = ARRAY_SIZE(virtnet_sq_stats_desc);
4699		if (drv_stats) {
4700			desc = &virtnet_sq_stats_desc[0];
4701			goto drv_stats;
4702		}
4703
4704		offset += num;
4705
4706	} else if (queue_type == VIRTNET_Q_TYPE_RX) {
4707		offset += num_cq + num_rx * (qid / 2);
4708
4709		num = ARRAY_SIZE(virtnet_rq_stats_desc);
4710		if (drv_stats) {
4711			desc = &virtnet_rq_stats_desc[0];
4712			goto drv_stats;
4713		}
4714
4715		offset += num;
4716	}
4717
4718	if (bitmap & VIRTIO_NET_STATS_TYPE_CVQ) {
4719		desc = &virtnet_stats_cvq_desc[0];
4720		num = ARRAY_SIZE(virtnet_stats_cvq_desc);
4721		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_CVQ)
4722			goto found;
4723
4724		offset += num;
4725	}
4726
4727	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4728		desc = &virtnet_stats_rx_basic_desc[0];
4729		num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
4730		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
4731			goto found;
4732
4733		offset += num;
4734	}
4735
4736	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4737		desc = &virtnet_stats_rx_csum_desc[0];
4738		num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
4739		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
4740			goto found;
4741
4742		offset += num;
4743	}
4744
4745	if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
4746		desc = &virtnet_stats_rx_speed_desc[0];
4747		num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
4748		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
4749			goto found;
4750
4751		offset += num;
4752	}
4753
4754	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4755		desc = &virtnet_stats_tx_basic_desc[0];
4756		num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
4757		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
4758			goto found;
4759
4760		offset += num;
4761	}
4762
4763	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4764		desc = &virtnet_stats_tx_gso_desc[0];
4765		num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
4766		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
4767			goto found;
4768
4769		offset += num;
4770	}
4771
4772	if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
4773		desc = &virtnet_stats_tx_speed_desc[0];
4774		num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
4775		if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
4776			goto found;
4777
4778		offset += num;
4779	}
4780
4781	return;
4782
4783found:
4784	for (i = 0; i < num; ++i) {
4785		v = (const __le64 *)(base + desc[i].offset);
4786		ctx->data[offset + i] = le64_to_cpu(*v);
4787	}
4788
4789	return;
4790
4791drv_stats:
4792	for (i = 0; i < num; ++i) {
4793		v_stat = (const u64_stats_t *)(base + desc[i].offset);
4794		ctx->data[offset + i] = u64_stats_read(v_stat);
4795	}
4796}
4797
4798static int __virtnet_get_hw_stats(struct virtnet_info *vi,
4799				  struct virtnet_stats_ctx *ctx,
4800				  struct virtio_net_ctrl_queue_stats *req,
4801				  int req_size, void *reply, int res_size)
4802{
4803	struct virtio_net_stats_reply_hdr *hdr;
4804	struct scatterlist sgs_in, sgs_out;
4805	void *p;
4806	u32 qid;
4807	int ok;
4808
4809	sg_init_one(&sgs_out, req, req_size);
4810	sg_init_one(&sgs_in, reply, res_size);
4811
4812	ok = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
4813					VIRTIO_NET_CTRL_STATS_GET,
4814					&sgs_out, &sgs_in);
4815
4816	if (!ok)
4817		return ok;
4818
4819	for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
4820		hdr = p;
4821		qid = le16_to_cpu(hdr->vq_index);
4822		virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type);
4823	}
4824
4825	return 0;
4826}
4827
4828static void virtnet_make_stat_req(struct virtnet_info *vi,
4829				  struct virtnet_stats_ctx *ctx,
4830				  struct virtio_net_ctrl_queue_stats *req,
4831				  int qid, int *idx)
4832{
4833	int qtype = vq_type(vi, qid);
4834	u64 bitmap = ctx->bitmap[qtype];
4835
4836	if (!bitmap)
4837		return;
4838
4839	req->stats[*idx].vq_index = cpu_to_le16(qid);
4840	req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap);
4841	*idx += 1;
4842}
4843
4844/* qid: -1: get stats of all vqs.
4845 *    >= 0: get the stats for the specified vq. This must not be the cvq.
4846 */
4847static int virtnet_get_hw_stats(struct virtnet_info *vi,
4848				struct virtnet_stats_ctx *ctx, int qid)
4849{
4850	int qnum, i, j, res_size, qtype, last_vq, first_vq;
4851	struct virtio_net_ctrl_queue_stats *req;
4852	bool enable_cvq;
4853	void *reply;
4854	int ok;
4855
4856	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
4857		return 0;
4858
4859	if (qid == -1) {
4860		last_vq = vi->curr_queue_pairs * 2 - 1;
4861		first_vq = 0;
4862		enable_cvq = true;
4863	} else {
4864		last_vq = qid;
4865		first_vq = qid;
4866		enable_cvq = false;
4867	}
4868
4869	qnum = 0;
4870	res_size = 0;
4871	for (i = first_vq; i <= last_vq ; ++i) {
4872		qtype = vq_type(vi, i);
4873		if (ctx->bitmap[qtype]) {
4874			++qnum;
4875			res_size += ctx->size[qtype];
4876		}
4877	}
4878
4879	if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
4880		res_size += ctx->size[VIRTNET_Q_TYPE_CQ];
4881		qnum += 1;
4882	}
4883
4884	req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
4885	if (!req)
4886		return -ENOMEM;
4887
4888	reply = kmalloc(res_size, GFP_KERNEL);
4889	if (!reply) {
4890		kfree(req);
4891		return -ENOMEM;
4892	}
4893
4894	j = 0;
4895	for (i = first_vq; i <= last_vq ; ++i)
4896		virtnet_make_stat_req(vi, ctx, req, i, &j);
4897
4898	if (enable_cvq)
4899		virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);
4900
4901	ok = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);
4902
4903	kfree(req);
4904	kfree(reply);
4905
4906	return ok;
4907}
4908
4909static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4910{
4911	struct virtnet_info *vi = netdev_priv(dev);
4912	unsigned int i;
4913	u8 *p = data;
4914
4915	switch (stringset) {
4916	case ETH_SS_STATS:
4917		/* Generate the total field names. */
4918		virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p);
4919		virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p);
4920
4921		virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
4922
4923		for (i = 0; i < vi->curr_queue_pairs; ++i)
4924			virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
4925
4926		for (i = 0; i < vi->curr_queue_pairs; ++i)
4927			virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
4928		break;
4929	}
4930}
4931
4932static int virtnet_get_sset_count(struct net_device *dev, int sset)
4933{
4934	struct virtnet_info *vi = netdev_priv(dev);
4935	struct virtnet_stats_ctx ctx = {0};
4936	u32 pair_count;
4937
4938	switch (sset) {
4939	case ETH_SS_STATS:
4940		virtnet_stats_ctx_init(vi, &ctx, NULL, false);
4941
4942		pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
4943
4944		return pair_count + ctx.desc_num[VIRTNET_Q_TYPE_CQ] +
4945			vi->curr_queue_pairs * pair_count;
4946	default:
4947		return -EOPNOTSUPP;
4948	}
4949}
4950
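/* Fill the buffer behind "ethtool -S eth0": device (hw) counters are
 * fetched once for all queues over the control virtqueue, the driver's
 * per-queue counters are then copied under the u64_stats retry loops,
 * and finally the rx/tx totals are summed up.
 */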
4951static void virtnet_get_ethtool_stats(struct net_device *dev,
4952				      struct ethtool_stats *stats, u64 *data)
4953{
4954	struct virtnet_info *vi = netdev_priv(dev);
4955	struct virtnet_stats_ctx ctx = {0};
4956	unsigned int start, i;
4957	const u8 *stats_base;
4958
4959	virtnet_stats_ctx_init(vi, &ctx, data, false);
4960	if (virtnet_get_hw_stats(vi, &ctx, -1))
4961		dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
4962
4963	for (i = 0; i < vi->curr_queue_pairs; i++) {
4964		struct receive_queue *rq = &vi->rq[i];
4965		struct send_queue *sq = &vi->sq[i];
4966
4967		stats_base = (const u8 *)&rq->stats;
4968		do {
4969			start = u64_stats_fetch_begin(&rq->stats.syncp);
4970			virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0);
4971		} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
4972
4973		stats_base = (const u8 *)&sq->stats;
4974		do {
4975			start = u64_stats_fetch_begin(&sq->stats.syncp);
4976			virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
4977		} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
4978	}
4979
4980	virtnet_fill_total_fields(vi, &ctx);
4981}
4982
4983static void virtnet_get_channels(struct net_device *dev,
4984				 struct ethtool_channels *channels)
4985{
4986	struct virtnet_info *vi = netdev_priv(dev);
4987
4988	channels->combined_count = vi->curr_queue_pairs;
4989	channels->max_combined = vi->max_queue_pairs;
4990	channels->max_other = 0;
4991	channels->rx_count = 0;
4992	channels->tx_count = 0;
4993	channels->other_count = 0;
4994}
4995
4996static int virtnet_set_link_ksettings(struct net_device *dev,
4997				      const struct ethtool_link_ksettings *cmd)
4998{
4999	struct virtnet_info *vi = netdev_priv(dev);
5000
5001	return ethtool_virtdev_set_link_ksettings(dev, cmd,
5002						  &vi->speed, &vi->duplex);
5003}
5004
5005static int virtnet_get_link_ksettings(struct net_device *dev,
5006				      struct ethtool_link_ksettings *cmd)
5007{
5008	struct virtnet_info *vi = netdev_priv(dev);
5009
5010	cmd->base.speed = vi->speed;
5011	cmd->base.duplex = vi->duplex;
5012	cmd->base.port = PORT_OTHER;
5013
5014	return 0;
5015}
5016
5017static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
5018					  struct ethtool_coalesce *ec)
5019{
5020	struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
5021	struct scatterlist sgs_tx;
5022	int i;
5023
5024	coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
5025	if (!coal_tx)
5026		return -ENOMEM;
5027
5028	coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
5029	coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
5030	sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
5031
5032	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
5033				  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
5034				  &sgs_tx))
5035		return -EINVAL;
5036
5037	vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
5038	vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
5039	for (i = 0; i < vi->max_queue_pairs; i++) {
5040		vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
5041		vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
5042	}
5043
5044	return 0;
5045}
5046
5047static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
5048					  struct ethtool_coalesce *ec)
5049{
5050	struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
5051	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
5052	struct scatterlist sgs_rx;
5053	int i;
5054
5055	if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
5056		return -EOPNOTSUPP;
5057
5058	if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
5059			       ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
5060		return -EINVAL;
5061
5062	if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
5063		vi->rx_dim_enabled = true;
5064		for (i = 0; i < vi->max_queue_pairs; i++) {
5065			mutex_lock(&vi->rq[i].dim_lock);
5066			vi->rq[i].dim_enabled = true;
5067			mutex_unlock(&vi->rq[i].dim_lock);
5068		}
5069		return 0;
5070	}
5071
5072	coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
5073	if (!coal_rx)
5074		return -ENOMEM;
5075
5076	if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
5077		vi->rx_dim_enabled = false;
5078		for (i = 0; i < vi->max_queue_pairs; i++) {
5079			mutex_lock(&vi->rq[i].dim_lock);
5080			vi->rq[i].dim_enabled = false;
5081			mutex_unlock(&vi->rq[i].dim_lock);
5082		}
5083	}
5084
5085	/* Since the per-queue coalescing params can be set,
5086	 * we need to apply the new global params even if they
5087	 * are not updated.
5088	 */
5089	coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
5090	coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
5091	sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
5092
5093	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
5094				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
5095				  &sgs_rx))
5096		return -EINVAL;
5097
5098	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
5099	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
5100	for (i = 0; i < vi->max_queue_pairs; i++) {
5101		mutex_lock(&vi->rq[i].dim_lock);
5102		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
5103		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
5104		mutex_unlock(&vi->rq[i].dim_lock);
5105	}
5106
5107	return 0;
5108}
5109
5110static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
5111				       struct ethtool_coalesce *ec)
5112{
5113	int err;
5114
5115	err = virtnet_send_tx_notf_coal_cmds(vi, ec);
5116	if (err)
5117		return err;
5118
5119	err = virtnet_send_rx_notf_coal_cmds(vi, ec);
5120	if (err)
5121		return err;
5122
5123	return 0;
5124}
5125
5126static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
5127					     struct ethtool_coalesce *ec,
5128					     u16 queue)
5129{
5130	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
5131	u32 max_usecs, max_packets;
5132	bool cur_rx_dim;
5133	int err;
5134
5135	mutex_lock(&vi->rq[queue].dim_lock);
5136	cur_rx_dim = vi->rq[queue].dim_enabled;
5137	max_usecs = vi->rq[queue].intr_coal.max_usecs;
5138	max_packets = vi->rq[queue].intr_coal.max_packets;
5139
5140	if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
5141			       ec->rx_max_coalesced_frames != max_packets)) {
5142		mutex_unlock(&vi->rq[queue].dim_lock);
5143		return -EINVAL;
5144	}
5145
5146	if (rx_ctrl_dim_on && !cur_rx_dim) {
5147		vi->rq[queue].dim_enabled = true;
5148		mutex_unlock(&vi->rq[queue].dim_lock);
5149		return 0;
5150	}
5151
5152	if (!rx_ctrl_dim_on && cur_rx_dim)
5153		vi->rq[queue].dim_enabled = false;
5154
5155	/* If no params are updated, userspace ethtool will
5156	 * reject the modification.
5157	 */
5158	err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
5159					       ec->rx_coalesce_usecs,
5160					       ec->rx_max_coalesced_frames);
5161	mutex_unlock(&vi->rq[queue].dim_lock);
5162	return err;
5163}
5164
5165static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
5166					  struct ethtool_coalesce *ec,
5167					  u16 queue)
5168{
5169	int err;
5170
5171	err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
5172	if (err)
5173		return err;
5174
5175	err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
5176					       ec->tx_coalesce_usecs,
5177					       ec->tx_max_coalesced_frames);
5178	if (err)
5179		return err;
5180
5181	return 0;
5182}
5183
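/* Deferred work scheduled by the net_dim library when dynamic interrupt
 * moderation selects a new RX profile; it pushes the chosen usecs/packets
 * values to the device with a per-queue coalescing command.
 */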
5184static void virtnet_rx_dim_work(struct work_struct *work)
5185{
5186	struct dim *dim = container_of(work, struct dim, work);
5187	struct receive_queue *rq = container_of(dim,
5188			struct receive_queue, dim);
5189	struct virtnet_info *vi = rq->vq->vdev->priv;
5190	struct net_device *dev = vi->dev;
5191	struct dim_cq_moder update_moder;
5192	int qnum, err;
5193
5194	qnum = rq - vi->rq;
5195
5196	mutex_lock(&rq->dim_lock);
5197	if (!rq->dim_enabled)
5198		goto out;
5199
5200	update_moder = net_dim_get_rx_irq_moder(dev, dim);
5201	if (update_moder.usec != rq->intr_coal.max_usecs ||
5202	    update_moder.pkts != rq->intr_coal.max_packets) {
5203		err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
5204						       update_moder.usec,
5205						       update_moder.pkts);
5206		if (err)
5207			pr_debug("%s: Failed to send dim parameters on rxq%d\n",
5208				 dev->name, qnum);
5209	}
5210out:
5211	dim->state = DIM_START_MEASURE;
5212	mutex_unlock(&rq->dim_lock);
5213}
5214
5215static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
5216{
5217	/* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
5218	 * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
5219	 */
5220	if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
5221		return -EOPNOTSUPP;
5222
5223	if (ec->tx_max_coalesced_frames > 1 ||
5224	    ec->rx_max_coalesced_frames != 1)
5225		return -EINVAL;
5226
5227	return 0;
5228}
5229
5230static int virtnet_should_update_vq_weight(int dev_flags, int weight,
5231					   int vq_weight, bool *should_update)
5232{
5233	if (weight ^ vq_weight) {
5234		if (dev_flags & IFF_UP)
5235			return -EBUSY;
5236		*should_update = true;
5237	}
5238
5239	return 0;
5240}
5241
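/* Backs "ethtool -C", e.g.
 *   ethtool -C eth0 rx-usecs 8 rx-frames 64 adaptive-rx on
 * (example command, illustrative only).  Global values are sent to the
 * device when VIRTIO_NET_F_NOTF_COAL is negotiated; tx-frames 0 also
 * disables the tx NAPI weight, which is only allowed while the
 * interface is down.
 */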
5242static int virtnet_set_coalesce(struct net_device *dev,
5243				struct ethtool_coalesce *ec,
5244				struct kernel_ethtool_coalesce *kernel_coal,
5245				struct netlink_ext_ack *extack)
5246{
5247	struct virtnet_info *vi = netdev_priv(dev);
5248	int ret, queue_number, napi_weight, i;
5249	bool update_napi = false;
5250
5251	/* Can't change NAPI weight while the interface is up */
5252	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
5253	for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
5254		ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
5255						      vi->sq[queue_number].napi.weight,
5256						      &update_napi);
5257		if (ret)
5258			return ret;
5259
5260		if (update_napi) {
5261			/* All queues that belong to [queue_number, vi->max_queue_pairs] will be
5262			 * updated for the sake of simplicity, which might not be necessary
5263			 */
5264			break;
5265		}
5266	}
5267
5268	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
5269		ret = virtnet_send_notf_coal_cmds(vi, ec);
5270	else
5271		ret = virtnet_coal_params_supported(ec);
5272
5273	if (ret)
5274		return ret;
5275
5276	if (update_napi) {
5277		/* xsk xmit depends on the tx napi. So if xsk is active,
5278		 * prevent modifications to tx napi.
5279		 */
5280		for (i = queue_number; i < vi->max_queue_pairs; i++) {
5281			if (vi->sq[i].xsk_pool)
5282				return -EBUSY;
5283		}
5284
5285		for (; queue_number < vi->max_queue_pairs; queue_number++)
5286			vi->sq[queue_number].napi.weight = napi_weight;
5287	}
5288
5289	return ret;
5290}
5291
5292static int virtnet_get_coalesce(struct net_device *dev,
5293				struct ethtool_coalesce *ec,
5294				struct kernel_ethtool_coalesce *kernel_coal,
5295				struct netlink_ext_ack *extack)
5296{
5297	struct virtnet_info *vi = netdev_priv(dev);
5298
5299	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
5300		ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
5301		ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
5302		ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
5303		ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
5304		ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
5305	} else {
5306		ec->rx_max_coalesced_frames = 1;
5307
5308		if (vi->sq[0].napi.weight)
5309			ec->tx_max_coalesced_frames = 1;
5310	}
5311
5312	return 0;
5313}
5314
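/* Per-queue variant of the above, reached through e.g.
 *   ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 8
 * (example command, illustrative only).  Actual usec/frame updates need
 * VIRTIO_NET_F_VQ_NOTF_COAL; otherwise only the parameter combinations
 * virtnet_coal_params_supported() accepts are allowed.
 */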
5315static int virtnet_set_per_queue_coalesce(struct net_device *dev,
5316					  u32 queue,
5317					  struct ethtool_coalesce *ec)
5318{
5319	struct virtnet_info *vi = netdev_priv(dev);
5320	int ret, napi_weight;
5321	bool update_napi = false;
5322
5323	if (queue >= vi->max_queue_pairs)
5324		return -EINVAL;
5325
5326	/* Can't change NAPI weight while the interface is up */
5327	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
5328	ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
5329					      vi->sq[queue].napi.weight,
5330					      &update_napi);
5331	if (ret)
5332		return ret;
5333
5334	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
5335		ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
5336	else
5337		ret = virtnet_coal_params_supported(ec);
5338
5339	if (ret)
5340		return ret;
5341
5342	if (update_napi)
5343		vi->sq[queue].napi.weight = napi_weight;
5344
5345	return 0;
5346}
5347
5348static int virtnet_get_per_queue_coalesce(struct net_device *dev,
5349					  u32 queue,
5350					  struct ethtool_coalesce *ec)
5351{
5352	struct virtnet_info *vi = netdev_priv(dev);
5353
5354	if (queue >= vi->max_queue_pairs)
5355		return -EINVAL;
5356
5357	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
5358		mutex_lock(&vi->rq[queue].dim_lock);
5359		ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
5360		ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
5361		ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
5362		ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
5363		ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
5364		mutex_unlock(&vi->rq[queue].dim_lock);
5365	} else {
5366		ec->rx_max_coalesced_frames = 1;
5367
5368		if (vi->sq[queue].napi.weight)
5369			ec->tx_max_coalesced_frames = 1;
5370	}
5371
5372	return 0;
5373}
5374
5375static void virtnet_init_settings(struct net_device *dev)
5376{
5377	struct virtnet_info *vi = netdev_priv(dev);
5378
5379	vi->speed = SPEED_UNKNOWN;
5380	vi->duplex = DUPLEX_UNKNOWN;
5381}
5382
5383static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
5384{
5385	return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
5386}
5387
5388static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
5389{
5390	return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
5391}
5392
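/* Report the RSS hash key and indirection table for "ethtool -x eth0".
 * Only the Toeplitz hash (ETH_RSS_HASH_TOP) is ever exposed.
 */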
5393static int virtnet_get_rxfh(struct net_device *dev,
5394			    struct ethtool_rxfh_param *rxfh)
5395{
5396	struct virtnet_info *vi = netdev_priv(dev);
5397	int i;
5398
5399	if (rxfh->indir) {
5400		for (i = 0; i < vi->rss_indir_table_size; ++i)
5401			rxfh->indir[i] = vi->rss.indirection_table[i];
5402	}
5403
5404	if (rxfh->key)
5405		memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
5406
5407	rxfh->hfunc = ETH_RSS_HASH_TOP;
5408
5409	return 0;
5410}
5411
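/* Apply a new RSS key and/or indirection table, e.g.
 *   ethtool -X eth0 equal 4
 *   ethtool -X eth0 hkey <key bytes>
 * (example commands, illustrative only).  The indirection table needs
 * VIRTIO_NET_F_RSS; setting the key also works with
 * VIRTIO_NET_F_HASH_REPORT alone.
 */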
5412static int virtnet_set_rxfh(struct net_device *dev,
5413			    struct ethtool_rxfh_param *rxfh,
5414			    struct netlink_ext_ack *extack)
5415{
5416	struct virtnet_info *vi = netdev_priv(dev);
5417	bool update = false;
5418	int i;
5419
5420	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
5421	    rxfh->hfunc != ETH_RSS_HASH_TOP)
5422		return -EOPNOTSUPP;
5423
5424	if (rxfh->indir) {
5425		if (!vi->has_rss)
5426			return -EOPNOTSUPP;
5427
5428		for (i = 0; i < vi->rss_indir_table_size; ++i)
5429			vi->rss.indirection_table[i] = rxfh->indir[i];
5430		update = true;
5431	}
5432
5433	if (rxfh->key) {
5434		/* If either _F_HASH_REPORT or _F_RSS is negotiated, the
5435		 * device provides hash calculation capabilities, that is,
5436		 * hash_key is configured.
5437		 */
5438		if (!vi->has_rss && !vi->has_rss_hash_report)
5439			return -EOPNOTSUPP;
5440
5441		memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
5442		update = true;
5443	}
5444
5445	if (update)
5446		virtnet_commit_rss_command(vi);
5447
5448	return 0;
5449}
5450
5451static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
5452{
5453	struct virtnet_info *vi = netdev_priv(dev);
5454	int rc = 0;
5455
5456	switch (info->cmd) {
5457	case ETHTOOL_GRXRINGS:
5458		info->data = vi->curr_queue_pairs;
5459		break;
5460	case ETHTOOL_GRXFH:
5461		virtnet_get_hashflow(vi, info);
5462		break;
5463	default:
5464		rc = -EOPNOTSUPP;
5465	}
5466
5467	return rc;
5468}
5469
5470static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
5471{
5472	struct virtnet_info *vi = netdev_priv(dev);
5473	int rc = 0;
5474
5475	switch (info->cmd) {
5476	case ETHTOOL_SRXFH:
5477		if (!virtnet_set_hashflow(vi, info))
5478			rc = -EINVAL;
5479
5480		break;
5481	default:
5482		rc = -EOPNOTSUPP;
5483	}
5484
5485	return rc;
5486}
5487
5488static const struct ethtool_ops virtnet_ethtool_ops = {
5489	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
5490		ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
5491	.get_drvinfo = virtnet_get_drvinfo,
5492	.get_link = ethtool_op_get_link,
5493	.get_ringparam = virtnet_get_ringparam,
5494	.set_ringparam = virtnet_set_ringparam,
5495	.get_strings = virtnet_get_strings,
5496	.get_sset_count = virtnet_get_sset_count,
5497	.get_ethtool_stats = virtnet_get_ethtool_stats,
5498	.set_channels = virtnet_set_channels,
5499	.get_channels = virtnet_get_channels,
5500	.get_ts_info = ethtool_op_get_ts_info,
5501	.get_link_ksettings = virtnet_get_link_ksettings,
5502	.set_link_ksettings = virtnet_set_link_ksettings,
5503	.set_coalesce = virtnet_set_coalesce,
5504	.get_coalesce = virtnet_get_coalesce,
5505	.set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
5506	.get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
5507	.get_rxfh_key_size = virtnet_get_rxfh_key_size,
5508	.get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
5509	.get_rxfh = virtnet_get_rxfh,
5510	.set_rxfh = virtnet_set_rxfh,
5511	.get_rxnfc = virtnet_get_rxnfc,
5512	.set_rxnfc = virtnet_set_rxnfc,
5513};
5514
5515static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
5516				       struct netdev_queue_stats_rx *stats)
5517{
5518	struct virtnet_info *vi = netdev_priv(dev);
5519	struct receive_queue *rq = &vi->rq[i];
5520	struct virtnet_stats_ctx ctx = {0};
5521
5522	virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
5523
5524	virtnet_get_hw_stats(vi, &ctx, i * 2);
5525	virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0);
5526}
5527
5528static void virtnet_get_queue_stats_tx(struct net_device *dev, int i,
5529				       struct netdev_queue_stats_tx *stats)
5530{
5531	struct virtnet_info *vi = netdev_priv(dev);
5532	struct send_queue *sq = &vi->sq[i];
5533	struct virtnet_stats_ctx ctx = {0};
5534
5535	virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
5536
5537	virtnet_get_hw_stats(vi, &ctx, i * 2 + 1);
5538	virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0);
5539}
5540
5541static void virtnet_get_base_stats(struct net_device *dev,
5542				   struct netdev_queue_stats_rx *rx,
5543				   struct netdev_queue_stats_tx *tx)
5544{
5545	struct virtnet_info *vi = netdev_priv(dev);
5546
5547	/* The queue stats of virtio-net are never reset, so the base
5548	 * stats reported here are all zero.
5549	 */
5550	rx->bytes = 0;
5551	rx->packets = 0;
5552
5553	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
5554		rx->hw_drops = 0;
5555		rx->hw_drop_overruns = 0;
5556	}
5557
5558	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
5559		rx->csum_unnecessary = 0;
5560		rx->csum_none = 0;
5561		rx->csum_bad = 0;
5562	}
5563
5564	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
5565		rx->hw_gro_packets = 0;
5566		rx->hw_gro_bytes = 0;
5567		rx->hw_gro_wire_packets = 0;
5568		rx->hw_gro_wire_bytes = 0;
5569	}
5570
5571	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED)
5572		rx->hw_drop_ratelimits = 0;
5573
5574	tx->bytes = 0;
5575	tx->packets = 0;
5576	tx->stop = 0;
5577	tx->wake = 0;
5578
5579	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
5580		tx->hw_drops = 0;
5581		tx->hw_drop_errors = 0;
5582	}
5583
5584	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
5585		tx->csum_none = 0;
5586		tx->needs_csum = 0;
5587	}
5588
5589	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
5590		tx->hw_gso_packets = 0;
5591		tx->hw_gso_bytes = 0;
5592		tx->hw_gso_wire_packets = 0;
5593		tx->hw_gso_wire_bytes = 0;
5594	}
5595
5596	if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
5597		tx->hw_drop_ratelimits = 0;
5598}
5599
5600static const struct netdev_stat_ops virtnet_stat_ops = {
5601	.get_queue_stats_rx	= virtnet_get_queue_stats_rx,
5602	.get_queue_stats_tx	= virtnet_get_queue_stats_tx,
5603	.get_base_stats		= virtnet_get_base_stats,
5604};
5605
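/* Quiesce the device on the virtio freeze (suspend) path; the matching
 * virtnet_restore_up() below re-initializes the virtqueues and
 * re-attaches the interface on restore (resume).
 */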
5606static void virtnet_freeze_down(struct virtio_device *vdev)
5607{
5608	struct virtnet_info *vi = vdev->priv;
5609
5610	/* Make sure no work handler is accessing the device */
5611	flush_work(&vi->config_work);
5612	disable_rx_mode_work(vi);
5613	flush_work(&vi->rx_mode_work);
5614
5615	netif_tx_lock_bh(vi->dev);
5616	netif_device_detach(vi->dev);
5617	netif_tx_unlock_bh(vi->dev);
5618	if (netif_running(vi->dev))
5619		virtnet_close(vi->dev);
5620}
5621
5622static int init_vqs(struct virtnet_info *vi);
5623
5624static int virtnet_restore_up(struct virtio_device *vdev)
5625{
5626	struct virtnet_info *vi = vdev->priv;
5627	int err;
5628
5629	err = init_vqs(vi);
5630	if (err)
5631		return err;
5632
5633	virtio_device_ready(vdev);
5634
5635	enable_delayed_refill(vi);
5636	enable_rx_mode_work(vi);
5637
5638	if (netif_running(vi->dev)) {
5639		err = virtnet_open(vi->dev);
5640		if (err)
5641			return err;
5642	}
5643
5644	netif_tx_lock_bh(vi->dev);
5645	netif_device_attach(vi->dev);
5646	netif_tx_unlock_bh(vi->dev);
5647	return err;
5648}
5649
5650static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
5651{
5652	__virtio64 *_offloads __free(kfree) = NULL;
5653	struct scatterlist sg;
5654
5655	_offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
5656	if (!_offloads)
5657		return -ENOMEM;
5658
5659	*_offloads = cpu_to_virtio64(vi->vdev, offloads);
5660
5661	sg_init_one(&sg, _offloads, sizeof(*_offloads));
5662
5663	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
5664				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
5665		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
5666		return -EINVAL;
5667	}
5668
5669	return 0;
5670}
5671
5672static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
5673{
5674	u64 offloads = 0;
5675
5676	if (!vi->guest_offloads)
5677		return 0;
5678
5679	return virtnet_set_guest_offloads(vi, offloads);
5680}
5681
5682static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
5683{
5684	u64 offloads = vi->guest_offloads;
5685
5686	if (!vi->guest_offloads)
5687		return 0;
5688
5689	return virtnet_set_guest_offloads(vi, offloads);
5690}
5691
5692static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
5693				    struct xsk_buff_pool *pool)
5694{
5695	int err, qindex;
5696
5697	qindex = rq - vi->rq;
5698
5699	if (pool) {
5700		err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id);
5701		if (err < 0)
5702			return err;
5703
5704		err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info,
5705						 MEM_TYPE_XSK_BUFF_POOL, NULL);
5706		if (err < 0)
5707			goto unreg;
5708
5709		xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info);
5710	}
5711
5712	virtnet_rx_pause(vi, rq);
5713
5714	err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf, NULL);
5715	if (err) {
5716		netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
5717
5718		pool = NULL;
5719	}
5720
5721	rq->xsk_pool = pool;
5722
5723	virtnet_rx_resume(vi, rq);
5724
5725	if (pool)
5726		return 0;
5727
5728unreg:
5729	xdp_rxq_info_unreg(&rq->xsk_rxq_info);
5730	return err;
5731}
5732
5733static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
5734				    struct send_queue *sq,
5735				    struct xsk_buff_pool *pool)
5736{
5737	int err, qindex;
5738
5739	qindex = sq - vi->sq;
5740
5741	virtnet_tx_pause(vi, sq);
5742
5743	err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf,
5744			      virtnet_sq_free_unused_buf_done);
5745	if (err) {
5746		netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
5747		pool = NULL;
5748	}
5749
5750	sq->xsk_pool = pool;
5751
5752	virtnet_tx_resume(vi, sq);
5753
5754	return err;
5755}
5756
5757static int virtnet_xsk_pool_enable(struct net_device *dev,
5758				   struct xsk_buff_pool *pool,
5759				   u16 qid)
5760{
5761	struct virtnet_info *vi = netdev_priv(dev);
5762	struct receive_queue *rq;
5763	struct device *dma_dev;
5764	struct send_queue *sq;
5765	dma_addr_t hdr_dma;
5766	int err, size;
5767
5768	if (vi->hdr_len > xsk_pool_get_headroom(pool))
5769		return -EINVAL;
5770
5771	/* In big_packets mode, XDP cannot work, so there is no need to
5772	 * initialize the xsk state of the rq.
5773	 */
5774	if (vi->big_packets && !vi->mergeable_rx_bufs)
5775		return -ENOENT;
5776
5777	if (qid >= vi->curr_queue_pairs)
5778		return -EINVAL;
5779
5780	sq = &vi->sq[qid];
5781	rq = &vi->rq[qid];
5782
5783	/* xsk assumes that tx and rx share the same dma device: AF_XDP may
5784	 * receive into a buffer from the rx queue and reuse that buffer to
5785	 * transmit on the tx queue, so the dma dev of sq and rq must be the same one.
5786	 *
5787	 * But vq->dma_dev allows each vq to have its own dma dev, so check
5788	 * that the dma devs of rq and sq are the same dev.
5789	 */
5790	if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq))
5791		return -EINVAL;
5792
5793	dma_dev = virtqueue_dma_dev(rq->vq);
5794	if (!dma_dev)
5795		return -EINVAL;
5796
5797	size = virtqueue_get_vring_size(rq->vq);
5798
5799	rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL);
5800	if (!rq->xsk_buffs)
5801		return -ENOMEM;
5802
5803	hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
5804						 DMA_TO_DEVICE, 0);
5805	if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
5806		return -ENOMEM;
5807
5808	err = xsk_pool_dma_map(pool, dma_dev, 0);
5809	if (err)
5810		goto err_xsk_map;
5811
5812	err = virtnet_rq_bind_xsk_pool(vi, rq, pool);
5813	if (err)
5814		goto err_rq;
5815
5816	err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
5817	if (err)
5818		goto err_sq;
5819
5820	/* For now we do not support tx offloads (such as tx csum), so every tx
5821	 * virtnet hdr is all zeros and all the tx packets can share a single hdr.
5822	 */
5823	sq->xsk_hdr_dma_addr = hdr_dma;
5824
5825	return 0;
5826
5827err_sq:
5828	virtnet_rq_bind_xsk_pool(vi, rq, NULL);
5829err_rq:
5830	xsk_pool_dma_unmap(pool, 0);
5831err_xsk_map:
5832	virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
5833					 DMA_TO_DEVICE, 0);
5834	return err;
5835}
5836
5837static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
5838{
5839	struct virtnet_info *vi = netdev_priv(dev);
5840	struct xsk_buff_pool *pool;
5841	struct receive_queue *rq;
5842	struct send_queue *sq;
5843	int err;
5844
5845	if (qid >= vi->curr_queue_pairs)
5846		return -EINVAL;
5847
5848	sq = &vi->sq[qid];
5849	rq = &vi->rq[qid];
5850
5851	pool = rq->xsk_pool;
5852
5853	err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
5854	err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
5855
5856	xsk_pool_dma_unmap(pool, 0);
5857
5858	virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
5859					 vi->hdr_len, DMA_TO_DEVICE, 0);
5860	kvfree(rq->xsk_buffs);
5861
5862	return err;
5863}
5864
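/* Called via .ndo_bpf with XDP_SETUP_XSK_POOL: pool is non-NULL when an
 * AF_XDP socket is bound to queue xdp->xsk.queue_id and NULL when it is
 * released again.
 */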
5865static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
5866{
5867	if (xdp->xsk.pool)
5868		return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
5869					       xdp->xsk.queue_id);
5870	else
5871		return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
5872}
5873
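/* Attach or detach an XDP program, e.g. via
 *   ip link set dev eth0 xdp obj prog.o sec xdp
 * or bpf_xdp_attach() (example commands, illustrative only).  Extra
 * queues (one per CPU) are reserved for XDP_TX when enough queue pairs
 * are available; otherwise XDP_TX and XDP_REDIRECT fall back to a
 * slower, locked tx mode.
 */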
5874static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
5875			   struct netlink_ext_ack *extack)
5876{
5877	unsigned int room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
5878					   sizeof(struct skb_shared_info));
5879	unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
5880	struct virtnet_info *vi = netdev_priv(dev);
5881	struct bpf_prog *old_prog;
5882	u16 xdp_qp = 0, curr_qp;
5883	int i, err;
5884
5885	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
5886	    && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
5887	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
5888	        virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
5889		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
5890		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
5891		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
5892		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
5893		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
5894		return -EOPNOTSUPP;
5895	}
5896
5897	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
5898		NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
5899		return -EINVAL;
5900	}
5901
5902	if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
5903		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
5904		netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
5905		return -EINVAL;
5906	}
5907
5908	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
5909	if (prog)
5910		xdp_qp = nr_cpu_ids;
5911
5912	/* XDP requires extra queues for XDP_TX */
5913	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
5914		netdev_warn_once(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
5915				 curr_qp + xdp_qp, vi->max_queue_pairs);
5916		xdp_qp = 0;
5917	}
5918
5919	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
5920	if (!prog && !old_prog)
5921		return 0;
5922
5923	if (prog)
5924		bpf_prog_add(prog, vi->max_queue_pairs - 1);
5925
5926	/* Make sure NAPI is not using any XDP TX queues for RX. */
5927	if (netif_running(dev)) {
5928		for (i = 0; i < vi->max_queue_pairs; i++) {
5929			napi_disable(&vi->rq[i].napi);
5930			virtnet_napi_tx_disable(&vi->sq[i].napi);
5931		}
5932	}
5933
5934	if (!prog) {
5935		for (i = 0; i < vi->max_queue_pairs; i++) {
5936			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
5937			if (i == 0)
5938				virtnet_restore_guest_offloads(vi);
5939		}
5940		synchronize_net();
5941	}
5942
5943	err = virtnet_set_queues(vi, curr_qp + xdp_qp);
5944	if (err)
5945		goto err;
5946	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
5947	vi->xdp_queue_pairs = xdp_qp;
5948
5949	if (prog) {
5950		vi->xdp_enabled = true;
5951		for (i = 0; i < vi->max_queue_pairs; i++) {
5952			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
5953			if (i == 0 && !old_prog)
5954				virtnet_clear_guest_offloads(vi);
5955		}
5956		if (!old_prog)
5957			xdp_features_set_redirect_target(dev, true);
5958	} else {
5959		xdp_features_clear_redirect_target(dev);
5960		vi->xdp_enabled = false;
5961	}
5962
5963	for (i = 0; i < vi->max_queue_pairs; i++) {
5964		if (old_prog)
5965			bpf_prog_put(old_prog);
5966		if (netif_running(dev)) {
5967			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
5968			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
5969					       &vi->sq[i].napi);
5970		}
5971	}
5972
5973	return 0;
5974
5975err:
5976	if (!prog) {
5977		virtnet_clear_guest_offloads(vi);
5978		for (i = 0; i < vi->max_queue_pairs; i++)
5979			rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
5980	}
5981
5982	if (netif_running(dev)) {
5983		for (i = 0; i < vi->max_queue_pairs; i++) {
5984			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
5985			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
5986					       &vi->sq[i].napi);
5987		}
5988	}
5989	if (prog)
5990		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
5991	return err;
5992}
5993
5994static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5995{
5996	switch (xdp->command) {
5997	case XDP_SETUP_PROG:
5998		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
5999	case XDP_SETUP_XSK_POOL:
6000		return virtnet_xsk_pool_setup(dev, xdp);
6001	default:
6002		return -EINVAL;
6003	}
6004}
6005
6006static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
6007				      size_t len)
6008{
6009	struct virtnet_info *vi = netdev_priv(dev);
6010	int ret;
6011
6012	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
6013		return -EOPNOTSUPP;
6014
6015	ret = snprintf(buf, len, "sby");
6016	if (ret >= len)
6017		return -EOPNOTSUPP;
6018
6019	return 0;
6020}
6021
6022static int virtnet_set_features(struct net_device *dev,
6023				netdev_features_t features)
6024{
6025	struct virtnet_info *vi = netdev_priv(dev);
6026	u64 offloads;
6027	int err;
6028
6029	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
6030		if (vi->xdp_enabled)
6031			return -EBUSY;
6032
6033		if (features & NETIF_F_GRO_HW)
6034			offloads = vi->guest_offloads_capable;
6035		else
6036			offloads = vi->guest_offloads_capable &
6037				   ~GUEST_OFFLOAD_GRO_HW_MASK;
6038
6039		err = virtnet_set_guest_offloads(vi, offloads);
6040		if (err)
6041			return err;
6042		vi->guest_offloads = offloads;
6043	}
6044
6045	if ((dev->features ^ features) & NETIF_F_RXHASH) {
6046		if (features & NETIF_F_RXHASH)
6047			vi->rss.hash_types = vi->rss_hash_types_saved;
6048		else
6049			vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
6050
6051		if (!virtnet_commit_rss_command(vi))
6052			return -EINVAL;
6053	}
6054
6055	return 0;
6056}
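
/* Example (illustrative): disabling hardware GRO from userspace, e.g.
 *
 *	ethtool -K eth0 rx-gro-hw off
 *
 * ends up here and clears the GUEST_OFFLOAD_GRO_HW_MASK bits from the guest
 * offloads, while toggling NETIF_F_RXHASH re-commits the RSS configuration
 * through the control virtqueue.
 */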
6057
6058static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
6059{
6060	struct virtnet_info *priv = netdev_priv(dev);
6061	struct send_queue *sq = &priv->sq[txqueue];
6062	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
6063
6064	u64_stats_update_begin(&sq->stats.syncp);
6065	u64_stats_inc(&sq->stats.tx_timeouts);
6066	u64_stats_update_end(&sq->stats.syncp);
6067
6068	netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
6069		   txqueue, sq->name, sq->vq->index, sq->vq->name,
6070		   jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
6071}
6072
6073static int virtnet_init_irq_moder(struct virtnet_info *vi)
6074{
6075	u8 profile_flags = 0, coal_flags = 0;
6076	int ret, i;
6077
6078	profile_flags |= DIM_PROFILE_RX;
6079	coal_flags |= DIM_COALESCE_USEC | DIM_COALESCE_PKTS;
6080	ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags,
6081				     DIM_CQ_PERIOD_MODE_START_FROM_EQE,
6082				     0, virtnet_rx_dim_work, NULL);
6083
6084	if (ret)
6085		return ret;
6086
6087	for (i = 0; i < vi->max_queue_pairs; i++)
6088		net_dim_setting(vi->dev, &vi->rq[i].dim, false);
6089
6090	return 0;
6091}
6092
6093static void virtnet_free_irq_moder(struct virtnet_info *vi)
6094{
6095	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
6096		return;
6097
6098	rtnl_lock();
6099	net_dim_free_irq_moder(vi->dev);
6100	rtnl_unlock();
6101}
6102
6103static const struct net_device_ops virtnet_netdev = {
6104	.ndo_open            = virtnet_open,
6105	.ndo_stop   	     = virtnet_close,
6106	.ndo_start_xmit      = start_xmit,
6107	.ndo_validate_addr   = eth_validate_addr,
6108	.ndo_set_mac_address = virtnet_set_mac_address,
6109	.ndo_set_rx_mode     = virtnet_set_rx_mode,
6110	.ndo_get_stats64     = virtnet_stats,
6111	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
6112	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
6113	.ndo_bpf		= virtnet_xdp,
6114	.ndo_xdp_xmit		= virtnet_xdp_xmit,
6115	.ndo_xsk_wakeup         = virtnet_xsk_wakeup,
6116	.ndo_features_check	= passthru_features_check,
6117	.ndo_get_phys_port_name	= virtnet_get_phys_port_name,
6118	.ndo_set_features	= virtnet_set_features,
6119	.ndo_tx_timeout		= virtnet_tx_timeout,
6120};
6121
6122static void virtnet_config_changed_work(struct work_struct *work)
6123{
6124	struct virtnet_info *vi =
6125		container_of(work, struct virtnet_info, config_work);
6126	u16 v;
6127
6128	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
6129				 struct virtio_net_config, status, &v) < 0)
6130		return;
6131
6132	if (v & VIRTIO_NET_S_ANNOUNCE) {
6133		netdev_notify_peers(vi->dev);
6134		virtnet_ack_link_announce(vi);
6135	}
6136
6137	/* Ignore unknown (future) status bits */
6138	v &= VIRTIO_NET_S_LINK_UP;
6139
6140	if (vi->status == v)
6141		return;
6142
6143	vi->status = v;
6144
6145	if (vi->status & VIRTIO_NET_S_LINK_UP) {
6146		virtnet_update_settings(vi);
6147		netif_carrier_on(vi->dev);
6148		netif_tx_wake_all_queues(vi->dev);
6149	} else {
6150		netif_carrier_off(vi->dev);
6151		netif_tx_stop_all_queues(vi->dev);
6152	}
6153}
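
/* Note (illustrative): a typical trigger is live migration, where the device
 * sets VIRTIO_NET_S_ANNOUNCE; the handler above then asks the stack to send
 * gratuitous ARP/ND via netdev_notify_peers() and acknowledges the
 * announcement over the control virtqueue before re-evaluating link state.
 */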
6154
6155static void virtnet_config_changed(struct virtio_device *vdev)
6156{
6157	struct virtnet_info *vi = vdev->priv;
6158
6159	schedule_work(&vi->config_work);
6160}
6161
6162static void virtnet_free_queues(struct virtnet_info *vi)
6163{
6164	int i;
6165
6166	for (i = 0; i < vi->max_queue_pairs; i++) {
6167		__netif_napi_del(&vi->rq[i].napi);
6168		__netif_napi_del(&vi->sq[i].napi);
6169	}
6170
6171	/* Since we called __netif_napi_del(),
6172	 * we need to respect an RCU grace period before freeing vi->rq.
6173	 */
6174	synchronize_net();
6175
6176	kfree(vi->rq);
6177	kfree(vi->sq);
6178	kfree(vi->ctrl);
6179}
6180
6181static void _free_receive_bufs(struct virtnet_info *vi)
6182{
6183	struct bpf_prog *old_prog;
6184	int i;
6185
6186	for (i = 0; i < vi->max_queue_pairs; i++) {
6187		while (vi->rq[i].pages)
6188			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
6189
6190		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
6191		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
6192		if (old_prog)
6193			bpf_prog_put(old_prog);
6194	}
6195}
6196
6197static void free_receive_bufs(struct virtnet_info *vi)
6198{
6199	rtnl_lock();
6200	_free_receive_bufs(vi);
6201	rtnl_unlock();
6202}
6203
6204static void free_receive_page_frags(struct virtnet_info *vi)
6205{
6206	int i;
6207	for (i = 0; i < vi->max_queue_pairs; i++)
6208		if (vi->rq[i].alloc_frag.page) {
6209			if (vi->rq[i].last_dma)
6210				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
6211			put_page(vi->rq[i].alloc_frag.page);
6212		}
6213}
6214
6215static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
6216{
6217	struct virtnet_info *vi = vq->vdev->priv;
6218	struct send_queue *sq;
6219	int i = vq2txq(vq);
6220
6221	sq = &vi->sq[i];
6222
6223	switch (virtnet_xmit_ptr_unpack(&buf)) {
6224	case VIRTNET_XMIT_TYPE_SKB:
6225	case VIRTNET_XMIT_TYPE_SKB_ORPHAN:
6226		dev_kfree_skb(buf);
6227		break;
6228
6229	case VIRTNET_XMIT_TYPE_XDP:
6230		xdp_return_frame(buf);
6231		break;
6232
6233	case VIRTNET_XMIT_TYPE_XSK:
6234		xsk_tx_completed(sq->xsk_pool, 1);
6235		break;
6236	}
6237}
6238
6239static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq)
6240{
6241	struct virtnet_info *vi = vq->vdev->priv;
6242	int i = vq2txq(vq);
6243
6244	netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
6245}
6246
6247static void free_unused_bufs(struct virtnet_info *vi)
6248{
6249	void *buf;
6250	int i;
6251
6252	for (i = 0; i < vi->max_queue_pairs; i++) {
6253		struct virtqueue *vq = vi->sq[i].vq;
6254		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
6255			virtnet_sq_free_unused_buf(vq, buf);
6256		cond_resched();
6257	}
6258
6259	for (i = 0; i < vi->max_queue_pairs; i++) {
6260		struct virtqueue *vq = vi->rq[i].vq;
6261
6262		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
6263			virtnet_rq_unmap_free_buf(vq, buf);
6264		cond_resched();
6265	}
6266}
6267
6268static void virtnet_del_vqs(struct virtnet_info *vi)
6269{
6270	struct virtio_device *vdev = vi->vdev;
6271
6272	virtnet_clean_affinity(vi);
6273
6274	vdev->config->del_vqs(vdev);
6275
6276	virtnet_free_queues(vi);
6277}
6278
6279/* How large should a single buffer be so a queue full of these can fit at
6280 * least one full packet?
6281 * Logic below assumes the mergeable buffer header is used.
6282 */
6283static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
6284{
6285	const unsigned int hdr_len = vi->hdr_len;
6286	unsigned int rq_size = virtqueue_get_vring_size(vq);
6287	unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
6288	unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
6289	unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
6290
6291	return max(max(min_buf_len, hdr_len) - hdr_len,
6292		   (unsigned int)GOOD_PACKET_LEN);
6293}
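
/* Worked example (illustrative, assuming a 12-byte mergeable header and a
 * 65535-byte maximum packet): buf_len = 12 + 14 + 4 + 65535 = 65565.  A
 * 256-entry ring gives DIV_ROUND_UP(65565, 256) = 257 and 257 - 12 = 245,
 * which is below GOOD_PACKET_LEN (1518), so the result is 1518.  Only very
 * small rings (e.g. 16 entries: 4098 - 12 = 4086) raise the minimum further.
 */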
6294
6295static int virtnet_find_vqs(struct virtnet_info *vi)
6296{
6297	struct virtqueue_info *vqs_info;
6298	struct virtqueue **vqs;
6299	int ret = -ENOMEM;
6300	int total_vqs;
6301	bool *ctx;
6302	u16 i;
6303
6304	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
6305	 * possibly N-1 RX/TX queue pairs used in multiqueue mode, followed by
6306	 * a possible control vq.
6307	 */
6308	total_vqs = vi->max_queue_pairs * 2 +
6309		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
6310
6311	/* Allocate space for find_vqs parameters */
6312	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
6313	if (!vqs)
6314		goto err_vq;
6315	vqs_info = kcalloc(total_vqs, sizeof(*vqs_info), GFP_KERNEL);
6316	if (!vqs_info)
6317		goto err_vqs_info;
6318	if (!vi->big_packets || vi->mergeable_rx_bufs) {
6319		ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
6320		if (!ctx)
6321			goto err_ctx;
6322	} else {
6323		ctx = NULL;
6324	}
6325
6326	/* Parameters for control virtqueue, if any */
6327	if (vi->has_cvq) {
6328		vqs_info[total_vqs - 1].name = "control";
6329	}
6330
6331	/* Allocate/initialize parameters for send/receive virtqueues */
6332	for (i = 0; i < vi->max_queue_pairs; i++) {
6333		vqs_info[rxq2vq(i)].callback = skb_recv_done;
6334		vqs_info[txq2vq(i)].callback = skb_xmit_done;
6335		sprintf(vi->rq[i].name, "input.%u", i);
6336		sprintf(vi->sq[i].name, "output.%u", i);
6337		vqs_info[rxq2vq(i)].name = vi->rq[i].name;
6338		vqs_info[txq2vq(i)].name = vi->sq[i].name;
6339		if (ctx)
6340			vqs_info[rxq2vq(i)].ctx = true;
6341	}
6342
6343	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL);
6344	if (ret)
6345		goto err_find;
6346
6347	if (vi->has_cvq) {
6348		vi->cvq = vqs[total_vqs - 1];
6349		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
6350			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6351	}
6352
6353	for (i = 0; i < vi->max_queue_pairs; i++) {
6354		vi->rq[i].vq = vqs[rxq2vq(i)];
6355		vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
6356		vi->sq[i].vq = vqs[txq2vq(i)];
6357	}
6358
6359	/* run here: ret == 0. */
6360
6361
6362err_find:
6363	kfree(ctx);
6364err_ctx:
6365	kfree(vqs_info);
6366err_vqs_info:
6367	kfree(vqs);
6368err_vq:
6369	return ret;
6370}
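
/* Resulting layout (e.g. with 4 queue pairs and a control virtqueue,
 * total_vqs = 9): vqs[0]=rx0, vqs[1]=tx0, vqs[2]=rx1, vqs[3]=tx1, ...,
 * vqs[8]=control, matching the rxq2vq()/txq2vq() mapping used above.
 */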
6371
6372static int virtnet_alloc_queues(struct virtnet_info *vi)
6373{
6374	int i;
6375
6376	if (vi->has_cvq) {
6377		vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
6378		if (!vi->ctrl)
6379			goto err_ctrl;
6380	} else {
6381		vi->ctrl = NULL;
6382	}
6383	vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
6384	if (!vi->sq)
6385		goto err_sq;
6386	vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
6387	if (!vi->rq)
6388		goto err_rq;
6389
6390	INIT_DELAYED_WORK(&vi->refill, refill_work);
6391	for (i = 0; i < vi->max_queue_pairs; i++) {
6392		vi->rq[i].pages = NULL;
6393		netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
6394				      napi_weight);
6395		netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
6396					 virtnet_poll_tx,
6397					 napi_tx ? napi_weight : 0);
6398
6399		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
6400		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
6401		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
6402
6403		u64_stats_init(&vi->rq[i].stats.syncp);
6404		u64_stats_init(&vi->sq[i].stats.syncp);
6405		mutex_init(&vi->rq[i].dim_lock);
6406	}
6407
6408	return 0;
6409
6410err_rq:
6411	kfree(vi->sq);
6412err_sq:
6413	kfree(vi->ctrl);
6414err_ctrl:
6415	return -ENOMEM;
6416}
6417
6418static int init_vqs(struct virtnet_info *vi)
6419{
6420	int ret;
6421
6422	/* Allocate send & receive queues */
6423	ret = virtnet_alloc_queues(vi);
6424	if (ret)
6425		goto err;
6426
6427	ret = virtnet_find_vqs(vi);
6428	if (ret)
6429		goto err_free;
6430		goto err_free;
6431	cpus_read_lock();
6432	virtnet_set_affinity(vi);
6433	cpus_read_unlock();
6434
6435	return 0;
6436
6437err_free:
6438	virtnet_free_queues(vi);
6439err:
6440	return ret;
6441}
6442
6443#ifdef CONFIG_SYSFS
6444static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
6445		char *buf)
6446{
6447	struct virtnet_info *vi = netdev_priv(queue->dev);
6448	unsigned int queue_index = get_netdev_rx_queue_index(queue);
6449	unsigned int headroom = virtnet_get_headroom(vi);
6450	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
6451	struct ewma_pkt_len *avg;
6452
6453	BUG_ON(queue_index >= vi->max_queue_pairs);
6454	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
6455	return sprintf(buf, "%u\n",
6456		       get_mergeable_buf_len(&vi->rq[queue_index], avg,
6457				       SKB_DATA_ALIGN(headroom + tailroom)));
6458}
6459
6460static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
6461	__ATTR_RO(mergeable_rx_buffer_size);
6462
6463static struct attribute *virtio_net_mrg_rx_attrs[] = {
6464	&mergeable_rx_buffer_size_attribute.attr,
6465	NULL
6466};
6467
6468static const struct attribute_group virtio_net_mrg_rx_group = {
6469	.name = "virtio_net",
6470	.attrs = virtio_net_mrg_rx_attrs
6471};
6472#endif
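
/* The attribute above appears per RX queue when mergeable buffers are in use,
 * e.g. (illustrative interface name)
 *
 *	cat /sys/class/net/eth0/queues/rx-0/virtio_net/mergeable_rx_buffer_size
 *
 * reports the EWMA-based buffer length currently used when refilling queue 0.
 */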
6473
6474static bool virtnet_fail_on_feature(struct virtio_device *vdev,
6475				    unsigned int fbit,
6476				    const char *fname, const char *dname)
6477{
6478	if (!virtio_has_feature(vdev, fbit))
6479		return false;
6480
6481	dev_err(&vdev->dev, "device advertises feature %s but not %s",
6482		fname, dname);
6483
6484	return true;
6485}
6486
6487#define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
6488	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
6489
6490static bool virtnet_validate_features(struct virtio_device *vdev)
6491{
6492	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
6493	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
6494			     "VIRTIO_NET_F_CTRL_VQ") ||
6495	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
6496			     "VIRTIO_NET_F_CTRL_VQ") ||
6497	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
6498			     "VIRTIO_NET_F_CTRL_VQ") ||
6499	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
6500	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
6501			     "VIRTIO_NET_F_CTRL_VQ") ||
6502	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
6503			     "VIRTIO_NET_F_CTRL_VQ") ||
6504	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
6505			     "VIRTIO_NET_F_CTRL_VQ") ||
6506	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
6507			     "VIRTIO_NET_F_CTRL_VQ") ||
6508	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
6509			     "VIRTIO_NET_F_CTRL_VQ"))) {
6510		return false;
6511	}
6512
6513	return true;
6514}
6515
6516#define MIN_MTU ETH_MIN_MTU
6517#define MAX_MTU ETH_MAX_MTU
6518
6519static int virtnet_validate(struct virtio_device *vdev)
6520{
6521	if (!vdev->config->get) {
6522		dev_err(&vdev->dev, "%s failure: config access disabled\n",
6523			__func__);
6524		return -EINVAL;
6525	}
6526
6527	if (!virtnet_validate_features(vdev))
6528		return -EINVAL;
6529
6530	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
6531		int mtu = virtio_cread16(vdev,
6532					 offsetof(struct virtio_net_config,
6533						  mtu));
6534		if (mtu < MIN_MTU)
6535			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
6536	}
6537
6538	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
6539	    !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
6540		dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
6541		__virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
6542	}
6543
6544	return 0;
6545}
6546
6547static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
6548{
6549	return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
6550		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
6551		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
6552		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
6553		(virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
6554		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
6555}
6556
6557static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
6558{
6559	bool guest_gso = virtnet_check_guest_gso(vi);
6560
6561	/* If the device can receive ANY guest GSO packets, regardless of mtu,
6562	 * allocate packets of maximum size; otherwise limit them to only
6563	 * an mtu's worth.
6564	 */
6565	if (mtu > ETH_DATA_LEN || guest_gso) {
6566		vi->big_packets = true;
6567		vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
6568	}
6569}
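
/* Example (illustrative, assuming 4 KiB pages): with guest GSO negotiated,
 * MAX_SKB_FRAGS page fragments are provisioned per big packet; without it,
 * an mtu of 9000 needs DIV_ROUND_UP(9000, 4096) = 3 fragments, and an mtu of
 * ETH_DATA_LEN (1500) or less leaves big_packets disabled entirely.
 */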
6570
6571#define VIRTIO_NET_HASH_REPORT_MAX_TABLE      10
6572static enum xdp_rss_hash_type
6573virtnet_xdp_rss_type[VIRTIO_NET_HASH_REPORT_MAX_TABLE] = {
6574	[VIRTIO_NET_HASH_REPORT_NONE] = XDP_RSS_TYPE_NONE,
6575	[VIRTIO_NET_HASH_REPORT_IPv4] = XDP_RSS_TYPE_L3_IPV4,
6576	[VIRTIO_NET_HASH_REPORT_TCPv4] = XDP_RSS_TYPE_L4_IPV4_TCP,
6577	[VIRTIO_NET_HASH_REPORT_UDPv4] = XDP_RSS_TYPE_L4_IPV4_UDP,
6578	[VIRTIO_NET_HASH_REPORT_IPv6] = XDP_RSS_TYPE_L3_IPV6,
6579	[VIRTIO_NET_HASH_REPORT_TCPv6] = XDP_RSS_TYPE_L4_IPV6_TCP,
6580	[VIRTIO_NET_HASH_REPORT_UDPv6] = XDP_RSS_TYPE_L4_IPV6_UDP,
6581	[VIRTIO_NET_HASH_REPORT_IPv6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
6582	[VIRTIO_NET_HASH_REPORT_TCPv6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
6583	[VIRTIO_NET_HASH_REPORT_UDPv6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX
6584};
6585
6586static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
6587			       enum xdp_rss_hash_type *rss_type)
6588{
6589	const struct xdp_buff *xdp = (void *)_ctx;
6590	struct virtio_net_hdr_v1_hash *hdr_hash;
6591	struct virtnet_info *vi;
6592	u16 hash_report;
6593
6594	if (!(xdp->rxq->dev->features & NETIF_F_RXHASH))
6595		return -ENODATA;
6596
6597	vi = netdev_priv(xdp->rxq->dev);
6598	hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len);
6599	hash_report = __le16_to_cpu(hdr_hash->hash_report);
6600
6601	if (hash_report >= VIRTIO_NET_HASH_REPORT_MAX_TABLE)
6602		hash_report = VIRTIO_NET_HASH_REPORT_NONE;
6603
6604	*rss_type = virtnet_xdp_rss_type[hash_report];
6605	*hash = __le32_to_cpu(hdr_hash->hash_value);
6606	return 0;
6607}
6608
6609static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
6610	.xmo_rx_hash			= virtnet_xdp_rx_hash,
6611};
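
/* Consumer-side sketch (illustrative, not part of this driver): an XDP
 * program can read the hash exposed by .xmo_rx_hash through the
 * bpf_xdp_metadata_rx_hash() kfunc, e.g.
 *
 *	__u32 hash;
 *	enum xdp_rss_hash_type type;
 *
 *	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &type))
 *		bpf_printk("hash %x type %d", hash, type);
 *
 * which only succeeds while NETIF_F_RXHASH is enabled on the device.
 */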
6612
6613static int virtnet_probe(struct virtio_device *vdev)
6614{
6615	int i, err = -ENOMEM;
6616	struct net_device *dev;
6617	struct virtnet_info *vi;
6618	u16 max_queue_pairs;
6619	int mtu = 0;
6620
6621	/* Find if host supports multiqueue/rss virtio_net device */
6622	max_queue_pairs = 1;
6623	if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
6624		max_queue_pairs =
6625		     virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
6626
6627	/* We need at least 2 queues */
6628	if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
6629	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
6630	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
6631		max_queue_pairs = 1;
6632
6633	/* Allocate ourselves a network device with room for our info */
6634	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
6635	if (!dev)
6636		return -ENOMEM;
6637
6638	/* Set up network device as normal. */
6639	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
6640			   IFF_TX_SKB_NO_LINEAR;
6641	dev->netdev_ops = &virtnet_netdev;
6642	dev->stat_ops = &virtnet_stat_ops;
6643	dev->features = NETIF_F_HIGHDMA;
6644
6645	dev->ethtool_ops = &virtnet_ethtool_ops;
6646	SET_NETDEV_DEV(dev, &vdev->dev);
6647
6648	/* Do we support "hardware" checksums? */
6649	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
6650		/* This opens up the world of extra features. */
6651		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6652		if (csum)
6653			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6654
6655		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
6656			dev->hw_features |= NETIF_F_TSO
6657				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
6658		}
6659		/* Individual feature bits: what can host handle? */
6660		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
6661			dev->hw_features |= NETIF_F_TSO;
6662		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
6663			dev->hw_features |= NETIF_F_TSO6;
6664		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
6665			dev->hw_features |= NETIF_F_TSO_ECN;
6666		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
6667			dev->hw_features |= NETIF_F_GSO_UDP_L4;
6668
6669		dev->features |= NETIF_F_GSO_ROBUST;
6670
6671		if (gso)
6672			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
6673		/* (!csum && gso) case will be fixed by register_netdev() */
6674	}
6675
6676	/* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
6677	 * need to calculate checksums for partially checksummed packets,
6678	 * as they're considered valid by the upper layer.
6679	 * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
6680	 * receives fully checksummed packets. The device may assist in
6681	 * validating these packets' checksums, so the driver won't have to.
6682	 */
6683	dev->features |= NETIF_F_RXCSUM;
6684
6685	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
6686	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
6687		dev->features |= NETIF_F_GRO_HW;
6688	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
6689		dev->hw_features |= NETIF_F_GRO_HW;
6690
6691	dev->vlan_features = dev->features;
6692	dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
6693		NETDEV_XDP_ACT_XSK_ZEROCOPY;
6694
6695	/* MTU range: 68 - 65535 */
6696	dev->min_mtu = MIN_MTU;
6697	dev->max_mtu = MAX_MTU;
6698
6699	/* Configuration may specify what MAC to use.  Otherwise random. */
6700	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
6701		u8 addr[ETH_ALEN];
6702
6703		virtio_cread_bytes(vdev,
6704				   offsetof(struct virtio_net_config, mac),
6705				   addr, ETH_ALEN);
6706		eth_hw_addr_set(dev, addr);
6707	} else {
6708		eth_hw_addr_random(dev);
6709		dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
6710			 dev->dev_addr);
6711	}
6712
6713	/* Set up our device-specific information */
6714	vi = netdev_priv(dev);
6715	vi->dev = dev;
6716	vi->vdev = vdev;
6717	vdev->priv = vi;
6718
6719	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
6720	INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
6721	spin_lock_init(&vi->refill_lock);
6722
6723	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
6724		vi->mergeable_rx_bufs = true;
6725		dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
6726	}
6727
6728	if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
6729		vi->has_rss_hash_report = true;
6730
6731	if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
6732		vi->has_rss = true;
6733		vi->has_rss = true;
6734		vi->rss_indir_table_size =
6735			virtio_cread16(vdev, offsetof(struct virtio_net_config,
6736				rss_max_indirection_table_length));
6737	}
6738	err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size);
6739	if (err)
6740		goto free;
6741
6742	if (vi->has_rss || vi->has_rss_hash_report) {
6743		vi->rss_key_size =
6744			virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
6745		if (vi->rss_key_size > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
6746			dev_err(&vdev->dev, "rss_max_key_size=%u exceeds the limit %u.\n",
6747				vi->rss_key_size, VIRTIO_NET_RSS_MAX_KEY_SIZE);
6748			err = -EINVAL;
6749			goto free;
6750		}
6751
6752		vi->rss_hash_types_supported =
6753		    virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
6754		vi->rss_hash_types_supported &=
6755				~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
6756				  VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
6757				  VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
6758
6759		dev->hw_features |= NETIF_F_RXHASH;
6760		dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops;
6761	}
6762
6763	if (vi->has_rss_hash_report)
6764		vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
6765	else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
6766		 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
6767		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
6768	else
6769		vi->hdr_len = sizeof(struct virtio_net_hdr);
6770
6771	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
6772	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
6773		vi->any_header_sg = true;
6774
6775	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
6776		vi->has_cvq = true;
6777
6778	mutex_init(&vi->cvq_lock);
6779
6780	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
6781		mtu = virtio_cread16(vdev,
6782				     offsetof(struct virtio_net_config,
6783					      mtu));
6784		if (mtu < dev->min_mtu) {
6785			/* Should never trigger: MTU was previously validated
6786			 * in virtnet_validate.
6787			 */
6788			dev_err(&vdev->dev,
6789				"device MTU appears to have changed, it is now %d < %d",
6790				mtu, dev->min_mtu);
6791			err = -EINVAL;
6792			goto free;
6793		}
6794
6795		dev->mtu = mtu;
6796		dev->max_mtu = mtu;
6797	}
6798
6799	virtnet_set_big_packets(vi, mtu);
6800
6801	if (vi->any_header_sg)
6802		dev->needed_headroom = vi->hdr_len;
6803
6804	/* Enable multiqueue by default */
6805	if (num_online_cpus() >= max_queue_pairs)
6806		vi->curr_queue_pairs = max_queue_pairs;
6807	else
6808		vi->curr_queue_pairs = num_online_cpus();
6809	vi->max_queue_pairs = max_queue_pairs;
6810
6811	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
6812	err = init_vqs(vi);
6813	if (err)
6814		goto free;
6815
6816	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
6817		vi->intr_coal_rx.max_usecs = 0;
6818		vi->intr_coal_tx.max_usecs = 0;
6819		vi->intr_coal_rx.max_packets = 0;
6820
6821		/* Keep the default values of the coalescing parameters
6822		 * aligned with the default napi_tx state.
6823		 */
6824		if (vi->sq[0].napi.weight)
6825			vi->intr_coal_tx.max_packets = 1;
6826		else
6827			vi->intr_coal_tx.max_packets = 0;
6828	}
6829
6830	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
6831		/* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
6832		for (i = 0; i < vi->max_queue_pairs; i++)
6833			if (vi->sq[i].napi.weight)
6834				vi->sq[i].intr_coal.max_packets = 1;
6835
6836		err = virtnet_init_irq_moder(vi);
6837		if (err)
6838			goto free;
6839	}
6840
6841#ifdef CONFIG_SYSFS
6842	if (vi->mergeable_rx_bufs)
6843		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
6844#endif
6845	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
6846	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
6847
6848	virtnet_init_settings(dev);
6849
6850	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
6851		vi->failover = net_failover_create(vi->dev);
6852		if (IS_ERR(vi->failover)) {
6853			err = PTR_ERR(vi->failover);
6854			goto free_vqs;
6855		}
6856	}
6857
6858	if (vi->has_rss || vi->has_rss_hash_report)
6859		virtnet_init_default_rss(vi);
6860
6861	enable_rx_mode_work(vi);
6862
6863	/* serialize netdev register + virtio_device_ready() with ndo_open() */
6864	rtnl_lock();
6865
6866	err = register_netdevice(dev);
6867	if (err) {
6868		pr_debug("virtio_net: registering device failed\n");
6869		rtnl_unlock();
6870		goto free_failover;
6871	}
6872
6873	/* Disable config change notification until ndo_open. */
6874	virtio_config_driver_disable(vi->vdev);
6875
6876	virtio_device_ready(vdev);
6877
6878	if (vi->has_rss || vi->has_rss_hash_report) {
6879		if (!virtnet_commit_rss_command(vi)) {
6880			dev_warn(&vdev->dev, "RSS disabled because committing failed.\n");
6881			dev->hw_features &= ~NETIF_F_RXHASH;
6882			vi->has_rss_hash_report = false;
6883			vi->has_rss = false;
6884		}
6885	}
6886
6887	virtnet_set_queues(vi, vi->curr_queue_pairs);
6888
6889	/* A random MAC address has been assigned; notify the device.
6890	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
6891	 * because many devices work fine without getting the MAC explicitly.
6892	 */
6893	if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
6894	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
6895		struct scatterlist sg;
6896
6897		sg_init_one(&sg, dev->dev_addr, dev->addr_len);
6898		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
6899					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
6900			pr_debug("virtio_net: setting MAC address failed\n");
6901			rtnl_unlock();
6902			err = -EINVAL;
6903			goto free_unregister_netdev;
6904		}
6905	}
6906
6907	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) {
6908		struct virtio_net_stats_capabilities *stats_cap  __free(kfree) = NULL;
6909		struct scatterlist sg;
6910		__le64 v;
6911
6912		stats_cap = kzalloc(sizeof(*stats_cap), GFP_KERNEL);
6913		if (!stats_cap) {
6914			rtnl_unlock();
6915			err = -ENOMEM;
6916			goto free_unregister_netdev;
6917		}
6918
6919		sg_init_one(&sg, stats_cap, sizeof(*stats_cap));
6920
6921		if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
6922						VIRTIO_NET_CTRL_STATS_QUERY,
6923						NULL, &sg)) {
6924			pr_debug("virtio_net: failed to get stats capability\n");
6925			rtnl_unlock();
6926			err = -EINVAL;
6927			goto free_unregister_netdev;
6928		}
6929
6930		v = stats_cap->supported_stats_types[0];
6931		vi->device_stats_cap = le64_to_cpu(v);
6932	}
6933
6934	/* Assume link up if device can't report link status,
6935	   otherwise get link status from config. */
6936	netif_carrier_off(dev);
6937	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
6938		virtnet_config_changed_work(&vi->config_work);
6939	} else {
6940		vi->status = VIRTIO_NET_S_LINK_UP;
6941		virtnet_update_settings(vi);
6942		netif_carrier_on(dev);
6943	}
6944
6945	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
6946		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
6947			set_bit(guest_offloads[i], &vi->guest_offloads);
6948	vi->guest_offloads_capable = vi->guest_offloads;
6949
6950	rtnl_unlock();
6951
6952	err = virtnet_cpu_notif_add(vi);
6953	if (err) {
6954		pr_debug("virtio_net: registering cpu notifier failed\n");
6955		goto free_unregister_netdev;
6956	}
6957
6958	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
6959		 dev->name, max_queue_pairs);
6960
6961	return 0;
6962
6963free_unregister_netdev:
6964	unregister_netdev(dev);
6965free_failover:
6966	net_failover_destroy(vi->failover);
6967free_vqs:
6968	virtio_reset_device(vdev);
6969	cancel_delayed_work_sync(&vi->refill);
6970	free_receive_page_frags(vi);
6971	virtnet_del_vqs(vi);
6972free:
6973	free_netdev(dev);
6974	return err;
6975}
6976
6977static void remove_vq_common(struct virtnet_info *vi)
6978{
6979	int i;
6980
6981	virtio_reset_device(vi->vdev);
6982
6983	/* Free unused buffers in both send and recv, if any. */
6984	free_unused_bufs(vi);
6985
6986	/*
6987	 * Rule of thumb is netdev_tx_reset_queue() should follow any
6988	 * skb freeing not followed by netdev_tx_completed_queue()
6989	 */
6990	for (i = 0; i < vi->max_queue_pairs; i++)
6991		netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, i));
6992
6993	free_receive_bufs(vi);
6994
6995	free_receive_page_frags(vi);
6996
6997	virtnet_del_vqs(vi);
6998}
6999
7000static void virtnet_remove(struct virtio_device *vdev)
7001{
7002	struct virtnet_info *vi = vdev->priv;
7003
7004	virtnet_cpu_notif_remove(vi);
7005
7006	/* Make sure no work handler is accessing the device. */
7007	flush_work(&vi->config_work);
7008	disable_rx_mode_work(vi);
7009	flush_work(&vi->rx_mode_work);
7010
7011	virtnet_free_irq_moder(vi);
7012
7013	unregister_netdev(vi->dev);
7014
7015	net_failover_destroy(vi->failover);
7016
7017	remove_vq_common(vi);
7018
7019	rss_indirection_table_free(&vi->rss);
7020
7021	free_netdev(vi->dev);
7022}
7023
7024static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
7025{
7026	struct virtnet_info *vi = vdev->priv;
7027
7028	virtnet_cpu_notif_remove(vi);
7029	virtnet_freeze_down(vdev);
7030	remove_vq_common(vi);
7031
7032	return 0;
7033}
7034
7035static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
7036{
7037	struct virtnet_info *vi = vdev->priv;
7038	int err;
7039
7040	err = virtnet_restore_up(vdev);
7041	if (err)
7042		return err;
7043	virtnet_set_queues(vi, vi->curr_queue_pairs);
7044
7045	err = virtnet_cpu_notif_add(vi);
7046	if (err) {
7047		virtnet_freeze_down(vdev);
7048		remove_vq_common(vi);
7049		return err;
7050	}
7051
7052	return 0;
7053}
7054
7055static struct virtio_device_id id_table[] = {
7056	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
7057	{ 0 },
7058};
7059
7060#define VIRTNET_FEATURES \
7061	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
7062	VIRTIO_NET_F_MAC, \
7063	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
7064	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
7065	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
7066	VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
7067	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
7068	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
7069	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
7070	VIRTIO_NET_F_CTRL_MAC_ADDR, \
7071	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
7072	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
7073	VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
7074	VIRTIO_NET_F_VQ_NOTF_COAL, \
7075	VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
7076
7077static unsigned int features[] = {
7078	VIRTNET_FEATURES,
7079};
7080
7081static unsigned int features_legacy[] = {
7082	VIRTNET_FEATURES,
7083	VIRTIO_NET_F_GSO,
7084	VIRTIO_F_ANY_LAYOUT,
7085};
7086
7087static struct virtio_driver virtio_net_driver = {
7088	.feature_table = features,
7089	.feature_table_size = ARRAY_SIZE(features),
7090	.feature_table_legacy = features_legacy,
7091	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
7092	.driver.name =	KBUILD_MODNAME,
7093	.id_table =	id_table,
7094	.validate =	virtnet_validate,
7095	.probe =	virtnet_probe,
7096	.remove =	virtnet_remove,
7097	.config_changed = virtnet_config_changed,
7098#ifdef CONFIG_PM_SLEEP
7099	.freeze =	virtnet_freeze,
7100	.restore =	virtnet_restore,
7101#endif
7102};
7103
7104static __init int virtio_net_driver_init(void)
7105{
7106	int ret;
7107
7108	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
7109				      virtnet_cpu_online,
7110				      virtnet_cpu_down_prep);
7111	if (ret < 0)
7112		goto out;
7113	virtionet_online = ret;
7114	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
7115				      NULL, virtnet_cpu_dead);
7116	if (ret)
7117		goto err_dead;
7118	ret = register_virtio_driver(&virtio_net_driver);
7119	if (ret)
7120		goto err_virtio;
7121	return 0;
7122err_virtio:
7123	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
7124err_dead:
7125	cpuhp_remove_multi_state(virtionet_online);
7126out:
7127	return ret;
7128}
7129module_init(virtio_net_driver_init);
7130
7131static __exit void virtio_net_driver_exit(void)
7132{
7133	unregister_virtio_driver(&virtio_net_driver);
7134	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
7135	cpuhp_remove_multi_state(virtionet_online);
7136}
7137module_exit(virtio_net_driver_exit);
7138
7139MODULE_DEVICE_TABLE(virtio, id_table);
7140MODULE_DESCRIPTION("Virtio network driver");
7141MODULE_LICENSE("GPL");