1// SPDX-License-Identifier: GPL-2.0-or-later
2/* A network driver using virtio.
3 *
4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
5 */
6//#define DEBUG
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
9#include <linux/ethtool.h>
10#include <linux/module.h>
11#include <linux/virtio.h>
12#include <linux/virtio_net.h>
13#include <linux/bpf.h>
14#include <linux/bpf_trace.h>
15#include <linux/scatterlist.h>
16#include <linux/if_vlan.h>
17#include <linux/slab.h>
18#include <linux/cpu.h>
19#include <linux/average.h>
20#include <linux/filter.h>
21#include <linux/kernel.h>
22#include <linux/dim.h>
23#include <net/route.h>
24#include <net/xdp.h>
25#include <net/net_failover.h>
26#include <net/netdev_rx_queue.h>
27
28static int napi_weight = NAPI_POLL_WEIGHT;
29module_param(napi_weight, int, 0444);
30
31static bool csum = true, gso = true, napi_tx = true;
32module_param(csum, bool, 0444);
33module_param(gso, bool, 0444);
34module_param(napi_tx, bool, 0644);
35
36/* FIXME: MTU in config. */
37#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
38#define GOOD_COPY_LEN 128
39
40#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
41
42/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
43#define VIRTIO_XDP_HEADROOM 256
44
45/* Separating two types of XDP xmit */
46#define VIRTIO_XDP_TX BIT(0)
47#define VIRTIO_XDP_REDIR BIT(1)
48
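/* Bit set in the virtqueue token to mark an xdp_frame (as opposed to an
 * sk_buff); see xdp_to_ptr(), ptr_to_xdp() and is_xdp_frame().
 */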
49#define VIRTIO_XDP_FLAG BIT(0)
50
51/* RX packet size EWMA. The average packet size is used to determine the packet
52 * buffer size when refilling RX rings. As the entire RX ring may be refilled
53 * at once, the weight is chosen so that the EWMA will be insensitive to short-
54 * term, transient changes in packet size.
55 */
56DECLARE_EWMA(pkt_len, 0, 64)
57
58#define VIRTNET_DRIVER_VERSION "1.0.0"
59
60static const unsigned long guest_offloads[] = {
61 VIRTIO_NET_F_GUEST_TSO4,
62 VIRTIO_NET_F_GUEST_TSO6,
63 VIRTIO_NET_F_GUEST_ECN,
64 VIRTIO_NET_F_GUEST_UFO,
65 VIRTIO_NET_F_GUEST_CSUM,
66 VIRTIO_NET_F_GUEST_USO4,
67 VIRTIO_NET_F_GUEST_USO6,
68 VIRTIO_NET_F_GUEST_HDRLEN
69};
70
71#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
72 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
73 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
74 (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
75 (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
76 (1ULL << VIRTIO_NET_F_GUEST_USO6))
77
78struct virtnet_stat_desc {
79 char desc[ETH_GSTRING_LEN];
80 size_t offset;
81};
82
83struct virtnet_sq_stats {
84 struct u64_stats_sync syncp;
85 u64_stats_t packets;
86 u64_stats_t bytes;
87 u64_stats_t xdp_tx;
88 u64_stats_t xdp_tx_drops;
89 u64_stats_t kicks;
90 u64_stats_t tx_timeouts;
91};
92
93struct virtnet_rq_stats {
94 struct u64_stats_sync syncp;
95 u64_stats_t packets;
96 u64_stats_t bytes;
97 u64_stats_t drops;
98 u64_stats_t xdp_packets;
99 u64_stats_t xdp_tx;
100 u64_stats_t xdp_redirects;
101 u64_stats_t xdp_drops;
102 u64_stats_t kicks;
103};
104
105#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
106#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
107
108static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
109 { "packets", VIRTNET_SQ_STAT(packets) },
110 { "bytes", VIRTNET_SQ_STAT(bytes) },
111 { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
112 { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
113 { "kicks", VIRTNET_SQ_STAT(kicks) },
114 { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) },
115};
116
117static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
118 { "packets", VIRTNET_RQ_STAT(packets) },
119 { "bytes", VIRTNET_RQ_STAT(bytes) },
120 { "drops", VIRTNET_RQ_STAT(drops) },
121 { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
122 { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
123 { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
124 { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
125 { "kicks", VIRTNET_RQ_STAT(kicks) },
126};
127
128#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
129#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
130
131struct virtnet_interrupt_coalesce {
132 u32 max_packets;
133 u32 max_usecs;
134};
135
136/* Per-page DMA information for receive buffers allocated from the page frag. */
137struct virtnet_rq_dma {
138 dma_addr_t addr;
139 u32 ref;
140 u16 len;
141 u16 need_sync;
142};
143
144/* Internal representation of a send virtqueue */
145struct send_queue {
146 /* Virtqueue associated with this send_queue */
147 struct virtqueue *vq;
148
149 /* TX: fragments + linear part + virtio header */
150 struct scatterlist sg[MAX_SKB_FRAGS + 2];
151
152 /* Name of the send queue: output.$index */
153 char name[16];
154
155 struct virtnet_sq_stats stats;
156
157 struct virtnet_interrupt_coalesce intr_coal;
158
159 struct napi_struct napi;
160
161 /* Record whether sq is in reset state. */
162 bool reset;
163};
164
165/* Internal representation of a receive virtqueue */
166struct receive_queue {
167 /* Virtqueue associated with this receive_queue */
168 struct virtqueue *vq;
169
170 struct napi_struct napi;
171
172 struct bpf_prog __rcu *xdp_prog;
173
174 struct virtnet_rq_stats stats;
175
176 /* The number of rx notifications */
177 u16 calls;
178
179 /* Is dynamic interrupt moderation enabled? */
180 bool dim_enabled;
181
182 /* Dynamic Interrupt Moderation */
183 struct dim dim;
184
185 u32 packets_in_napi;
186
187 struct virtnet_interrupt_coalesce intr_coal;
188
189 /* Chain pages by the private ptr. */
190 struct page *pages;
191
192 /* Average packet length for mergeable receive buffers. */
193 struct ewma_pkt_len mrg_avg_pkt_len;
194
195 /* Page frag for packet buffer allocation. */
196 struct page_frag alloc_frag;
197
198 /* RX: fragments + linear part + virtio header */
199 struct scatterlist sg[MAX_SKB_FRAGS + 2];
200
201 /* Min single buffer size for mergeable buffers case. */
202 unsigned int min_buf_len;
203
204 /* Name of this receive queue: input.$index */
205 char name[16];
206
207 struct xdp_rxq_info xdp_rxq;
208
209 /* Record the last dma info to free after new pages are allocated. */
210 struct virtnet_rq_dma *last_dma;
211
212 /* Do the DMA mapping ourselves (premapped mode) */
213 bool do_dma;
214};
215
216/* This structure can hold an RSS message with the maximum settings for the indirection table and key size.
217 * Note that the default structure describing the RSS configuration, virtio_net_rss_config,
218 * contains the same info but can't handle the table values.
219 * In any case, the structure is passed to the virtio hw through sg_buf split into parts,
220 * because the table sizes may differ according to the device configuration.
221 */
222#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
223#define VIRTIO_NET_RSS_MAX_TABLE_LEN 128
224struct virtio_net_ctrl_rss {
225 u32 hash_types;
226 u16 indirection_table_mask;
227 u16 unclassified_queue;
228 u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
229 u16 max_tx_vq;
230 u8 hash_key_length;
231 u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
232};
233
234/* Control VQ buffers: protected by the rtnl lock */
235struct control_buf {
236 struct virtio_net_ctrl_hdr hdr;
237 virtio_net_ctrl_ack status;
238 struct virtio_net_ctrl_mq mq;
239 u8 promisc;
240 u8 allmulti;
241 __virtio16 vid;
242 __virtio64 offloads;
243 struct virtio_net_ctrl_rss rss;
244 struct virtio_net_ctrl_coal_tx coal_tx;
245 struct virtio_net_ctrl_coal_rx coal_rx;
246 struct virtio_net_ctrl_coal_vq coal_vq;
247};
248
249struct virtnet_info {
250 struct virtio_device *vdev;
251 struct virtqueue *cvq;
252 struct net_device *dev;
253 struct send_queue *sq;
254 struct receive_queue *rq;
255 unsigned int status;
256
257 /* Max # of queue pairs supported by the device */
258 u16 max_queue_pairs;
259
260 /* # of queue pairs currently used by the driver */
261 u16 curr_queue_pairs;
262
263 /* # of XDP queue pairs currently used by the driver */
264 u16 xdp_queue_pairs;
265
266 /* xdp_queue_pairs may be 0 even when XDP is loaded, so track XDP enablement separately. */
267 bool xdp_enabled;
268
269 /* I like... big packets and I cannot lie! */
270 bool big_packets;
271
272 /* number of sg entries allocated for big packets */
273 unsigned int big_packets_num_skbfrags;
274
275 /* Host will merge rx buffers for big packets (shake it! shake it!) */
276 bool mergeable_rx_bufs;
277
278 /* Host supports rss and/or hash report */
279 bool has_rss;
280 bool has_rss_hash_report;
281 u8 rss_key_size;
282 u16 rss_indir_table_size;
283 u32 rss_hash_types_supported;
284 u32 rss_hash_types_saved;
285
286 /* Has control virtqueue */
287 bool has_cvq;
288
289 /* Host can handle any s/g split between our header and packet data */
290 bool any_header_sg;
291
292 /* Packet virtio header size */
293 u8 hdr_len;
294
295 /* Work struct for delayed refilling if we run low on memory. */
296 struct delayed_work refill;
297
298 /* Is delayed refill enabled? */
299 bool refill_enabled;
300
301 /* The lock to synchronize the access to refill_enabled */
302 spinlock_t refill_lock;
303
304 /* Work struct for config space updates */
305 struct work_struct config_work;
306
307 /* Is the affinity hint set for the virtqueues? */
308 bool affinity_hint_set;
309
310 /* CPU hotplug instances for online & dead */
311 struct hlist_node node;
312 struct hlist_node node_dead;
313
314 struct control_buf *ctrl;
315
316 /* Ethtool settings */
317 u8 duplex;
318 u32 speed;
319
320 /* Is rx dynamic interrupt moderation enabled? */
321 bool rx_dim_enabled;
322
323 /* Interrupt coalescing settings */
324 struct virtnet_interrupt_coalesce intr_coal_tx;
325 struct virtnet_interrupt_coalesce intr_coal_rx;
326
327 unsigned long guest_offloads;
328 unsigned long guest_offloads_capable;
329
330 /* failover when STANDBY feature enabled */
331 struct failover *failover;
332};
333
334struct padded_vnet_hdr {
335 struct virtio_net_hdr_v1_hash hdr;
336 /*
337 * hdr is in a separate sg buffer, and the data sg buffer shares the same
338 * page with this header sg. This padding makes the next sg 16 byte
339 * aligned after the header.
340 */
341 char padding[12];
342};
343
344struct virtio_net_common_hdr {
345 union {
346 struct virtio_net_hdr hdr;
347 struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
348 struct virtio_net_hdr_v1_hash hash_v1_hdr;
349 };
350};
351
352static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
353
354static bool is_xdp_frame(void *ptr)
355{
356 return (unsigned long)ptr & VIRTIO_XDP_FLAG;
357}
358
359static void *xdp_to_ptr(struct xdp_frame *ptr)
360{
361 return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
362}
363
364static struct xdp_frame *ptr_to_xdp(void *ptr)
365{
366 return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
367}
368
369/* Converting between virtqueue no. and kernel tx/rx queue no.
370 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
371 */
372static int vq2txq(struct virtqueue *vq)
373{
374 return (vq->index - 1) / 2;
375}
376
377static int txq2vq(int txq)
378{
379 return txq * 2 + 1;
380}
381
382static int vq2rxq(struct virtqueue *vq)
383{
384 return vq->index / 2;
385}
386
387static int rxq2vq(int rxq)
388{
389 return rxq * 2;
390}
391
392static inline struct virtio_net_common_hdr *
393skb_vnet_common_hdr(struct sk_buff *skb)
394{
395 return (struct virtio_net_common_hdr *)skb->cb;
396}
397
398/*
399 * page->private is used to chain pages for big packets; put the whole
400 * most recently used list at the beginning for reuse
401 */
402static void give_pages(struct receive_queue *rq, struct page *page)
403{
404 struct page *end;
405
406 /* Find end of list, sew whole thing into vi->rq.pages. */
407 for (end = page; end->private; end = (struct page *)end->private);
408 end->private = (unsigned long)rq->pages;
409 rq->pages = page;
410}
411
412static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
413{
414 struct page *p = rq->pages;
415
416 if (p) {
417 rq->pages = (struct page *)p->private;
418 /* clear private here, it is used to chain pages */
419 p->private = 0;
420 } else
421 p = alloc_page(gfp_mask);
422 return p;
423}
424
425static void virtnet_rq_free_buf(struct virtnet_info *vi,
426 struct receive_queue *rq, void *buf)
427{
428 if (vi->mergeable_rx_bufs)
429 put_page(virt_to_head_page(buf));
430 else if (vi->big_packets)
431 give_pages(rq, buf);
432 else
433 put_page(virt_to_head_page(buf));
434}
435
436static void enable_delayed_refill(struct virtnet_info *vi)
437{
438 spin_lock_bh(&vi->refill_lock);
439 vi->refill_enabled = true;
440 spin_unlock_bh(&vi->refill_lock);
441}
442
443static void disable_delayed_refill(struct virtnet_info *vi)
444{
445 spin_lock_bh(&vi->refill_lock);
446 vi->refill_enabled = false;
447 spin_unlock_bh(&vi->refill_lock);
448}
449
450static void virtqueue_napi_schedule(struct napi_struct *napi,
451 struct virtqueue *vq)
452{
453 if (napi_schedule_prep(napi)) {
454 virtqueue_disable_cb(vq);
455 __napi_schedule(napi);
456 }
457}
458
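/* Finish a NAPI poll: re-enable virtqueue callbacks and, if more buffers
 * arrived while callbacks were disabled, reschedule NAPI instead.
 * Returns true only when polling has really stopped.
 */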
459static bool virtqueue_napi_complete(struct napi_struct *napi,
460 struct virtqueue *vq, int processed)
461{
462 int opaque;
463
464 opaque = virtqueue_enable_cb_prepare(vq);
465 if (napi_complete_done(napi, processed)) {
466 if (unlikely(virtqueue_poll(vq, opaque)))
467 virtqueue_napi_schedule(napi, vq);
468 else
469 return true;
470 } else {
471 virtqueue_disable_cb(vq);
472 }
473
474 return false;
475}
476
477static void skb_xmit_done(struct virtqueue *vq)
478{
479 struct virtnet_info *vi = vq->vdev->priv;
480 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
481
482 /* Suppress further interrupts. */
483 virtqueue_disable_cb(vq);
484
485 if (napi->weight)
486 virtqueue_napi_schedule(napi, vq);
487 else
488 /* We were probably waiting for more output buffers. */
489 netif_wake_subqueue(vi->dev, vq2txq(vq));
490}
491
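/* For mergeable buffers, the per-buffer context passed to
 * virtqueue_add_inbuf_ctx() packs the truesize into the low 22 bits and
 * the headroom into the bits above.
 */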
492#define MRG_CTX_HEADER_SHIFT 22
493static void *mergeable_len_to_ctx(unsigned int truesize,
494 unsigned int headroom)
495{
496 return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
497}
498
499static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
500{
501 return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
502}
503
504static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
505{
506 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
507}
508
509static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
510 unsigned int headroom,
511 unsigned int len)
512{
513 struct sk_buff *skb;
514
515 skb = build_skb(buf, buflen);
516 if (unlikely(!skb))
517 return NULL;
518
519 skb_reserve(skb, headroom);
520 skb_put(skb, len);
521
522 return skb;
523}
524
525/* Called from bottom half context */
526static struct sk_buff *page_to_skb(struct virtnet_info *vi,
527 struct receive_queue *rq,
528 struct page *page, unsigned int offset,
529 unsigned int len, unsigned int truesize,
530 unsigned int headroom)
531{
532 struct sk_buff *skb;
533 struct virtio_net_common_hdr *hdr;
534 unsigned int copy, hdr_len, hdr_padded_len;
535 struct page *page_to_free = NULL;
536 int tailroom, shinfo_size;
537 char *p, *hdr_p, *buf;
538
539 p = page_address(page) + offset;
540 hdr_p = p;
541
542 hdr_len = vi->hdr_len;
543 if (vi->mergeable_rx_bufs)
544 hdr_padded_len = hdr_len;
545 else
546 hdr_padded_len = sizeof(struct padded_vnet_hdr);
547
548 buf = p - headroom;
549 len -= hdr_len;
550 offset += hdr_padded_len;
551 p += hdr_padded_len;
552 tailroom = truesize - headroom - hdr_padded_len - len;
553
554 shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
555
556 /* build the skb around the page itself for large packets, avoiding a copy */
557 if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
558 skb = virtnet_build_skb(buf, truesize, p - buf, len);
559 if (unlikely(!skb))
560 return NULL;
561
562 page = (struct page *)page->private;
563 if (page)
564 give_pages(rq, page);
565 goto ok;
566 }
567
568 /* copy small packet so we can reuse these pages for small data */
569 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
570 if (unlikely(!skb))
571 return NULL;
572
573 /* Copy the whole frame if it fits skb->head, otherwise
574 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
575 */
576 if (len <= skb_tailroom(skb))
577 copy = len;
578 else
579 copy = ETH_HLEN;
580 skb_put_data(skb, p, copy);
581
582 len -= copy;
583 offset += copy;
584
585 if (vi->mergeable_rx_bufs) {
586 if (len)
587 skb_add_rx_frag(skb, 0, page, offset, len, truesize);
588 else
589 page_to_free = page;
590 goto ok;
591 }
592
593 /*
594 * Verify that we can indeed put this data into a skb.
595 * This is here to handle cases when the device erroneously
596 * tries to receive more than is possible. This is usually
597 * the case of a broken device.
598 */
599 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
600 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
601 dev_kfree_skb(skb);
602 return NULL;
603 }
604 BUG_ON(offset >= PAGE_SIZE);
605 while (len) {
606 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
607 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
608 frag_size, truesize);
609 len -= frag_size;
610 page = (struct page *)page->private;
611 offset = 0;
612 }
613
614 if (page)
615 give_pages(rq, page);
616
617ok:
618 hdr = skb_vnet_common_hdr(skb);
619 memcpy(hdr, hdr_p, hdr_len);
620 if (page_to_free)
621 put_page(page_to_free);
622
623 return skb;
624}
625
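/* Drop one reference on the DMA mapping of the page backing @buf, syncing
 * the buffer for the CPU if needed; the whole page is unmapped and released
 * once the last buffer using it has been returned.
 */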
626static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
627{
628 struct page *page = virt_to_head_page(buf);
629 struct virtnet_rq_dma *dma;
630 void *head;
631 int offset;
632
633 head = page_address(page);
634
635 dma = head;
636
637 --dma->ref;
638
639 if (dma->need_sync && len) {
640 offset = buf - (head + sizeof(*dma));
641
642 virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
643 offset, len,
644 DMA_FROM_DEVICE);
645 }
646
647 if (dma->ref)
648 return;
649
650 virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
651 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
652 put_page(page);
653}
654
655static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
656{
657 void *buf;
658
659 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
660 if (buf && rq->do_dma)
661 virtnet_rq_unmap(rq, buf, *len);
662
663 return buf;
664}
665
666static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
667{
668 struct virtnet_rq_dma *dma;
669 dma_addr_t addr;
670 u32 offset;
671 void *head;
672
673 if (!rq->do_dma) {
674 sg_init_one(rq->sg, buf, len);
675 return;
676 }
677
678 head = page_address(rq->alloc_frag.page);
679
680 offset = buf - head;
681
682 dma = head;
683
684 addr = dma->addr - sizeof(*dma) + offset;
685
686 sg_init_table(rq->sg, 1);
687 rq->sg[0].dma_address = addr;
688 rq->sg[0].length = len;
689}
690
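/* Carve a receive buffer out of rq->alloc_frag. In premapped mode
 * (rq->do_dma), a struct virtnet_rq_dma at the start of each page records
 * the page's DMA mapping and a count of buffers still in flight, so the
 * mapping can be torn down in virtnet_rq_unmap() once the last buffer is
 * returned.
 */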
691static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
692{
693 struct page_frag *alloc_frag = &rq->alloc_frag;
694 struct virtnet_rq_dma *dma;
695 void *buf, *head;
696 dma_addr_t addr;
697
698 if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
699 return NULL;
700
701 head = page_address(alloc_frag->page);
702
703 if (rq->do_dma) {
704 dma = head;
705
706 /* new pages */
707 if (!alloc_frag->offset) {
708 if (rq->last_dma) {
709 /* Now that the new page is allocated, the last dma
710 * will no longer be used, so it can be unmapped
711 * if its ref is 0.
712 */
713 virtnet_rq_unmap(rq, rq->last_dma, 0);
714 rq->last_dma = NULL;
715 }
716
717 dma->len = alloc_frag->size - sizeof(*dma);
718
719 addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
720 dma->len, DMA_FROM_DEVICE, 0);
721 if (virtqueue_dma_mapping_error(rq->vq, addr))
722 return NULL;
723
724 dma->addr = addr;
725 dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
726
727 /* Add a reference to dma to prevent the entire dma from
728 * being released during error handling. This reference
729 * will be freed after the pages are no longer used.
730 */
731 get_page(alloc_frag->page);
732 dma->ref = 1;
733 alloc_frag->offset = sizeof(*dma);
734
735 rq->last_dma = dma;
736 }
737
738 ++dma->ref;
739 }
740
741 buf = head + alloc_frag->offset;
742
743 get_page(alloc_frag->page);
744 alloc_frag->offset += size;
745
746 return buf;
747}
748
749static void virtnet_rq_set_premapped(struct virtnet_info *vi)
750{
751 int i;
752
753 /* disable for big mode */
754 if (!vi->mergeable_rx_bufs && vi->big_packets)
755 return;
756
757 for (i = 0; i < vi->max_queue_pairs; i++) {
758 if (virtqueue_set_dma_premapped(vi->rq[i].vq))
759 continue;
760
761 vi->rq[i].do_dma = true;
762 }
763}
764
765static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
766{
767 struct virtnet_info *vi = vq->vdev->priv;
768 struct receive_queue *rq;
769 int i = vq2rxq(vq);
770
771 rq = &vi->rq[i];
772
773 if (rq->do_dma)
774 virtnet_rq_unmap(rq, buf, 0);
775
776 virtnet_rq_free_buf(vi, rq, buf);
777}
778
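/* Reclaim completed transmit buffers from the virtqueue. Tokens tagged with
 * VIRTIO_XDP_FLAG are xdp_frames, everything else is an sk_buff; byte and
 * packet counters are updated accordingly.
 */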
779static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
780{
781 unsigned int len;
782 unsigned int packets = 0;
783 unsigned int bytes = 0;
784 void *ptr;
785
786 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
787 if (likely(!is_xdp_frame(ptr))) {
788 struct sk_buff *skb = ptr;
789
790 pr_debug("Sent skb %p\n", skb);
791
792 bytes += skb->len;
793 napi_consume_skb(skb, in_napi);
794 } else {
795 struct xdp_frame *frame = ptr_to_xdp(ptr);
796
797 bytes += xdp_get_frame_len(frame);
798 xdp_return_frame(frame);
799 }
800 packets++;
801 }
802
803 /* Avoid overhead when no packets have been processed;
804 * this happens when called speculatively from start_xmit.
805 */
806 if (!packets)
807 return;
808
809 u64_stats_update_begin(&sq->stats.syncp);
810 u64_stats_add(&sq->stats.bytes, bytes);
811 u64_stats_add(&sq->stats.packets, packets);
812 u64_stats_update_end(&sq->stats.syncp);
813}
814
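/* True when queue @q is one of the xdp_queue_pairs queues reserved for XDP
 * transmission; their entries are raw XDP buffers rather than sk_buffs.
 */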
815static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
816{
817 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
818 return false;
819 else if (q < vi->curr_queue_pairs)
820 return true;
821 else
822 return false;
823}
824
825static void check_sq_full_and_disable(struct virtnet_info *vi,
826 struct net_device *dev,
827 struct send_queue *sq)
828{
829 bool use_napi = sq->napi.weight;
830 int qnum;
831
832 qnum = sq - vi->sq;
833
834 /* If running out of space, stop queue to avoid getting packets that we
835 * are then unable to transmit.
836 * An alternative would be to force queuing layer to requeue the skb by
837 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
838 * returned in a normal path of operation: it means that driver is not
839 * maintaining the TX queue stop/start state properly, and causes
840 * the stack to do a non-trivial amount of useless work.
841 * Since most packets only take 1 or 2 ring slots, stopping the queue
842 * early means 16 slots are typically wasted.
843 */
844 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
845 netif_stop_subqueue(dev, qnum);
846 if (use_napi) {
847 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
848 virtqueue_napi_schedule(&sq->napi, sq->vq);
849 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
850 /* More just got used, free them then recheck. */
851 free_old_xmit_skbs(sq, false);
852 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
853 netif_start_subqueue(dev, qnum);
854 virtqueue_disable_cb(sq->vq);
855 }
856 }
857 }
858}
859
860static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
861 struct send_queue *sq,
862 struct xdp_frame *xdpf)
863{
864 struct virtio_net_hdr_mrg_rxbuf *hdr;
865 struct skb_shared_info *shinfo;
866 u8 nr_frags = 0;
867 int err, i;
868
869 if (unlikely(xdpf->headroom < vi->hdr_len))
870 return -EOVERFLOW;
871
872 if (unlikely(xdp_frame_has_frags(xdpf))) {
873 shinfo = xdp_get_shared_info_from_frame(xdpf);
874 nr_frags = shinfo->nr_frags;
875 }
876
877 /* In the wrapping function virtnet_xdp_xmit(), we need to free
878 * up the pending old buffers, where we need to calculate the
879 * position of skb_shared_info in xdp_get_frame_len() and
880 * xdp_return_frame(), which involves xdpf->data and
881 * xdpf->headroom. Therefore, we need to update the value of
882 * headroom synchronously here.
883 */
884 xdpf->headroom -= vi->hdr_len;
885 xdpf->data -= vi->hdr_len;
886 /* Zero header and leave csum up to XDP layers */
887 hdr = xdpf->data;
888 memset(hdr, 0, vi->hdr_len);
889 xdpf->len += vi->hdr_len;
890
891 sg_init_table(sq->sg, nr_frags + 1);
892 sg_set_buf(sq->sg, xdpf->data, xdpf->len);
893 for (i = 0; i < nr_frags; i++) {
894 skb_frag_t *frag = &shinfo->frags[i];
895
896 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
897 skb_frag_size(frag), skb_frag_off(frag));
898 }
899
900 err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
901 xdp_to_ptr(xdpf), GFP_ATOMIC);
902 if (unlikely(err))
903 return -ENOSPC; /* Caller handle free/refcnt */
904
905 return 0;
906}
907
908/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for XDP tx on
909 * the current cpu, so it does not need to be locked.
910 *
911 * Here we use a macro instead of inline functions because we have to deal with
912 * three issues at the same time: 1. the choice of sq, 2. deciding whether to
913 * lock/unlock the txq, 3. making sparse happy. It is difficult for two inline
914 * functions to solve all three problems at the same time.
915 */
916#define virtnet_xdp_get_sq(vi) ({ \
917 int cpu = smp_processor_id(); \
918 struct netdev_queue *txq; \
919 typeof(vi) v = (vi); \
920 unsigned int qp; \
921 \
922 if (v->curr_queue_pairs > nr_cpu_ids) { \
923 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
924 qp += cpu; \
925 txq = netdev_get_tx_queue(v->dev, qp); \
926 __netif_tx_acquire(txq); \
927 } else { \
928 qp = cpu % v->curr_queue_pairs; \
929 txq = netdev_get_tx_queue(v->dev, qp); \
930 __netif_tx_lock(txq, cpu); \
931 } \
932 v->sq + qp; \
933})
934
935#define virtnet_xdp_put_sq(vi, q) { \
936 struct netdev_queue *txq; \
937 typeof(vi) v = (vi); \
938 \
939 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
940 if (v->curr_queue_pairs > nr_cpu_ids) \
941 __netif_tx_release(txq); \
942 else \
943 __netif_tx_unlock(txq); \
944}
945
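/* ndo_xdp_xmit: transmit a batch of xdp_frames on this CPU's XDP send queue.
 * Completed entries are reclaimed first; the queue is kicked only when
 * XDP_XMIT_FLUSH is set. Returns the number of frames accepted or a
 * negative errno.
 */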
946static int virtnet_xdp_xmit(struct net_device *dev,
947 int n, struct xdp_frame **frames, u32 flags)
948{
949 struct virtnet_info *vi = netdev_priv(dev);
950 struct receive_queue *rq = vi->rq;
951 struct bpf_prog *xdp_prog;
952 struct send_queue *sq;
953 unsigned int len;
954 int packets = 0;
955 int bytes = 0;
956 int nxmit = 0;
957 int kicks = 0;
958 void *ptr;
959 int ret;
960 int i;
961
962 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
963 * indicates XDP resources have been successfully allocated.
964 */
965 xdp_prog = rcu_access_pointer(rq->xdp_prog);
966 if (!xdp_prog)
967 return -ENXIO;
968
969 sq = virtnet_xdp_get_sq(vi);
970
971 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
972 ret = -EINVAL;
973 goto out;
974 }
975
976 /* Free up any pending old buffers before queueing new ones. */
977 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
978 if (likely(is_xdp_frame(ptr))) {
979 struct xdp_frame *frame = ptr_to_xdp(ptr);
980
981 bytes += xdp_get_frame_len(frame);
982 xdp_return_frame(frame);
983 } else {
984 struct sk_buff *skb = ptr;
985
986 bytes += skb->len;
987 napi_consume_skb(skb, false);
988 }
989 packets++;
990 }
991
992 for (i = 0; i < n; i++) {
993 struct xdp_frame *xdpf = frames[i];
994
995 if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
996 break;
997 nxmit++;
998 }
999 ret = nxmit;
1000
1001 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1002 check_sq_full_and_disable(vi, dev, sq);
1003
1004 if (flags & XDP_XMIT_FLUSH) {
1005 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1006 kicks = 1;
1007 }
1008out:
1009 u64_stats_update_begin(&sq->stats.syncp);
1010 u64_stats_add(&sq->stats.bytes, bytes);
1011 u64_stats_add(&sq->stats.packets, packets);
1012 u64_stats_add(&sq->stats.xdp_tx, n);
1013 u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
1014 u64_stats_add(&sq->stats.kicks, kicks);
1015 u64_stats_update_end(&sq->stats.syncp);
1016
1017 virtnet_xdp_put_sq(vi, sq);
1018 return ret;
1019}
1020
1021static void put_xdp_frags(struct xdp_buff *xdp)
1022{
1023 struct skb_shared_info *shinfo;
1024 struct page *xdp_page;
1025 int i;
1026
1027 if (xdp_buff_has_frags(xdp)) {
1028 shinfo = xdp_get_shared_info_from_buff(xdp);
1029 for (i = 0; i < shinfo->nr_frags; i++) {
1030 xdp_page = skb_frag_page(&shinfo->frags[i]);
1031 put_page(xdp_page);
1032 }
1033 }
1034}
1035
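/* Run the XDP program on @xdp and carry out the resulting action (transmit,
 * redirect or drop, with statistics updated); the returned verdict tells the
 * caller how to finish handling the buffer. Failures are reported as
 * XDP_DROP.
 */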
1036static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
1037 struct net_device *dev,
1038 unsigned int *xdp_xmit,
1039 struct virtnet_rq_stats *stats)
1040{
1041 struct xdp_frame *xdpf;
1042 int err;
1043 u32 act;
1044
1045 act = bpf_prog_run_xdp(xdp_prog, xdp);
1046 u64_stats_inc(&stats->xdp_packets);
1047
1048 switch (act) {
1049 case XDP_PASS:
1050 return act;
1051
1052 case XDP_TX:
1053 u64_stats_inc(&stats->xdp_tx);
1054 xdpf = xdp_convert_buff_to_frame(xdp);
1055 if (unlikely(!xdpf)) {
1056 netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1057 return XDP_DROP;
1058 }
1059
1060 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1061 if (unlikely(!err)) {
1062 xdp_return_frame_rx_napi(xdpf);
1063 } else if (unlikely(err < 0)) {
1064 trace_xdp_exception(dev, xdp_prog, act);
1065 return XDP_DROP;
1066 }
1067 *xdp_xmit |= VIRTIO_XDP_TX;
1068 return act;
1069
1070 case XDP_REDIRECT:
1071 u64_stats_inc(&stats->xdp_redirects);
1072 err = xdp_do_redirect(dev, xdp, xdp_prog);
1073 if (err)
1074 return XDP_DROP;
1075
1076 *xdp_xmit |= VIRTIO_XDP_REDIR;
1077 return act;
1078
1079 default:
1080 bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
1081 fallthrough;
1082 case XDP_ABORTED:
1083 trace_xdp_exception(dev, xdp_prog, act);
1084 fallthrough;
1085 case XDP_DROP:
1086 return XDP_DROP;
1087 }
1088}
1089
1090static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1091{
1092 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
1093}
1094
1095/* We copy the packet for XDP in the following cases:
1096 *
1097 * 1) Packet is scattered across multiple rx buffers.
1098 * 2) Headroom space is insufficient.
1099 *
1100 * This is inefficient but it's a temporary condition that
1101 * we hit right after XDP is enabled and until the queue is refilled
1102 * with large buffers with sufficient headroom - so it should affect
1103 * at most queue size packets.
1104 * Afterwards, the conditions to enable
1105 * XDP should preclude the underlying device from sending packets
1106 * across multiple buffers (num_buf > 1), and we make sure buffers
1107 * have enough headroom.
1108 */
1109static struct page *xdp_linearize_page(struct receive_queue *rq,
1110 int *num_buf,
1111 struct page *p,
1112 int offset,
1113 int page_off,
1114 unsigned int *len)
1115{
1116 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1117 struct page *page;
1118
1119 if (page_off + *len + tailroom > PAGE_SIZE)
1120 return NULL;
1121
1122 page = alloc_page(GFP_ATOMIC);
1123 if (!page)
1124 return NULL;
1125
1126 memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
1127 page_off += *len;
1128
1129 while (--*num_buf) {
1130 unsigned int buflen;
1131 void *buf;
1132 int off;
1133
1134 buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1135 if (unlikely(!buf))
1136 goto err_buf;
1137
1138 p = virt_to_head_page(buf);
1139 off = buf - page_address(p);
1140
1141 /* guard against a misconfigured or uncooperative backend that
1142 * is sending packets larger than the MTU.
1143 */
1144 if ((page_off + buflen + tailroom) > PAGE_SIZE) {
1145 put_page(p);
1146 goto err_buf;
1147 }
1148
1149 memcpy(page_address(page) + page_off,
1150 page_address(p) + off, buflen);
1151 page_off += buflen;
1152 put_page(p);
1153 }
1154
1155 /* Headroom does not contribute to packet length */
1156 *len = page_off - VIRTIO_XDP_HEADROOM;
1157 return page;
1158err_buf:
1159 __free_pages(page, 0);
1160 return NULL;
1161}
1162
1163static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
1164 unsigned int xdp_headroom,
1165 void *buf,
1166 unsigned int len)
1167{
1168 unsigned int header_offset;
1169 unsigned int headroom;
1170 unsigned int buflen;
1171 struct sk_buff *skb;
1172
1173 header_offset = VIRTNET_RX_PAD + xdp_headroom;
1174 headroom = vi->hdr_len + header_offset;
1175 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1176 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1177
1178 skb = virtnet_build_skb(buf, buflen, headroom, len);
1179 if (unlikely(!skb))
1180 return NULL;
1181
1182 buf += header_offset;
1183 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
1184
1185 return skb;
1186}
1187
1188static struct sk_buff *receive_small_xdp(struct net_device *dev,
1189 struct virtnet_info *vi,
1190 struct receive_queue *rq,
1191 struct bpf_prog *xdp_prog,
1192 void *buf,
1193 unsigned int xdp_headroom,
1194 unsigned int len,
1195 unsigned int *xdp_xmit,
1196 struct virtnet_rq_stats *stats)
1197{
1198 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
1199 unsigned int headroom = vi->hdr_len + header_offset;
1200 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1201 struct page *page = virt_to_head_page(buf);
1202 struct page *xdp_page;
1203 unsigned int buflen;
1204 struct xdp_buff xdp;
1205 struct sk_buff *skb;
1206 unsigned int metasize = 0;
1207 u32 act;
1208
1209 if (unlikely(hdr->hdr.gso_type))
1210 goto err_xdp;
1211
1212 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1213 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1214
1215 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
1216 int offset = buf - page_address(page) + header_offset;
1217 unsigned int tlen = len + vi->hdr_len;
1218 int num_buf = 1;
1219
1220 xdp_headroom = virtnet_get_headroom(vi);
1221 header_offset = VIRTNET_RX_PAD + xdp_headroom;
1222 headroom = vi->hdr_len + header_offset;
1223 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1224 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1225 xdp_page = xdp_linearize_page(rq, &num_buf, page,
1226 offset, header_offset,
1227 &tlen);
1228 if (!xdp_page)
1229 goto err_xdp;
1230
1231 buf = page_address(xdp_page);
1232 put_page(page);
1233 page = xdp_page;
1234 }
1235
1236 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1237 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1238 xdp_headroom, len, true);
1239
1240 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1241
1242 switch (act) {
1243 case XDP_PASS:
1244 /* Recalculate length in case bpf program changed it */
1245 len = xdp.data_end - xdp.data;
1246 metasize = xdp.data - xdp.data_meta;
1247 break;
1248
1249 case XDP_TX:
1250 case XDP_REDIRECT:
1251 goto xdp_xmit;
1252
1253 default:
1254 goto err_xdp;
1255 }
1256
1257 skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
1258 if (unlikely(!skb))
1259 goto err;
1260
1261 if (metasize)
1262 skb_metadata_set(skb, metasize);
1263
1264 return skb;
1265
1266err_xdp:
1267 u64_stats_inc(&stats->xdp_drops);
1268err:
1269 u64_stats_inc(&stats->drops);
1270 put_page(page);
1271xdp_xmit:
1272 return NULL;
1273}
1274
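/* Receive path for small (single, fixed-size) buffers. @ctx carries the XDP
 * headroom that was reserved when the buffer was posted, see
 * add_recvbuf_small().
 */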
1275static struct sk_buff *receive_small(struct net_device *dev,
1276 struct virtnet_info *vi,
1277 struct receive_queue *rq,
1278 void *buf, void *ctx,
1279 unsigned int len,
1280 unsigned int *xdp_xmit,
1281 struct virtnet_rq_stats *stats)
1282{
1283 unsigned int xdp_headroom = (unsigned long)ctx;
1284 struct page *page = virt_to_head_page(buf);
1285 struct sk_buff *skb;
1286
1287 len -= vi->hdr_len;
1288 u64_stats_add(&stats->bytes, len);
1289
1290 if (unlikely(len > GOOD_PACKET_LEN)) {
1291 pr_debug("%s: rx error: len %u exceeds max size %d\n",
1292 dev->name, len, GOOD_PACKET_LEN);
1293 DEV_STATS_INC(dev, rx_length_errors);
1294 goto err;
1295 }
1296
1297 if (unlikely(vi->xdp_enabled)) {
1298 struct bpf_prog *xdp_prog;
1299
1300 rcu_read_lock();
1301 xdp_prog = rcu_dereference(rq->xdp_prog);
1302 if (xdp_prog) {
1303 skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
1304 xdp_headroom, len, xdp_xmit,
1305 stats);
1306 rcu_read_unlock();
1307 return skb;
1308 }
1309 rcu_read_unlock();
1310 }
1311
1312 skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
1313 if (likely(skb))
1314 return skb;
1315
1316err:
1317 u64_stats_inc(&stats->drops);
1318 put_page(page);
1319 return NULL;
1320}
1321
1322static struct sk_buff *receive_big(struct net_device *dev,
1323 struct virtnet_info *vi,
1324 struct receive_queue *rq,
1325 void *buf,
1326 unsigned int len,
1327 struct virtnet_rq_stats *stats)
1328{
1329 struct page *page = buf;
1330 struct sk_buff *skb =
1331 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
1332
1333 u64_stats_add(&stats->bytes, len - vi->hdr_len);
1334 if (unlikely(!skb))
1335 goto err;
1336
1337 return skb;
1338
1339err:
1340 u64_stats_inc(&stats->drops);
1341 give_pages(rq, page);
1342 return NULL;
1343}
1344
1345static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
1346 struct net_device *dev,
1347 struct virtnet_rq_stats *stats)
1348{
1349 struct page *page;
1350 void *buf;
1351 int len;
1352
1353 while (num_buf-- > 1) {
1354 buf = virtnet_rq_get_buf(rq, &len, NULL);
1355 if (unlikely(!buf)) {
1356 pr_debug("%s: rx error: %d buffers missing\n",
1357 dev->name, num_buf);
1358 DEV_STATS_INC(dev, rx_length_errors);
1359 break;
1360 }
1361 u64_stats_add(&stats->bytes, len);
1362 page = virt_to_head_page(buf);
1363 put_page(page);
1364 }
1365}
1366
1367/* Why not use xdp_build_skb_from_frame() ?
1368 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
1369 * virtio-net there are 2 points that do not match its requirements:
1370 * 1. The size of the prefilled buffer is not fixed before xdp is set.
1371 * 2. xdp_build_skb_from_frame() does more checks that we don't need,
1372 * like eth_type_trans() (which virtio-net does in receive_buf()).
1373 */
1374static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
1375 struct virtnet_info *vi,
1376 struct xdp_buff *xdp,
1377 unsigned int xdp_frags_truesz)
1378{
1379 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
1380 unsigned int headroom, data_len;
1381 struct sk_buff *skb;
1382 int metasize;
1383 u8 nr_frags;
1384
1385 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
1386 pr_debug("Error building skb as missing reserved tailroom for xdp");
1387 return NULL;
1388 }
1389
1390 if (unlikely(xdp_buff_has_frags(xdp)))
1391 nr_frags = sinfo->nr_frags;
1392
1393 skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
1394 if (unlikely(!skb))
1395 return NULL;
1396
1397 headroom = xdp->data - xdp->data_hard_start;
1398 data_len = xdp->data_end - xdp->data;
1399 skb_reserve(skb, headroom);
1400 __skb_put(skb, data_len);
1401
1402 metasize = xdp->data - xdp->data_meta;
1403 metasize = metasize > 0 ? metasize : 0;
1404 if (metasize)
1405 skb_metadata_set(skb, metasize);
1406
1407 if (unlikely(xdp_buff_has_frags(xdp)))
1408 xdp_update_skb_shared_info(skb, nr_frags,
1409 sinfo->xdp_frags_size,
1410 xdp_frags_truesz,
1411 xdp_buff_is_frag_pfmemalloc(xdp));
1412
1413 return skb;
1414}
1415
1416/* TODO: build xdp in big mode */
1417static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
1418 struct virtnet_info *vi,
1419 struct receive_queue *rq,
1420 struct xdp_buff *xdp,
1421 void *buf,
1422 unsigned int len,
1423 unsigned int frame_sz,
1424 int *num_buf,
1425 unsigned int *xdp_frags_truesize,
1426 struct virtnet_rq_stats *stats)
1427{
1428 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1429 unsigned int headroom, tailroom, room;
1430 unsigned int truesize, cur_frag_size;
1431 struct skb_shared_info *shinfo;
1432 unsigned int xdp_frags_truesz = 0;
1433 struct page *page;
1434 skb_frag_t *frag;
1435 int offset;
1436 void *ctx;
1437
1438 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
1439 xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
1440 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
1441
1442 if (!*num_buf)
1443 return 0;
1444
1445 if (*num_buf > 1) {
1446 /* If we want to build multi-buffer xdp, we need
1447 * to specify that the flags of xdp_buff have the
1448 * XDP_FLAGS_HAS_FRAGS bit.
1449 */
1450 if (!xdp_buff_has_frags(xdp))
1451 xdp_buff_set_frags_flag(xdp);
1452
1453 shinfo = xdp_get_shared_info_from_buff(xdp);
1454 shinfo->nr_frags = 0;
1455 shinfo->xdp_frags_size = 0;
1456 }
1457
1458 if (*num_buf > MAX_SKB_FRAGS + 1)
1459 return -EINVAL;
1460
1461 while (--*num_buf > 0) {
1462 buf = virtnet_rq_get_buf(rq, &len, &ctx);
1463 if (unlikely(!buf)) {
1464 pr_debug("%s: rx error: %d buffers out of %d missing\n",
1465 dev->name, *num_buf,
1466 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
1467 DEV_STATS_INC(dev, rx_length_errors);
1468 goto err;
1469 }
1470
1471 u64_stats_add(&stats->bytes, len);
1472 page = virt_to_head_page(buf);
1473 offset = buf - page_address(page);
1474
1475 truesize = mergeable_ctx_to_truesize(ctx);
1476 headroom = mergeable_ctx_to_headroom(ctx);
1477 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1478 room = SKB_DATA_ALIGN(headroom + tailroom);
1479
1480 cur_frag_size = truesize;
1481 xdp_frags_truesz += cur_frag_size;
1482 if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
1483 put_page(page);
1484 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1485 dev->name, len, (unsigned long)(truesize - room));
1486 DEV_STATS_INC(dev, rx_length_errors);
1487 goto err;
1488 }
1489
1490 frag = &shinfo->frags[shinfo->nr_frags++];
1491 skb_frag_fill_page_desc(frag, page, offset, len);
1492 if (page_is_pfmemalloc(page))
1493 xdp_buff_set_frag_pfmemalloc(xdp);
1494
1495 shinfo->xdp_frags_size += len;
1496 }
1497
1498 *xdp_frags_truesize = xdp_frags_truesz;
1499 return 0;
1500
1501err:
1502 put_xdp_frags(xdp);
1503 return -EINVAL;
1504}
1505
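/* Prepare a buffer that XDP can run on: use the original buffer in place when
 * it has enough headroom (and the program accepts frags for multi-buffer
 * packets), otherwise linearize or copy the data into a freshly allocated
 * page, updating *page, *num_buf, *len and *frame_sz as needed. Returns NULL
 * on failure.
 */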
1506static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
1507 struct receive_queue *rq,
1508 struct bpf_prog *xdp_prog,
1509 void *ctx,
1510 unsigned int *frame_sz,
1511 int *num_buf,
1512 struct page **page,
1513 int offset,
1514 unsigned int *len,
1515 struct virtio_net_hdr_mrg_rxbuf *hdr)
1516{
1517 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1518 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1519 struct page *xdp_page;
1520 unsigned int xdp_room;
1521
1522 /* Transient failure which in theory could occur if
1523 * in-flight packets from before XDP was enabled reach
1524 * the receive path after XDP is loaded.
1525 */
1526 if (unlikely(hdr->hdr.gso_type))
1527 return NULL;
1528
1529 /* Now XDP core assumes frag size is PAGE_SIZE, but buffers
1530 * with headroom may add a hole in truesize, which
1531 * makes their length exceed PAGE_SIZE. So we disabled the
1532 * hole mechanism for xdp. See add_recvbuf_mergeable().
1533 */
1534 *frame_sz = truesize;
1535
1536 if (likely(headroom >= virtnet_get_headroom(vi) &&
1537 (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
1538 return page_address(*page) + offset;
1539 }
1540
1541 /* This happens when there is not enough headroom because
1542 * the buffer was prefilled before XDP was set.
1543 * This should only happen for the first several packets.
1544 * In fact, vq reset can be used here to help us clean up
1545 * the prefilled buffers, but many existing devices do not
1546 * support it, and we don't want to bother users who are
1547 * using xdp normally.
1548 */
1549 if (!xdp_prog->aux->xdp_has_frags) {
1550 /* linearize data for XDP */
1551 xdp_page = xdp_linearize_page(rq, num_buf,
1552 *page, offset,
1553 VIRTIO_XDP_HEADROOM,
1554 len);
1555 if (!xdp_page)
1556 return NULL;
1557 } else {
1558 xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1559 sizeof(struct skb_shared_info));
1560 if (*len + xdp_room > PAGE_SIZE)
1561 return NULL;
1562
1563 xdp_page = alloc_page(GFP_ATOMIC);
1564 if (!xdp_page)
1565 return NULL;
1566
1567 memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1568 page_address(*page) + offset, *len);
1569 }
1570
1571 *frame_sz = PAGE_SIZE;
1572
1573 put_page(*page);
1574
1575 *page = xdp_page;
1576
1577 return page_address(*page) + VIRTIO_XDP_HEADROOM;
1578}
1579
1580static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1581 struct virtnet_info *vi,
1582 struct receive_queue *rq,
1583 struct bpf_prog *xdp_prog,
1584 void *buf,
1585 void *ctx,
1586 unsigned int len,
1587 unsigned int *xdp_xmit,
1588 struct virtnet_rq_stats *stats)
1589{
1590 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1591 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1592 struct page *page = virt_to_head_page(buf);
1593 int offset = buf - page_address(page);
1594 unsigned int xdp_frags_truesz = 0;
1595 struct sk_buff *head_skb;
1596 unsigned int frame_sz;
1597 struct xdp_buff xdp;
1598 void *data;
1599 u32 act;
1600 int err;
1601
1602 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1603 offset, &len, hdr);
1604 if (unlikely(!data))
1605 goto err_xdp;
1606
1607 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1608 &num_buf, &xdp_frags_truesz, stats);
1609 if (unlikely(err))
1610 goto err_xdp;
1611
1612 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1613
1614 switch (act) {
1615 case XDP_PASS:
1616 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1617 if (unlikely(!head_skb))
1618 break;
1619 return head_skb;
1620
1621 case XDP_TX:
1622 case XDP_REDIRECT:
1623 return NULL;
1624
1625 default:
1626 break;
1627 }
1628
1629 put_xdp_frags(&xdp);
1630
1631err_xdp:
1632 put_page(page);
1633 mergeable_buf_free(rq, num_buf, dev, stats);
1634
1635 u64_stats_inc(&stats->xdp_drops);
1636 u64_stats_inc(&stats->drops);
1637 return NULL;
1638}
1639
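/* Receive path for mergeable buffers: the first buffer carries the virtio
 * header with num_buffers; the packet's remaining buffers are pulled from
 * the ring and attached to the head skb as frags, chaining extra skbs onto
 * the frag_list once MAX_SKB_FRAGS is reached.
 */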
1640static struct sk_buff *receive_mergeable(struct net_device *dev,
1641 struct virtnet_info *vi,
1642 struct receive_queue *rq,
1643 void *buf,
1644 void *ctx,
1645 unsigned int len,
1646 unsigned int *xdp_xmit,
1647 struct virtnet_rq_stats *stats)
1648{
1649 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1650 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1651 struct page *page = virt_to_head_page(buf);
1652 int offset = buf - page_address(page);
1653 struct sk_buff *head_skb, *curr_skb;
1654 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1655 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1656 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1657 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1658
1659 head_skb = NULL;
1660 u64_stats_add(&stats->bytes, len - vi->hdr_len);
1661
1662 if (unlikely(len > truesize - room)) {
1663 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1664 dev->name, len, (unsigned long)(truesize - room));
1665 DEV_STATS_INC(dev, rx_length_errors);
1666 goto err_skb;
1667 }
1668
1669 if (unlikely(vi->xdp_enabled)) {
1670 struct bpf_prog *xdp_prog;
1671
1672 rcu_read_lock();
1673 xdp_prog = rcu_dereference(rq->xdp_prog);
1674 if (xdp_prog) {
1675 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1676 len, xdp_xmit, stats);
1677 rcu_read_unlock();
1678 return head_skb;
1679 }
1680 rcu_read_unlock();
1681 }
1682
1683 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1684 curr_skb = head_skb;
1685
1686 if (unlikely(!curr_skb))
1687 goto err_skb;
1688 while (--num_buf) {
1689 int num_skb_frags;
1690
1691 buf = virtnet_rq_get_buf(rq, &len, &ctx);
1692 if (unlikely(!buf)) {
1693 pr_debug("%s: rx error: %d buffers out of %d missing\n",
1694 dev->name, num_buf,
1695 virtio16_to_cpu(vi->vdev,
1696 hdr->num_buffers));
1697 DEV_STATS_INC(dev, rx_length_errors);
1698 goto err_buf;
1699 }
1700
1701 u64_stats_add(&stats->bytes, len);
1702 page = virt_to_head_page(buf);
1703
1704 truesize = mergeable_ctx_to_truesize(ctx);
1705 headroom = mergeable_ctx_to_headroom(ctx);
1706 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1707 room = SKB_DATA_ALIGN(headroom + tailroom);
1708 if (unlikely(len > truesize - room)) {
1709 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1710 dev->name, len, (unsigned long)(truesize - room));
1711 DEV_STATS_INC(dev, rx_length_errors);
1712 goto err_skb;
1713 }
1714
1715 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
1716 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1717 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
1718
1719 if (unlikely(!nskb))
1720 goto err_skb;
1721 if (curr_skb == head_skb)
1722 skb_shinfo(curr_skb)->frag_list = nskb;
1723 else
1724 curr_skb->next = nskb;
1725 curr_skb = nskb;
1726 head_skb->truesize += nskb->truesize;
1727 num_skb_frags = 0;
1728 }
1729 if (curr_skb != head_skb) {
1730 head_skb->data_len += len;
1731 head_skb->len += len;
1732 head_skb->truesize += truesize;
1733 }
1734 offset = buf - page_address(page);
1735 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1736 put_page(page);
1737 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1738 len, truesize);
1739 } else {
1740 skb_add_rx_frag(curr_skb, num_skb_frags, page,
1741 offset, len, truesize);
1742 }
1743 }
1744
1745 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
1746 return head_skb;
1747
1748err_skb:
1749 put_page(page);
1750 mergeable_buf_free(rq, num_buf, dev, stats);
1751
1752err_buf:
1753 u64_stats_inc(&stats->drops);
1754 dev_kfree_skb(head_skb);
1755 return NULL;
1756}
1757
1758static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
1759 struct sk_buff *skb)
1760{
1761 enum pkt_hash_types rss_hash_type;
1762
1763 if (!hdr_hash || !skb)
1764 return;
1765
1766 switch (__le16_to_cpu(hdr_hash->hash_report)) {
1767 case VIRTIO_NET_HASH_REPORT_TCPv4:
1768 case VIRTIO_NET_HASH_REPORT_UDPv4:
1769 case VIRTIO_NET_HASH_REPORT_TCPv6:
1770 case VIRTIO_NET_HASH_REPORT_UDPv6:
1771 case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
1772 case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
1773 rss_hash_type = PKT_HASH_TYPE_L4;
1774 break;
1775 case VIRTIO_NET_HASH_REPORT_IPv4:
1776 case VIRTIO_NET_HASH_REPORT_IPv6:
1777 case VIRTIO_NET_HASH_REPORT_IPv6_EX:
1778 rss_hash_type = PKT_HASH_TYPE_L3;
1779 break;
1780 case VIRTIO_NET_HASH_REPORT_NONE:
1781 default:
1782 rss_hash_type = PKT_HASH_TYPE_NONE;
1783 }
1784 skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
1785}
1786
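/* Handle one completed receive buffer: build an skb via the mode-specific
 * helper, fill in hash/checksum/GSO metadata from the virtio header and
 * hand the result to GRO.
 */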
1787static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1788 void *buf, unsigned int len, void **ctx,
1789 unsigned int *xdp_xmit,
1790 struct virtnet_rq_stats *stats)
1791{
1792 struct net_device *dev = vi->dev;
1793 struct sk_buff *skb;
1794 struct virtio_net_common_hdr *hdr;
1795
1796 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1797 pr_debug("%s: short packet %i\n", dev->name, len);
1798 DEV_STATS_INC(dev, rx_length_errors);
1799 virtnet_rq_free_buf(vi, rq, buf);
1800 return;
1801 }
1802
1803 if (vi->mergeable_rx_bufs)
1804 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1805 stats);
1806 else if (vi->big_packets)
1807 skb = receive_big(dev, vi, rq, buf, len, stats);
1808 else
1809 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1810
1811 if (unlikely(!skb))
1812 return;
1813
1814 hdr = skb_vnet_common_hdr(skb);
1815 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1816 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
1817
1818 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
1819 skb->ip_summed = CHECKSUM_UNNECESSARY;
1820
1821 if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1822 virtio_is_little_endian(vi->vdev))) {
1823 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1824 dev->name, hdr->hdr.gso_type,
1825 hdr->hdr.gso_size);
1826 goto frame_err;
1827 }
1828
1829 skb_record_rx_queue(skb, vq2rxq(rq->vq));
1830 skb->protocol = eth_type_trans(skb, dev);
1831 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1832 ntohs(skb->protocol), skb->len, skb->pkt_type);
1833
1834 napi_gro_receive(&rq->napi, skb);
1835 return;
1836
1837frame_err:
1838 DEV_STATS_INC(dev, rx_frame_errors);
1839 dev_kfree_skb(skb);
1840}
1841
1842/* Unlike mergeable buffers, all buffers are allocated to the
1843 * same size, except for the headroom. For this reason we do
1844 * not need to use mergeable_len_to_ctx here - it is enough
1845 * to store the headroom as the context ignoring the truesize.
1846 */
1847static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1848 gfp_t gfp)
1849{
1850 char *buf;
1851 unsigned int xdp_headroom = virtnet_get_headroom(vi);
1852 void *ctx = (void *)(unsigned long)xdp_headroom;
1853 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
1854 int err;
1855
1856 len = SKB_DATA_ALIGN(len) +
1857 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1858
1859 buf = virtnet_rq_alloc(rq, len, gfp);
1860 if (unlikely(!buf))
1861 return -ENOMEM;
1862
1863 virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
1864 vi->hdr_len + GOOD_PACKET_LEN);
1865
1866 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1867 if (err < 0) {
1868 if (rq->do_dma)
1869 virtnet_rq_unmap(rq, buf, 0);
1870 put_page(virt_to_head_page(buf));
1871 }
1872
1873 return err;
1874}
1875
1876static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1877 gfp_t gfp)
1878{
1879 struct page *first, *list = NULL;
1880 char *p;
1881 int i, err, offset;
1882
1883 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
1884
1885 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
1886 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
1887 first = get_a_page(rq, gfp);
1888 if (!first) {
1889 if (list)
1890 give_pages(rq, list);
1891 return -ENOMEM;
1892 }
1893 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
1894
1895 /* chain new page in list head to match sg */
1896 first->private = (unsigned long)list;
1897 list = first;
1898 }
1899
1900 first = get_a_page(rq, gfp);
1901 if (!first) {
1902 give_pages(rq, list);
1903 return -ENOMEM;
1904 }
1905 p = page_address(first);
1906
1907 /* rq->sg[0], rq->sg[1] share the same page */
1908 /* a separate rq->sg[0] for the header - required in case !any_header_sg */
1909 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
1910
1911 /* rq->sg[1] for data packet, from offset */
1912 offset = sizeof(struct padded_vnet_hdr);
1913 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
1914
1915 /* chain first in list head */
1916 first->private = (unsigned long)list;
1917 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
1918 first, gfp);
1919 if (err < 0)
1920 give_pages(rq, first);
1921
1922 return err;
1923}
1924
1925static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
1926 struct ewma_pkt_len *avg_pkt_len,
1927 unsigned int room)
1928{
1929 struct virtnet_info *vi = rq->vq->vdev->priv;
1930 const size_t hdr_len = vi->hdr_len;
1931 unsigned int len;
1932
1933 if (room)
1934 return PAGE_SIZE - room;
1935
1936 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1937 rq->min_buf_len, PAGE_SIZE - hdr_len);
1938
1939 return ALIGN(len, L1_CACHE_BYTES);
1940}
1941
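/* Post a single mergeable receive buffer. Its length is derived from the
 * EWMA of recent packet sizes (or PAGE_SIZE minus the XDP room), and the
 * truesize and headroom are encoded into the ctx for the receive side.
 */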
1942static int add_recvbuf_mergeable(struct virtnet_info *vi,
1943 struct receive_queue *rq, gfp_t gfp)
1944{
1945 struct page_frag *alloc_frag = &rq->alloc_frag;
1946 unsigned int headroom = virtnet_get_headroom(vi);
1947 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1948 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1949 unsigned int len, hole;
1950 void *ctx;
1951 char *buf;
1952 int err;
1953
1954 /* Extra tailroom is needed to satisfy XDP's assumption. This
1955 * means rx frag coalescing won't work, but considering we've
1956 * disabled GSO for XDP, it won't be a big issue.
1957 */
1958 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1959
1960 buf = virtnet_rq_alloc(rq, len + room, gfp);
1961 if (unlikely(!buf))
1962 return -ENOMEM;
1963
1964 buf += headroom; /* advance address leaving hole at front of pkt */
1965 hole = alloc_frag->size - alloc_frag->offset;
1966 if (hole < len + room) {
1967 /* To avoid internal fragmentation, if there is very likely not
1968 * enough space for another buffer, add the remaining space to
1969 * the current buffer.
1970 * XDP core assumes that frame_size of xdp_buff and the length
1971 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
1972 */
1973 if (!headroom)
1974 len += hole;
1975 alloc_frag->offset += hole;
1976 }
1977
1978 virtnet_rq_init_one_sg(rq, buf, len);
1979
1980 ctx = mergeable_len_to_ctx(len + room, headroom);
1981 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1982 if (err < 0) {
1983 if (rq->do_dma)
1984 virtnet_rq_unmap(rq, buf, 0);
1985 put_page(virt_to_head_page(buf));
1986 }
1987
1988 return err;
1989}
1990
1991/*
1992 * Returns false if we couldn't fill entirely (OOM).
1993 *
1994 * Normally run in the receive path, but can also be run from ndo_open
1995 * before we're receiving packets, or from refill_work which is
1996 * careful to disable receiving (using napi_disable).
1997 */
1998static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1999 gfp_t gfp)
2000{
2001 int err;
2002 bool oom;
2003
2004 do {
2005 if (vi->mergeable_rx_bufs)
2006 err = add_recvbuf_mergeable(vi, rq, gfp);
2007 else if (vi->big_packets)
2008 err = add_recvbuf_big(vi, rq, gfp);
2009 else
2010 err = add_recvbuf_small(vi, rq, gfp);
2011
2012 oom = err == -ENOMEM;
2013 if (err)
2014 break;
2015 } while (rq->vq->num_free);
2016 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2017 unsigned long flags;
2018
2019 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2020 u64_stats_inc(&rq->stats.kicks);
2021 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2022 }
2023
2024 return !oom;
2025}
2026
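/* RX virtqueue callback: count the notification for dim and defer the real
 * work to NAPI.
 */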
2027static void skb_recv_done(struct virtqueue *rvq)
2028{
2029 struct virtnet_info *vi = rvq->vdev->priv;
2030 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2031
2032 rq->calls++;
2033 virtqueue_napi_schedule(&rq->napi, rvq);
2034}
2035
2036static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
2037{
2038 napi_enable(napi);
2039
2040	/* If all buffers were filled by the other side before we enabled NAPI,
2041	 * we won't get another interrupt, so process any outstanding packets
2042	 * now. Call local_bh_enable afterwards to trigger softIRQ processing.
2043	 */
2044 local_bh_disable();
2045 virtqueue_napi_schedule(napi, vq);
2046 local_bh_enable();
2047}
2048
2049static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2050 struct virtqueue *vq,
2051 struct napi_struct *napi)
2052{
2053 if (!napi->weight)
2054 return;
2055
2056 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2057 * enable the feature if this is likely affine with the transmit path.
2058 */
2059 if (!vi->affinity_hint_set) {
2060 napi->weight = 0;
2061 return;
2062 }
2063
2064 return virtnet_napi_enable(vq, napi);
2065}
2066
2067static void virtnet_napi_tx_disable(struct napi_struct *napi)
2068{
2069 if (napi->weight)
2070 napi_disable(napi);
2071}
2072
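/* Deferred refill, run from process context so GFP_KERNEL can be used when
 * the atomic refill in the receive path failed. NAPI is disabled around each
 * queue's refill so it cannot race with packet reception.
 */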
2073static void refill_work(struct work_struct *work)
2074{
2075 struct virtnet_info *vi =
2076 container_of(work, struct virtnet_info, refill.work);
2077 bool still_empty;
2078 int i;
2079
2080 for (i = 0; i < vi->curr_queue_pairs; i++) {
2081 struct receive_queue *rq = &vi->rq[i];
2082
2083 napi_disable(&rq->napi);
2084 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2085 virtnet_napi_enable(rq->vq, &rq->napi);
2086
2087		/* In theory, this can happen: if we don't get any buffers in,
2088		 * we will *never* try to fill again.
2089		 */
2090 if (still_empty)
2091 schedule_delayed_work(&vi->refill, HZ/2);
2092 }
2093}
2094
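/* Main NAPI receive loop: consume up to @budget completed buffers from the
 * RX virtqueue, pass them to receive_buf(), top up the ring when enough
 * descriptors are free (falling back to refill_work on OOM), and fold the
 * on-stack counters into the per-queue stats.
 */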
2095static int virtnet_receive(struct receive_queue *rq, int budget,
2096 unsigned int *xdp_xmit)
2097{
2098 struct virtnet_info *vi = rq->vq->vdev->priv;
2099 struct virtnet_rq_stats stats = {};
2100 unsigned int len;
2101 int packets = 0;
2102 void *buf;
2103 int i;
2104
2105 if (!vi->big_packets || vi->mergeable_rx_bufs) {
2106 void *ctx;
2107
2108 while (packets < budget &&
2109 (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2110 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
2111 packets++;
2112 }
2113 } else {
2114 while (packets < budget &&
2115 (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
2116 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
2117 packets++;
2118 }
2119 }
2120
2121 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
2122 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2123 spin_lock(&vi->refill_lock);
2124 if (vi->refill_enabled)
2125 schedule_delayed_work(&vi->refill, 0);
2126 spin_unlock(&vi->refill_lock);
2127 }
2128 }
2129
2130 u64_stats_set(&stats.packets, packets);
2131 u64_stats_update_begin(&rq->stats.syncp);
2132 for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
2133 size_t offset = virtnet_rq_stats_desc[i].offset;
2134 u64_stats_t *item, *src;
2135
2136 item = (u64_stats_t *)((u8 *)&rq->stats + offset);
2137 src = (u64_stats_t *)((u8 *)&stats + offset);
2138 u64_stats_add(item, u64_stats_read(src));
2139 }
2140 u64_stats_update_end(&rq->stats.syncp);
2141
2142 return packets;
2143}
2144
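/* Opportunistically reclaim completed TX buffers of the paired send queue
 * from RX NAPI context. Only done when TX NAPI is in use and the queue is
 * not owned by XDP; the txq lock is only tried, never waited for.
 */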
2145static void virtnet_poll_cleantx(struct receive_queue *rq)
2146{
2147 struct virtnet_info *vi = rq->vq->vdev->priv;
2148 unsigned int index = vq2rxq(rq->vq);
2149 struct send_queue *sq = &vi->sq[index];
2150 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
2151
2152 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2153 return;
2154
2155 if (__netif_tx_trylock(txq)) {
2156 if (sq->reset) {
2157 __netif_tx_unlock(txq);
2158 return;
2159 }
2160
2161 do {
2162 virtqueue_disable_cb(sq->vq);
2163 free_old_xmit_skbs(sq, true);
2164 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2165
2166 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2167 netif_tx_wake_queue(txq);
2168
2169 __netif_tx_unlock(txq);
2170 }
2171}
2172
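/* Feed an interrupt/packet/byte sample for this RX queue to net_dim(), which
 * may queue virtnet_rx_dim_work() to adjust interrupt coalescing.
 */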
2173static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
2174{
2175 struct dim_sample cur_sample = {};
2176
2177 if (!rq->packets_in_napi)
2178 return;
2179
2180 u64_stats_update_begin(&rq->stats.syncp);
2181 dim_update_sample(rq->calls,
2182 u64_stats_read(&rq->stats.packets),
2183 u64_stats_read(&rq->stats.bytes),
2184 &cur_sample);
2185 u64_stats_update_end(&rq->stats.syncp);
2186
2187 net_dim(&rq->dim, cur_sample);
2188 rq->packets_in_napi = 0;
2189}
2190
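/* RX NAPI poll handler: clean the paired TX queue, receive up to @budget
 * packets, flush any XDP redirects, and kick the XDP TX queue if this
 * iteration queued XDP_TX frames. When the poll completes under budget, a
 * dim sample is taken if adaptive coalescing is enabled.
 */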
2191static int virtnet_poll(struct napi_struct *napi, int budget)
2192{
2193 struct receive_queue *rq =
2194 container_of(napi, struct receive_queue, napi);
2195 struct virtnet_info *vi = rq->vq->vdev->priv;
2196 struct send_queue *sq;
2197 unsigned int received;
2198 unsigned int xdp_xmit = 0;
2199 bool napi_complete;
2200
2201 virtnet_poll_cleantx(rq);
2202
2203 received = virtnet_receive(rq, budget, &xdp_xmit);
2204 rq->packets_in_napi += received;
2205
2206 if (xdp_xmit & VIRTIO_XDP_REDIR)
2207 xdp_do_flush();
2208
2209 /* Out of packets? */
2210 if (received < budget) {
2211 napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
2212 if (napi_complete && rq->dim_enabled)
2213 virtnet_rx_dim_update(vi, rq);
2214 }
2215
2216 if (xdp_xmit & VIRTIO_XDP_TX) {
2217 sq = virtnet_xdp_get_sq(vi);
2218 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2219 u64_stats_update_begin(&sq->stats.syncp);
2220 u64_stats_inc(&sq->stats.kicks);
2221 u64_stats_update_end(&sq->stats.syncp);
2222 }
2223 virtnet_xdp_put_sq(vi, sq);
2224 }
2225
2226 return received;
2227}
2228
2229static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
2230{
2231 virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
2232 napi_disable(&vi->rq[qp_index].napi);
2233 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2234}
2235
2236static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
2237{
2238 struct net_device *dev = vi->dev;
2239 int err;
2240
2241 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
2242 vi->rq[qp_index].napi.napi_id);
2243 if (err < 0)
2244 return err;
2245
2246 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
2247 MEM_TYPE_PAGE_SHARED, NULL);
2248 if (err < 0)
2249 goto err_xdp_reg_mem_model;
2250
2251 virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
2252 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
2253
2254 return 0;
2255
2256err_xdp_reg_mem_model:
2257 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2258 return err;
2259}
2260
2261static int virtnet_open(struct net_device *dev)
2262{
2263 struct virtnet_info *vi = netdev_priv(dev);
2264 int i, err;
2265
2266 enable_delayed_refill(vi);
2267
2268 for (i = 0; i < vi->max_queue_pairs; i++) {
2269 if (i < vi->curr_queue_pairs)
2270			/* Make sure we have some buffers: if OOM, use the workqueue. */
2271 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2272 schedule_delayed_work(&vi->refill, 0);
2273
2274 err = virtnet_enable_queue_pair(vi, i);
2275 if (err < 0)
2276 goto err_enable_qp;
2277 }
2278
2279 return 0;
2280
2281err_enable_qp:
2282 disable_delayed_refill(vi);
2283 cancel_delayed_work_sync(&vi->refill);
2284
2285 for (i--; i >= 0; i--) {
2286 virtnet_disable_queue_pair(vi, i);
2287 cancel_work_sync(&vi->rq[i].dim.work);
2288 }
2289
2290 return err;
2291}
2292
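/* TX NAPI poll handler: free already-transmitted skbs under the txq lock,
 * wake the queue if enough descriptors are free, then re-arm the callback,
 * re-scheduling NAPI if new completions raced with napi_complete_done().
 */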
2293static int virtnet_poll_tx(struct napi_struct *napi, int budget)
2294{
2295 struct send_queue *sq = container_of(napi, struct send_queue, napi);
2296 struct virtnet_info *vi = sq->vq->vdev->priv;
2297 unsigned int index = vq2txq(sq->vq);
2298 struct netdev_queue *txq;
2299 int opaque;
2300 bool done;
2301
2302 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2303 /* We don't need to enable cb for XDP */
2304 napi_complete_done(napi, 0);
2305 return 0;
2306 }
2307
2308 txq = netdev_get_tx_queue(vi->dev, index);
2309 __netif_tx_lock(txq, raw_smp_processor_id());
2310 virtqueue_disable_cb(sq->vq);
2311 free_old_xmit_skbs(sq, true);
2312
2313 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2314 netif_tx_wake_queue(txq);
2315
2316 opaque = virtqueue_enable_cb_prepare(sq->vq);
2317
2318 done = napi_complete_done(napi, 0);
2319
2320 if (!done)
2321 virtqueue_disable_cb(sq->vq);
2322
2323 __netif_tx_unlock(txq);
2324
2325 if (done) {
2326 if (unlikely(virtqueue_poll(sq->vq, opaque))) {
2327 if (napi_schedule_prep(napi)) {
2328 __netif_tx_lock(txq, raw_smp_processor_id());
2329 virtqueue_disable_cb(sq->vq);
2330 __netif_tx_unlock(txq);
2331 __napi_schedule(napi);
2332 }
2333 }
2334 }
2335
2336 return 0;
2337}
2338
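/* Build the scatterlist for one skb and add it to the TX virtqueue. With
 * any_header_sg and sufficient headroom the virtio-net header is pushed in
 * front of the linear data, so header and packet share the same sg entries;
 * otherwise the header kept in the skb cb area gets its own entry.
 */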
2339static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2340{
2341 struct virtio_net_hdr_mrg_rxbuf *hdr;
2342 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
2343 struct virtnet_info *vi = sq->vq->vdev->priv;
2344 int num_sg;
2345 unsigned hdr_len = vi->hdr_len;
2346 bool can_push;
2347
2348 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2349
2350 can_push = vi->any_header_sg &&
2351 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
2352 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
2353 /* Even if we can, don't push here yet as this would skew
2354 * csum_start offset below. */
2355 if (can_push)
2356 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
2357 else
2358 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
2359
2360 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
2361 virtio_is_little_endian(vi->vdev), false,
2362 0))
2363 return -EPROTO;
2364
2365 if (vi->mergeable_rx_bufs)
2366 hdr->num_buffers = 0;
2367
2368 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2369 if (can_push) {
2370 __skb_push(skb, hdr_len);
2371 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2372 if (unlikely(num_sg < 0))
2373 return num_sg;
2374 /* Pull header back to avoid skew in tx bytes calculations. */
2375 __skb_pull(skb, hdr_len);
2376 } else {
2377 sg_set_buf(sq->sg, hdr, hdr_len);
2378 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2379 if (unlikely(num_sg < 0))
2380 return num_sg;
2381 num_sg++;
2382 }
2383 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
2384}
2385
2386static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
2387{
2388 struct virtnet_info *vi = netdev_priv(dev);
2389 int qnum = skb_get_queue_mapping(skb);
2390 struct send_queue *sq = &vi->sq[qnum];
2391 int err;
2392 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
2393 bool kick = !netdev_xmit_more();
2394 bool use_napi = sq->napi.weight;
2395
2396 /* Free up any pending old buffers before queueing new ones. */
2397 do {
2398 if (use_napi)
2399 virtqueue_disable_cb(sq->vq);
2400
2401 free_old_xmit_skbs(sq, false);
2402
2403 } while (use_napi && kick &&
2404 unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2405
2406 /* timestamp packet in software */
2407 skb_tx_timestamp(skb);
2408
2409 /* Try to transmit */
2410 err = xmit_skb(sq, skb);
2411
2412 /* This should not happen! */
2413 if (unlikely(err)) {
2414 DEV_STATS_INC(dev, tx_fifo_errors);
2415 if (net_ratelimit())
2416 dev_warn(&dev->dev,
2417 "Unexpected TXQ (%d) queue failure: %d\n",
2418 qnum, err);
2419 DEV_STATS_INC(dev, tx_dropped);
2420 dev_kfree_skb_any(skb);
2421 return NETDEV_TX_OK;
2422 }
2423
2424 /* Don't wait up for transmitted skbs to be freed. */
2425 if (!use_napi) {
2426 skb_orphan(skb);
2427 nf_reset_ct(skb);
2428 }
2429
2430 check_sq_full_and_disable(vi, dev, sq);
2431
2432 if (kick || netif_xmit_stopped(txq)) {
2433 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2434 u64_stats_update_begin(&sq->stats.syncp);
2435 u64_stats_inc(&sq->stats.kicks);
2436 u64_stats_update_end(&sq->stats.syncp);
2437 }
2438 }
2439
2440 return NETDEV_TX_OK;
2441}
2442
2443static int virtnet_rx_resize(struct virtnet_info *vi,
2444 struct receive_queue *rq, u32 ring_num)
2445{
2446 bool running = netif_running(vi->dev);
2447 int err, qindex;
2448
2449 qindex = rq - vi->rq;
2450
2451 if (running) {
2452 napi_disable(&rq->napi);
2453 cancel_work_sync(&rq->dim.work);
2454 }
2455
2456 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
2457 if (err)
2458 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
2459
2460 if (!try_fill_recv(vi, rq, GFP_KERNEL))
2461 schedule_delayed_work(&vi->refill, 0);
2462
2463 if (running)
2464 virtnet_napi_enable(rq->vq, &rq->napi);
2465 return err;
2466}
2467
2468static int virtnet_tx_resize(struct virtnet_info *vi,
2469 struct send_queue *sq, u32 ring_num)
2470{
2471 bool running = netif_running(vi->dev);
2472 struct netdev_queue *txq;
2473 int err, qindex;
2474
2475 qindex = sq - vi->sq;
2476
2477 if (running)
2478 virtnet_napi_tx_disable(&sq->napi);
2479
2480 txq = netdev_get_tx_queue(vi->dev, qindex);
2481
2482	/* 1. wait for all in-flight xmit to complete
2483	 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
2484	 */
2485 __netif_tx_lock_bh(txq);
2486
2487 /* Prevent rx poll from accessing sq. */
2488 sq->reset = true;
2489
2490 /* Prevent the upper layer from trying to send packets. */
2491 netif_stop_subqueue(vi->dev, qindex);
2492
2493 __netif_tx_unlock_bh(txq);
2494
2495 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2496 if (err)
2497 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
2498
2499 __netif_tx_lock_bh(txq);
2500 sq->reset = false;
2501 netif_tx_wake_queue(txq);
2502 __netif_tx_unlock_bh(txq);
2503
2504 if (running)
2505 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2506 return err;
2507}
2508
2509/*
2510 * Send command via the control virtqueue and check status. Commands
2511 * supported by the hypervisor, as indicated by feature bits, should
2512 * never fail unless improperly formatted.
2513 */
2514static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2515 struct scatterlist *out)
2516{
2517 struct scatterlist *sgs[4], hdr, stat;
2518 unsigned out_num = 0, tmp;
2519 int ret;
2520
2521 /* Caller should know better */
2522 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
2523
2524 vi->ctrl->status = ~0;
2525 vi->ctrl->hdr.class = class;
2526 vi->ctrl->hdr.cmd = cmd;
2527 /* Add header */
2528 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2529 sgs[out_num++] = &hdr;
2530
2531 if (out)
2532 sgs[out_num++] = out;
2533
2534 /* Add return status. */
2535 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2536 sgs[out_num] = &stat;
2537
2538 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
2539 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
2540 if (ret < 0) {
2541 dev_warn(&vi->vdev->dev,
2542			 "Failed to add sgs for command vq: %d.\n", ret);
2543 return false;
2544 }
2545
2546 if (unlikely(!virtqueue_kick(vi->cvq)))
2547 return vi->ctrl->status == VIRTIO_NET_OK;
2548
2549 /* Spin for a response, the kick causes an ioport write, trapping
2550 * into the hypervisor, so the request should be handled immediately.
2551 */
2552 while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2553 !virtqueue_is_broken(vi->cvq))
2554 cpu_relax();
2555
2556 return vi->ctrl->status == VIRTIO_NET_OK;
2557}
2558
2559static int virtnet_set_mac_address(struct net_device *dev, void *p)
2560{
2561 struct virtnet_info *vi = netdev_priv(dev);
2562 struct virtio_device *vdev = vi->vdev;
2563 int ret;
2564 struct sockaddr *addr;
2565 struct scatterlist sg;
2566
2567 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2568 return -EOPNOTSUPP;
2569
2570 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
2571 if (!addr)
2572 return -ENOMEM;
2573
2574 ret = eth_prepare_mac_addr_change(dev, addr);
2575 if (ret)
2576 goto out;
2577
2578 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
2579 sg_init_one(&sg, addr->sa_data, dev->addr_len);
2580 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2581 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
2582 dev_warn(&vdev->dev,
2583 "Failed to set mac address by vq command.\n");
2584 ret = -EINVAL;
2585 goto out;
2586 }
2587 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
2588 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2589 unsigned int i;
2590
2591 /* Naturally, this has an atomicity problem. */
2592 for (i = 0; i < dev->addr_len; i++)
2593 virtio_cwrite8(vdev,
2594 offsetof(struct virtio_net_config, mac) +
2595 i, addr->sa_data[i]);
2596 }
2597
2598 eth_commit_mac_addr_change(dev, p);
2599 ret = 0;
2600
2601out:
2602 kfree(addr);
2603 return ret;
2604}
2605
2606static void virtnet_stats(struct net_device *dev,
2607 struct rtnl_link_stats64 *tot)
2608{
2609 struct virtnet_info *vi = netdev_priv(dev);
2610 unsigned int start;
2611 int i;
2612
2613 for (i = 0; i < vi->max_queue_pairs; i++) {
2614 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
2615 struct receive_queue *rq = &vi->rq[i];
2616 struct send_queue *sq = &vi->sq[i];
2617
2618 do {
2619 start = u64_stats_fetch_begin(&sq->stats.syncp);
2620 tpackets = u64_stats_read(&sq->stats.packets);
2621 tbytes = u64_stats_read(&sq->stats.bytes);
2622 terrors = u64_stats_read(&sq->stats.tx_timeouts);
2623 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
2624
2625 do {
2626 start = u64_stats_fetch_begin(&rq->stats.syncp);
2627 rpackets = u64_stats_read(&rq->stats.packets);
2628 rbytes = u64_stats_read(&rq->stats.bytes);
2629 rdrops = u64_stats_read(&rq->stats.drops);
2630 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
2631
2632 tot->rx_packets += rpackets;
2633 tot->tx_packets += tpackets;
2634 tot->rx_bytes += rbytes;
2635 tot->tx_bytes += tbytes;
2636 tot->rx_dropped += rdrops;
2637 tot->tx_errors += terrors;
2638 }
2639
2640 tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
2641 tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
2642 tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
2643 tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
2644}
2645
2646static void virtnet_ack_link_announce(struct virtnet_info *vi)
2647{
2648 rtnl_lock();
2649 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2650 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
2651 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2652 rtnl_unlock();
2653}
2654
2655static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2656{
2657 struct scatterlist sg;
2658 struct net_device *dev = vi->dev;
2659
2660 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2661 return 0;
2662
2663 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
2664 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
2665
2666 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2667 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2668		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
2669 queue_pairs);
2670 return -EINVAL;
2671 } else {
2672 vi->curr_queue_pairs = queue_pairs;
2673		/* virtnet_open() will refill when the device goes up. */
2674 if (dev->flags & IFF_UP)
2675 schedule_delayed_work(&vi->refill, 0);
2676 }
2677
2678 return 0;
2679}
2680
2681static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2682{
2683 int err;
2684
2685 rtnl_lock();
2686 err = _virtnet_set_queues(vi, queue_pairs);
2687 rtnl_unlock();
2688 return err;
2689}
2690
2691static int virtnet_close(struct net_device *dev)
2692{
2693 struct virtnet_info *vi = netdev_priv(dev);
2694 int i;
2695
2696 /* Make sure NAPI doesn't schedule refill work */
2697 disable_delayed_refill(vi);
2698 /* Make sure refill_work doesn't re-enable napi! */
2699 cancel_delayed_work_sync(&vi->refill);
2700
2701 for (i = 0; i < vi->max_queue_pairs; i++) {
2702 virtnet_disable_queue_pair(vi, i);
2703 cancel_work_sync(&vi->rq[i].dim.work);
2704 }
2705
2706 return 0;
2707}
2708
2709static void virtnet_set_rx_mode(struct net_device *dev)
2710{
2711 struct virtnet_info *vi = netdev_priv(dev);
2712 struct scatterlist sg[2];
2713 struct virtio_net_ctrl_mac *mac_data;
2714 struct netdev_hw_addr *ha;
2715 int uc_count;
2716 int mc_count;
2717 void *buf;
2718 int i;
2719
2720 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
2721 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
2722 return;
2723
2724 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
2725 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2726
2727 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
2728
2729 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2730 VIRTIO_NET_CTRL_RX_PROMISC, sg))
2731 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
2732 vi->ctrl->promisc ? "en" : "dis");
2733
2734 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
2735
2736 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2737 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2738 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
2739 vi->ctrl->allmulti ? "en" : "dis");
2740
2741 uc_count = netdev_uc_count(dev);
2742 mc_count = netdev_mc_count(dev);
2743 /* MAC filter - use one buffer for both lists */
2744 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2745 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
2746 mac_data = buf;
2747 if (!buf)
2748 return;
2749
2750 sg_init_table(sg, 2);
2751
2752 /* Store the unicast list and count in the front of the buffer */
2753 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2754 i = 0;
2755 netdev_for_each_uc_addr(ha, dev)
2756 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2757
2758 sg_set_buf(&sg[0], mac_data,
2759 sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2760
2761 /* multicast list and count fill the end */
2762 mac_data = (void *)&mac_data->macs[uc_count][0];
2763
2764 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2765 i = 0;
2766 netdev_for_each_mc_addr(ha, dev)
2767 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2768
2769 sg_set_buf(&sg[1], mac_data,
2770 sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2771
2772 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2773 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
2774 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2775
2776 kfree(buf);
2777}
2778
2779static int virtnet_vlan_rx_add_vid(struct net_device *dev,
2780 __be16 proto, u16 vid)
2781{
2782 struct virtnet_info *vi = netdev_priv(dev);
2783 struct scatterlist sg;
2784
2785 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2786 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2787
2788 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2789 VIRTIO_NET_CTRL_VLAN_ADD, &sg))
2790 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
2791 return 0;
2792}
2793
2794static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
2795 __be16 proto, u16 vid)
2796{
2797 struct virtnet_info *vi = netdev_priv(dev);
2798 struct scatterlist sg;
2799
2800 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2801 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2802
2803 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2804 VIRTIO_NET_CTRL_VLAN_DEL, &sg))
2805 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
2806 return 0;
2807}
2808
2809static void virtnet_clean_affinity(struct virtnet_info *vi)
2810{
2811 int i;
2812
2813 if (vi->affinity_hint_set) {
2814 for (i = 0; i < vi->max_queue_pairs; i++) {
2815 virtqueue_set_affinity(vi->rq[i].vq, NULL);
2816 virtqueue_set_affinity(vi->sq[i].vq, NULL);
2817 }
2818
2819 vi->affinity_hint_set = false;
2820 }
2821}
2822
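/* Spread queue pairs across online CPUs: each pair gets a contiguous group
 * of roughly num_cpu / curr_queue_pairs CPUs (earlier pairs absorb the
 * stragglers), and the same mask is used for the virtqueue affinity hint and
 * the XPS map.
 */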
2823static void virtnet_set_affinity(struct virtnet_info *vi)
2824{
2825 cpumask_var_t mask;
2826 int stragglers;
2827 int group_size;
2828 int i, j, cpu;
2829 int num_cpu;
2830 int stride;
2831
2832 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2833 virtnet_clean_affinity(vi);
2834 return;
2835 }
2836
2837 num_cpu = num_online_cpus();
2838 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2839 stragglers = num_cpu >= vi->curr_queue_pairs ?
2840 num_cpu % vi->curr_queue_pairs :
2841 0;
2842 cpu = cpumask_first(cpu_online_mask);
2843
2844 for (i = 0; i < vi->curr_queue_pairs; i++) {
2845 group_size = stride + (i < stragglers ? 1 : 0);
2846
2847 for (j = 0; j < group_size; j++) {
2848 cpumask_set_cpu(cpu, mask);
2849 cpu = cpumask_next_wrap(cpu, cpu_online_mask,
2850 nr_cpu_ids, false);
2851 }
2852 virtqueue_set_affinity(vi->rq[i].vq, mask);
2853 virtqueue_set_affinity(vi->sq[i].vq, mask);
2854 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2855 cpumask_clear(mask);
2856 }
2857
2858 vi->affinity_hint_set = true;
2859 free_cpumask_var(mask);
2860}
2861
2862static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
2863{
2864 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2865 node);
2866 virtnet_set_affinity(vi);
2867 return 0;
2868}
2869
2870static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
2871{
2872 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2873 node_dead);
2874 virtnet_set_affinity(vi);
2875 return 0;
2876}
2877
2878static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
2879{
2880 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2881 node);
2882
2883 virtnet_clean_affinity(vi);
2884 return 0;
2885}
2886
2887static enum cpuhp_state virtionet_online;
2888
2889static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2890{
2891 int ret;
2892
2893 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2894 if (ret)
2895 return ret;
2896 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2897 &vi->node_dead);
2898 if (!ret)
2899 return ret;
2900 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2901 return ret;
2902}
2903
2904static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2905{
2906 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2907 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2908 &vi->node_dead);
2909}
2910
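/* Program per-virtqueue interrupt coalescing (VIRTIO_NET_F_VQ_NOTF_COAL)
 * for the given vq number through the control virtqueue.
 */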
2911static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2912 u16 vqn, u32 max_usecs, u32 max_packets)
2913{
2914 struct scatterlist sgs;
2915
2916 vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
2917 vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
2918 vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
2919 sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
2920
2921 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
2922 VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
2923 &sgs))
2924 return -EINVAL;
2925
2926 return 0;
2927}
2928
2929static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2930 u16 queue, u32 max_usecs,
2931 u32 max_packets)
2932{
2933 int err;
2934
2935 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
2936 max_usecs, max_packets);
2937 if (err)
2938 return err;
2939
2940 vi->rq[queue].intr_coal.max_usecs = max_usecs;
2941 vi->rq[queue].intr_coal.max_packets = max_packets;
2942
2943 return 0;
2944}
2945
2946static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2947 u16 queue, u32 max_usecs,
2948 u32 max_packets)
2949{
2950 int err;
2951
2952 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
2953 max_usecs, max_packets);
2954 if (err)
2955 return err;
2956
2957 vi->sq[queue].intr_coal.max_usecs = max_usecs;
2958 vi->sq[queue].intr_coal.max_packets = max_packets;
2959
2960 return 0;
2961}
2962
2963static void virtnet_get_ringparam(struct net_device *dev,
2964 struct ethtool_ringparam *ring,
2965 struct kernel_ethtool_ringparam *kernel_ring,
2966 struct netlink_ext_ack *extack)
2967{
2968 struct virtnet_info *vi = netdev_priv(dev);
2969
2970 ring->rx_max_pending = vi->rq[0].vq->num_max;
2971 ring->tx_max_pending = vi->sq[0].vq->num_max;
2972 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2973 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2974}
2975
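/* Resize the RX/TX rings of every queue pair; typically reached via
 * "ethtool -G <dev> rx <n> tx <n>". After each queue is resized, its
 * interrupt coalescing parameters are re-programmed (see the comments in
 * the loop below).
 */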
2976static int virtnet_set_ringparam(struct net_device *dev,
2977 struct ethtool_ringparam *ring,
2978 struct kernel_ethtool_ringparam *kernel_ring,
2979 struct netlink_ext_ack *extack)
2980{
2981 struct virtnet_info *vi = netdev_priv(dev);
2982 u32 rx_pending, tx_pending;
2983 struct receive_queue *rq;
2984 struct send_queue *sq;
2985 int i, err;
2986
2987 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2988 return -EINVAL;
2989
2990 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2991 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2992
2993 if (ring->rx_pending == rx_pending &&
2994 ring->tx_pending == tx_pending)
2995 return 0;
2996
2997 if (ring->rx_pending > vi->rq[0].vq->num_max)
2998 return -EINVAL;
2999
3000 if (ring->tx_pending > vi->sq[0].vq->num_max)
3001 return -EINVAL;
3002
3003 for (i = 0; i < vi->max_queue_pairs; i++) {
3004 rq = vi->rq + i;
3005 sq = vi->sq + i;
3006
3007 if (ring->tx_pending != tx_pending) {
3008 err = virtnet_tx_resize(vi, sq, ring->tx_pending);
3009 if (err)
3010 return err;
3011
3012 /* Upon disabling and re-enabling a transmit virtqueue, the device must
3013 * set the coalescing parameters of the virtqueue to those configured
3014 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
3015 * did not set any TX coalescing parameters, to 0.
3016 */
3017 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
3018 vi->intr_coal_tx.max_usecs,
3019 vi->intr_coal_tx.max_packets);
3020 if (err)
3021 return err;
3022 }
3023
3024 if (ring->rx_pending != rx_pending) {
3025 err = virtnet_rx_resize(vi, rq, ring->rx_pending);
3026 if (err)
3027 return err;
3028
3029			/* The reason is the same as for the transmit virtqueue reset */
3030 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
3031 vi->intr_coal_rx.max_usecs,
3032 vi->intr_coal_rx.max_packets);
3033 if (err)
3034 return err;
3035 }
3036 }
3037
3038 return 0;
3039}
3040
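/* Push the current RSS/hash configuration to the device. The ctrl rss
 * structure is sent as four sg entries so the variable-length indirection
 * table and hash key are included with their configured sizes; the command
 * is RSS_CONFIG or HASH_CONFIG depending on whether the device has RSS.
 */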
3041static bool virtnet_commit_rss_command(struct virtnet_info *vi)
3042{
3043 struct net_device *dev = vi->dev;
3044 struct scatterlist sgs[4];
3045 unsigned int sg_buf_size;
3046
3047 /* prepare sgs */
3048 sg_init_table(sgs, 4);
3049
3050 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
3051 sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
3052
3053 sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
3054 sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
3055
3056 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
3057 - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
3058 sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
3059
3060 sg_buf_size = vi->rss_key_size;
3061 sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
3062
3063 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
3064 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
3065 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
3066 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
3067 return false;
3068 }
3069 return true;
3070}
3071
3072static void virtnet_init_default_rss(struct virtnet_info *vi)
3073{
3074 u32 indir_val = 0;
3075 int i = 0;
3076
3077 vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
3078 vi->rss_hash_types_saved = vi->rss_hash_types_supported;
3079 vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
3080 ? vi->rss_indir_table_size - 1 : 0;
3081 vi->ctrl->rss.unclassified_queue = 0;
3082
3083 for (; i < vi->rss_indir_table_size; ++i) {
3084 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
3085 vi->ctrl->rss.indirection_table[i] = indir_val;
3086 }
3087
3088 vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
3089 vi->ctrl->rss.hash_key_length = vi->rss_key_size;
3090
3091 netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
3092}
3093
3094static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
3095{
3096 info->data = 0;
3097 switch (info->flow_type) {
3098 case TCP_V4_FLOW:
3099 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
3100 info->data = RXH_IP_SRC | RXH_IP_DST |
3101 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3102 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3103 info->data = RXH_IP_SRC | RXH_IP_DST;
3104 }
3105 break;
3106 case TCP_V6_FLOW:
3107 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
3108 info->data = RXH_IP_SRC | RXH_IP_DST |
3109 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3110 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3111 info->data = RXH_IP_SRC | RXH_IP_DST;
3112 }
3113 break;
3114 case UDP_V4_FLOW:
3115 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
3116 info->data = RXH_IP_SRC | RXH_IP_DST |
3117 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3118 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3119 info->data = RXH_IP_SRC | RXH_IP_DST;
3120 }
3121 break;
3122 case UDP_V6_FLOW:
3123 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
3124 info->data = RXH_IP_SRC | RXH_IP_DST |
3125 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3126 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3127 info->data = RXH_IP_SRC | RXH_IP_DST;
3128 }
3129 break;
3130 case IPV4_FLOW:
3131 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
3132 info->data = RXH_IP_SRC | RXH_IP_DST;
3133
3134 break;
3135 case IPV6_FLOW:
3136 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
3137 info->data = RXH_IP_SRC | RXH_IP_DST;
3138
3139 break;
3140 default:
3141 info->data = 0;
3142 break;
3143 }
3144}
3145
3146static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
3147{
3148 u32 new_hashtypes = vi->rss_hash_types_saved;
3149 bool is_disable = info->data & RXH_DISCARD;
3150 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
3151
3152 /* supports only 'sd', 'sdfn' and 'r' */
3153 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
3154 return false;
3155
3156 switch (info->flow_type) {
3157 case TCP_V4_FLOW:
3158 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
3159 if (!is_disable)
3160 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3161 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
3162 break;
3163 case UDP_V4_FLOW:
3164 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
3165 if (!is_disable)
3166 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3167 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
3168 break;
3169 case IPV4_FLOW:
3170 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3171 if (!is_disable)
3172 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3173 break;
3174 case TCP_V6_FLOW:
3175 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
3176 if (!is_disable)
3177 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3178 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
3179 break;
3180 case UDP_V6_FLOW:
3181 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
3182 if (!is_disable)
3183 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3184 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
3185 break;
3186 case IPV6_FLOW:
3187 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3188 if (!is_disable)
3189 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3190 break;
3191 default:
3192 /* unsupported flow */
3193 return false;
3194 }
3195
3196 /* if unsupported hashtype was set */
3197 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
3198 return false;
3199
3200 if (new_hashtypes != vi->rss_hash_types_saved) {
3201 vi->rss_hash_types_saved = new_hashtypes;
3202 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3203 if (vi->dev->features & NETIF_F_RXHASH)
3204 return virtnet_commit_rss_command(vi);
3205 }
3206
3207 return true;
3208}
3209
3210static void virtnet_get_drvinfo(struct net_device *dev,
3211 struct ethtool_drvinfo *info)
3212{
3213 struct virtnet_info *vi = netdev_priv(dev);
3214 struct virtio_device *vdev = vi->vdev;
3215
3216 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
3217 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
3218 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
3219
3220}
3221
3222/* TODO: Eliminate OOO packets during switching */
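/* Change the number of active queue pairs (combined channels only),
 * typically reached via "ethtool -L <dev> combined <n>". Refused while an
 * XDP program is attached.
 */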
3223static int virtnet_set_channels(struct net_device *dev,
3224 struct ethtool_channels *channels)
3225{
3226 struct virtnet_info *vi = netdev_priv(dev);
3227 u16 queue_pairs = channels->combined_count;
3228 int err;
3229
3230 /* We don't support separate rx/tx channels.
3231 * We don't allow setting 'other' channels.
3232 */
3233 if (channels->rx_count || channels->tx_count || channels->other_count)
3234 return -EINVAL;
3235
3236 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
3237 return -EINVAL;
3238
3239	/* For now we don't support modifying channels while XDP is loaded.
3240	 * Also, when XDP is loaded all RX queues have XDP programs, so we only
3241	 * need to check a single RX queue.
3242	 */
3243 if (vi->rq[0].xdp_prog)
3244 return -EINVAL;
3245
3246 cpus_read_lock();
3247 err = _virtnet_set_queues(vi, queue_pairs);
3248 if (err) {
3249 cpus_read_unlock();
3250 goto err;
3251 }
3252 virtnet_set_affinity(vi);
3253 cpus_read_unlock();
3254
3255 netif_set_real_num_tx_queues(dev, queue_pairs);
3256 netif_set_real_num_rx_queues(dev, queue_pairs);
3257 err:
3258 return err;
3259}
3260
3261static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3262{
3263 struct virtnet_info *vi = netdev_priv(dev);
3264 unsigned int i, j;
3265 u8 *p = data;
3266
3267 switch (stringset) {
3268 case ETH_SS_STATS:
3269 for (i = 0; i < vi->curr_queue_pairs; i++) {
3270 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
3271 ethtool_sprintf(&p, "rx_queue_%u_%s", i,
3272 virtnet_rq_stats_desc[j].desc);
3273 }
3274
3275 for (i = 0; i < vi->curr_queue_pairs; i++) {
3276 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
3277 ethtool_sprintf(&p, "tx_queue_%u_%s", i,
3278 virtnet_sq_stats_desc[j].desc);
3279 }
3280 break;
3281 }
3282}
3283
3284static int virtnet_get_sset_count(struct net_device *dev, int sset)
3285{
3286 struct virtnet_info *vi = netdev_priv(dev);
3287
3288 switch (sset) {
3289 case ETH_SS_STATS:
3290 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
3291 VIRTNET_SQ_STATS_LEN);
3292 default:
3293 return -EOPNOTSUPP;
3294 }
3295}
3296
3297static void virtnet_get_ethtool_stats(struct net_device *dev,
3298 struct ethtool_stats *stats, u64 *data)
3299{
3300 struct virtnet_info *vi = netdev_priv(dev);
3301 unsigned int idx = 0, start, i, j;
3302 const u8 *stats_base;
3303 const u64_stats_t *p;
3304 size_t offset;
3305
3306 for (i = 0; i < vi->curr_queue_pairs; i++) {
3307 struct receive_queue *rq = &vi->rq[i];
3308
3309 stats_base = (const u8 *)&rq->stats;
3310 do {
3311 start = u64_stats_fetch_begin(&rq->stats.syncp);
3312 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
3313 offset = virtnet_rq_stats_desc[j].offset;
3314 p = (const u64_stats_t *)(stats_base + offset);
3315 data[idx + j] = u64_stats_read(p);
3316 }
3317 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3318 idx += VIRTNET_RQ_STATS_LEN;
3319 }
3320
3321 for (i = 0; i < vi->curr_queue_pairs; i++) {
3322 struct send_queue *sq = &vi->sq[i];
3323
3324 stats_base = (const u8 *)&sq->stats;
3325 do {
3326 start = u64_stats_fetch_begin(&sq->stats.syncp);
3327 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
3328 offset = virtnet_sq_stats_desc[j].offset;
3329 p = (const u64_stats_t *)(stats_base + offset);
3330 data[idx + j] = u64_stats_read(p);
3331 }
3332 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3333 idx += VIRTNET_SQ_STATS_LEN;
3334 }
3335}
3336
3337static void virtnet_get_channels(struct net_device *dev,
3338 struct ethtool_channels *channels)
3339{
3340 struct virtnet_info *vi = netdev_priv(dev);
3341
3342 channels->combined_count = vi->curr_queue_pairs;
3343 channels->max_combined = vi->max_queue_pairs;
3344 channels->max_other = 0;
3345 channels->rx_count = 0;
3346 channels->tx_count = 0;
3347 channels->other_count = 0;
3348}
3349
3350static int virtnet_set_link_ksettings(struct net_device *dev,
3351 const struct ethtool_link_ksettings *cmd)
3352{
3353 struct virtnet_info *vi = netdev_priv(dev);
3354
3355 return ethtool_virtdev_set_link_ksettings(dev, cmd,
3356 &vi->speed, &vi->duplex);
3357}
3358
3359static int virtnet_get_link_ksettings(struct net_device *dev,
3360 struct ethtool_link_ksettings *cmd)
3361{
3362 struct virtnet_info *vi = netdev_priv(dev);
3363
3364 cmd->base.speed = vi->speed;
3365 cmd->base.duplex = vi->duplex;
3366 cmd->base.port = PORT_OTHER;
3367
3368 return 0;
3369}
3370
3371static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
3372 struct ethtool_coalesce *ec)
3373{
3374 struct scatterlist sgs_tx;
3375 int i;
3376
3377 vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
3378 vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
3379 sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
3380
3381 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3382 VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
3383 &sgs_tx))
3384 return -EINVAL;
3385
3386 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
3387 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
3388 for (i = 0; i < vi->max_queue_pairs; i++) {
3389 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3390 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3391 }
3392
3393 return 0;
3394}
3395
3396static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
3397 struct ethtool_coalesce *ec)
3398{
3399 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
3400 struct scatterlist sgs_rx;
3401 int i;
3402
3403 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3404 return -EOPNOTSUPP;
3405
3406 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
3407 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
3408 return -EINVAL;
3409
3410 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
3411 vi->rx_dim_enabled = true;
3412 for (i = 0; i < vi->max_queue_pairs; i++)
3413 vi->rq[i].dim_enabled = true;
3414 return 0;
3415 }
3416
3417 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
3418 vi->rx_dim_enabled = false;
3419 for (i = 0; i < vi->max_queue_pairs; i++)
3420 vi->rq[i].dim_enabled = false;
3421 }
3422
3423	/* Since the per-queue coalescing params can be set independently,
3424	 * we need to apply the new global params even if they
3425	 * are unchanged.
3426	 */
3427 vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
3428 vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
3429 sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
3430
3431 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3432 VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
3433 &sgs_rx))
3434 return -EINVAL;
3435
3436 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
3437 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
3438 for (i = 0; i < vi->max_queue_pairs; i++) {
3439 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3440 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3441 }
3442
3443 return 0;
3444}
3445
3446static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
3447 struct ethtool_coalesce *ec)
3448{
3449 int err;
3450
3451 err = virtnet_send_tx_notf_coal_cmds(vi, ec);
3452 if (err)
3453 return err;
3454
3455 err = virtnet_send_rx_notf_coal_cmds(vi, ec);
3456 if (err)
3457 return err;
3458
3459 return 0;
3460}
3461
3462static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
3463 struct ethtool_coalesce *ec,
3464 u16 queue)
3465{
3466 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
3467 bool cur_rx_dim = vi->rq[queue].dim_enabled;
3468 u32 max_usecs, max_packets;
3469 int err;
3470
3471 max_usecs = vi->rq[queue].intr_coal.max_usecs;
3472 max_packets = vi->rq[queue].intr_coal.max_packets;
3473
3474 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
3475 ec->rx_max_coalesced_frames != max_packets))
3476 return -EINVAL;
3477
3478 if (rx_ctrl_dim_on && !cur_rx_dim) {
3479 vi->rq[queue].dim_enabled = true;
3480 return 0;
3481 }
3482
3483 if (!rx_ctrl_dim_on && cur_rx_dim)
3484 vi->rq[queue].dim_enabled = false;
3485
3486 /* If no params are updated, userspace ethtool will
3487 * reject the modification.
3488 */
3489 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
3490 ec->rx_coalesce_usecs,
3491 ec->rx_max_coalesced_frames);
3492 if (err)
3493 return err;
3494
3495 return 0;
3496}
3497
3498static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
3499 struct ethtool_coalesce *ec,
3500 u16 queue)
3501{
3502 int err;
3503
3504 err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
3505 if (err)
3506 return err;
3507
3508 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
3509 ec->tx_coalesce_usecs,
3510 ec->tx_max_coalesced_frames);
3511 if (err)
3512 return err;
3513
3514 return 0;
3515}
3516
3517static void virtnet_rx_dim_work(struct work_struct *work)
3518{
3519 struct dim *dim = container_of(work, struct dim, work);
3520 struct receive_queue *rq = container_of(dim,
3521 struct receive_queue, dim);
3522 struct virtnet_info *vi = rq->vq->vdev->priv;
3523 struct net_device *dev = vi->dev;
3524 struct dim_cq_moder update_moder;
3525 int i, qnum, err;
3526
3527 if (!rtnl_trylock())
3528 return;
3529
3530 /* Each rxq's work is queued by "net_dim()->schedule_work()"
3531 * in response to NAPI traffic changes. Note that dim->profile_ix
3532 * for each rxq is updated prior to the queuing action.
3533 * So we only need to traverse and update profiles for all rxqs
3534 * in the work which is holding rtnl_lock.
3535 */
3536 for (i = 0; i < vi->curr_queue_pairs; i++) {
3537 rq = &vi->rq[i];
3538 dim = &rq->dim;
3539 qnum = rq - vi->rq;
3540
3541 if (!rq->dim_enabled)
3542 continue;
3543
3544 update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
3545 if (update_moder.usec != rq->intr_coal.max_usecs ||
3546 update_moder.pkts != rq->intr_coal.max_packets) {
3547 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
3548 update_moder.usec,
3549 update_moder.pkts);
3550 if (err)
3551 pr_debug("%s: Failed to send dim parameters on rxq%d\n",
3552 dev->name, qnum);
3553 dim->state = DIM_START_MEASURE;
3554 }
3555 }
3556
3557 rtnl_unlock();
3558}
3559
3560static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
3561{
3562 /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
3563 * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
3564 */
3565 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
3566 return -EOPNOTSUPP;
3567
3568 if (ec->tx_max_coalesced_frames > 1 ||
3569 ec->rx_max_coalesced_frames != 1)
3570 return -EINVAL;
3571
3572 return 0;
3573}
3574
3575static int virtnet_should_update_vq_weight(int dev_flags, int weight,
3576 int vq_weight, bool *should_update)
3577{
3578 if (weight ^ vq_weight) {
3579 if (dev_flags & IFF_UP)
3580 return -EBUSY;
3581 *should_update = true;
3582 }
3583
3584 return 0;
3585}
3586
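/* Set device-wide interrupt coalescing, typically reached via
 * "ethtool -C <dev> rx-usecs/tx-usecs/rx-frames/tx-frames/adaptive-rx".
 * A zero/non-zero tx-frames value also selects whether TX NAPI is used,
 * which can only be changed while the interface is down.
 */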
3587static int virtnet_set_coalesce(struct net_device *dev,
3588 struct ethtool_coalesce *ec,
3589 struct kernel_ethtool_coalesce *kernel_coal,
3590 struct netlink_ext_ack *extack)
3591{
3592 struct virtnet_info *vi = netdev_priv(dev);
3593 int ret, queue_number, napi_weight;
3594 bool update_napi = false;
3595
3596 /* Can't change NAPI weight if the link is up */
3597 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3598 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
3599 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3600 vi->sq[queue_number].napi.weight,
3601 &update_napi);
3602 if (ret)
3603 return ret;
3604
3605 if (update_napi) {
3606			/* All queues in [queue_number, vi->max_queue_pairs) will be
3607			 * updated for the sake of simplicity, which might not be necessary.
3608			 */
3609 break;
3610 }
3611 }
3612
3613 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
3614 ret = virtnet_send_notf_coal_cmds(vi, ec);
3615 else
3616 ret = virtnet_coal_params_supported(ec);
3617
3618 if (ret)
3619 return ret;
3620
3621 if (update_napi) {
3622 for (; queue_number < vi->max_queue_pairs; queue_number++)
3623 vi->sq[queue_number].napi.weight = napi_weight;
3624 }
3625
3626 return ret;
3627}
3628
3629static int virtnet_get_coalesce(struct net_device *dev,
3630 struct ethtool_coalesce *ec,
3631 struct kernel_ethtool_coalesce *kernel_coal,
3632 struct netlink_ext_ack *extack)
3633{
3634 struct virtnet_info *vi = netdev_priv(dev);
3635
3636 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
3637 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
3638 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
3639 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
3640 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
3641 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
3642 } else {
3643 ec->rx_max_coalesced_frames = 1;
3644
3645 if (vi->sq[0].napi.weight)
3646 ec->tx_max_coalesced_frames = 1;
3647 }
3648
3649 return 0;
3650}
3651
3652static int virtnet_set_per_queue_coalesce(struct net_device *dev,
3653 u32 queue,
3654 struct ethtool_coalesce *ec)
3655{
3656 struct virtnet_info *vi = netdev_priv(dev);
3657 int ret, napi_weight;
3658 bool update_napi = false;
3659
3660 if (queue >= vi->max_queue_pairs)
3661 return -EINVAL;
3662
3663 /* Can't change NAPI weight if the link is up */
3664 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3665 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3666 vi->sq[queue].napi.weight,
3667 &update_napi);
3668 if (ret)
3669 return ret;
3670
3671 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3672 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
3673 else
3674 ret = virtnet_coal_params_supported(ec);
3675
3676 if (ret)
3677 return ret;
3678
3679 if (update_napi)
3680 vi->sq[queue].napi.weight = napi_weight;
3681
3682 return 0;
3683}
3684
3685static int virtnet_get_per_queue_coalesce(struct net_device *dev,
3686 u32 queue,
3687 struct ethtool_coalesce *ec)
3688{
3689 struct virtnet_info *vi = netdev_priv(dev);
3690
3691 if (queue >= vi->max_queue_pairs)
3692 return -EINVAL;
3693
3694 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
3695 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
3696 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
3697 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
3698 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
3699 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
3700 } else {
3701 ec->rx_max_coalesced_frames = 1;
3702
3703 if (vi->sq[queue].napi.weight)
3704 ec->tx_max_coalesced_frames = 1;
3705 }
3706
3707 return 0;
3708}
3709
3710static void virtnet_init_settings(struct net_device *dev)
3711{
3712 struct virtnet_info *vi = netdev_priv(dev);
3713
3714 vi->speed = SPEED_UNKNOWN;
3715 vi->duplex = DUPLEX_UNKNOWN;
3716}
3717
3718static void virtnet_update_settings(struct virtnet_info *vi)
3719{
3720 u32 speed;
3721 u8 duplex;
3722
3723 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3724 return;
3725
3726 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3727
3728 if (ethtool_validate_speed(speed))
3729 vi->speed = speed;
3730
3731 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3732
3733 if (ethtool_validate_duplex(duplex))
3734 vi->duplex = duplex;
3735}
3736
3737static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
3738{
3739 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
3740}
3741
3742static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
3743{
3744 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
3745}
3746
3747static int virtnet_get_rxfh(struct net_device *dev,
3748 struct ethtool_rxfh_param *rxfh)
3749{
3750 struct virtnet_info *vi = netdev_priv(dev);
3751 int i;
3752
3753 if (rxfh->indir) {
3754 for (i = 0; i < vi->rss_indir_table_size; ++i)
3755 rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
3756 }
3757
3758 if (rxfh->key)
3759 memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
3760
3761 rxfh->hfunc = ETH_RSS_HASH_TOP;
3762
3763 return 0;
3764}
3765
3766static int virtnet_set_rxfh(struct net_device *dev,
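/* Update the RSS indirection table and/or hash key (Toeplitz only),
 * typically reached via "ethtool -X", and commit the result to the device.
 */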
3767 struct ethtool_rxfh_param *rxfh,
3768 struct netlink_ext_ack *extack)
3769{
3770 struct virtnet_info *vi = netdev_priv(dev);
3771 int i;
3772
3773 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
3774 rxfh->hfunc != ETH_RSS_HASH_TOP)
3775 return -EOPNOTSUPP;
3776
3777 if (rxfh->indir) {
3778 for (i = 0; i < vi->rss_indir_table_size; ++i)
3779 vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
3780 }
3781 if (rxfh->key)
3782 memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
3783
3784 virtnet_commit_rss_command(vi);
3785
3786 return 0;
3787}
3788
3789static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
3790{
3791 struct virtnet_info *vi = netdev_priv(dev);
3792 int rc = 0;
3793
3794 switch (info->cmd) {
3795 case ETHTOOL_GRXRINGS:
3796 info->data = vi->curr_queue_pairs;
3797 break;
3798 case ETHTOOL_GRXFH:
3799 virtnet_get_hashflow(vi, info);
3800 break;
3801 default:
3802 rc = -EOPNOTSUPP;
3803 }
3804
3805 return rc;
3806}
3807
3808static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3809{
3810 struct virtnet_info *vi = netdev_priv(dev);
3811 int rc = 0;
3812
3813 switch (info->cmd) {
3814 case ETHTOOL_SRXFH:
3815 if (!virtnet_set_hashflow(vi, info))
3816 rc = -EINVAL;
3817
3818 break;
3819 default:
3820 rc = -EOPNOTSUPP;
3821 }
3822
3823 return rc;
3824}
3825
3826static const struct ethtool_ops virtnet_ethtool_ops = {
3827 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
3828 ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
3829 .get_drvinfo = virtnet_get_drvinfo,
3830 .get_link = ethtool_op_get_link,
3831 .get_ringparam = virtnet_get_ringparam,
3832 .set_ringparam = virtnet_set_ringparam,
3833 .get_strings = virtnet_get_strings,
3834 .get_sset_count = virtnet_get_sset_count,
3835 .get_ethtool_stats = virtnet_get_ethtool_stats,
3836 .set_channels = virtnet_set_channels,
3837 .get_channels = virtnet_get_channels,
3838 .get_ts_info = ethtool_op_get_ts_info,
3839 .get_link_ksettings = virtnet_get_link_ksettings,
3840 .set_link_ksettings = virtnet_set_link_ksettings,
3841 .set_coalesce = virtnet_set_coalesce,
3842 .get_coalesce = virtnet_get_coalesce,
3843 .set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
3844 .get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
3845 .get_rxfh_key_size = virtnet_get_rxfh_key_size,
3846 .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
3847 .get_rxfh = virtnet_get_rxfh,
3848 .set_rxfh = virtnet_set_rxfh,
3849 .get_rxnfc = virtnet_get_rxnfc,
3850 .set_rxnfc = virtnet_set_rxnfc,
3851};
3852
3853static void virtnet_freeze_down(struct virtio_device *vdev)
3854{
3855 struct virtnet_info *vi = vdev->priv;
3856
3857 /* Make sure no work handler is accessing the device */
3858 flush_work(&vi->config_work);
3859
3860 netif_tx_lock_bh(vi->dev);
3861 netif_device_detach(vi->dev);
3862 netif_tx_unlock_bh(vi->dev);
3863 if (netif_running(vi->dev))
3864 virtnet_close(vi->dev);
3865}
3866
3867static int init_vqs(struct virtnet_info *vi);
3868
3869static int virtnet_restore_up(struct virtio_device *vdev)
3870{
3871 struct virtnet_info *vi = vdev->priv;
3872 int err;
3873
3874 err = init_vqs(vi);
3875 if (err)
3876 return err;
3877
3878 virtio_device_ready(vdev);
3879
3880 enable_delayed_refill(vi);
3881
3882 if (netif_running(vi->dev)) {
3883 err = virtnet_open(vi->dev);
3884 if (err)
3885 return err;
3886 }
3887
3888 netif_tx_lock_bh(vi->dev);
3889 netif_device_attach(vi->dev);
3890 netif_tx_unlock_bh(vi->dev);
3891 return err;
3892}
3893
3894static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
3895{
3896 struct scatterlist sg;
3897 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3898
3899 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3900
3901 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
3902 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
3903		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
3904 return -EINVAL;
3905 }
3906
3907 return 0;
3908}
3909
3910static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
3911{
3912 u64 offloads = 0;
3913
3914 if (!vi->guest_offloads)
3915 return 0;
3916
3917 return virtnet_set_guest_offloads(vi, offloads);
3918}
3919
3920static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
3921{
3922 u64 offloads = vi->guest_offloads;
3923
3924 if (!vi->guest_offloads)
3925 return 0;
3926
3927 return virtnet_set_guest_offloads(vi, offloads);
3928}
3929
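/* Attach or detach an XDP program. Validates that the device features and
 * MTU allow XDP, reserves extra queues for XDP_TX where possible, and swaps
 * the per-queue program pointers with NAPI disabled.
 */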
3930static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
3931 struct netlink_ext_ack *extack)
3932{
3933 unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
3934 sizeof(struct skb_shared_info));
3935 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
3936 struct virtnet_info *vi = netdev_priv(dev);
3937 struct bpf_prog *old_prog;
3938 u16 xdp_qp = 0, curr_qp;
3939 int i, err;
3940
3941 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
3942 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3943 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3944 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
3945 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
3946 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
3947 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
3948 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
3949 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
3950 return -EOPNOTSUPP;
3951 }
3952
3953 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
3954 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
3955 return -EINVAL;
3956 }
3957
3958 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
3959 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
3960 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
3961 return -EINVAL;
3962 }
3963
3964 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3965 if (prog)
3966 xdp_qp = nr_cpu_ids;
3967
3968 /* XDP requires extra queues for XDP_TX */
3969 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
3970		netdev_warn_once(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
3971 curr_qp + xdp_qp, vi->max_queue_pairs);
3972 xdp_qp = 0;
3973 }
3974
3975 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
3976 if (!prog && !old_prog)
3977 return 0;
3978
3979 if (prog)
3980 bpf_prog_add(prog, vi->max_queue_pairs - 1);
3981
3982 /* Make sure NAPI is not using any XDP TX queues for RX. */
3983 if (netif_running(dev)) {
3984 for (i = 0; i < vi->max_queue_pairs; i++) {
3985 napi_disable(&vi->rq[i].napi);
3986 virtnet_napi_tx_disable(&vi->sq[i].napi);
3987 }
3988 }
3989
3990 if (!prog) {
3991 for (i = 0; i < vi->max_queue_pairs; i++) {
3992 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3993 if (i == 0)
3994 virtnet_restore_guest_offloads(vi);
3995 }
3996 synchronize_net();
3997 }
3998
3999 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
4000 if (err)
4001 goto err;
4002 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
4003 vi->xdp_queue_pairs = xdp_qp;
4004
4005 if (prog) {
4006 vi->xdp_enabled = true;
4007 for (i = 0; i < vi->max_queue_pairs; i++) {
4008 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
4009 if (i == 0 && !old_prog)
4010 virtnet_clear_guest_offloads(vi);
4011 }
4012 if (!old_prog)
4013 xdp_features_set_redirect_target(dev, true);
4014 } else {
4015 xdp_features_clear_redirect_target(dev);
4016 vi->xdp_enabled = false;
4017 }
4018
4019 for (i = 0; i < vi->max_queue_pairs; i++) {
4020 if (old_prog)
4021 bpf_prog_put(old_prog);
4022 if (netif_running(dev)) {
4023 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
4024 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
4025 &vi->sq[i].napi);
4026 }
4027 }
4028
4029 return 0;
4030
4031err:
4032 if (!prog) {
4033 virtnet_clear_guest_offloads(vi);
4034 for (i = 0; i < vi->max_queue_pairs; i++)
4035 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
4036 }
4037
4038 if (netif_running(dev)) {
4039 for (i = 0; i < vi->max_queue_pairs; i++) {
4040 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
4041 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
4042 &vi->sq[i].napi);
4043 }
4044 }
4045 if (prog)
4046 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
4047 return err;
4048}
4049
4050static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4051{
4052 switch (xdp->command) {
4053 case XDP_SETUP_PROG:
4054 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
4055 default:
4056 return -EINVAL;
4057 }
4058}
4059
4060static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
4061 size_t len)
4062{
4063 struct virtnet_info *vi = netdev_priv(dev);
4064 int ret;
4065
4066 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
4067 return -EOPNOTSUPP;
4068
4069 ret = snprintf(buf, len, "sby");
4070 if (ret >= len)
4071 return -EOPNOTSUPP;
4072
4073 return 0;
4074}
4075
4076static int virtnet_set_features(struct net_device *dev,
4077 netdev_features_t features)
4078{
4079 struct virtnet_info *vi = netdev_priv(dev);
4080 u64 offloads;
4081 int err;
4082
4083 if ((dev->features ^ features) & NETIF_F_GRO_HW) {
4084 if (vi->xdp_enabled)
4085 return -EBUSY;
4086
4087 if (features & NETIF_F_GRO_HW)
4088 offloads = vi->guest_offloads_capable;
4089 else
4090 offloads = vi->guest_offloads_capable &
4091 ~GUEST_OFFLOAD_GRO_HW_MASK;
4092
4093 err = virtnet_set_guest_offloads(vi, offloads);
4094 if (err)
4095 return err;
4096 vi->guest_offloads = offloads;
4097 }
4098
4099 if ((dev->features ^ features) & NETIF_F_RXHASH) {
4100 if (features & NETIF_F_RXHASH)
4101 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
4102 else
4103 vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
4104
4105 if (!virtnet_commit_rss_command(vi))
4106 return -EINVAL;
4107 }
4108
4109 return 0;
4110}
4111
4112static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
4113{
4114 struct virtnet_info *priv = netdev_priv(dev);
4115 struct send_queue *sq = &priv->sq[txqueue];
4116 struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
4117
4118 u64_stats_update_begin(&sq->stats.syncp);
4119 u64_stats_inc(&sq->stats.tx_timeouts);
4120 u64_stats_update_end(&sq->stats.syncp);
4121
4122 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
4123 txqueue, sq->name, sq->vq->index, sq->vq->name,
4124 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
4125}
4126
4127static const struct net_device_ops virtnet_netdev = {
4128 .ndo_open = virtnet_open,
4129 .ndo_stop = virtnet_close,
4130 .ndo_start_xmit = start_xmit,
4131 .ndo_validate_addr = eth_validate_addr,
4132 .ndo_set_mac_address = virtnet_set_mac_address,
4133 .ndo_set_rx_mode = virtnet_set_rx_mode,
4134 .ndo_get_stats64 = virtnet_stats,
4135 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
4136 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
4137 .ndo_bpf = virtnet_xdp,
4138 .ndo_xdp_xmit = virtnet_xdp_xmit,
4139 .ndo_features_check = passthru_features_check,
4140 .ndo_get_phys_port_name = virtnet_get_phys_port_name,
4141 .ndo_set_features = virtnet_set_features,
4142 .ndo_tx_timeout = virtnet_tx_timeout,
4143};
4144
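/* Deferred handler for config-space interrupts: acknowledge link
 * announcements and propagate link up/down changes to the stack.
 */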
4145static void virtnet_config_changed_work(struct work_struct *work)
4146{
4147 struct virtnet_info *vi =
4148 container_of(work, struct virtnet_info, config_work);
4149 u16 v;
4150
4151 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
4152 struct virtio_net_config, status, &v) < 0)
4153 return;
4154
4155 if (v & VIRTIO_NET_S_ANNOUNCE) {
4156 netdev_notify_peers(vi->dev);
4157 virtnet_ack_link_announce(vi);
4158 }
4159
4160 /* Ignore unknown (future) status bits */
4161 v &= VIRTIO_NET_S_LINK_UP;
4162
4163 if (vi->status == v)
4164 return;
4165
4166 vi->status = v;
4167
4168 if (vi->status & VIRTIO_NET_S_LINK_UP) {
4169 virtnet_update_settings(vi);
4170 netif_carrier_on(vi->dev);
4171 netif_tx_wake_all_queues(vi->dev);
4172 } else {
4173 netif_carrier_off(vi->dev);
4174 netif_tx_stop_all_queues(vi->dev);
4175 }
4176}
4177
4178static void virtnet_config_changed(struct virtio_device *vdev)
4179{
4180 struct virtnet_info *vi = vdev->priv;
4181
4182 schedule_work(&vi->config_work);
4183}
4184
4185static void virtnet_free_queues(struct virtnet_info *vi)
4186{
4187 int i;
4188
4189 for (i = 0; i < vi->max_queue_pairs; i++) {
4190 __netif_napi_del(&vi->rq[i].napi);
4191 __netif_napi_del(&vi->sq[i].napi);
4192 }
4193
4194 /* We called __netif_napi_del(),
4195 * we need to respect an RCU grace period before freeing vi->rq
4196 */
4197 synchronize_net();
4198
4199 kfree(vi->rq);
4200 kfree(vi->sq);
4201 kfree(vi->ctrl);
4202}
4203
4204static void _free_receive_bufs(struct virtnet_info *vi)
4205{
4206 struct bpf_prog *old_prog;
4207 int i;
4208
4209 for (i = 0; i < vi->max_queue_pairs; i++) {
4210 while (vi->rq[i].pages)
4211 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
4212
4213 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
4214 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
4215 if (old_prog)
4216 bpf_prog_put(old_prog);
4217 }
4218}
4219
4220static void free_receive_bufs(struct virtnet_info *vi)
4221{
4222 rtnl_lock();
4223 _free_receive_bufs(vi);
4224 rtnl_unlock();
4225}
4226
4227static void free_receive_page_frags(struct virtnet_info *vi)
4228{
4229 int i;
4230 for (i = 0; i < vi->max_queue_pairs; i++)
4231 if (vi->rq[i].alloc_frag.page) {
4232 if (vi->rq[i].do_dma && vi->rq[i].last_dma)
4233 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
4234 put_page(vi->rq[i].alloc_frag.page);
4235 }
4236}
4237
4238static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
4239{
4240 if (!is_xdp_frame(buf))
4241 dev_kfree_skb(buf);
4242 else
4243 xdp_return_frame(ptr_to_xdp(buf));
4244}
4245
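/* Reclaim any buffers still queued in the send and receive virtqueues. */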
4246static void free_unused_bufs(struct virtnet_info *vi)
4247{
4248 void *buf;
4249 int i;
4250
4251 for (i = 0; i < vi->max_queue_pairs; i++) {
4252 struct virtqueue *vq = vi->sq[i].vq;
4253 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4254 virtnet_sq_free_unused_buf(vq, buf);
4255 cond_resched();
4256 }
4257
4258 for (i = 0; i < vi->max_queue_pairs; i++) {
4259 struct virtqueue *vq = vi->rq[i].vq;
4260
4261 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4262 virtnet_rq_unmap_free_buf(vq, buf);
4263 cond_resched();
4264 }
4265}
4266
4267static void virtnet_del_vqs(struct virtnet_info *vi)
4268{
4269 struct virtio_device *vdev = vi->vdev;
4270
4271 virtnet_clean_affinity(vi);
4272
4273 vdev->config->del_vqs(vdev);
4274
4275 virtnet_free_queues(vi);
4276}
4277
4278/* How large should a single buffer be so a queue full of these can fit at
4279 * least one full packet?
4280 * Logic below assumes the mergeable buffer header is used.
4281 */
4282static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
4283{
4284 const unsigned int hdr_len = vi->hdr_len;
4285 unsigned int rq_size = virtqueue_get_vring_size(vq);
4286 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
4287 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
4288 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
4289
4290 return max(max(min_buf_len, hdr_len) - hdr_len,
4291 (unsigned int)GOOD_PACKET_LEN);
4292}
4293
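/* Create the RX/TX (and optional control) virtqueues and attach them to the
 * per-queue state.
 */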
4294static int virtnet_find_vqs(struct virtnet_info *vi)
4295{
4296 vq_callback_t **callbacks;
4297 struct virtqueue **vqs;
4298 const char **names;
4299 int ret = -ENOMEM;
4300 int total_vqs;
4301 bool *ctx;
4302 u16 i;
4303
4304 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
4305 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
4306 * possible control vq.
4307 */
4308 total_vqs = vi->max_queue_pairs * 2 +
4309 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
4310
4311 /* Allocate space for find_vqs parameters */
4312 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
4313 if (!vqs)
4314 goto err_vq;
4315 callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
4316 if (!callbacks)
4317 goto err_callback;
4318 names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
4319 if (!names)
4320 goto err_names;
4321 if (!vi->big_packets || vi->mergeable_rx_bufs) {
4322 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
4323 if (!ctx)
4324 goto err_ctx;
4325 } else {
4326 ctx = NULL;
4327 }
4328
4329 /* Parameters for control virtqueue, if any */
4330 if (vi->has_cvq) {
4331 callbacks[total_vqs - 1] = NULL;
4332 names[total_vqs - 1] = "control";
4333 }
4334
4335 /* Allocate/initialize parameters for send/receive virtqueues */
4336 for (i = 0; i < vi->max_queue_pairs; i++) {
4337 callbacks[rxq2vq(i)] = skb_recv_done;
4338 callbacks[txq2vq(i)] = skb_xmit_done;
4339 sprintf(vi->rq[i].name, "input.%u", i);
4340 sprintf(vi->sq[i].name, "output.%u", i);
4341 names[rxq2vq(i)] = vi->rq[i].name;
4342 names[txq2vq(i)] = vi->sq[i].name;
4343 if (ctx)
4344 ctx[rxq2vq(i)] = true;
4345 }
4346
4347 ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
4348 names, ctx, NULL);
4349 if (ret)
4350 goto err_find;
4351
4352 if (vi->has_cvq) {
4353 vi->cvq = vqs[total_vqs - 1];
4354 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
4355 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4356 }
4357
4358 for (i = 0; i < vi->max_queue_pairs; i++) {
4359 vi->rq[i].vq = vqs[rxq2vq(i)];
4360 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
4361 vi->sq[i].vq = vqs[txq2vq(i)];
4362 }
4363
4364	/* Success: ret == 0; fall through to free the temporary find_vqs arrays. */
4365
4367err_find:
4368 kfree(ctx);
4369err_ctx:
4370 kfree(names);
4371err_names:
4372 kfree(callbacks);
4373err_callback:
4374 kfree(vqs);
4375err_vq:
4376 return ret;
4377}
4378
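/* Allocate the control buffer and the send/receive queue arrays, and
 * initialize per-queue NAPI, DIM and stats state.
 */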
4379static int virtnet_alloc_queues(struct virtnet_info *vi)
4380{
4381 int i;
4382
4383 if (vi->has_cvq) {
4384 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
4385 if (!vi->ctrl)
4386 goto err_ctrl;
4387 } else {
4388 vi->ctrl = NULL;
4389 }
4390 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
4391 if (!vi->sq)
4392 goto err_sq;
4393 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
4394 if (!vi->rq)
4395 goto err_rq;
4396
4397 INIT_DELAYED_WORK(&vi->refill, refill_work);
4398 for (i = 0; i < vi->max_queue_pairs; i++) {
4399 vi->rq[i].pages = NULL;
4400 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
4401 napi_weight);
4402 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
4403 virtnet_poll_tx,
4404 napi_tx ? napi_weight : 0);
4405
4406 INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
4407 vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4408
4409 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
4410 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
4411 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
4412
4413 u64_stats_init(&vi->rq[i].stats.syncp);
4414 u64_stats_init(&vi->sq[i].stats.syncp);
4415 }
4416
4417 return 0;
4418
4419err_rq:
4420 kfree(vi->sq);
4421err_sq:
4422 kfree(vi->ctrl);
4423err_ctrl:
4424 return -ENOMEM;
4425}
4426
4427static int init_vqs(struct virtnet_info *vi)
4428{
4429 int ret;
4430
4431 /* Allocate send & receive queues */
4432 ret = virtnet_alloc_queues(vi);
4433 if (ret)
4434 goto err;
4435
4436 ret = virtnet_find_vqs(vi);
4437 if (ret)
4438 goto err_free;
4439
4440 virtnet_rq_set_premapped(vi);
4441
4442 cpus_read_lock();
4443 virtnet_set_affinity(vi);
4444 cpus_read_unlock();
4445
4446 return 0;
4447
4448err_free:
4449 virtnet_free_queues(vi);
4450err:
4451 return ret;
4452}
4453
4454#ifdef CONFIG_SYSFS
4455static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
4456 char *buf)
4457{
4458 struct virtnet_info *vi = netdev_priv(queue->dev);
4459 unsigned int queue_index = get_netdev_rx_queue_index(queue);
4460 unsigned int headroom = virtnet_get_headroom(vi);
4461 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
4462 struct ewma_pkt_len *avg;
4463
4464 BUG_ON(queue_index >= vi->max_queue_pairs);
4465 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
4466 return sprintf(buf, "%u\n",
4467 get_mergeable_buf_len(&vi->rq[queue_index], avg,
4468 SKB_DATA_ALIGN(headroom + tailroom)));
4469}
4470
4471static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
4472 __ATTR_RO(mergeable_rx_buffer_size);
4473
4474static struct attribute *virtio_net_mrg_rx_attrs[] = {
4475 &mergeable_rx_buffer_size_attribute.attr,
4476 NULL
4477};
4478
4479static const struct attribute_group virtio_net_mrg_rx_group = {
4480 .name = "virtio_net",
4481 .attrs = virtio_net_mrg_rx_attrs
4482};
4483#endif
4484
4485static bool virtnet_fail_on_feature(struct virtio_device *vdev,
4486 unsigned int fbit,
4487 const char *fname, const char *dname)
4488{
4489 if (!virtio_has_feature(vdev, fbit))
4490 return false;
4491
4492	dev_err(&vdev->dev, "device advertises feature %s but not %s\n",
4493 fname, dname);
4494
4495 return true;
4496}
4497
4498#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
4499 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
4500
4501static bool virtnet_validate_features(struct virtio_device *vdev)
4502{
4503 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
4504 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
4505 "VIRTIO_NET_F_CTRL_VQ") ||
4506 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
4507 "VIRTIO_NET_F_CTRL_VQ") ||
4508 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
4509 "VIRTIO_NET_F_CTRL_VQ") ||
4510 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
4511 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
4512 "VIRTIO_NET_F_CTRL_VQ") ||
4513 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
4514 "VIRTIO_NET_F_CTRL_VQ") ||
4515 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
4516 "VIRTIO_NET_F_CTRL_VQ") ||
4517 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
4518 "VIRTIO_NET_F_CTRL_VQ") ||
4519 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
4520 "VIRTIO_NET_F_CTRL_VQ"))) {
4521 return false;
4522 }
4523
4524 return true;
4525}
4526
4527#define MIN_MTU ETH_MIN_MTU
4528#define MAX_MTU ETH_MAX_MTU
4529
4530static int virtnet_validate(struct virtio_device *vdev)
4531{
4532 if (!vdev->config->get) {
4533 dev_err(&vdev->dev, "%s failure: config access disabled\n",
4534 __func__);
4535 return -EINVAL;
4536 }
4537
4538 if (!virtnet_validate_features(vdev))
4539 return -EINVAL;
4540
4541 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4542 int mtu = virtio_cread16(vdev,
4543 offsetof(struct virtio_net_config,
4544 mtu));
4545 if (mtu < MIN_MTU)
4546 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
4547 }
4548
4549 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
4550 !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4551 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
4552 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
4553 }
4554
4555 return 0;
4556}
4557
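/* True if the device may deliver GSO packets to the guest (TSO4/6, ECN, UFO,
 * or both USO4 and USO6).
 */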
4558static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
4559{
4560 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4561 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
4562 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
4563 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4564 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
4565 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
4566}
4567
4568static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
4569{
4570 bool guest_gso = virtnet_check_guest_gso(vi);
4571
4572 /* If device can receive ANY guest GSO packets, regardless of mtu,
4573	 * allocate buffers of maximum size, otherwise limit them to
4574	 * the MTU size only.
4575 */
4576 if (mtu > ETH_DATA_LEN || guest_gso) {
4577 vi->big_packets = true;
4578 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
4579 }
4580}
4581
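/* Device probe: read the negotiated features, configure the net_device and
 * its offloads, allocate the virtqueues, and register with the networking
 * core before marking the virtio device ready.
 */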
4582static int virtnet_probe(struct virtio_device *vdev)
4583{
4584 int i, err = -ENOMEM;
4585 struct net_device *dev;
4586 struct virtnet_info *vi;
4587 u16 max_queue_pairs;
4588 int mtu = 0;
4589
4590 /* Find if host supports multiqueue/rss virtio_net device */
4591 max_queue_pairs = 1;
4592 if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4593 max_queue_pairs =
4594 virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
4595
4596	/* We need at least 2 queues */
4597 if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
4598 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
4599 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4600 max_queue_pairs = 1;
4601
4602 /* Allocate ourselves a network device with room for our info */
4603 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
4604 if (!dev)
4605 return -ENOMEM;
4606
4607 /* Set up network device as normal. */
4608 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
4609 IFF_TX_SKB_NO_LINEAR;
4610 dev->netdev_ops = &virtnet_netdev;
4611 dev->features = NETIF_F_HIGHDMA;
4612
4613 dev->ethtool_ops = &virtnet_ethtool_ops;
4614 SET_NETDEV_DEV(dev, &vdev->dev);
4615
4616 /* Do we support "hardware" checksums? */
4617 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
4618 /* This opens up the world of extra features. */
4619 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4620 if (csum)
4621 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4622
4623 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
4624 dev->hw_features |= NETIF_F_TSO
4625 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
4626 }
4627 /* Individual feature bits: what can host handle? */
4628 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
4629 dev->hw_features |= NETIF_F_TSO;
4630 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
4631 dev->hw_features |= NETIF_F_TSO6;
4632 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
4633 dev->hw_features |= NETIF_F_TSO_ECN;
4634 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
4635 dev->hw_features |= NETIF_F_GSO_UDP_L4;
4636
4637 dev->features |= NETIF_F_GSO_ROBUST;
4638
4639 if (gso)
4640 dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
4641 /* (!csum && gso) case will be fixed by register_netdev() */
4642 }
4643 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
4644 dev->features |= NETIF_F_RXCSUM;
4645 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4646 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
4647 dev->features |= NETIF_F_GRO_HW;
4648 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
4649 dev->hw_features |= NETIF_F_GRO_HW;
4650
4651 dev->vlan_features = dev->features;
4652 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
4653
4654 /* MTU range: 68 - 65535 */
4655 dev->min_mtu = MIN_MTU;
4656 dev->max_mtu = MAX_MTU;
4657
4658 /* Configuration may specify what MAC to use. Otherwise random. */
4659 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4660 u8 addr[ETH_ALEN];
4661
4662 virtio_cread_bytes(vdev,
4663 offsetof(struct virtio_net_config, mac),
4664 addr, ETH_ALEN);
4665 eth_hw_addr_set(dev, addr);
4666 } else {
4667 eth_hw_addr_random(dev);
4668 dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
4669 dev->dev_addr);
4670 }
4671
4672 /* Set up our device-specific information */
4673 vi = netdev_priv(dev);
4674 vi->dev = dev;
4675 vi->vdev = vdev;
4676 vdev->priv = vi;
4677
4678 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
4679 spin_lock_init(&vi->refill_lock);
4680
4681 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
4682 vi->mergeable_rx_bufs = true;
4683 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
4684 }
4685
4686 if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
4687 vi->has_rss_hash_report = true;
4688
4689 if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4690 vi->has_rss = true;
4691
4692 if (vi->has_rss || vi->has_rss_hash_report) {
4693 vi->rss_indir_table_size =
4694 virtio_cread16(vdev, offsetof(struct virtio_net_config,
4695 rss_max_indirection_table_length));
4696 vi->rss_key_size =
4697 virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
4698
4699 vi->rss_hash_types_supported =
4700 virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
4701 vi->rss_hash_types_supported &=
4702 ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
4703 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
4704 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
4705
4706 dev->hw_features |= NETIF_F_RXHASH;
4707 }
4708
4709 if (vi->has_rss_hash_report)
4710 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
4711 else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
4712 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4713 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
4714 else
4715 vi->hdr_len = sizeof(struct virtio_net_hdr);
4716
4717 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
4718 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4719 vi->any_header_sg = true;
4720
4721 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4722 vi->has_cvq = true;
4723
4724 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4725 mtu = virtio_cread16(vdev,
4726 offsetof(struct virtio_net_config,
4727 mtu));
4728 if (mtu < dev->min_mtu) {
4729 /* Should never trigger: MTU was previously validated
4730 * in virtnet_validate.
4731 */
4732 dev_err(&vdev->dev,
4733				"device MTU appears to have changed, it is now %d < %d",
4734 mtu, dev->min_mtu);
4735 err = -EINVAL;
4736 goto free;
4737 }
4738
4739 dev->mtu = mtu;
4740 dev->max_mtu = mtu;
4741 }
4742
4743 virtnet_set_big_packets(vi, mtu);
4744
4745 if (vi->any_header_sg)
4746 dev->needed_headroom = vi->hdr_len;
4747
4748 /* Enable multiqueue by default */
4749 if (num_online_cpus() >= max_queue_pairs)
4750 vi->curr_queue_pairs = max_queue_pairs;
4751 else
4752 vi->curr_queue_pairs = num_online_cpus();
4753 vi->max_queue_pairs = max_queue_pairs;
4754
4755 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
4756 err = init_vqs(vi);
4757 if (err)
4758 goto free;
4759
4760 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4761 vi->intr_coal_rx.max_usecs = 0;
4762 vi->intr_coal_tx.max_usecs = 0;
4763 vi->intr_coal_rx.max_packets = 0;
4764
4765 /* Keep the default values of the coalescing parameters
4766 * aligned with the default napi_tx state.
4767 */
4768 if (vi->sq[0].napi.weight)
4769 vi->intr_coal_tx.max_packets = 1;
4770 else
4771 vi->intr_coal_tx.max_packets = 0;
4772 }
4773
4774 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
4775 /* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
4776 for (i = 0; i < vi->max_queue_pairs; i++)
4777 if (vi->sq[i].napi.weight)
4778 vi->sq[i].intr_coal.max_packets = 1;
4779 }
4780
4781#ifdef CONFIG_SYSFS
4782 if (vi->mergeable_rx_bufs)
4783 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
4784#endif
4785 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
4786 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
4787
4788 virtnet_init_settings(dev);
4789
4790 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4791 vi->failover = net_failover_create(vi->dev);
4792 if (IS_ERR(vi->failover)) {
4793 err = PTR_ERR(vi->failover);
4794 goto free_vqs;
4795 }
4796 }
4797
4798 if (vi->has_rss || vi->has_rss_hash_report)
4799 virtnet_init_default_rss(vi);
4800
4801 /* serialize netdev register + virtio_device_ready() with ndo_open() */
4802 rtnl_lock();
4803
4804 err = register_netdevice(dev);
4805 if (err) {
4806 pr_debug("virtio_net: registering device failed\n");
4807 rtnl_unlock();
4808 goto free_failover;
4809 }
4810
4811 virtio_device_ready(vdev);
4812
4813 _virtnet_set_queues(vi, vi->curr_queue_pairs);
4814
4815	/* A random MAC address has been assigned; notify the device.
4816	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
4817	 * because many devices work fine without the MAC being set explicitly.
4818 */
4819 if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
4820 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
4821 struct scatterlist sg;
4822
4823 sg_init_one(&sg, dev->dev_addr, dev->addr_len);
4824 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
4825 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
4826 pr_debug("virtio_net: setting MAC address failed\n");
4827 rtnl_unlock();
4828 err = -EINVAL;
4829 goto free_unregister_netdev;
4830 }
4831 }
4832
4833 rtnl_unlock();
4834
4835 err = virtnet_cpu_notif_add(vi);
4836 if (err) {
4837 pr_debug("virtio_net: registering cpu notifier failed\n");
4838 goto free_unregister_netdev;
4839 }
4840
4841	/* Assume link up if device can't report link status,
4842	 * otherwise get link status from config. */
4843 netif_carrier_off(dev);
4844 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
4845 schedule_work(&vi->config_work);
4846 } else {
4847 vi->status = VIRTIO_NET_S_LINK_UP;
4848 virtnet_update_settings(vi);
4849 netif_carrier_on(dev);
4850 }
4851
4852 for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
4853 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
4854 set_bit(guest_offloads[i], &vi->guest_offloads);
4855 vi->guest_offloads_capable = vi->guest_offloads;
4856
4857	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
4858 dev->name, max_queue_pairs);
4859
4860 return 0;
4861
4862free_unregister_netdev:
4863 unregister_netdev(dev);
4864free_failover:
4865 net_failover_destroy(vi->failover);
4866free_vqs:
4867 virtio_reset_device(vdev);
4868 cancel_delayed_work_sync(&vi->refill);
4869 free_receive_page_frags(vi);
4870 virtnet_del_vqs(vi);
4871free:
4872 free_netdev(dev);
4873 return err;
4874}
4875
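/* Reset the device and release all queue resources; shared by remove,
 * freeze and the restore error path.
 */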
4876static void remove_vq_common(struct virtnet_info *vi)
4877{
4878 virtio_reset_device(vi->vdev);
4879
4880 /* Free unused buffers in both send and recv, if any. */
4881 free_unused_bufs(vi);
4882
4883 free_receive_bufs(vi);
4884
4885 free_receive_page_frags(vi);
4886
4887 virtnet_del_vqs(vi);
4888}
4889
4890static void virtnet_remove(struct virtio_device *vdev)
4891{
4892 struct virtnet_info *vi = vdev->priv;
4893
4894 virtnet_cpu_notif_remove(vi);
4895
4896 /* Make sure no work handler is accessing the device. */
4897 flush_work(&vi->config_work);
4898
4899 unregister_netdev(vi->dev);
4900
4901 net_failover_destroy(vi->failover);
4902
4903 remove_vq_common(vi);
4904
4905 free_netdev(vi->dev);
4906}
4907
4908static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
4909{
4910 struct virtnet_info *vi = vdev->priv;
4911
4912 virtnet_cpu_notif_remove(vi);
4913 virtnet_freeze_down(vdev);
4914 remove_vq_common(vi);
4915
4916 return 0;
4917}
4918
4919static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
4920{
4921 struct virtnet_info *vi = vdev->priv;
4922 int err;
4923
4924 err = virtnet_restore_up(vdev);
4925 if (err)
4926 return err;
4927 virtnet_set_queues(vi, vi->curr_queue_pairs);
4928
4929 err = virtnet_cpu_notif_add(vi);
4930 if (err) {
4931 virtnet_freeze_down(vdev);
4932 remove_vq_common(vi);
4933 return err;
4934 }
4935
4936 return 0;
4937}
4938
4939static struct virtio_device_id id_table[] = {
4940 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
4941 { 0 },
4942};
4943
4944#define VIRTNET_FEATURES \
4945 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
4946 VIRTIO_NET_F_MAC, \
4947 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
4948 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
4949 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
4950 VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
4951 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
4952 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
4953 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
4954 VIRTIO_NET_F_CTRL_MAC_ADDR, \
4955 VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
4956 VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
4957 VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
4958 VIRTIO_NET_F_VQ_NOTF_COAL, \
4959 VIRTIO_NET_F_GUEST_HDRLEN
4960
4961static unsigned int features[] = {
4962 VIRTNET_FEATURES,
4963};
4964
4965static unsigned int features_legacy[] = {
4966 VIRTNET_FEATURES,
4967 VIRTIO_NET_F_GSO,
4968 VIRTIO_F_ANY_LAYOUT,
4969};
4970
4971static struct virtio_driver virtio_net_driver = {
4972 .feature_table = features,
4973 .feature_table_size = ARRAY_SIZE(features),
4974 .feature_table_legacy = features_legacy,
4975 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
4976 .driver.name = KBUILD_MODNAME,
4977 .driver.owner = THIS_MODULE,
4978 .id_table = id_table,
4979 .validate = virtnet_validate,
4980 .probe = virtnet_probe,
4981 .remove = virtnet_remove,
4982 .config_changed = virtnet_config_changed,
4983#ifdef CONFIG_PM_SLEEP
4984 .freeze = virtnet_freeze,
4985 .restore = virtnet_restore,
4986#endif
4987};
4988
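/* Register the CPU hotplug callbacks used for queue affinity, then the
 * virtio driver itself.
 */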
4989static __init int virtio_net_driver_init(void)
4990{
4991 int ret;
4992
4993 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
4994 virtnet_cpu_online,
4995 virtnet_cpu_down_prep);
4996 if (ret < 0)
4997 goto out;
4998 virtionet_online = ret;
4999 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
5000 NULL, virtnet_cpu_dead);
5001 if (ret)
5002 goto err_dead;
5003 ret = register_virtio_driver(&virtio_net_driver);
5004 if (ret)
5005 goto err_virtio;
5006 return 0;
5007err_virtio:
5008 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
5009err_dead:
5010 cpuhp_remove_multi_state(virtionet_online);
5011out:
5012 return ret;
5013}
5014module_init(virtio_net_driver_init);
5015
5016static __exit void virtio_net_driver_exit(void)
5017{
5018 unregister_virtio_driver(&virtio_net_driver);
5019 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
5020 cpuhp_remove_multi_state(virtionet_online);
5021}
5022module_exit(virtio_net_driver_exit);
5023
5024MODULE_DEVICE_TABLE(virtio, id_table);
5025MODULE_DESCRIPTION("Virtio network driver");
5026MODULE_LICENSE("GPL");
1/* A network driver using virtio.
2 *
3 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 */
18//#define DEBUG
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
22#include <linux/module.h>
23#include <linux/virtio.h>
24#include <linux/virtio_net.h>
25#include <linux/bpf.h>
26#include <linux/bpf_trace.h>
27#include <linux/scatterlist.h>
28#include <linux/if_vlan.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/average.h>
32#include <linux/filter.h>
33#include <net/route.h>
34#include <net/xdp.h>
35
36static int napi_weight = NAPI_POLL_WEIGHT;
37module_param(napi_weight, int, 0444);
38
39static bool csum = true, gso = true, napi_tx;
40module_param(csum, bool, 0444);
41module_param(gso, bool, 0444);
42module_param(napi_tx, bool, 0644);
43
44/* FIXME: MTU in config. */
45#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
46#define GOOD_COPY_LEN 128
47
48#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
49
50/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
51#define VIRTIO_XDP_HEADROOM 256
52
53/* RX packet size EWMA. The average packet size is used to determine the packet
54 * buffer size when refilling RX rings. As the entire RX ring may be refilled
55 * at once, the weight is chosen so that the EWMA will be insensitive to short-
56 * term, transient changes in packet size.
57 */
58DECLARE_EWMA(pkt_len, 0, 64)
59
60#define VIRTNET_DRIVER_VERSION "1.0.0"
61
62static const unsigned long guest_offloads[] = {
63 VIRTIO_NET_F_GUEST_TSO4,
64 VIRTIO_NET_F_GUEST_TSO6,
65 VIRTIO_NET_F_GUEST_ECN,
66 VIRTIO_NET_F_GUEST_UFO
67};
68
69struct virtnet_stat_desc {
70 char desc[ETH_GSTRING_LEN];
71 size_t offset;
72};
73
74struct virtnet_sq_stats {
75 struct u64_stats_sync syncp;
76 u64 packets;
77 u64 bytes;
78};
79
80struct virtnet_rq_stats {
81 struct u64_stats_sync syncp;
82 u64 packets;
83 u64 bytes;
84};
85
86#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
87#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
88
89static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
90 { "packets", VIRTNET_SQ_STAT(packets) },
91 { "bytes", VIRTNET_SQ_STAT(bytes) },
92};
93
94static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
95 { "packets", VIRTNET_RQ_STAT(packets) },
96 { "bytes", VIRTNET_RQ_STAT(bytes) },
97};
98
99#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
100#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
101
102/* Internal representation of a send virtqueue */
103struct send_queue {
104 /* Virtqueue associated with this send _queue */
105 struct virtqueue *vq;
106
107 /* TX: fragments + linear part + virtio header */
108 struct scatterlist sg[MAX_SKB_FRAGS + 2];
109
110 /* Name of the send queue: output.$index */
111 char name[40];
112
113 struct virtnet_sq_stats stats;
114
115 struct napi_struct napi;
116};
117
118/* Internal representation of a receive virtqueue */
119struct receive_queue {
120 /* Virtqueue associated with this receive_queue */
121 struct virtqueue *vq;
122
123 struct napi_struct napi;
124
125 struct bpf_prog __rcu *xdp_prog;
126
127 struct virtnet_rq_stats stats;
128
129 /* Chain pages by the private ptr. */
130 struct page *pages;
131
132 /* Average packet length for mergeable receive buffers. */
133 struct ewma_pkt_len mrg_avg_pkt_len;
134
135 /* Page frag for packet buffer allocation. */
136 struct page_frag alloc_frag;
137
138 /* RX: fragments + linear part + virtio header */
139 struct scatterlist sg[MAX_SKB_FRAGS + 2];
140
141 /* Min single buffer size for mergeable buffers case. */
142 unsigned int min_buf_len;
143
144 /* Name of this receive queue: input.$index */
145 char name[40];
146
147 struct xdp_rxq_info xdp_rxq;
148};
149
150/* Control VQ buffers: protected by the rtnl lock */
151struct control_buf {
152 struct virtio_net_ctrl_hdr hdr;
153 virtio_net_ctrl_ack status;
154 struct virtio_net_ctrl_mq mq;
155 u8 promisc;
156 u8 allmulti;
157 __virtio16 vid;
158 __virtio64 offloads;
159};
160
161struct virtnet_info {
162 struct virtio_device *vdev;
163 struct virtqueue *cvq;
164 struct net_device *dev;
165 struct send_queue *sq;
166 struct receive_queue *rq;
167 unsigned int status;
168
169 /* Max # of queue pairs supported by the device */
170 u16 max_queue_pairs;
171
172 /* # of queue pairs currently used by the driver */
173 u16 curr_queue_pairs;
174
175 /* # of XDP queue pairs currently used by the driver */
176 u16 xdp_queue_pairs;
177
178 /* I like... big packets and I cannot lie! */
179 bool big_packets;
180
181 /* Host will merge rx buffers for big packets (shake it! shake it!) */
182 bool mergeable_rx_bufs;
183
184 /* Has control virtqueue */
185 bool has_cvq;
186
187 /* Host can handle any s/g split between our header and packet data */
188 bool any_header_sg;
189
190 /* Packet virtio header size */
191 u8 hdr_len;
192
193 /* Work struct for refilling if we run low on memory. */
194 struct delayed_work refill;
195
196 /* Work struct for config space updates */
197 struct work_struct config_work;
198
199 /* Does the affinity hint is set for virtqueues? */
200 bool affinity_hint_set;
201
202 /* CPU hotplug instances for online & dead */
203 struct hlist_node node;
204 struct hlist_node node_dead;
205
206 struct control_buf *ctrl;
207
208 /* Ethtool settings */
209 u8 duplex;
210 u32 speed;
211
212 unsigned long guest_offloads;
213};
214
215struct padded_vnet_hdr {
216 struct virtio_net_hdr_mrg_rxbuf hdr;
217 /*
218 * hdr is in a separate sg buffer, and data sg buffer shares same page
219 * with this header sg. This padding makes next sg 16 byte aligned
220 * after the header.
221 */
222 char padding[4];
223};
224
225/* Converting between virtqueue no. and kernel tx/rx queue no.
226 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
227 */
228static int vq2txq(struct virtqueue *vq)
229{
230 return (vq->index - 1) / 2;
231}
232
233static int txq2vq(int txq)
234{
235 return txq * 2 + 1;
236}
237
238static int vq2rxq(struct virtqueue *vq)
239{
240 return vq->index / 2;
241}
242
243static int rxq2vq(int rxq)
244{
245 return rxq * 2;
246}
247
248static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
249{
250 return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
251}
252
253/*
254 * private is used to chain pages for big packets, put the whole
255 * most recent used list in the beginning for reuse
256 */
257static void give_pages(struct receive_queue *rq, struct page *page)
258{
259 struct page *end;
260
261 /* Find end of list, sew whole thing into vi->rq.pages. */
262 for (end = page; end->private; end = (struct page *)end->private);
263 end->private = (unsigned long)rq->pages;
264 rq->pages = page;
265}
266
267static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
268{
269 struct page *p = rq->pages;
270
271 if (p) {
272 rq->pages = (struct page *)p->private;
273 /* clear private here, it is used to chain pages */
274 p->private = 0;
275 } else
276 p = alloc_page(gfp_mask);
277 return p;
278}
279
280static void virtqueue_napi_schedule(struct napi_struct *napi,
281 struct virtqueue *vq)
282{
283 if (napi_schedule_prep(napi)) {
284 virtqueue_disable_cb(vq);
285 __napi_schedule(napi);
286 }
287}
288
289static void virtqueue_napi_complete(struct napi_struct *napi,
290 struct virtqueue *vq, int processed)
291{
292 int opaque;
293
294 opaque = virtqueue_enable_cb_prepare(vq);
295 if (napi_complete_done(napi, processed)) {
296 if (unlikely(virtqueue_poll(vq, opaque)))
297 virtqueue_napi_schedule(napi, vq);
298 } else {
299 virtqueue_disable_cb(vq);
300 }
301}
302
303static void skb_xmit_done(struct virtqueue *vq)
304{
305 struct virtnet_info *vi = vq->vdev->priv;
306 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
307
308 /* Suppress further interrupts. */
309 virtqueue_disable_cb(vq);
310
311 if (napi->weight)
312 virtqueue_napi_schedule(napi, vq);
313 else
314 /* We were probably waiting for more output buffers. */
315 netif_wake_subqueue(vi->dev, vq2txq(vq));
316}
317
318#define MRG_CTX_HEADER_SHIFT 22
319static void *mergeable_len_to_ctx(unsigned int truesize,
320 unsigned int headroom)
321{
322 return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
323}
324
325static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
326{
327 return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
328}
329
330static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
331{
332 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
333}
334
335/* Called from bottom half context */
336static struct sk_buff *page_to_skb(struct virtnet_info *vi,
337 struct receive_queue *rq,
338 struct page *page, unsigned int offset,
339 unsigned int len, unsigned int truesize)
340{
341 struct sk_buff *skb;
342 struct virtio_net_hdr_mrg_rxbuf *hdr;
343 unsigned int copy, hdr_len, hdr_padded_len;
344 char *p;
345
346 p = page_address(page) + offset;
347
348 /* copy small packet so we can reuse these pages for small data */
349 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
350 if (unlikely(!skb))
351 return NULL;
352
353 hdr = skb_vnet_hdr(skb);
354
355 hdr_len = vi->hdr_len;
356 if (vi->mergeable_rx_bufs)
357 hdr_padded_len = sizeof(*hdr);
358 else
359 hdr_padded_len = sizeof(struct padded_vnet_hdr);
360
361 memcpy(hdr, p, hdr_len);
362
363 len -= hdr_len;
364 offset += hdr_padded_len;
365 p += hdr_padded_len;
366
367 copy = len;
368 if (copy > skb_tailroom(skb))
369 copy = skb_tailroom(skb);
370 skb_put_data(skb, p, copy);
371
372 len -= copy;
373 offset += copy;
374
375 if (vi->mergeable_rx_bufs) {
376 if (len)
377 skb_add_rx_frag(skb, 0, page, offset, len, truesize);
378 else
379 put_page(page);
380 return skb;
381 }
382
383 /*
384 * Verify that we can indeed put this data into a skb.
385 * This is here to handle cases when the device erroneously
386 * tries to receive more than is possible. This is usually
387 * the case of a broken device.
388 */
389 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
390 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
391 dev_kfree_skb(skb);
392 return NULL;
393 }
394 BUG_ON(offset >= PAGE_SIZE);
395 while (len) {
396 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
397 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
398 frag_size, truesize);
399 len -= frag_size;
400 page = (struct page *)page->private;
401 offset = 0;
402 }
403
404 if (page)
405 give_pages(rq, page);
406
407 return skb;
408}
409
410static void virtnet_xdp_flush(struct net_device *dev)
411{
412 struct virtnet_info *vi = netdev_priv(dev);
413 struct send_queue *sq;
414 unsigned int qp;
415
416 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
417 sq = &vi->sq[qp];
418
419 virtqueue_kick(sq->vq);
420}
421
422static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
423 struct xdp_buff *xdp)
424{
425 struct virtio_net_hdr_mrg_rxbuf *hdr;
426 unsigned int len;
427 struct send_queue *sq;
428 unsigned int qp;
429 void *xdp_sent;
430 int err;
431
432 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
433 sq = &vi->sq[qp];
434
435 /* Free up any pending old buffers before queueing new ones. */
436 while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
437 struct page *sent_page = virt_to_head_page(xdp_sent);
438
439 put_page(sent_page);
440 }
441
442 xdp->data -= vi->hdr_len;
443 /* Zero header and leave csum up to XDP layers */
444 hdr = xdp->data;
445 memset(hdr, 0, vi->hdr_len);
446
447 sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
448
449 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
450 if (unlikely(err))
451 return false; /* Caller handle free/refcnt */
452
453 return true;
454}
455
456static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
457{
458 struct virtnet_info *vi = netdev_priv(dev);
459 struct receive_queue *rq = vi->rq;
460 struct bpf_prog *xdp_prog;
461 bool sent;
462
463 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
464 * indicate XDP resources have been successfully allocated.
465 */
466 xdp_prog = rcu_dereference(rq->xdp_prog);
467 if (!xdp_prog)
468 return -ENXIO;
469
470 sent = __virtnet_xdp_xmit(vi, xdp);
471 if (!sent)
472 return -ENOSPC;
473 return 0;
474}
475
476static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
477{
478 return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
479}
480
481/* We copy the packet for XDP in the following cases:
482 *
483 * 1) Packet is scattered across multiple rx buffers.
484 * 2) Headroom space is insufficient.
485 *
486 * This is inefficient but it's a temporary condition that
487 * we hit right after XDP is enabled and until queue is refilled
488 * with large buffers with sufficient headroom - so it should affect
489 * at most queue size packets.
490 * Afterwards, the conditions to enable
491 * XDP should preclude the underlying device from sending packets
492 * across multiple buffers (num_buf > 1), and we make sure buffers
493 * have enough headroom.
494 */
495static struct page *xdp_linearize_page(struct receive_queue *rq,
496 u16 *num_buf,
497 struct page *p,
498 int offset,
499 int page_off,
500 unsigned int *len)
501{
502 struct page *page = alloc_page(GFP_ATOMIC);
503
504 if (!page)
505 return NULL;
506
507 memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
508 page_off += *len;
509
510 while (--*num_buf) {
511 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
512 unsigned int buflen;
513 void *buf;
514 int off;
515
516 buf = virtqueue_get_buf(rq->vq, &buflen);
517 if (unlikely(!buf))
518 goto err_buf;
519
520 p = virt_to_head_page(buf);
521 off = buf - page_address(p);
522
523 /* guard against a misconfigured or uncooperative backend that
524 * is sending packet larger than the MTU.
525 */
526 if ((page_off + buflen + tailroom) > PAGE_SIZE) {
527 put_page(p);
528 goto err_buf;
529 }
530
531 memcpy(page_address(page) + page_off,
532 page_address(p) + off, buflen);
533 page_off += buflen;
534 put_page(p);
535 }
536
537 /* Headroom does not contribute to packet length */
538 *len = page_off - VIRTIO_XDP_HEADROOM;
539 return page;
540err_buf:
541 __free_pages(page, 0);
542 return NULL;
543}
544
545static struct sk_buff *receive_small(struct net_device *dev,
546 struct virtnet_info *vi,
547 struct receive_queue *rq,
548 void *buf, void *ctx,
549 unsigned int len,
550 bool *xdp_xmit)
551{
552 struct sk_buff *skb;
553 struct bpf_prog *xdp_prog;
554 unsigned int xdp_headroom = (unsigned long)ctx;
555 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
556 unsigned int headroom = vi->hdr_len + header_offset;
557 unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
558 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
559 struct page *page = virt_to_head_page(buf);
560 unsigned int delta = 0;
561 struct page *xdp_page;
562 bool sent;
563 int err;
564
565 len -= vi->hdr_len;
566
567 rcu_read_lock();
568 xdp_prog = rcu_dereference(rq->xdp_prog);
569 if (xdp_prog) {
570 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
571 struct xdp_buff xdp;
572 void *orig_data;
573 u32 act;
574
575 if (unlikely(hdr->hdr.gso_type))
576 goto err_xdp;
577
578 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
579 int offset = buf - page_address(page) + header_offset;
580 unsigned int tlen = len + vi->hdr_len;
581 u16 num_buf = 1;
582
583 xdp_headroom = virtnet_get_headroom(vi);
584 header_offset = VIRTNET_RX_PAD + xdp_headroom;
585 headroom = vi->hdr_len + header_offset;
586 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
587 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
588 xdp_page = xdp_linearize_page(rq, &num_buf, page,
589 offset, header_offset,
590 &tlen);
591 if (!xdp_page)
592 goto err_xdp;
593
594 buf = page_address(xdp_page);
595 put_page(page);
596 page = xdp_page;
597 }
598
599 xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
600 xdp.data = xdp.data_hard_start + xdp_headroom;
601 xdp_set_data_meta_invalid(&xdp);
602 xdp.data_end = xdp.data + len;
603 xdp.rxq = &rq->xdp_rxq;
604 orig_data = xdp.data;
605 act = bpf_prog_run_xdp(xdp_prog, &xdp);
606
607 switch (act) {
608 case XDP_PASS:
609 /* Recalculate length in case bpf program changed it */
610 delta = orig_data - xdp.data;
611 break;
612 case XDP_TX:
613 sent = __virtnet_xdp_xmit(vi, &xdp);
614 if (unlikely(!sent)) {
615 trace_xdp_exception(vi->dev, xdp_prog, act);
616 goto err_xdp;
617 }
618 *xdp_xmit = true;
619 rcu_read_unlock();
620 goto xdp_xmit;
621 case XDP_REDIRECT:
622 err = xdp_do_redirect(dev, &xdp, xdp_prog);
623 if (err)
624 goto err_xdp;
625 *xdp_xmit = true;
626 rcu_read_unlock();
627 goto xdp_xmit;
628 default:
629 bpf_warn_invalid_xdp_action(act);
630 case XDP_ABORTED:
631 trace_xdp_exception(vi->dev, xdp_prog, act);
632 case XDP_DROP:
633 goto err_xdp;
634 }
635 }
636 rcu_read_unlock();
637
638 skb = build_skb(buf, buflen);
639 if (!skb) {
640 put_page(page);
641 goto err;
642 }
643 skb_reserve(skb, headroom - delta);
644 skb_put(skb, len + delta);
645 if (!delta) {
646 buf += header_offset;
647 memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
648 } /* keep zeroed vnet hdr since packet was changed by bpf */
649
650err:
651 return skb;
652
653err_xdp:
654 rcu_read_unlock();
655 dev->stats.rx_dropped++;
656 put_page(page);
657xdp_xmit:
658 return NULL;
659}
660
661static struct sk_buff *receive_big(struct net_device *dev,
662 struct virtnet_info *vi,
663 struct receive_queue *rq,
664 void *buf,
665 unsigned int len)
666{
667 struct page *page = buf;
668 struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
669
670 if (unlikely(!skb))
671 goto err;
672
673 return skb;
674
675err:
676 dev->stats.rx_dropped++;
677 give_pages(rq, page);
678 return NULL;
679}
680
681static struct sk_buff *receive_mergeable(struct net_device *dev,
682 struct virtnet_info *vi,
683 struct receive_queue *rq,
684 void *buf,
685 void *ctx,
686 unsigned int len,
687 bool *xdp_xmit)
688{
689 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
690 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
691 struct page *page = virt_to_head_page(buf);
692 int offset = buf - page_address(page);
693 struct sk_buff *head_skb, *curr_skb;
694 struct bpf_prog *xdp_prog;
695 unsigned int truesize;
696 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
697 bool sent;
698 int err;
699
700 head_skb = NULL;
701
702 rcu_read_lock();
703 xdp_prog = rcu_dereference(rq->xdp_prog);
704 if (xdp_prog) {
705 struct page *xdp_page;
706 struct xdp_buff xdp;
707 void *data;
708 u32 act;
709
710 /* Transient failure which in theory could occur if
711 * in-flight packets from before XDP was enabled reach
712 * the receive path after XDP is loaded.
713 */
714 if (unlikely(hdr->hdr.gso_type))
715 goto err_xdp;
716
717 /* This happens when rx buffer size is underestimated
718 * or headroom is not enough because of the buffer
719 * was refilled before XDP is set. This should only
720 * happen for the first several packets, so we don't
721 * care much about its performance.
722 */
723 if (unlikely(num_buf > 1 ||
724 headroom < virtnet_get_headroom(vi))) {
725 /* linearize data for XDP */
726 xdp_page = xdp_linearize_page(rq, &num_buf,
727 page, offset,
728 VIRTIO_XDP_HEADROOM,
729 &len);
730 if (!xdp_page)
731 goto err_xdp;
732 offset = VIRTIO_XDP_HEADROOM;
733 } else {
734 xdp_page = page;
735 }
736
737 /* Allow consuming headroom but reserve enough space to push
738 * the descriptor on if we get an XDP_TX return code.
739 */
740 data = page_address(xdp_page) + offset;
741 xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
742 xdp.data = data + vi->hdr_len;
743 xdp_set_data_meta_invalid(&xdp);
744 xdp.data_end = xdp.data + (len - vi->hdr_len);
745 xdp.rxq = &rq->xdp_rxq;
746
747 act = bpf_prog_run_xdp(xdp_prog, &xdp);
748
749 switch (act) {
750 case XDP_PASS:
751 /* recalculate offset to account for any header
752 * adjustments. Note other cases do not build an
753 * skb and avoid using offset
754 */
755 offset = xdp.data -
756 page_address(xdp_page) - vi->hdr_len;
757
758 /* We can only create skb based on xdp_page. */
759 if (unlikely(xdp_page != page)) {
760 rcu_read_unlock();
761 put_page(page);
762 head_skb = page_to_skb(vi, rq, xdp_page,
763 offset, len, PAGE_SIZE);
764 return head_skb;
765 }
766 break;
767 case XDP_TX:
768 sent = __virtnet_xdp_xmit(vi, &xdp);
769 if (unlikely(!sent)) {
770 trace_xdp_exception(vi->dev, xdp_prog, act);
771 if (unlikely(xdp_page != page))
772 put_page(xdp_page);
773 goto err_xdp;
774 }
775 *xdp_xmit = true;
776 if (unlikely(xdp_page != page))
777 put_page(page);
778 rcu_read_unlock();
779 goto xdp_xmit;
780 case XDP_REDIRECT:
781 err = xdp_do_redirect(dev, &xdp, xdp_prog);
782 if (err) {
783 if (unlikely(xdp_page != page))
784 put_page(xdp_page);
785 goto err_xdp;
786 }
787 *xdp_xmit = true;
788 if (unlikely(xdp_page != page))
789 put_page(page);
790 rcu_read_unlock();
791 goto xdp_xmit;
792 default:
793 bpf_warn_invalid_xdp_action(act);
794 case XDP_ABORTED:
795 trace_xdp_exception(vi->dev, xdp_prog, act);
796 case XDP_DROP:
797 if (unlikely(xdp_page != page))
798 __free_pages(xdp_page, 0);
799 goto err_xdp;
800 }
801 }
802 rcu_read_unlock();
803
804 truesize = mergeable_ctx_to_truesize(ctx);
805 if (unlikely(len > truesize)) {
806 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
807 dev->name, len, (unsigned long)ctx);
808 dev->stats.rx_length_errors++;
809 goto err_skb;
810 }
811
812 head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
813 curr_skb = head_skb;
814
815 if (unlikely(!curr_skb))
816 goto err_skb;
817 while (--num_buf) {
818 int num_skb_frags;
819
820 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
821 if (unlikely(!buf)) {
822 pr_debug("%s: rx error: %d buffers out of %d missing\n",
823 dev->name, num_buf,
824 virtio16_to_cpu(vi->vdev,
825 hdr->num_buffers));
826 dev->stats.rx_length_errors++;
827 goto err_buf;
828 }
829
830 page = virt_to_head_page(buf);
831
832 truesize = mergeable_ctx_to_truesize(ctx);
833 if (unlikely(len > truesize)) {
834 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
835 dev->name, len, (unsigned long)ctx);
836 dev->stats.rx_length_errors++;
837 goto err_skb;
838 }
839
840 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
841 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
842 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
843
844 if (unlikely(!nskb))
845 goto err_skb;
846 if (curr_skb == head_skb)
847 skb_shinfo(curr_skb)->frag_list = nskb;
848 else
849 curr_skb->next = nskb;
850 curr_skb = nskb;
851 head_skb->truesize += nskb->truesize;
852 num_skb_frags = 0;
853 }
854 if (curr_skb != head_skb) {
855 head_skb->data_len += len;
856 head_skb->len += len;
857 head_skb->truesize += truesize;
858 }
859 offset = buf - page_address(page);
860 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
861 put_page(page);
862 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
863 len, truesize);
864 } else {
865 skb_add_rx_frag(curr_skb, num_skb_frags, page,
866 offset, len, truesize);
867 }
868 }
869
870 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
871 return head_skb;
872
873err_xdp:
874 rcu_read_unlock();
875err_skb:
876 put_page(page);
877 while (num_buf-- > 1) {
878 buf = virtqueue_get_buf(rq->vq, &len);
879 if (unlikely(!buf)) {
880 pr_debug("%s: rx error: %d buffers missing\n",
881 dev->name, num_buf);
882 dev->stats.rx_length_errors++;
883 break;
884 }
885 page = virt_to_head_page(buf);
886 put_page(page);
887 }
888err_buf:
889 dev->stats.rx_dropped++;
890 dev_kfree_skb(head_skb);
891xdp_xmit:
892 return NULL;
893}
894
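/* Handle one completed receive buffer: drop malformed short packets,
 * build an skb via the mergeable/big/small path, apply checksum and
 * GSO metadata from the virtio header and hand the skb to GRO.
 * Returns the number of bytes received, or 0 on error.
 */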
895static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
896 void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
897{
898 struct net_device *dev = vi->dev;
899 struct sk_buff *skb;
900 struct virtio_net_hdr_mrg_rxbuf *hdr;
901 int ret;
902
903 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
904 pr_debug("%s: short packet %i\n", dev->name, len);
905 dev->stats.rx_length_errors++;
906 if (vi->mergeable_rx_bufs) {
907 put_page(virt_to_head_page(buf));
908 } else if (vi->big_packets) {
909 give_pages(rq, buf);
910 } else {
911 put_page(virt_to_head_page(buf));
912 }
913 return 0;
914 }
915
916 if (vi->mergeable_rx_bufs)
917 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
918 else if (vi->big_packets)
919 skb = receive_big(dev, vi, rq, buf, len);
920 else
921 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);
922
923 if (unlikely(!skb))
924 return 0;
925
926 hdr = skb_vnet_hdr(skb);
927
928 ret = skb->len;
929
930 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
931 skb->ip_summed = CHECKSUM_UNNECESSARY;
932
933 if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
934 virtio_is_little_endian(vi->vdev))) {
935 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
936 dev->name, hdr->hdr.gso_type,
937 hdr->hdr.gso_size);
938 goto frame_err;
939 }
940
941 skb->protocol = eth_type_trans(skb, dev);
942 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
943 ntohs(skb->protocol), skb->len, skb->pkt_type);
944
945 napi_gro_receive(&rq->napi, skb);
946 return ret;
947
948frame_err:
949 dev->stats.rx_frame_errors++;
950 dev_kfree_skb(skb);
951 return 0;
952}
953
954/* Unlike mergeable buffers, all buffers are allocated to the
955 * same size, except for the headroom. For this reason we do
956 * not need to use mergeable_len_to_ctx here - it is enough
957 * to store the headroom as the context ignoring the truesize.
958 */
959static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
960 gfp_t gfp)
961{
962 struct page_frag *alloc_frag = &rq->alloc_frag;
963 char *buf;
964 unsigned int xdp_headroom = virtnet_get_headroom(vi);
965 void *ctx = (void *)(unsigned long)xdp_headroom;
966 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
967 int err;
968
969 len = SKB_DATA_ALIGN(len) +
970 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
971 if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
972 return -ENOMEM;
973
974 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
975 get_page(alloc_frag->page);
976 alloc_frag->offset += len;
977 sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
978 vi->hdr_len + GOOD_PACKET_LEN);
979 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
980 if (err < 0)
981 put_page(virt_to_head_page(buf));
982 return err;
983}
984
985static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
986 gfp_t gfp)
987{
988 struct page *first, *list = NULL;
989 char *p;
990 int i, err, offset;
991
992 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
993
994 /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
995 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
996 first = get_a_page(rq, gfp);
997 if (!first) {
998 if (list)
999 give_pages(rq, list);
1000 return -ENOMEM;
1001 }
1002 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
1003
1004 /* chain new page in list head to match sg */
1005 first->private = (unsigned long)list;
1006 list = first;
1007 }
1008
1009 first = get_a_page(rq, gfp);
1010 if (!first) {
1011 give_pages(rq, list);
1012 return -ENOMEM;
1013 }
1014 p = page_address(first);
1015
1016 /* rq->sg[0], rq->sg[1] share the same page */
1017 /* a separate rq->sg[0] for the header - required in case !any_header_sg */
1018 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
1019
1020 /* rq->sg[1] for data packet, from offset */
1021 offset = sizeof(struct padded_vnet_hdr);
1022 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
1023
1024 /* chain first in list head */
1025 first->private = (unsigned long)list;
1026 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
1027 first, gfp);
1028 if (err < 0)
1029 give_pages(rq, first);
1030
1031 return err;
1032}
1033
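/* Choose the size of the next mergeable receive buffer. When XDP
 * headroom/tailroom is reserved (room != 0), hand out a page minus the
 * reservation; otherwise follow the EWMA of recent packet lengths,
 * clamped between the queue minimum and a page.
 */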
1034static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
1035 struct ewma_pkt_len *avg_pkt_len,
1036 unsigned int room)
1037{
1038 const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1039 unsigned int len;
1040
1041 if (room)
1042 return PAGE_SIZE - room;
1043
1044 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1045 rq->min_buf_len, PAGE_SIZE - hdr_len);
1046
1047 return ALIGN(len, L1_CACHE_BYTES);
1048}
1049
1050static int add_recvbuf_mergeable(struct virtnet_info *vi,
1051 struct receive_queue *rq, gfp_t gfp)
1052{
1053 struct page_frag *alloc_frag = &rq->alloc_frag;
1054 unsigned int headroom = virtnet_get_headroom(vi);
1055 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1056 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1057 char *buf;
1058 void *ctx;
1059 int err;
1060 unsigned int len, hole;
1061
1062 /* Extra tailroom is needed to satisfy XDP's assumption. This
1063 * means rx frags coalescing won't work, but since we've
1064 * disabled GSO for XDP, it won't be a big issue.
1065 */
1066 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1067 if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
1068 return -ENOMEM;
1069
1070 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1071 buf += headroom; /* advance address leaving hole at front of pkt */
1072 get_page(alloc_frag->page);
1073 alloc_frag->offset += len + room;
1074 hole = alloc_frag->size - alloc_frag->offset;
1075 if (hole < len + room) {
1076 /* To avoid internal fragmentation, if there is very likely not
1077 * enough space for another buffer, add the remaining space to
1078 * the current buffer.
1079 */
1080 len += hole;
1081 alloc_frag->offset += hole;
1082 }
1083
1084 sg_init_one(rq->sg, buf, len);
1085 ctx = mergeable_len_to_ctx(len, headroom);
1086 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1087 if (err < 0)
1088 put_page(virt_to_head_page(buf));
1089
1090 return err;
1091}
1092
1093/*
1094 * Returns false if we couldn't fill entirely (OOM).
1095 *
1096 * Normally run in the receive path, but can also be run from ndo_open
1097 * before we're receiving packets, or from refill_work which is
1098 * careful to disable receiving (using napi_disable).
1099 */
1100static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1101 gfp_t gfp)
1102{
1103 int err;
1104 bool oom;
1105
1106 do {
1107 if (vi->mergeable_rx_bufs)
1108 err = add_recvbuf_mergeable(vi, rq, gfp);
1109 else if (vi->big_packets)
1110 err = add_recvbuf_big(vi, rq, gfp);
1111 else
1112 err = add_recvbuf_small(vi, rq, gfp);
1113
1114 oom = err == -ENOMEM;
1115 if (err)
1116 break;
1117 } while (rq->vq->num_free);
1118 virtqueue_kick(rq->vq);
1119 return !oom;
1120}
1121
1122static void skb_recv_done(struct virtqueue *rvq)
1123{
1124 struct virtnet_info *vi = rvq->vdev->priv;
1125 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
1126
1127 virtqueue_napi_schedule(&rq->napi, rvq);
1128}
1129
1130static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
1131{
1132 napi_enable(napi);
1133
1134 /* If all buffers were filled by the other side before we enabled NAPI,
1135 * we won't get another interrupt, so process any outstanding packets now.
1136 * Calling local_bh_enable() afterwards triggers softirq processing.
1137 */
1138 local_bh_disable();
1139 virtqueue_napi_schedule(napi, vq);
1140 local_bh_enable();
1141}
1142
1143static void virtnet_napi_tx_enable(struct virtnet_info *vi,
1144 struct virtqueue *vq,
1145 struct napi_struct *napi)
1146{
1147 if (!napi->weight)
1148 return;
1149
1150 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
1151 * enable the feature if this is likely affine with the transmit path.
1152 */
1153 if (!vi->affinity_hint_set) {
1154 napi->weight = 0;
1155 return;
1156 }
1157
1158 return virtnet_napi_enable(vq, napi);
1159}
1160
1161static void virtnet_napi_tx_disable(struct napi_struct *napi)
1162{
1163 if (napi->weight)
1164 napi_disable(napi);
1165}
1166
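/* Delayed-work handler used when an atomic refill fails: NAPI is
 * disabled per queue while refilling, and the work is rescheduled if a
 * queue is still empty afterwards.
 */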
1167static void refill_work(struct work_struct *work)
1168{
1169 struct virtnet_info *vi =
1170 container_of(work, struct virtnet_info, refill.work);
1171 bool still_empty;
1172 int i;
1173
1174 for (i = 0; i < vi->curr_queue_pairs; i++) {
1175 struct receive_queue *rq = &vi->rq[i];
1176
1177 napi_disable(&rq->napi);
1178 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
1179 virtnet_napi_enable(rq->vq, &rq->napi);
1180
1181 /* In theory, this can happen: if we don't get any buffers in
1182 * we will *never* try to fill again.
1183 */
1184 if (still_empty)
1185 schedule_delayed_work(&vi->refill, HZ/2);
1186 }
1187}
1188
1189static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
1190{
1191 struct virtnet_info *vi = rq->vq->vdev->priv;
1192 unsigned int len, received = 0, bytes = 0;
1193 void *buf;
1194
1195 if (!vi->big_packets || vi->mergeable_rx_bufs) {
1196 void *ctx;
1197
1198 while (received < budget &&
1199 (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
1200 bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
1201 received++;
1202 }
1203 } else {
1204 while (received < budget &&
1205 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
1206 bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
1207 received++;
1208 }
1209 }
1210
1211 if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
1212 if (!try_fill_recv(vi, rq, GFP_ATOMIC))
1213 schedule_delayed_work(&vi->refill, 0);
1214 }
1215
1216 u64_stats_update_begin(&rq->stats.syncp);
1217 rq->stats.bytes += bytes;
1218 rq->stats.packets += received;
1219 u64_stats_update_end(&rq->stats.syncp);
1220
1221 return received;
1222}
1223
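/* Reclaim transmit buffers the device has completed and fold their
 * packet/byte counts into the per-queue stats.
 */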
1224static void free_old_xmit_skbs(struct send_queue *sq)
1225{
1226 struct sk_buff *skb;
1227 unsigned int len;
1228 unsigned int packets = 0;
1229 unsigned int bytes = 0;
1230
1231 while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
1232 pr_debug("Sent skb %p\n", skb);
1233
1234 bytes += skb->len;
1235 packets++;
1236
1237 dev_consume_skb_any(skb);
1238 }
1239
1240 /* Avoid overhead when no packets have been processed;
1241 * this happens when called speculatively from start_xmit.
1242 */
1243 if (!packets)
1244 return;
1245
1246 u64_stats_update_begin(&sq->stats.syncp);
1247 sq->stats.bytes += bytes;
1248 sq->stats.packets += packets;
1249 u64_stats_update_end(&sq->stats.syncp);
1250}
1251
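/* When tx NAPI is in use, opportunistically reclaim completed tx
 * buffers for the send queue paired with this receive queue, and wake
 * the tx queue if enough descriptors were freed.
 */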
1252static void virtnet_poll_cleantx(struct receive_queue *rq)
1253{
1254 struct virtnet_info *vi = rq->vq->vdev->priv;
1255 unsigned int index = vq2rxq(rq->vq);
1256 struct send_queue *sq = &vi->sq[index];
1257 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
1258
1259 if (!sq->napi.weight)
1260 return;
1261
1262 if (__netif_tx_trylock(txq)) {
1263 free_old_xmit_skbs(sq);
1264 __netif_tx_unlock(txq);
1265 }
1266
1267 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
1268 netif_tx_wake_queue(txq);
1269}
1270
1271static int virtnet_poll(struct napi_struct *napi, int budget)
1272{
1273 struct receive_queue *rq =
1274 container_of(napi, struct receive_queue, napi);
1275 struct virtnet_info *vi = rq->vq->vdev->priv;
1276 struct send_queue *sq;
1277 unsigned int received, qp;
1278 bool xdp_xmit = false;
1279
1280 virtnet_poll_cleantx(rq);
1281
1282 received = virtnet_receive(rq, budget, &xdp_xmit);
1283
1284 /* Out of packets? */
1285 if (received < budget)
1286 virtqueue_napi_complete(napi, rq->vq, received);
1287
1288 if (xdp_xmit) {
1289 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
1290 smp_processor_id();
1291 sq = &vi->sq[qp];
1292 virtqueue_kick(sq->vq);
1293 xdp_do_flush_map();
1294 }
1295
1296 return received;
1297}
1298
1299static int virtnet_open(struct net_device *dev)
1300{
1301 struct virtnet_info *vi = netdev_priv(dev);
1302 int i, err;
1303
1304 for (i = 0; i < vi->max_queue_pairs; i++) {
1305 if (i < vi->curr_queue_pairs)
1306 /* Make sure we have some buffers: if oom use wq. */
1307 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
1308 schedule_delayed_work(&vi->refill, 0);
1309
1310 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
1311 if (err < 0)
1312 return err;
1313
1314 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
1315 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
1316 }
1317
1318 return 0;
1319}
1320
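/* Tx NAPI poll handler: reclaim completed transmit buffers under the
 * tx queue lock, complete NAPI without consuming budget, and wake the
 * queue when enough descriptors are free.
 */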
1321static int virtnet_poll_tx(struct napi_struct *napi, int budget)
1322{
1323 struct send_queue *sq = container_of(napi, struct send_queue, napi);
1324 struct virtnet_info *vi = sq->vq->vdev->priv;
1325 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
1326
1327 __netif_tx_lock(txq, raw_smp_processor_id());
1328 free_old_xmit_skbs(sq);
1329 __netif_tx_unlock(txq);
1330
1331 virtqueue_napi_complete(napi, sq->vq, 0);
1332
1333 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
1334 netif_tx_wake_queue(txq);
1335
1336 return 0;
1337}
1338
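/* Build the virtio-net header for this skb - pushed into the skb
 * headroom when the device accepts any header layout, otherwise kept
 * in a separate sg entry - and add the frame to the send virtqueue.
 */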
1339static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
1340{
1341 struct virtio_net_hdr_mrg_rxbuf *hdr;
1342 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
1343 struct virtnet_info *vi = sq->vq->vdev->priv;
1344 int num_sg;
1345 unsigned hdr_len = vi->hdr_len;
1346 bool can_push;
1347
1348 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
1349
1350 can_push = vi->any_header_sg &&
1351 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
1352 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
1353 /* Even if we can, don't push here yet as this would skew
1354 * csum_start offset below. */
1355 if (can_push)
1356 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
1357 else
1358 hdr = skb_vnet_hdr(skb);
1359
1360 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
1361 virtio_is_little_endian(vi->vdev), false))
1362 BUG();
1363
1364 if (vi->mergeable_rx_bufs)
1365 hdr->num_buffers = 0;
1366
1367 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
1368 if (can_push) {
1369 __skb_push(skb, hdr_len);
1370 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
1371 if (unlikely(num_sg < 0))
1372 return num_sg;
1373 /* Pull header back to avoid skew in tx bytes calculations. */
1374 __skb_pull(skb, hdr_len);
1375 } else {
1376 sg_set_buf(sq->sg, hdr, hdr_len);
1377 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
1378 if (unlikely(num_sg < 0))
1379 return num_sg;
1380 num_sg++;
1381 }
1382 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
1383}
1384
1385static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1386{
1387 struct virtnet_info *vi = netdev_priv(dev);
1388 int qnum = skb_get_queue_mapping(skb);
1389 struct send_queue *sq = &vi->sq[qnum];
1390 int err;
1391 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1392 bool kick = !skb->xmit_more;
1393 bool use_napi = sq->napi.weight;
1394
1395 /* Free up any pending old buffers before queueing new ones. */
1396 free_old_xmit_skbs(sq);
1397
1398 if (use_napi && kick)
1399 virtqueue_enable_cb_delayed(sq->vq);
1400
1401 /* timestamp packet in software */
1402 skb_tx_timestamp(skb);
1403
1404 /* Try to transmit */
1405 err = xmit_skb(sq, skb);
1406
1407 /* This should not happen! */
1408 if (unlikely(err)) {
1409 dev->stats.tx_fifo_errors++;
1410 if (net_ratelimit())
1411 dev_warn(&dev->dev,
1412 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
1413 dev->stats.tx_dropped++;
1414 dev_kfree_skb_any(skb);
1415 return NETDEV_TX_OK;
1416 }
1417
1418 /* Don't wait up for transmitted skbs to be freed. */
1419 if (!use_napi) {
1420 skb_orphan(skb);
1421 nf_reset(skb);
1422 }
1423
1424 /* If running out of space, stop queue to avoid getting packets that we
1425 * are then unable to transmit.
1426 * An alternative would be to force queuing layer to requeue the skb by
1427 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
1428 * returned in a normal path of operation: it means that driver is not
1429 * maintaining the TX queue stop/start state properly, and causes
1430 * the stack to do a non-trivial amount of useless work.
1431 * Since most packets only take 1 or 2 ring slots, stopping the queue
1432 * early means 16 slots are typically wasted.
1433 */
1434 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
1435 netif_stop_subqueue(dev, qnum);
1436 if (!use_napi &&
1437 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1438 /* More just got used, free them then recheck. */
1439 free_old_xmit_skbs(sq);
1440 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1441 netif_start_subqueue(dev, qnum);
1442 virtqueue_disable_cb(sq->vq);
1443 }
1444 }
1445 }
1446
1447 if (kick || netif_xmit_stopped(txq))
1448 virtqueue_kick(sq->vq);
1449
1450 return NETDEV_TX_OK;
1451}
1452
1453/*
1454 * Send command via the control virtqueue and check status. Commands
1455 * supported by the hypervisor, as indicated by feature bits, should
1456 * never fail unless improperly formatted.
1457 */
1458static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1459 struct scatterlist *out)
1460{
1461 struct scatterlist *sgs[4], hdr, stat;
1462 unsigned out_num = 0, tmp;
1463
1464 /* Caller should know better */
1465 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
1466
1467 vi->ctrl->status = ~0;
1468 vi->ctrl->hdr.class = class;
1469 vi->ctrl->hdr.cmd = cmd;
1470 /* Add header */
1471 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
1472 sgs[out_num++] = &hdr;
1473
1474 if (out)
1475 sgs[out_num++] = out;
1476
1477 /* Add return status. */
1478 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
1479 sgs[out_num] = &stat;
1480
1481 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1482 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1483
1484 if (unlikely(!virtqueue_kick(vi->cvq)))
1485 return vi->ctrl->status == VIRTIO_NET_OK;
1486
1487 /* Spin for a response, the kick causes an ioport write, trapping
1488 * into the hypervisor, so the request should be handled immediately.
1489 */
1490 while (!virtqueue_get_buf(vi->cvq, &tmp) &&
1491 !virtqueue_is_broken(vi->cvq))
1492 cpu_relax();
1493
1494 return vi->ctrl->status == VIRTIO_NET_OK;
1495}
1496
1497static int virtnet_set_mac_address(struct net_device *dev, void *p)
1498{
1499 struct virtnet_info *vi = netdev_priv(dev);
1500 struct virtio_device *vdev = vi->vdev;
1501 int ret;
1502 struct sockaddr *addr;
1503 struct scatterlist sg;
1504
1505 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
1506 if (!addr)
1507 return -ENOMEM;
1508
1509 ret = eth_prepare_mac_addr_change(dev, addr);
1510 if (ret)
1511 goto out;
1512
1513 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1514 sg_init_one(&sg, addr->sa_data, dev->addr_len);
1515 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1516 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
1517 dev_warn(&vdev->dev,
1518 "Failed to set mac address by vq command.\n");
1519 ret = -EINVAL;
1520 goto out;
1521 }
1522 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
1523 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1524 unsigned int i;
1525
1526 /* Naturally, this has an atomicity problem. */
1527 for (i = 0; i < dev->addr_len; i++)
1528 virtio_cwrite8(vdev,
1529 offsetof(struct virtio_net_config, mac) +
1530 i, addr->sa_data[i]);
1531 }
1532
1533 eth_commit_mac_addr_change(dev, p);
1534 ret = 0;
1535
1536out:
1537 kfree(addr);
1538 return ret;
1539}
1540
1541static void virtnet_stats(struct net_device *dev,
1542 struct rtnl_link_stats64 *tot)
1543{
1544 struct virtnet_info *vi = netdev_priv(dev);
1545 unsigned int start;
1546 int i;
1547
1548 for (i = 0; i < vi->max_queue_pairs; i++) {
1549 u64 tpackets, tbytes, rpackets, rbytes;
1550 struct receive_queue *rq = &vi->rq[i];
1551 struct send_queue *sq = &vi->sq[i];
1552
1553 do {
1554 start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
1555 tpackets = sq->stats.packets;
1556 tbytes = sq->stats.bytes;
1557 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
1558
1559 do {
1560 start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
1561 rpackets = rq->stats.packets;
1562 rbytes = rq->stats.bytes;
1563 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
1564
1565 tot->rx_packets += rpackets;
1566 tot->tx_packets += tpackets;
1567 tot->rx_bytes += rbytes;
1568 tot->tx_bytes += tbytes;
1569 }
1570
1571 tot->tx_dropped = dev->stats.tx_dropped;
1572 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
1573 tot->rx_dropped = dev->stats.rx_dropped;
1574 tot->rx_length_errors = dev->stats.rx_length_errors;
1575 tot->rx_frame_errors = dev->stats.rx_frame_errors;
1576}
1577
1578#ifdef CONFIG_NET_POLL_CONTROLLER
1579static void virtnet_netpoll(struct net_device *dev)
1580{
1581 struct virtnet_info *vi = netdev_priv(dev);
1582 int i;
1583
1584 for (i = 0; i < vi->curr_queue_pairs; i++)
1585 napi_schedule(&vi->rq[i].napi);
1586}
1587#endif
1588
1589static void virtnet_ack_link_announce(struct virtnet_info *vi)
1590{
1591 rtnl_lock();
1592 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
1593 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
1594 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1595 rtnl_unlock();
1596}
1597
1598static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1599{
1600 struct scatterlist sg;
1601 struct net_device *dev = vi->dev;
1602
1603 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1604 return 0;
1605
1606 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1607 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
1608
1609 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1610 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
1611 dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
1612 queue_pairs);
1613 return -EINVAL;
1614 } else {
1615 vi->curr_queue_pairs = queue_pairs;
1616 /* virtnet_open() will refill when the device is brought up. */
1617 if (dev->flags & IFF_UP)
1618 schedule_delayed_work(&vi->refill, 0);
1619 }
1620
1621 return 0;
1622}
1623
1624static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1625{
1626 int err;
1627
1628 rtnl_lock();
1629 err = _virtnet_set_queues(vi, queue_pairs);
1630 rtnl_unlock();
1631 return err;
1632}
1633
1634static int virtnet_close(struct net_device *dev)
1635{
1636 struct virtnet_info *vi = netdev_priv(dev);
1637 int i;
1638
1639 /* Make sure refill_work doesn't re-enable napi! */
1640 cancel_delayed_work_sync(&vi->refill);
1641
1642 for (i = 0; i < vi->max_queue_pairs; i++) {
1643 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
1644 napi_disable(&vi->rq[i].napi);
1645 virtnet_napi_tx_disable(&vi->sq[i].napi);
1646 }
1647
1648 return 0;
1649}
1650
1651static void virtnet_set_rx_mode(struct net_device *dev)
1652{
1653 struct virtnet_info *vi = netdev_priv(dev);
1654 struct scatterlist sg[2];
1655 struct virtio_net_ctrl_mac *mac_data;
1656 struct netdev_hw_addr *ha;
1657 int uc_count;
1658 int mc_count;
1659 void *buf;
1660 int i;
1661
1662 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
1663 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1664 return;
1665
1666 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
1667 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1668
1669 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
1670
1671 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1672 VIRTIO_NET_CTRL_RX_PROMISC, sg))
1673 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1674 vi->ctrl->promisc ? "en" : "dis");
1675
1676 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
1677
1678 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1679 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1680 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1681 vi->ctrl->allmulti ? "en" : "dis");
1682
1683 uc_count = netdev_uc_count(dev);
1684 mc_count = netdev_mc_count(dev);
1685 /* MAC filter - use one buffer for both lists */
1686 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
1687 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
1688 mac_data = buf;
1689 if (!buf)
1690 return;
1691
1692 sg_init_table(sg, 2);
1693
1694 /* Store the unicast list and count in the front of the buffer */
1695 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
1696 i = 0;
1697 netdev_for_each_uc_addr(ha, dev)
1698 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1699
1700 sg_set_buf(&sg[0], mac_data,
1701 sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
1702
1703 /* multicast list and count fill the end */
1704 mac_data = (void *)&mac_data->macs[uc_count][0];
1705
1706 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
1707 i = 0;
1708 netdev_for_each_mc_addr(ha, dev)
1709 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1710
1711 sg_set_buf(&sg[1], mac_data,
1712 sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
1713
1714 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1715 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
1716 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
1717
1718 kfree(buf);
1719}
1720
1721static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1722 __be16 proto, u16 vid)
1723{
1724 struct virtnet_info *vi = netdev_priv(dev);
1725 struct scatterlist sg;
1726
1727 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
1728 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
1729
1730 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1731 VIRTIO_NET_CTRL_VLAN_ADD, &sg))
1732 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
1733 return 0;
1734}
1735
1736static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1737 __be16 proto, u16 vid)
1738{
1739 struct virtnet_info *vi = netdev_priv(dev);
1740 struct scatterlist sg;
1741
1742 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
1743 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
1744
1745 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1746 VIRTIO_NET_CTRL_VLAN_DEL, &sg))
1747 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
1748 return 0;
1749}
1750
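/* Clear any per-virtqueue CPU affinity hints previously set by
 * virtnet_set_affinity().
 */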
1751static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
1752{
1753 int i;
1754
1755 if (vi->affinity_hint_set) {
1756 for (i = 0; i < vi->max_queue_pairs; i++) {
1757 virtqueue_set_affinity(vi->rq[i].vq, -1);
1758 virtqueue_set_affinity(vi->sq[i].vq, -1);
1759 }
1760
1761 vi->affinity_hint_set = false;
1762 }
1763}
1764
1765static void virtnet_set_affinity(struct virtnet_info *vi)
1766{
1767 int i;
1768 int cpu;
1769
1770 /* In multiqueue mode, when the number of CPUs is equal to the number of
1771 * queue pairs, we let each queue pair be private to one CPU by
1772 * setting the affinity hint to eliminate the contention.
1773 */
1774 if (vi->curr_queue_pairs == 1 ||
1775 vi->max_queue_pairs != num_online_cpus()) {
1776 virtnet_clean_affinity(vi, -1);
1777 return;
1778 }
1779
1780 i = 0;
1781 for_each_online_cpu(cpu) {
1782 virtqueue_set_affinity(vi->rq[i].vq, cpu);
1783 virtqueue_set_affinity(vi->sq[i].vq, cpu);
1784 netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
1785 i++;
1786 }
1787
1788 vi->affinity_hint_set = true;
1789}
1790
1791static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
1792{
1793 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1794 node);
1795 virtnet_set_affinity(vi);
1796 return 0;
1797}
1798
1799static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
1800{
1801 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1802 node_dead);
1803 virtnet_set_affinity(vi);
1804 return 0;
1805}
1806
1807static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
1808{
1809 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1810 node);
1811
1812 virtnet_clean_affinity(vi, cpu);
1813 return 0;
1814}
1815
1816static enum cpuhp_state virtionet_online;
1817
1818static int virtnet_cpu_notif_add(struct virtnet_info *vi)
1819{
1820 int ret;
1821
1822 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
1823 if (ret)
1824 return ret;
1825 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1826 &vi->node_dead);
1827 if (!ret)
1828 return ret;
1829 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1830 return ret;
1831}
1832
1833static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
1834{
1835 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1836 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1837 &vi->node_dead);
1838}
1839
1840static void virtnet_get_ringparam(struct net_device *dev,
1841 struct ethtool_ringparam *ring)
1842{
1843 struct virtnet_info *vi = netdev_priv(dev);
1844
1845 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
1846 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
1847 ring->rx_pending = ring->rx_max_pending;
1848 ring->tx_pending = ring->tx_max_pending;
1849}
1850
1851
1852static void virtnet_get_drvinfo(struct net_device *dev,
1853 struct ethtool_drvinfo *info)
1854{
1855 struct virtnet_info *vi = netdev_priv(dev);
1856 struct virtio_device *vdev = vi->vdev;
1857
1858 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1859 strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
1860 strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
1861
1862}
1863
1864/* TODO: Eliminate OOO packets during switching */
1865static int virtnet_set_channels(struct net_device *dev,
1866 struct ethtool_channels *channels)
1867{
1868 struct virtnet_info *vi = netdev_priv(dev);
1869 u16 queue_pairs = channels->combined_count;
1870 int err;
1871
1872 /* We don't support separate rx/tx channels.
1873 * We don't allow setting 'other' channels.
1874 */
1875 if (channels->rx_count || channels->tx_count || channels->other_count)
1876 return -EINVAL;
1877
1878 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
1879 return -EINVAL;
1880
1881 /* For now we don't support modifying channels while XDP is loaded.
1882 * Also, when XDP is loaded all RX queues have XDP programs so we only
1883 * need to check a single RX queue.
1884 */
1885 if (vi->rq[0].xdp_prog)
1886 return -EINVAL;
1887
1888 get_online_cpus();
1889 err = _virtnet_set_queues(vi, queue_pairs);
1890 if (!err) {
1891 netif_set_real_num_tx_queues(dev, queue_pairs);
1892 netif_set_real_num_rx_queues(dev, queue_pairs);
1893
1894 virtnet_set_affinity(vi);
1895 }
1896 put_online_cpus();
1897
1898 return err;
1899}
1900
1901static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1902{
1903 struct virtnet_info *vi = netdev_priv(dev);
1904 char *p = (char *)data;
1905 unsigned int i, j;
1906
1907 switch (stringset) {
1908 case ETH_SS_STATS:
1909 for (i = 0; i < vi->curr_queue_pairs; i++) {
1910 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
1911 snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
1912 i, virtnet_rq_stats_desc[j].desc);
1913 p += ETH_GSTRING_LEN;
1914 }
1915 }
1916
1917 for (i = 0; i < vi->curr_queue_pairs; i++) {
1918 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
1919 snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s",
1920 i, virtnet_sq_stats_desc[j].desc);
1921 p += ETH_GSTRING_LEN;
1922 }
1923 }
1924 break;
1925 }
1926}
1927
1928static int virtnet_get_sset_count(struct net_device *dev, int sset)
1929{
1930 struct virtnet_info *vi = netdev_priv(dev);
1931
1932 switch (sset) {
1933 case ETH_SS_STATS:
1934 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
1935 VIRTNET_SQ_STATS_LEN);
1936 default:
1937 return -EOPNOTSUPP;
1938 }
1939}
1940
1941static void virtnet_get_ethtool_stats(struct net_device *dev,
1942 struct ethtool_stats *stats, u64 *data)
1943{
1944 struct virtnet_info *vi = netdev_priv(dev);
1945 unsigned int idx = 0, start, i, j;
1946 const u8 *stats_base;
1947 size_t offset;
1948
1949 for (i = 0; i < vi->curr_queue_pairs; i++) {
1950 struct receive_queue *rq = &vi->rq[i];
1951
1952 stats_base = (u8 *)&rq->stats;
1953 do {
1954 start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
1955 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
1956 offset = virtnet_rq_stats_desc[j].offset;
1957 data[idx + j] = *(u64 *)(stats_base + offset);
1958 }
1959 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
1960 idx += VIRTNET_RQ_STATS_LEN;
1961 }
1962
1963 for (i = 0; i < vi->curr_queue_pairs; i++) {
1964 struct send_queue *sq = &vi->sq[i];
1965
1966 stats_base = (u8 *)&sq->stats;
1967 do {
1968 start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
1969 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
1970 offset = virtnet_sq_stats_desc[j].offset;
1971 data[idx + j] = *(u64 *)(stats_base + offset);
1972 }
1973 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
1974 idx += VIRTNET_SQ_STATS_LEN;
1975 }
1976}
1977
1978static void virtnet_get_channels(struct net_device *dev,
1979 struct ethtool_channels *channels)
1980{
1981 struct virtnet_info *vi = netdev_priv(dev);
1982
1983 channels->combined_count = vi->curr_queue_pairs;
1984 channels->max_combined = vi->max_queue_pairs;
1985 channels->max_other = 0;
1986 channels->rx_count = 0;
1987 channels->tx_count = 0;
1988 channels->other_count = 0;
1989}
1990
1991/* Check if the user is trying to change anything besides speed/duplex */
1992static bool
1993virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd)
1994{
1995 struct ethtool_link_ksettings diff1 = *cmd;
1996 struct ethtool_link_ksettings diff2 = {};
1997
1998 /* cmd is always set, so we need to clear it; validate the port type,
1999 * and since there is no autonegotiation we can ignore advertising.
2000 */
2001 diff1.base.speed = 0;
2002 diff2.base.port = PORT_OTHER;
2003 ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
2004 diff1.base.duplex = 0;
2005 diff1.base.cmd = 0;
2006 diff1.base.link_mode_masks_nwords = 0;
2007
2008 return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) &&
2009 bitmap_empty(diff1.link_modes.supported,
2010 __ETHTOOL_LINK_MODE_MASK_NBITS) &&
2011 bitmap_empty(diff1.link_modes.advertising,
2012 __ETHTOOL_LINK_MODE_MASK_NBITS) &&
2013 bitmap_empty(diff1.link_modes.lp_advertising,
2014 __ETHTOOL_LINK_MODE_MASK_NBITS);
2015}
2016
2017static int virtnet_set_link_ksettings(struct net_device *dev,
2018 const struct ethtool_link_ksettings *cmd)
2019{
2020 struct virtnet_info *vi = netdev_priv(dev);
2021 u32 speed;
2022
2023 speed = cmd->base.speed;
2024 /* don't allow custom speed and duplex */
2025 if (!ethtool_validate_speed(speed) ||
2026 !ethtool_validate_duplex(cmd->base.duplex) ||
2027 !virtnet_validate_ethtool_cmd(cmd))
2028 return -EINVAL;
2029 vi->speed = speed;
2030 vi->duplex = cmd->base.duplex;
2031
2032 return 0;
2033}
2034
2035static int virtnet_get_link_ksettings(struct net_device *dev,
2036 struct ethtool_link_ksettings *cmd)
2037{
2038 struct virtnet_info *vi = netdev_priv(dev);
2039
2040 cmd->base.speed = vi->speed;
2041 cmd->base.duplex = vi->duplex;
2042 cmd->base.port = PORT_OTHER;
2043
2044 return 0;
2045}
2046
2047static void virtnet_init_settings(struct net_device *dev)
2048{
2049 struct virtnet_info *vi = netdev_priv(dev);
2050
2051 vi->speed = SPEED_UNKNOWN;
2052 vi->duplex = DUPLEX_UNKNOWN;
2053}
2054
2055static void virtnet_update_settings(struct virtnet_info *vi)
2056{
2057 u32 speed;
2058 u8 duplex;
2059
2060 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
2061 return;
2062
2063 speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
2064 speed));
2065 if (ethtool_validate_speed(speed))
2066 vi->speed = speed;
2067 duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
2068 duplex));
2069 if (ethtool_validate_duplex(duplex))
2070 vi->duplex = duplex;
2071}
2072
2073static const struct ethtool_ops virtnet_ethtool_ops = {
2074 .get_drvinfo = virtnet_get_drvinfo,
2075 .get_link = ethtool_op_get_link,
2076 .get_ringparam = virtnet_get_ringparam,
2077 .get_strings = virtnet_get_strings,
2078 .get_sset_count = virtnet_get_sset_count,
2079 .get_ethtool_stats = virtnet_get_ethtool_stats,
2080 .set_channels = virtnet_set_channels,
2081 .get_channels = virtnet_get_channels,
2082 .get_ts_info = ethtool_op_get_ts_info,
2083 .get_link_ksettings = virtnet_get_link_ksettings,
2084 .set_link_ksettings = virtnet_set_link_ksettings,
2085};
2086
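/* Quiesce the device for freeze/restore: flush config work, detach the
 * netdev, stop tx and refill work, and disable NAPI on every queue.
 */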
2087static void virtnet_freeze_down(struct virtio_device *vdev)
2088{
2089 struct virtnet_info *vi = vdev->priv;
2090 int i;
2091
2092 /* Make sure no work handler is accessing the device */
2093 flush_work(&vi->config_work);
2094
2095 netif_device_detach(vi->dev);
2096 netif_tx_disable(vi->dev);
2097 cancel_delayed_work_sync(&vi->refill);
2098
2099 if (netif_running(vi->dev)) {
2100 for (i = 0; i < vi->max_queue_pairs; i++) {
2101 napi_disable(&vi->rq[i].napi);
2102 virtnet_napi_tx_disable(&vi->sq[i].napi);
2103 }
2104 }
2105}
2106
2107static int init_vqs(struct virtnet_info *vi);
2108
2109static int virtnet_restore_up(struct virtio_device *vdev)
2110{
2111 struct virtnet_info *vi = vdev->priv;
2112 int err, i;
2113
2114 err = init_vqs(vi);
2115 if (err)
2116 return err;
2117
2118 virtio_device_ready(vdev);
2119
2120 if (netif_running(vi->dev)) {
2121 for (i = 0; i < vi->curr_queue_pairs; i++)
2122 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2123 schedule_delayed_work(&vi->refill, 0);
2124
2125 for (i = 0; i < vi->max_queue_pairs; i++) {
2126 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2127 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2128 &vi->sq[i].napi);
2129 }
2130 }
2131
2132 netif_device_attach(vi->dev);
2133 return err;
2134}
2135
2136static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
2137{
2138 struct scatterlist sg;
2139 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
2140
2141 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
2142
2143 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
2144 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
2145 dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
2146 return -EINVAL;
2147 }
2148
2149 return 0;
2150}
2151
2152static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
2153{
2154 u64 offloads = 0;
2155
2156 if (!vi->guest_offloads)
2157 return 0;
2158
2159 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
2160 offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;
2161
2162 return virtnet_set_guest_offloads(vi, offloads);
2163}
2164
2165static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
2166{
2167 u64 offloads = vi->guest_offloads;
2168
2169 if (!vi->guest_offloads)
2170 return 0;
2171 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
2172 offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;
2173
2174 return virtnet_set_guest_offloads(vi, offloads);
2175}
2176
2177static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2178 struct netlink_ext_ack *extack)
2179{
2180 unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
2181 struct virtnet_info *vi = netdev_priv(dev);
2182 struct bpf_prog *old_prog;
2183 u16 xdp_qp = 0, curr_qp;
2184 int i, err;
2185
2186 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
2187 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2188 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2189 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
2190 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) {
2191 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
2192 return -EOPNOTSUPP;
2193 }
2194
2195 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
2196 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
2197 return -EINVAL;
2198 }
2199
2200 if (dev->mtu > max_sz) {
2201 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
2202 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
2203 return -EINVAL;
2204 }
2205
2206 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
2207 if (prog)
2208 xdp_qp = nr_cpu_ids;
2209
2210 /* XDP requires extra queues for XDP_TX */
2211 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
2212 NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
2213 netdev_warn(dev, "request %i queues but max is %i\n",
2214 curr_qp + xdp_qp, vi->max_queue_pairs);
2215 return -ENOMEM;
2216 }
2217
2218 if (prog) {
2219 prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
2220 if (IS_ERR(prog))
2221 return PTR_ERR(prog);
2222 }
2223
2224 /* Make sure NAPI is not using any XDP TX queues for RX. */
2225 if (netif_running(dev))
2226 for (i = 0; i < vi->max_queue_pairs; i++)
2227 napi_disable(&vi->rq[i].napi);
2228
2229 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2230 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2231 if (err)
2232 goto err;
2233 vi->xdp_queue_pairs = xdp_qp;
2234
2235 for (i = 0; i < vi->max_queue_pairs; i++) {
2236 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2237 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2238 if (i == 0) {
2239 if (!old_prog)
2240 virtnet_clear_guest_offloads(vi);
2241 if (!prog)
2242 virtnet_restore_guest_offloads(vi);
2243 }
2244 if (old_prog)
2245 bpf_prog_put(old_prog);
2246 if (netif_running(dev))
2247 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2248 }
2249
2250 return 0;
2251
2252err:
2253 for (i = 0; i < vi->max_queue_pairs; i++)
2254 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2255 if (prog)
2256 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
2257 return err;
2258}
2259
2260static u32 virtnet_xdp_query(struct net_device *dev)
2261{
2262 struct virtnet_info *vi = netdev_priv(dev);
2263 const struct bpf_prog *xdp_prog;
2264 int i;
2265
2266 for (i = 0; i < vi->max_queue_pairs; i++) {
2267 xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2268 if (xdp_prog)
2269 return xdp_prog->aux->id;
2270 }
2271 return 0;
2272}
2273
2274static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2275{
2276 switch (xdp->command) {
2277 case XDP_SETUP_PROG:
2278 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
2279 case XDP_QUERY_PROG:
2280 xdp->prog_id = virtnet_xdp_query(dev);
2281 xdp->prog_attached = !!xdp->prog_id;
2282 return 0;
2283 default:
2284 return -EINVAL;
2285 }
2286}
2287
2288static const struct net_device_ops virtnet_netdev = {
2289 .ndo_open = virtnet_open,
2290 .ndo_stop = virtnet_close,
2291 .ndo_start_xmit = start_xmit,
2292 .ndo_validate_addr = eth_validate_addr,
2293 .ndo_set_mac_address = virtnet_set_mac_address,
2294 .ndo_set_rx_mode = virtnet_set_rx_mode,
2295 .ndo_get_stats64 = virtnet_stats,
2296 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
2297 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
2298#ifdef CONFIG_NET_POLL_CONTROLLER
2299 .ndo_poll_controller = virtnet_netpoll,
2300#endif
2301 .ndo_bpf = virtnet_xdp,
2302 .ndo_xdp_xmit = virtnet_xdp_xmit,
2303 .ndo_xdp_flush = virtnet_xdp_flush,
2304 .ndo_features_check = passthru_features_check,
2305};
2306
2307static void virtnet_config_changed_work(struct work_struct *work)
2308{
2309 struct virtnet_info *vi =
2310 container_of(work, struct virtnet_info, config_work);
2311 u16 v;
2312
2313 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
2314 struct virtio_net_config, status, &v) < 0)
2315 return;
2316
2317 if (v & VIRTIO_NET_S_ANNOUNCE) {
2318 netdev_notify_peers(vi->dev);
2319 virtnet_ack_link_announce(vi);
2320 }
2321
2322 /* Ignore unknown (future) status bits */
2323 v &= VIRTIO_NET_S_LINK_UP;
2324
2325 if (vi->status == v)
2326 return;
2327
2328 vi->status = v;
2329
2330 if (vi->status & VIRTIO_NET_S_LINK_UP) {
2331 virtnet_update_settings(vi);
2332 netif_carrier_on(vi->dev);
2333 netif_tx_wake_all_queues(vi->dev);
2334 } else {
2335 netif_carrier_off(vi->dev);
2336 netif_tx_stop_all_queues(vi->dev);
2337 }
2338}
2339
2340static void virtnet_config_changed(struct virtio_device *vdev)
2341{
2342 struct virtnet_info *vi = vdev->priv;
2343
2344 schedule_work(&vi->config_work);
2345}
2346
2347static void virtnet_free_queues(struct virtnet_info *vi)
2348{
2349 int i;
2350
2351 for (i = 0; i < vi->max_queue_pairs; i++) {
2352 napi_hash_del(&vi->rq[i].napi);
2353 netif_napi_del(&vi->rq[i].napi);
2354 netif_napi_del(&vi->sq[i].napi);
2355 }
2356
2357 /* We called napi_hash_del() before netif_napi_del(), so
2358 * we need to respect an RCU grace period before freeing vi->rq.
2359 */
2360 synchronize_net();
2361
2362 kfree(vi->rq);
2363 kfree(vi->sq);
2364 kfree(vi->ctrl);
2365}
2366
2367static void _free_receive_bufs(struct virtnet_info *vi)
2368{
2369 struct bpf_prog *old_prog;
2370 int i;
2371
2372 for (i = 0; i < vi->max_queue_pairs; i++) {
2373 while (vi->rq[i].pages)
2374 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
2375
2376 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2377 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
2378 if (old_prog)
2379 bpf_prog_put(old_prog);
2380 }
2381}
2382
2383static void free_receive_bufs(struct virtnet_info *vi)
2384{
2385 rtnl_lock();
2386 _free_receive_bufs(vi);
2387 rtnl_unlock();
2388}
2389
2390static void free_receive_page_frags(struct virtnet_info *vi)
2391{
2392 int i;
2393 for (i = 0; i < vi->max_queue_pairs; i++)
2394 if (vi->rq[i].alloc_frag.page)
2395 put_page(vi->rq[i].alloc_frag.page);
2396}
2397
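/* Queues in [curr_queue_pairs - xdp_queue_pairs, curr_queue_pairs) are
 * dedicated to XDP_TX; their tx buffers are raw pages rather than skbs.
 */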
2398static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
2399{
2400 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
2401 return false;
2402 else if (q < vi->curr_queue_pairs)
2403 return true;
2404 else
2405 return false;
2406}
2407
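/* Return any buffers still queued to the device: skbs (or XDP pages)
 * on the tx side, page fragments or page chains on the rx side
 * depending on the receive mode.
 */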
2408static void free_unused_bufs(struct virtnet_info *vi)
2409{
2410 void *buf;
2411 int i;
2412
2413 for (i = 0; i < vi->max_queue_pairs; i++) {
2414 struct virtqueue *vq = vi->sq[i].vq;
2415 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2416 if (!is_xdp_raw_buffer_queue(vi, i))
2417 dev_kfree_skb(buf);
2418 else
2419 put_page(virt_to_head_page(buf));
2420 }
2421 }
2422
2423 for (i = 0; i < vi->max_queue_pairs; i++) {
2424 struct virtqueue *vq = vi->rq[i].vq;
2425
2426 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2427 if (vi->mergeable_rx_bufs) {
2428 put_page(virt_to_head_page(buf));
2429 } else if (vi->big_packets) {
2430 give_pages(&vi->rq[i], buf);
2431 } else {
2432 put_page(virt_to_head_page(buf));
2433 }
2434 }
2435 }
2436}
2437
2438static void virtnet_del_vqs(struct virtnet_info *vi)
2439{
2440 struct virtio_device *vdev = vi->vdev;
2441
2442 virtnet_clean_affinity(vi, -1);
2443
2444 vdev->config->del_vqs(vdev);
2445
2446 virtnet_free_queues(vi);
2447}
2448
2449/* How large should a single buffer be so a queue full of these can fit at
2450 * least one full packet?
2451 * Logic below assumes the mergeable buffer header is used.
2452 */
2453static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
2454{
2455 const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2456 unsigned int rq_size = virtqueue_get_vring_size(vq);
2457 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
2458 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
2459 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
2460
2461 return max(max(min_buf_len, hdr_len) - hdr_len,
2462 (unsigned int)GOOD_PACKET_LEN);
2463}
2464
2465static int virtnet_find_vqs(struct virtnet_info *vi)
2466{
2467 vq_callback_t **callbacks;
2468 struct virtqueue **vqs;
2469 int ret = -ENOMEM;
2470 int i, total_vqs;
2471 const char **names;
2472 bool *ctx;
2473
2474 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
2475 * possibly N-1 RX/TX queue pairs used in multiqueue mode, followed by
2476 * a possible control vq.
2477 */
2478 total_vqs = vi->max_queue_pairs * 2 +
2479 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
2480
2481 /* Allocate space for find_vqs parameters */
2482 vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
2483 if (!vqs)
2484 goto err_vq;
2485 callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
2486 if (!callbacks)
2487 goto err_callback;
2488 names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
2489 if (!names)
2490 goto err_names;
2491 if (!vi->big_packets || vi->mergeable_rx_bufs) {
2492 ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL);
2493 if (!ctx)
2494 goto err_ctx;
2495 } else {
2496 ctx = NULL;
2497 }
2498
2499 /* Parameters for control virtqueue, if any */
2500 if (vi->has_cvq) {
2501 callbacks[total_vqs - 1] = NULL;
2502 names[total_vqs - 1] = "control";
2503 }
2504
2505 /* Allocate/initialize parameters for send/receive virtqueues */
2506 for (i = 0; i < vi->max_queue_pairs; i++) {
2507 callbacks[rxq2vq(i)] = skb_recv_done;
2508 callbacks[txq2vq(i)] = skb_xmit_done;
2509 sprintf(vi->rq[i].name, "input.%d", i);
2510 sprintf(vi->sq[i].name, "output.%d", i);
2511 names[rxq2vq(i)] = vi->rq[i].name;
2512 names[txq2vq(i)] = vi->sq[i].name;
2513 if (ctx)
2514 ctx[rxq2vq(i)] = true;
2515 }
2516
2517 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
2518 names, ctx, NULL);
2519 if (ret)
2520 goto err_find;
2521
2522 if (vi->has_cvq) {
2523 vi->cvq = vqs[total_vqs - 1];
2524 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
2525 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2526 }
2527
2528 for (i = 0; i < vi->max_queue_pairs; i++) {
2529 vi->rq[i].vq = vqs[rxq2vq(i)];
2530 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
2531 vi->sq[i].vq = vqs[txq2vq(i)];
2532 }
2533
2534 kfree(names);
2535 kfree(callbacks);
2536 kfree(vqs);
2537 kfree(ctx);
2538
2539 return 0;
2540
2541err_find:
2542 kfree(ctx);
2543err_ctx:
2544 kfree(names);
2545err_names:
2546 kfree(callbacks);
2547err_callback:
2548 kfree(vqs);
2549err_vq:
2550 return ret;
2551}
2552
2553static int virtnet_alloc_queues(struct virtnet_info *vi)
2554{
2555 int i;
2556
2557 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
2558 if (!vi->ctrl)
2559 goto err_ctrl;
2560 vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
2561 if (!vi->sq)
2562 goto err_sq;
2563 vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
2564 if (!vi->rq)
2565 goto err_rq;
2566
2567 INIT_DELAYED_WORK(&vi->refill, refill_work);
2568 for (i = 0; i < vi->max_queue_pairs; i++) {
2569 vi->rq[i].pages = NULL;
2570 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
2571 napi_weight);
2572 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
2573 napi_tx ? napi_weight : 0);
2574
2575 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
2576 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
2577 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
2578
2579 u64_stats_init(&vi->rq[i].stats.syncp);
2580 u64_stats_init(&vi->sq[i].stats.syncp);
2581 }
2582
2583 return 0;
2584
2585err_rq:
2586 kfree(vi->sq);
2587err_sq:
2588 kfree(vi->ctrl);
2589err_ctrl:
2590 return -ENOMEM;
2591}
2592
2593static int init_vqs(struct virtnet_info *vi)
2594{
2595 int ret;
2596
2597 /* Allocate send & receive queues */
2598 ret = virtnet_alloc_queues(vi);
2599 if (ret)
2600 goto err;
2601
2602 ret = virtnet_find_vqs(vi);
2603 if (ret)
2604 goto err_free;
2605
2606 get_online_cpus();
2607 virtnet_set_affinity(vi);
2608 put_online_cpus();
2609
2610 return 0;
2611
2612err_free:
2613 virtnet_free_queues(vi);
2614err:
2615 return ret;
2616}
2617
2618#ifdef CONFIG_SYSFS
2619static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
2620 char *buf)
2621{
2622 struct virtnet_info *vi = netdev_priv(queue->dev);
2623 unsigned int queue_index = get_netdev_rx_queue_index(queue);
2624 unsigned int headroom = virtnet_get_headroom(vi);
2625 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2626 struct ewma_pkt_len *avg;
2627
2628 BUG_ON(queue_index >= vi->max_queue_pairs);
2629 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
2630 return sprintf(buf, "%u\n",
2631 get_mergeable_buf_len(&vi->rq[queue_index], avg,
2632 SKB_DATA_ALIGN(headroom + tailroom)));
2633}
2634
2635static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
2636 __ATTR_RO(mergeable_rx_buffer_size);
2637
2638static struct attribute *virtio_net_mrg_rx_attrs[] = {
2639 &mergeable_rx_buffer_size_attribute.attr,
2640 NULL
2641};
2642
2643static const struct attribute_group virtio_net_mrg_rx_group = {
2644 .name = "virtio_net",
2645 .attrs = virtio_net_mrg_rx_attrs
2646};
2647#endif
2648
2649static bool virtnet_fail_on_feature(struct virtio_device *vdev,
2650 unsigned int fbit,
2651 const char *fname, const char *dname)
2652{
2653 if (!virtio_has_feature(vdev, fbit))
2654 return false;
2655
2656 dev_err(&vdev->dev, "device advertises feature %s but not %s",
2657 fname, dname);
2658
2659 return true;
2660}
2661
2662#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
2663 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
2664
2665static bool virtnet_validate_features(struct virtio_device *vdev)
2666{
2667 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
2668 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
2669 "VIRTIO_NET_F_CTRL_VQ") ||
2670 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
2671 "VIRTIO_NET_F_CTRL_VQ") ||
2672 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
2673 "VIRTIO_NET_F_CTRL_VQ") ||
2674 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
2675 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
2676 "VIRTIO_NET_F_CTRL_VQ"))) {
2677 return false;
2678 }
2679
2680 return true;
2681}
2682
2683#define MIN_MTU ETH_MIN_MTU
2684#define MAX_MTU ETH_MAX_MTU
2685
2686static int virtnet_validate(struct virtio_device *vdev)
2687{
2688 if (!vdev->config->get) {
2689 dev_err(&vdev->dev, "%s failure: config access disabled\n",
2690 __func__);
2691 return -EINVAL;
2692 }
2693
2694 if (!virtnet_validate_features(vdev))
2695 return -EINVAL;
2696
2697 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2698 int mtu = virtio_cread16(vdev,
2699 offsetof(struct virtio_net_config,
2700 mtu));
2701 if (mtu < MIN_MTU)
2702 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
2703 }
2704
2705 return 0;
2706}
2707
2708static int virtnet_probe(struct virtio_device *vdev)
2709{
2710 int i, err = -ENOMEM;
2711 struct net_device *dev;
2712 struct virtnet_info *vi;
2713 u16 max_queue_pairs;
2714 int mtu;
2715
2716 /* Find if host supports multiqueue virtio_net device */
2717 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
2718 struct virtio_net_config,
2719 max_virtqueue_pairs, &max_queue_pairs);
2720
2721 /* We need at least 2 queues */
	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		max_queue_pairs = 1;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;

		dev->features |= NETIF_F_GSO_ROBUST;

		if (gso)
			dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;

	dev->vlan_features = dev->features;

	/* MTU range: 68 - 65535 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU;

	/* Configuration may specify what MAC to use. Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   dev->dev_addr, dev->addr_len);
	else
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

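	/* Mergeable buffers and VIRTIO 1.0 devices use the larger header
	 * that carries num_buffers; legacy devices use the short header.
	 */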
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		vi->hdr_len = sizeof(struct virtio_net_hdr);

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
		mtu = virtio_cread16(vdev,
				     offsetof(struct virtio_net_config,
					      mtu));
		if (mtu < dev->min_mtu) {
			/* Should never trigger: MTU was previously validated
			 * in virtnet_validate.
			 */
			dev_err(&vdev->dev,
				"device MTU appears to have changed, it is now %d < %d\n",
				mtu, dev->min_mtu);
			goto free;
		}

		dev->mtu = mtu;
		dev->max_mtu = mtu;

		/* TODO: size buffers correctly in this case. */
		if (dev->mtu > ETH_DATA_LEN)
			vi->big_packets = true;
	}

	if (vi->any_header_sg)
		dev->needed_headroom = vi->hdr_len;

	/* Enable multiqueue by default */
	if (num_online_cpus() >= max_queue_pairs)
		vi->curr_queue_pairs = max_queue_pairs;
	else
		vi->curr_queue_pairs = num_online_cpus();
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	virtnet_init_settings(dev);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	virtio_device_ready(vdev);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_unregister_netdev;
	}

	virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* Assume link up if device can't report link status,
	 * otherwise get link status from config.
	 */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

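	/* Record which guest (receive) offloads were negotiated so they can
	 * be toggled at runtime through the control virtqueue.
	 */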
	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);

	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
	vi->vdev->config->reset(vdev);

	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}

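/* Common teardown for the remove and freeze paths: reset the device,
 * return any buffers still queued, and delete the virtqueues.
 */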
static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

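/* Device removal: detach the CPU hotplug notifier, make sure the config
 * change work is not running, unregister the netdev and free everything.
 */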
static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}

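/* Suspend/resume hooks (CONFIG_PM_SLEEP): freeze tears the virtqueues down,
 * restore sets them up again and re-registers the CPU hotplug notifier.
 */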
static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err)
		return err;

	return 0;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

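/* Legacy (pre VIRTIO 1.0) devices may additionally negotiate the deprecated
 * combined GSO bit and VIRTIO_F_ANY_LAYOUT.
 */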
static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtnet_validate,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

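/* Set up the CPU hotplug states used by the driver before registering the
 * virtio driver itself; unwind in reverse order on error.
 */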
static __init int virtio_net_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;

	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");