1// SPDX-License-Identifier: GPL-2.0-or-later
2/* A network driver using virtio.
3 *
4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
5 */
6//#define DEBUG
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
9#include <linux/ethtool.h>
10#include <linux/module.h>
11#include <linux/virtio.h>
12#include <linux/virtio_net.h>
13#include <linux/bpf.h>
14#include <linux/bpf_trace.h>
15#include <linux/scatterlist.h>
16#include <linux/if_vlan.h>
17#include <linux/slab.h>
18#include <linux/cpu.h>
19#include <linux/average.h>
20#include <linux/filter.h>
21#include <linux/kernel.h>
22#include <linux/dim.h>
23#include <net/route.h>
24#include <net/xdp.h>
25#include <net/net_failover.h>
26#include <net/netdev_rx_queue.h>
27
28static int napi_weight = NAPI_POLL_WEIGHT;
29module_param(napi_weight, int, 0444);
30
31static bool csum = true, gso = true, napi_tx = true;
32module_param(csum, bool, 0444);
33module_param(gso, bool, 0444);
34module_param(napi_tx, bool, 0644);
35
36/* FIXME: MTU in config. */
37#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
38#define GOOD_COPY_LEN 128
39
40#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
41
42/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
43#define VIRTIO_XDP_HEADROOM 256
44
45/* Separating two types of XDP xmit */
46#define VIRTIO_XDP_TX BIT(0)
47#define VIRTIO_XDP_REDIR BIT(1)
48
49#define VIRTIO_XDP_FLAG BIT(0)
50
51/* RX packet size EWMA. The average packet size is used to determine the packet
52 * buffer size when refilling RX rings. As the entire RX ring may be refilled
53 * at once, the weight is chosen so that the EWMA will be insensitive to short-
54 * term, transient changes in packet size.
55 */
56DECLARE_EWMA(pkt_len, 0, 64)
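/* Illustrative note (not part of the driver): with 0 fractional bits and a
 * reciprocal weight of 64, ewma_pkt_len_add() updates the average roughly as
 * avg = (63 * avg + sample) / 64, so a single outlier sample only moves the
 * refill-size estimate by about 1/64th of the difference. For example, an
 * average of 1500 and one 9000-byte packet give (63 * 1500 + 9000) / 64 ~= 1617.
 */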
57
58#define VIRTNET_DRIVER_VERSION "1.0.0"
59
60static const unsigned long guest_offloads[] = {
61 VIRTIO_NET_F_GUEST_TSO4,
62 VIRTIO_NET_F_GUEST_TSO6,
63 VIRTIO_NET_F_GUEST_ECN,
64 VIRTIO_NET_F_GUEST_UFO,
65 VIRTIO_NET_F_GUEST_CSUM,
66 VIRTIO_NET_F_GUEST_USO4,
67 VIRTIO_NET_F_GUEST_USO6,
68 VIRTIO_NET_F_GUEST_HDRLEN
69};
70
71#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
72 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
73 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
74 (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
75 (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
76 (1ULL << VIRTIO_NET_F_GUEST_USO6))
77
78struct virtnet_stat_desc {
79 char desc[ETH_GSTRING_LEN];
80 size_t offset;
81};
82
83struct virtnet_sq_stats {
84 struct u64_stats_sync syncp;
85 u64_stats_t packets;
86 u64_stats_t bytes;
87 u64_stats_t xdp_tx;
88 u64_stats_t xdp_tx_drops;
89 u64_stats_t kicks;
90 u64_stats_t tx_timeouts;
91};
92
93struct virtnet_rq_stats {
94 struct u64_stats_sync syncp;
95 u64_stats_t packets;
96 u64_stats_t bytes;
97 u64_stats_t drops;
98 u64_stats_t xdp_packets;
99 u64_stats_t xdp_tx;
100 u64_stats_t xdp_redirects;
101 u64_stats_t xdp_drops;
102 u64_stats_t kicks;
103};
104
105#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
106#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
107
108static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
109 { "packets", VIRTNET_SQ_STAT(packets) },
110 { "bytes", VIRTNET_SQ_STAT(bytes) },
111 { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
112 { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
113 { "kicks", VIRTNET_SQ_STAT(kicks) },
114 { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) },
115};
116
117static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
118 { "packets", VIRTNET_RQ_STAT(packets) },
119 { "bytes", VIRTNET_RQ_STAT(bytes) },
120 { "drops", VIRTNET_RQ_STAT(drops) },
121 { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
122 { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
123 { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
124 { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
125 { "kicks", VIRTNET_RQ_STAT(kicks) },
126};
127
128#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
129#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
130
131struct virtnet_interrupt_coalesce {
132 u32 max_packets;
133 u32 max_usecs;
134};
135
136/* The DMA information for the pages allocated at one time. */
137struct virtnet_rq_dma {
138 dma_addr_t addr;
139 u32 ref;
140 u16 len;
141 u16 need_sync;
142};
143
144/* Internal representation of a send virtqueue */
145struct send_queue {
146	/* Virtqueue associated with this send_queue */
147 struct virtqueue *vq;
148
149 /* TX: fragments + linear part + virtio header */
150 struct scatterlist sg[MAX_SKB_FRAGS + 2];
151
152 /* Name of the send queue: output.$index */
153 char name[16];
154
155 struct virtnet_sq_stats stats;
156
157 struct virtnet_interrupt_coalesce intr_coal;
158
159 struct napi_struct napi;
160
161 /* Record whether sq is in reset state. */
162 bool reset;
163};
164
165/* Internal representation of a receive virtqueue */
166struct receive_queue {
167 /* Virtqueue associated with this receive_queue */
168 struct virtqueue *vq;
169
170 struct napi_struct napi;
171
172 struct bpf_prog __rcu *xdp_prog;
173
174 struct virtnet_rq_stats stats;
175
176 /* The number of rx notifications */
177 u16 calls;
178
179 /* Is dynamic interrupt moderation enabled? */
180 bool dim_enabled;
181
182 /* Dynamic Interrupt Moderation */
183 struct dim dim;
184
185 u32 packets_in_napi;
186
187 struct virtnet_interrupt_coalesce intr_coal;
188
189 /* Chain pages by the private ptr. */
190 struct page *pages;
191
192 /* Average packet length for mergeable receive buffers. */
193 struct ewma_pkt_len mrg_avg_pkt_len;
194
195 /* Page frag for packet buffer allocation. */
196 struct page_frag alloc_frag;
197
198 /* RX: fragments + linear part + virtio header */
199 struct scatterlist sg[MAX_SKB_FRAGS + 2];
200
201 /* Min single buffer size for mergeable buffers case. */
202 unsigned int min_buf_len;
203
204 /* Name of this receive queue: input.$index */
205 char name[16];
206
207 struct xdp_rxq_info xdp_rxq;
208
209	/* Record the last dma info to free after a new page is allocated. */
210 struct virtnet_rq_dma *last_dma;
211
212	/* Do DMA mapping ourselves */
213 bool do_dma;
214};
215
216/* This structure can hold an RSS message with the maximum settings for the indirection table and key size.
217 * Note that the default structure describing the RSS configuration, virtio_net_rss_config,
218 * contains the same information but cannot hold the table values.
219 * In any case, the structure is passed to the virtio device through an sg_buf split into parts,
220 * because the table sizes may differ according to the device configuration.
 */
221 */
222#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
223#define VIRTIO_NET_RSS_MAX_TABLE_LEN 128
224struct virtio_net_ctrl_rss {
225 u32 hash_types;
226 u16 indirection_table_mask;
227 u16 unclassified_queue;
228 u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
229 u16 max_tx_vq;
230 u8 hash_key_length;
231 u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
232};
233
234/* Control VQ buffers: protected by the rtnl lock */
235struct control_buf {
236 struct virtio_net_ctrl_hdr hdr;
237 virtio_net_ctrl_ack status;
238 struct virtio_net_ctrl_mq mq;
239 u8 promisc;
240 u8 allmulti;
241 __virtio16 vid;
242 __virtio64 offloads;
243 struct virtio_net_ctrl_rss rss;
244 struct virtio_net_ctrl_coal_tx coal_tx;
245 struct virtio_net_ctrl_coal_rx coal_rx;
246 struct virtio_net_ctrl_coal_vq coal_vq;
247};
248
249struct virtnet_info {
250 struct virtio_device *vdev;
251 struct virtqueue *cvq;
252 struct net_device *dev;
253 struct send_queue *sq;
254 struct receive_queue *rq;
255 unsigned int status;
256
257 /* Max # of queue pairs supported by the device */
258 u16 max_queue_pairs;
259
260 /* # of queue pairs currently used by the driver */
261 u16 curr_queue_pairs;
262
263 /* # of XDP queue pairs currently used by the driver */
264 u16 xdp_queue_pairs;
265
266	/* xdp_queue_pairs may be 0 even when XDP is loaded, so track XDP enablement separately. */
267 bool xdp_enabled;
268
269 /* I like... big packets and I cannot lie! */
270 bool big_packets;
271
272 /* number of sg entries allocated for big packets */
273 unsigned int big_packets_num_skbfrags;
274
275 /* Host will merge rx buffers for big packets (shake it! shake it!) */
276 bool mergeable_rx_bufs;
277
278 /* Host supports rss and/or hash report */
279 bool has_rss;
280 bool has_rss_hash_report;
281 u8 rss_key_size;
282 u16 rss_indir_table_size;
283 u32 rss_hash_types_supported;
284 u32 rss_hash_types_saved;
285
286 /* Has control virtqueue */
287 bool has_cvq;
288
289 /* Host can handle any s/g split between our header and packet data */
290 bool any_header_sg;
291
292 /* Packet virtio header size */
293 u8 hdr_len;
294
295 /* Work struct for delayed refilling if we run low on memory. */
296 struct delayed_work refill;
297
298 /* Is delayed refill enabled? */
299 bool refill_enabled;
300
301 /* The lock to synchronize the access to refill_enabled */
302 spinlock_t refill_lock;
303
304 /* Work struct for config space updates */
305 struct work_struct config_work;
306
307	/* Is the affinity hint set for the virtqueues? */
308 bool affinity_hint_set;
309
310 /* CPU hotplug instances for online & dead */
311 struct hlist_node node;
312 struct hlist_node node_dead;
313
314 struct control_buf *ctrl;
315
316 /* Ethtool settings */
317 u8 duplex;
318 u32 speed;
319
320 /* Is rx dynamic interrupt moderation enabled? */
321 bool rx_dim_enabled;
322
323 /* Interrupt coalescing settings */
324 struct virtnet_interrupt_coalesce intr_coal_tx;
325 struct virtnet_interrupt_coalesce intr_coal_rx;
326
327 unsigned long guest_offloads;
328 unsigned long guest_offloads_capable;
329
330 /* failover when STANDBY feature enabled */
331 struct failover *failover;
332};
333
334struct padded_vnet_hdr {
335 struct virtio_net_hdr_v1_hash hdr;
336 /*
337	 * hdr is in a separate sg buffer, and the data sg buffer shares the
338	 * same page with this header sg. This padding makes the next sg
339	 * 16-byte aligned after the header.
340 */
341 char padding[12];
342};
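/* Illustrative sizing note (assuming the virtio 1.x header layout): struct
 * virtio_net_hdr_v1_hash is 20 bytes, so the 12 bytes of padding above round
 * the header region up to 32 bytes, which keeps the following data sg
 * 16-byte aligned as the comment describes.
 */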
343
344struct virtio_net_common_hdr {
345 union {
346 struct virtio_net_hdr hdr;
347 struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
348 struct virtio_net_hdr_v1_hash hash_v1_hdr;
349 };
350};
351
352static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
353
354static bool is_xdp_frame(void *ptr)
355{
356 return (unsigned long)ptr & VIRTIO_XDP_FLAG;
357}
358
359static void *xdp_to_ptr(struct xdp_frame *ptr)
360{
361 return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
362}
363
364static struct xdp_frame *ptr_to_xdp(void *ptr)
365{
366 return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
367}
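/* Illustrative note (addresses are made up): the tag relies on xdp_frame
 * pointers having bit 0 clear due to alignment. A frame at
 * 0xffff888012345670 is stored in the ring as 0xffff888012345671 by
 * xdp_to_ptr(), and ptr_to_xdp() masks the bit off again before use.
 */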
368
369/* Converting between virtqueue no. and kernel tx/rx queue no.
370 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
371 */
372static int vq2txq(struct virtqueue *vq)
373{
374 return (vq->index - 1) / 2;
375}
376
377static int txq2vq(int txq)
378{
379 return txq * 2 + 1;
380}
381
382static int vq2rxq(struct virtqueue *vq)
383{
384 return vq->index / 2;
385}
386
387static int rxq2vq(int rxq)
388{
389 return rxq * 2;
390}
391
392static inline struct virtio_net_common_hdr *
393skb_vnet_common_hdr(struct sk_buff *skb)
394{
395 return (struct virtio_net_common_hdr *)skb->cb;
396}
397
398/*
399 * page->private is used to chain pages for big packets; put the whole
400 * most recently used list at the beginning for reuse
401 */
402static void give_pages(struct receive_queue *rq, struct page *page)
403{
404 struct page *end;
405
406 /* Find end of list, sew whole thing into vi->rq.pages. */
407 for (end = page; end->private; end = (struct page *)end->private);
408 end->private = (unsigned long)rq->pages;
409 rq->pages = page;
410}
411
412static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
413{
414 struct page *p = rq->pages;
415
416 if (p) {
417 rq->pages = (struct page *)p->private;
418 /* clear private here, it is used to chain pages */
419 p->private = 0;
420 } else
421 p = alloc_page(gfp_mask);
422 return p;
423}
424
425static void virtnet_rq_free_buf(struct virtnet_info *vi,
426 struct receive_queue *rq, void *buf)
427{
428 if (vi->mergeable_rx_bufs)
429 put_page(virt_to_head_page(buf));
430 else if (vi->big_packets)
431 give_pages(rq, buf);
432 else
433 put_page(virt_to_head_page(buf));
434}
435
436static void enable_delayed_refill(struct virtnet_info *vi)
437{
438 spin_lock_bh(&vi->refill_lock);
439 vi->refill_enabled = true;
440 spin_unlock_bh(&vi->refill_lock);
441}
442
443static void disable_delayed_refill(struct virtnet_info *vi)
444{
445 spin_lock_bh(&vi->refill_lock);
446 vi->refill_enabled = false;
447 spin_unlock_bh(&vi->refill_lock);
448}
449
450static void virtqueue_napi_schedule(struct napi_struct *napi,
451 struct virtqueue *vq)
452{
453 if (napi_schedule_prep(napi)) {
454 virtqueue_disable_cb(vq);
455 __napi_schedule(napi);
456 }
457}
458
459static bool virtqueue_napi_complete(struct napi_struct *napi,
460 struct virtqueue *vq, int processed)
461{
462 int opaque;
463
464 opaque = virtqueue_enable_cb_prepare(vq);
465 if (napi_complete_done(napi, processed)) {
466 if (unlikely(virtqueue_poll(vq, opaque)))
467 virtqueue_napi_schedule(napi, vq);
468 else
469 return true;
470 } else {
471 virtqueue_disable_cb(vq);
472 }
473
474 return false;
475}
476
477static void skb_xmit_done(struct virtqueue *vq)
478{
479 struct virtnet_info *vi = vq->vdev->priv;
480 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
481
482 /* Suppress further interrupts. */
483 virtqueue_disable_cb(vq);
484
485 if (napi->weight)
486 virtqueue_napi_schedule(napi, vq);
487 else
488 /* We were probably waiting for more output buffers. */
489 netif_wake_subqueue(vi->dev, vq2txq(vq));
490}
491
492#define MRG_CTX_HEADER_SHIFT 22
493static void *mergeable_len_to_ctx(unsigned int truesize,
494 unsigned int headroom)
495{
496 return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
497}
498
499static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
500{
501 return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
502}
503
504static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
505{
506 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
507}
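/* Illustrative example (values chosen arbitrarily): a truesize of 1536 and a
 * headroom of 256 are packed by mergeable_len_to_ctx() as
 * (256 << MRG_CTX_HEADER_SHIFT) | 1536, and the two helpers above recover
 * 256 and 1536 from that context pointer. truesize must stay below
 * 1 << MRG_CTX_HEADER_SHIFT for this encoding to hold.
 */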
508
509static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
510 unsigned int headroom,
511 unsigned int len)
512{
513 struct sk_buff *skb;
514
515 skb = build_skb(buf, buflen);
516 if (unlikely(!skb))
517 return NULL;
518
519 skb_reserve(skb, headroom);
520 skb_put(skb, len);
521
522 return skb;
523}
524
525/* Called from bottom half context */
526static struct sk_buff *page_to_skb(struct virtnet_info *vi,
527 struct receive_queue *rq,
528 struct page *page, unsigned int offset,
529 unsigned int len, unsigned int truesize,
530 unsigned int headroom)
531{
532 struct sk_buff *skb;
533 struct virtio_net_common_hdr *hdr;
534 unsigned int copy, hdr_len, hdr_padded_len;
535 struct page *page_to_free = NULL;
536 int tailroom, shinfo_size;
537 char *p, *hdr_p, *buf;
538
539 p = page_address(page) + offset;
540 hdr_p = p;
541
542 hdr_len = vi->hdr_len;
543 if (vi->mergeable_rx_bufs)
544 hdr_padded_len = hdr_len;
545 else
546 hdr_padded_len = sizeof(struct padded_vnet_hdr);
547
548 buf = p - headroom;
549 len -= hdr_len;
550 offset += hdr_padded_len;
551 p += hdr_padded_len;
552 tailroom = truesize - headroom - hdr_padded_len - len;
553
554 shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
555
556	/* Build the skb around the existing buffer to avoid copying larger packets */
557 if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
558 skb = virtnet_build_skb(buf, truesize, p - buf, len);
559 if (unlikely(!skb))
560 return NULL;
561
562 page = (struct page *)page->private;
563 if (page)
564 give_pages(rq, page);
565 goto ok;
566 }
567
568 /* copy small packet so we can reuse these pages for small data */
569 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
570 if (unlikely(!skb))
571 return NULL;
572
573	/* Copy the whole frame if it fits skb->head, otherwise
574 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
575 */
576 if (len <= skb_tailroom(skb))
577 copy = len;
578 else
579 copy = ETH_HLEN;
580 skb_put_data(skb, p, copy);
581
582 len -= copy;
583 offset += copy;
584
585 if (vi->mergeable_rx_bufs) {
586 if (len)
587 skb_add_rx_frag(skb, 0, page, offset, len, truesize);
588 else
589 page_to_free = page;
590 goto ok;
591 }
592
593 /*
594 * Verify that we can indeed put this data into a skb.
595 * This is here to handle cases when the device erroneously
596 * tries to receive more than is possible. This is usually
597 * the case of a broken device.
598 */
599 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
600 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
601 dev_kfree_skb(skb);
602 return NULL;
603 }
604 BUG_ON(offset >= PAGE_SIZE);
605 while (len) {
606 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
607 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
608 frag_size, truesize);
609 len -= frag_size;
610 page = (struct page *)page->private;
611 offset = 0;
612 }
613
614 if (page)
615 give_pages(rq, page);
616
617ok:
618 hdr = skb_vnet_common_hdr(skb);
619 memcpy(hdr, hdr_p, hdr_len);
620 if (page_to_free)
621 put_page(page_to_free);
622
623 return skb;
624}
625
626static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
627{
628 struct page *page = virt_to_head_page(buf);
629 struct virtnet_rq_dma *dma;
630 void *head;
631 int offset;
632
633 head = page_address(page);
634
635 dma = head;
636
637 --dma->ref;
638
639 if (dma->need_sync && len) {
640 offset = buf - (head + sizeof(*dma));
641
642 virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
643 offset, len,
644 DMA_FROM_DEVICE);
645 }
646
647 if (dma->ref)
648 return;
649
650 virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
651 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
652 put_page(page);
653}
654
655static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
656{
657 void *buf;
658
659 buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
660 if (buf && rq->do_dma)
661 virtnet_rq_unmap(rq, buf, *len);
662
663 return buf;
664}
665
666static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
667{
668 struct virtnet_rq_dma *dma;
669 dma_addr_t addr;
670 u32 offset;
671 void *head;
672
673 if (!rq->do_dma) {
674 sg_init_one(rq->sg, buf, len);
675 return;
676 }
677
678 head = page_address(rq->alloc_frag.page);
679
680 offset = buf - head;
681
682 dma = head;
683
684 addr = dma->addr - sizeof(*dma) + offset;
685
686 sg_init_table(rq->sg, 1);
687 rq->sg[0].dma_address = addr;
688 rq->sg[0].length = len;
689}
690
691static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
692{
693 struct page_frag *alloc_frag = &rq->alloc_frag;
694 struct virtnet_rq_dma *dma;
695 void *buf, *head;
696 dma_addr_t addr;
697
698 if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
699 return NULL;
700
701 head = page_address(alloc_frag->page);
702
703 if (rq->do_dma) {
704 dma = head;
705
706 /* new pages */
707 if (!alloc_frag->offset) {
708 if (rq->last_dma) {
709				/* Now that the new page is allocated, the last dma
710				 * will not be used, so it can be unmapped
711 * if the ref is 0.
712 */
713 virtnet_rq_unmap(rq, rq->last_dma, 0);
714 rq->last_dma = NULL;
715 }
716
717 dma->len = alloc_frag->size - sizeof(*dma);
718
719 addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
720 dma->len, DMA_FROM_DEVICE, 0);
721 if (virtqueue_dma_mapping_error(rq->vq, addr))
722 return NULL;
723
724 dma->addr = addr;
725 dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
726
727 /* Add a reference to dma to prevent the entire dma from
728 * being released during error handling. This reference
729 * will be freed after the pages are no longer used.
730 */
731 get_page(alloc_frag->page);
732 dma->ref = 1;
733 alloc_frag->offset = sizeof(*dma);
734
735 rq->last_dma = dma;
736 }
737
738 ++dma->ref;
739 }
740
741 buf = head + alloc_frag->offset;
742
743 get_page(alloc_frag->page);
744 alloc_frag->offset += size;
745
746 return buf;
747}
748
749static void virtnet_rq_set_premapped(struct virtnet_info *vi)
750{
751 int i;
752
753	/* premapped DMA is not used in big packets mode */
754 if (!vi->mergeable_rx_bufs && vi->big_packets)
755 return;
756
757 for (i = 0; i < vi->max_queue_pairs; i++) {
758 if (virtqueue_set_dma_premapped(vi->rq[i].vq))
759 continue;
760
761 vi->rq[i].do_dma = true;
762 }
763}
764
765static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
766{
767 struct virtnet_info *vi = vq->vdev->priv;
768 struct receive_queue *rq;
769 int i = vq2rxq(vq);
770
771 rq = &vi->rq[i];
772
773 if (rq->do_dma)
774 virtnet_rq_unmap(rq, buf, 0);
775
776 virtnet_rq_free_buf(vi, rq, buf);
777}
778
779static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
780{
781 unsigned int len;
782 unsigned int packets = 0;
783 unsigned int bytes = 0;
784 void *ptr;
785
786 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
787 if (likely(!is_xdp_frame(ptr))) {
788 struct sk_buff *skb = ptr;
789
790 pr_debug("Sent skb %p\n", skb);
791
792 bytes += skb->len;
793 napi_consume_skb(skb, in_napi);
794 } else {
795 struct xdp_frame *frame = ptr_to_xdp(ptr);
796
797 bytes += xdp_get_frame_len(frame);
798 xdp_return_frame(frame);
799 }
800 packets++;
801 }
802
803	/* Avoid overhead when no packets have been processed; this
804 * happens when called speculatively from start_xmit.
805 */
806 if (!packets)
807 return;
808
809 u64_stats_update_begin(&sq->stats.syncp);
810 u64_stats_add(&sq->stats.bytes, bytes);
811 u64_stats_add(&sq->stats.packets, packets);
812 u64_stats_update_end(&sq->stats.syncp);
813}
814
815static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
816{
817 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
818 return false;
819 else if (q < vi->curr_queue_pairs)
820 return true;
821 else
822 return false;
823}
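/* Illustrative example (arbitrary numbers): with curr_queue_pairs == 8 and
 * xdp_queue_pairs == 4, queues 0-3 carry regular skbs while queues 4-7 hold
 * raw XDP buffers, so is_xdp_raw_buffer_queue(vi, 5) returns true and
 * is_xdp_raw_buffer_queue(vi, 2) returns false.
 */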
824
825static void check_sq_full_and_disable(struct virtnet_info *vi,
826 struct net_device *dev,
827 struct send_queue *sq)
828{
829 bool use_napi = sq->napi.weight;
830 int qnum;
831
832 qnum = sq - vi->sq;
833
834 /* If running out of space, stop queue to avoid getting packets that we
835 * are then unable to transmit.
836 * An alternative would be to force queuing layer to requeue the skb by
837 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
838 * returned in a normal path of operation: it means that driver is not
839 * maintaining the TX queue stop/start state properly, and causes
840 * the stack to do a non-trivial amount of useless work.
841 * Since most packets only take 1 or 2 ring slots, stopping the queue
842 * early means 16 slots are typically wasted.
843 */
844 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
845 netif_stop_subqueue(dev, qnum);
846 if (use_napi) {
847 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
848 virtqueue_napi_schedule(&sq->napi, sq->vq);
849 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
850 /* More just got used, free them then recheck. */
851 free_old_xmit_skbs(sq, false);
852 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
853 netif_start_subqueue(dev, qnum);
854 virtqueue_disable_cb(sq->vq);
855 }
856 }
857 }
858}
859
860static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
861 struct send_queue *sq,
862 struct xdp_frame *xdpf)
863{
864 struct virtio_net_hdr_mrg_rxbuf *hdr;
865 struct skb_shared_info *shinfo;
866 u8 nr_frags = 0;
867 int err, i;
868
869 if (unlikely(xdpf->headroom < vi->hdr_len))
870 return -EOVERFLOW;
871
872 if (unlikely(xdp_frame_has_frags(xdpf))) {
873 shinfo = xdp_get_shared_info_from_frame(xdpf);
874 nr_frags = shinfo->nr_frags;
875 }
876
877	/* In the wrapping function virtnet_xdp_xmit(), we need to free
878	 * up the pending old buffers, where we need to calculate the
879	 * position of skb_shared_info in xdp_get_frame_len() and
880	 * xdp_return_frame(), which depend on xdpf->data and
881 * xdpf->headroom. Therefore, we need to update the value of
882 * headroom synchronously here.
883 */
884 xdpf->headroom -= vi->hdr_len;
885 xdpf->data -= vi->hdr_len;
886 /* Zero header and leave csum up to XDP layers */
887 hdr = xdpf->data;
888 memset(hdr, 0, vi->hdr_len);
889 xdpf->len += vi->hdr_len;
890
891 sg_init_table(sq->sg, nr_frags + 1);
892 sg_set_buf(sq->sg, xdpf->data, xdpf->len);
893 for (i = 0; i < nr_frags; i++) {
894 skb_frag_t *frag = &shinfo->frags[i];
895
896 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
897 skb_frag_size(frag), skb_frag_off(frag));
898 }
899
900 err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
901 xdp_to_ptr(xdpf), GFP_ATOMIC);
902 if (unlikely(err))
903 return -ENOSPC; /* Caller handle free/refcnt */
904
905 return 0;
906}
907
908/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
909 * the current cpu, so it does not need to be locked.
910 *
911 * Here we use a macro instead of inline functions because we have to deal with
912 * three issues at the same time: 1. the choice of sq; 2. deciding whether to
913 * lock/unlock the txq; 3. making sparse happy. It is difficult for two inline
914 * functions to solve all three problems at the same time.
915 */
916#define virtnet_xdp_get_sq(vi) ({ \
917 int cpu = smp_processor_id(); \
918 struct netdev_queue *txq; \
919 typeof(vi) v = (vi); \
920 unsigned int qp; \
921 \
922 if (v->curr_queue_pairs > nr_cpu_ids) { \
923 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
924 qp += cpu; \
925 txq = netdev_get_tx_queue(v->dev, qp); \
926 __netif_tx_acquire(txq); \
927 } else { \
928 qp = cpu % v->curr_queue_pairs; \
929 txq = netdev_get_tx_queue(v->dev, qp); \
930 __netif_tx_lock(txq, cpu); \
931 } \
932 v->sq + qp; \
933})
934
935#define virtnet_xdp_put_sq(vi, q) { \
936 struct netdev_queue *txq; \
937 typeof(vi) v = (vi); \
938 \
939 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
940 if (v->curr_queue_pairs > nr_cpu_ids) \
941 __netif_tx_release(txq); \
942 else \
943 __netif_tx_unlock(txq); \
944}
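/* Illustrative usage sketch (mirrors virtnet_xdp_xmit() below): the two
 * macros are always used as a pair so the txq lock state stays balanced:
 *
 *	sq = virtnet_xdp_get_sq(vi);
 *	... add frames to sq->vq ...
 *	virtnet_xdp_put_sq(vi, sq);
 */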
945
946static int virtnet_xdp_xmit(struct net_device *dev,
947 int n, struct xdp_frame **frames, u32 flags)
948{
949 struct virtnet_info *vi = netdev_priv(dev);
950 struct receive_queue *rq = vi->rq;
951 struct bpf_prog *xdp_prog;
952 struct send_queue *sq;
953 unsigned int len;
954 int packets = 0;
955 int bytes = 0;
956 int nxmit = 0;
957 int kicks = 0;
958 void *ptr;
959 int ret;
960 int i;
961
962 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
963 * indicate XDP resources have been successfully allocated.
964 */
965 xdp_prog = rcu_access_pointer(rq->xdp_prog);
966 if (!xdp_prog)
967 return -ENXIO;
968
969 sq = virtnet_xdp_get_sq(vi);
970
971 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
972 ret = -EINVAL;
973 goto out;
974 }
975
976 /* Free up any pending old buffers before queueing new ones. */
977 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
978 if (likely(is_xdp_frame(ptr))) {
979 struct xdp_frame *frame = ptr_to_xdp(ptr);
980
981 bytes += xdp_get_frame_len(frame);
982 xdp_return_frame(frame);
983 } else {
984 struct sk_buff *skb = ptr;
985
986 bytes += skb->len;
987 napi_consume_skb(skb, false);
988 }
989 packets++;
990 }
991
992 for (i = 0; i < n; i++) {
993 struct xdp_frame *xdpf = frames[i];
994
995 if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
996 break;
997 nxmit++;
998 }
999 ret = nxmit;
1000
1001 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1002 check_sq_full_and_disable(vi, dev, sq);
1003
1004 if (flags & XDP_XMIT_FLUSH) {
1005 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1006 kicks = 1;
1007 }
1008out:
1009 u64_stats_update_begin(&sq->stats.syncp);
1010 u64_stats_add(&sq->stats.bytes, bytes);
1011 u64_stats_add(&sq->stats.packets, packets);
1012 u64_stats_add(&sq->stats.xdp_tx, n);
1013 u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
1014 u64_stats_add(&sq->stats.kicks, kicks);
1015 u64_stats_update_end(&sq->stats.syncp);
1016
1017 virtnet_xdp_put_sq(vi, sq);
1018 return ret;
1019}
1020
1021static void put_xdp_frags(struct xdp_buff *xdp)
1022{
1023 struct skb_shared_info *shinfo;
1024 struct page *xdp_page;
1025 int i;
1026
1027 if (xdp_buff_has_frags(xdp)) {
1028 shinfo = xdp_get_shared_info_from_buff(xdp);
1029 for (i = 0; i < shinfo->nr_frags; i++) {
1030 xdp_page = skb_frag_page(&shinfo->frags[i]);
1031 put_page(xdp_page);
1032 }
1033 }
1034}
1035
1036static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
1037 struct net_device *dev,
1038 unsigned int *xdp_xmit,
1039 struct virtnet_rq_stats *stats)
1040{
1041 struct xdp_frame *xdpf;
1042 int err;
1043 u32 act;
1044
1045 act = bpf_prog_run_xdp(xdp_prog, xdp);
1046 u64_stats_inc(&stats->xdp_packets);
1047
1048 switch (act) {
1049 case XDP_PASS:
1050 return act;
1051
1052 case XDP_TX:
1053 u64_stats_inc(&stats->xdp_tx);
1054 xdpf = xdp_convert_buff_to_frame(xdp);
1055 if (unlikely(!xdpf)) {
1056 netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1057 return XDP_DROP;
1058 }
1059
1060 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1061 if (unlikely(!err)) {
1062 xdp_return_frame_rx_napi(xdpf);
1063 } else if (unlikely(err < 0)) {
1064 trace_xdp_exception(dev, xdp_prog, act);
1065 return XDP_DROP;
1066 }
1067 *xdp_xmit |= VIRTIO_XDP_TX;
1068 return act;
1069
1070 case XDP_REDIRECT:
1071 u64_stats_inc(&stats->xdp_redirects);
1072 err = xdp_do_redirect(dev, xdp, xdp_prog);
1073 if (err)
1074 return XDP_DROP;
1075
1076 *xdp_xmit |= VIRTIO_XDP_REDIR;
1077 return act;
1078
1079 default:
1080 bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
1081 fallthrough;
1082 case XDP_ABORTED:
1083 trace_xdp_exception(dev, xdp_prog, act);
1084 fallthrough;
1085 case XDP_DROP:
1086 return XDP_DROP;
1087 }
1088}
1089
1090static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1091{
1092 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
1093}
1094
1095/* We copy the packet for XDP in the following cases:
1096 *
1097 * 1) Packet is scattered across multiple rx buffers.
1098 * 2) Headroom space is insufficient.
1099 *
1100 * This is inefficient but it's a temporary condition that
1101 * we hit right after XDP is enabled and until queue is refilled
1102 * with large buffers with sufficient headroom - so it should affect
1103 * at most queue size packets.
1104 * Afterwards, the conditions to enable
1105 * XDP should preclude the underlying device from sending packets
1106 * across multiple buffers (num_buf > 1), and we make sure buffers
1107 * have enough headroom.
1108 */
1109static struct page *xdp_linearize_page(struct receive_queue *rq,
1110 int *num_buf,
1111 struct page *p,
1112 int offset,
1113 int page_off,
1114 unsigned int *len)
1115{
1116 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1117 struct page *page;
1118
1119 if (page_off + *len + tailroom > PAGE_SIZE)
1120 return NULL;
1121
1122 page = alloc_page(GFP_ATOMIC);
1123 if (!page)
1124 return NULL;
1125
1126 memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
1127 page_off += *len;
1128
1129 while (--*num_buf) {
1130 unsigned int buflen;
1131 void *buf;
1132 int off;
1133
1134 buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1135 if (unlikely(!buf))
1136 goto err_buf;
1137
1138 p = virt_to_head_page(buf);
1139 off = buf - page_address(p);
1140
1141 /* guard against a misconfigured or uncooperative backend that
1142		 * is sending packets larger than the MTU.
1143 */
1144 if ((page_off + buflen + tailroom) > PAGE_SIZE) {
1145 put_page(p);
1146 goto err_buf;
1147 }
1148
1149 memcpy(page_address(page) + page_off,
1150 page_address(p) + off, buflen);
1151 page_off += buflen;
1152 put_page(p);
1153 }
1154
1155 /* Headroom does not contribute to packet length */
1156 *len = page_off - VIRTIO_XDP_HEADROOM;
1157 return page;
1158err_buf:
1159 __free_pages(page, 0);
1160 return NULL;
1161}
1162
1163static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
1164 unsigned int xdp_headroom,
1165 void *buf,
1166 unsigned int len)
1167{
1168 unsigned int header_offset;
1169 unsigned int headroom;
1170 unsigned int buflen;
1171 struct sk_buff *skb;
1172
1173 header_offset = VIRTNET_RX_PAD + xdp_headroom;
1174 headroom = vi->hdr_len + header_offset;
1175 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1176 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1177
1178 skb = virtnet_build_skb(buf, buflen, headroom, len);
1179 if (unlikely(!skb))
1180 return NULL;
1181
1182 buf += header_offset;
1183 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
1184
1185 return skb;
1186}
1187
1188static struct sk_buff *receive_small_xdp(struct net_device *dev,
1189 struct virtnet_info *vi,
1190 struct receive_queue *rq,
1191 struct bpf_prog *xdp_prog,
1192 void *buf,
1193 unsigned int xdp_headroom,
1194 unsigned int len,
1195 unsigned int *xdp_xmit,
1196 struct virtnet_rq_stats *stats)
1197{
1198 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
1199 unsigned int headroom = vi->hdr_len + header_offset;
1200 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1201 struct page *page = virt_to_head_page(buf);
1202 struct page *xdp_page;
1203 unsigned int buflen;
1204 struct xdp_buff xdp;
1205 struct sk_buff *skb;
1206 unsigned int metasize = 0;
1207 u32 act;
1208
1209 if (unlikely(hdr->hdr.gso_type))
1210 goto err_xdp;
1211
1212 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1213 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1214
1215 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
1216 int offset = buf - page_address(page) + header_offset;
1217 unsigned int tlen = len + vi->hdr_len;
1218 int num_buf = 1;
1219
1220 xdp_headroom = virtnet_get_headroom(vi);
1221 header_offset = VIRTNET_RX_PAD + xdp_headroom;
1222 headroom = vi->hdr_len + header_offset;
1223 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1224 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1225 xdp_page = xdp_linearize_page(rq, &num_buf, page,
1226 offset, header_offset,
1227 &tlen);
1228 if (!xdp_page)
1229 goto err_xdp;
1230
1231 buf = page_address(xdp_page);
1232 put_page(page);
1233 page = xdp_page;
1234 }
1235
1236 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1237 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1238 xdp_headroom, len, true);
1239
1240 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1241
1242 switch (act) {
1243 case XDP_PASS:
1244 /* Recalculate length in case bpf program changed it */
1245 len = xdp.data_end - xdp.data;
1246 metasize = xdp.data - xdp.data_meta;
1247 break;
1248
1249 case XDP_TX:
1250 case XDP_REDIRECT:
1251 goto xdp_xmit;
1252
1253 default:
1254 goto err_xdp;
1255 }
1256
1257 skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
1258 if (unlikely(!skb))
1259 goto err;
1260
1261 if (metasize)
1262 skb_metadata_set(skb, metasize);
1263
1264 return skb;
1265
1266err_xdp:
1267 u64_stats_inc(&stats->xdp_drops);
1268err:
1269 u64_stats_inc(&stats->drops);
1270 put_page(page);
1271xdp_xmit:
1272 return NULL;
1273}
1274
1275static struct sk_buff *receive_small(struct net_device *dev,
1276 struct virtnet_info *vi,
1277 struct receive_queue *rq,
1278 void *buf, void *ctx,
1279 unsigned int len,
1280 unsigned int *xdp_xmit,
1281 struct virtnet_rq_stats *stats)
1282{
1283 unsigned int xdp_headroom = (unsigned long)ctx;
1284 struct page *page = virt_to_head_page(buf);
1285 struct sk_buff *skb;
1286
1287 len -= vi->hdr_len;
1288 u64_stats_add(&stats->bytes, len);
1289
1290 if (unlikely(len > GOOD_PACKET_LEN)) {
1291 pr_debug("%s: rx error: len %u exceeds max size %d\n",
1292 dev->name, len, GOOD_PACKET_LEN);
1293 DEV_STATS_INC(dev, rx_length_errors);
1294 goto err;
1295 }
1296
1297 if (unlikely(vi->xdp_enabled)) {
1298 struct bpf_prog *xdp_prog;
1299
1300 rcu_read_lock();
1301 xdp_prog = rcu_dereference(rq->xdp_prog);
1302 if (xdp_prog) {
1303 skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
1304 xdp_headroom, len, xdp_xmit,
1305 stats);
1306 rcu_read_unlock();
1307 return skb;
1308 }
1309 rcu_read_unlock();
1310 }
1311
1312 skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
1313 if (likely(skb))
1314 return skb;
1315
1316err:
1317 u64_stats_inc(&stats->drops);
1318 put_page(page);
1319 return NULL;
1320}
1321
1322static struct sk_buff *receive_big(struct net_device *dev,
1323 struct virtnet_info *vi,
1324 struct receive_queue *rq,
1325 void *buf,
1326 unsigned int len,
1327 struct virtnet_rq_stats *stats)
1328{
1329 struct page *page = buf;
1330 struct sk_buff *skb =
1331 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
1332
1333 u64_stats_add(&stats->bytes, len - vi->hdr_len);
1334 if (unlikely(!skb))
1335 goto err;
1336
1337 return skb;
1338
1339err:
1340 u64_stats_inc(&stats->drops);
1341 give_pages(rq, page);
1342 return NULL;
1343}
1344
1345static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
1346 struct net_device *dev,
1347 struct virtnet_rq_stats *stats)
1348{
1349 struct page *page;
1350 void *buf;
1351 int len;
1352
1353 while (num_buf-- > 1) {
1354 buf = virtnet_rq_get_buf(rq, &len, NULL);
1355 if (unlikely(!buf)) {
1356 pr_debug("%s: rx error: %d buffers missing\n",
1357 dev->name, num_buf);
1358 DEV_STATS_INC(dev, rx_length_errors);
1359 break;
1360 }
1361 u64_stats_add(&stats->bytes, len);
1362 page = virt_to_head_page(buf);
1363 put_page(page);
1364 }
1365}
1366
1367/* Why not use xdp_build_skb_from_frame() ?
1368 * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
1369 * virtio-net there are 2 points that do not match its requirements:
1370 * 1. The size of the prefilled buffer is not fixed before xdp is set.
1371 * 2. xdp_build_skb_from_frame() does more checks that we don't need,
1372 * like eth_type_trans() (which virtio-net does in receive_buf()).
1373 */
1374static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
1375 struct virtnet_info *vi,
1376 struct xdp_buff *xdp,
1377 unsigned int xdp_frags_truesz)
1378{
1379 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
1380 unsigned int headroom, data_len;
1381 struct sk_buff *skb;
1382 int metasize;
1383 u8 nr_frags;
1384
1385 if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
1386 pr_debug("Error building skb as missing reserved tailroom for xdp");
1387 return NULL;
1388 }
1389
1390 if (unlikely(xdp_buff_has_frags(xdp)))
1391 nr_frags = sinfo->nr_frags;
1392
1393 skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
1394 if (unlikely(!skb))
1395 return NULL;
1396
1397 headroom = xdp->data - xdp->data_hard_start;
1398 data_len = xdp->data_end - xdp->data;
1399 skb_reserve(skb, headroom);
1400 __skb_put(skb, data_len);
1401
1402 metasize = xdp->data - xdp->data_meta;
1403 metasize = metasize > 0 ? metasize : 0;
1404 if (metasize)
1405 skb_metadata_set(skb, metasize);
1406
1407 if (unlikely(xdp_buff_has_frags(xdp)))
1408 xdp_update_skb_shared_info(skb, nr_frags,
1409 sinfo->xdp_frags_size,
1410 xdp_frags_truesz,
1411 xdp_buff_is_frag_pfmemalloc(xdp));
1412
1413 return skb;
1414}
1415
1416/* TODO: build xdp in big mode */
1417static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
1418 struct virtnet_info *vi,
1419 struct receive_queue *rq,
1420 struct xdp_buff *xdp,
1421 void *buf,
1422 unsigned int len,
1423 unsigned int frame_sz,
1424 int *num_buf,
1425 unsigned int *xdp_frags_truesize,
1426 struct virtnet_rq_stats *stats)
1427{
1428 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1429 unsigned int headroom, tailroom, room;
1430 unsigned int truesize, cur_frag_size;
1431 struct skb_shared_info *shinfo;
1432 unsigned int xdp_frags_truesz = 0;
1433 struct page *page;
1434 skb_frag_t *frag;
1435 int offset;
1436 void *ctx;
1437
1438 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
1439 xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
1440 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
1441
1442 if (!*num_buf)
1443 return 0;
1444
1445 if (*num_buf > 1) {
1446		/* If we want to build a multi-buffer xdp_buff, we need
1447		 * to set the frags flag (XDP_FLAGS_HAS_FRAGS) in the
1448		 * xdp_buff flags.
1449 */
1450 if (!xdp_buff_has_frags(xdp))
1451 xdp_buff_set_frags_flag(xdp);
1452
1453 shinfo = xdp_get_shared_info_from_buff(xdp);
1454 shinfo->nr_frags = 0;
1455 shinfo->xdp_frags_size = 0;
1456 }
1457
1458 if (*num_buf > MAX_SKB_FRAGS + 1)
1459 return -EINVAL;
1460
1461 while (--*num_buf > 0) {
1462 buf = virtnet_rq_get_buf(rq, &len, &ctx);
1463 if (unlikely(!buf)) {
1464 pr_debug("%s: rx error: %d buffers out of %d missing\n",
1465 dev->name, *num_buf,
1466 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
1467 DEV_STATS_INC(dev, rx_length_errors);
1468 goto err;
1469 }
1470
1471 u64_stats_add(&stats->bytes, len);
1472 page = virt_to_head_page(buf);
1473 offset = buf - page_address(page);
1474
1475 truesize = mergeable_ctx_to_truesize(ctx);
1476 headroom = mergeable_ctx_to_headroom(ctx);
1477 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1478 room = SKB_DATA_ALIGN(headroom + tailroom);
1479
1480 cur_frag_size = truesize;
1481 xdp_frags_truesz += cur_frag_size;
1482 if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
1483 put_page(page);
1484 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1485 dev->name, len, (unsigned long)(truesize - room));
1486 DEV_STATS_INC(dev, rx_length_errors);
1487 goto err;
1488 }
1489
1490 frag = &shinfo->frags[shinfo->nr_frags++];
1491 skb_frag_fill_page_desc(frag, page, offset, len);
1492 if (page_is_pfmemalloc(page))
1493 xdp_buff_set_frag_pfmemalloc(xdp);
1494
1495 shinfo->xdp_frags_size += len;
1496 }
1497
1498 *xdp_frags_truesize = xdp_frags_truesz;
1499 return 0;
1500
1501err:
1502 put_xdp_frags(xdp);
1503 return -EINVAL;
1504}
1505
1506static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
1507 struct receive_queue *rq,
1508 struct bpf_prog *xdp_prog,
1509 void *ctx,
1510 unsigned int *frame_sz,
1511 int *num_buf,
1512 struct page **page,
1513 int offset,
1514 unsigned int *len,
1515 struct virtio_net_hdr_mrg_rxbuf *hdr)
1516{
1517 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1518 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1519 struct page *xdp_page;
1520 unsigned int xdp_room;
1521
1522 /* Transient failure which in theory could occur if
1523 * in-flight packets from before XDP was enabled reach
1524 * the receive path after XDP is loaded.
1525 */
1526 if (unlikely(hdr->hdr.gso_type))
1527 return NULL;
1528
1529 /* Now XDP core assumes frag size is PAGE_SIZE, but buffers
1530	 * with headroom may add a hole in truesize, which
1531	 * makes their length exceed PAGE_SIZE. So we disabled the
1532 * hole mechanism for xdp. See add_recvbuf_mergeable().
1533 */
1534 *frame_sz = truesize;
1535
1536 if (likely(headroom >= virtnet_get_headroom(vi) &&
1537 (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
1538 return page_address(*page) + offset;
1539 }
1540
1541 /* This happens when headroom is not enough because
1542	 * the buffer was prefilled before XDP was set.
1543 * This should only happen for the first several packets.
1544 * In fact, vq reset can be used here to help us clean up
1545 * the prefilled buffers, but many existing devices do not
1546 * support it, and we don't want to bother users who are
1547 * using xdp normally.
1548 */
1549 if (!xdp_prog->aux->xdp_has_frags) {
1550 /* linearize data for XDP */
1551 xdp_page = xdp_linearize_page(rq, num_buf,
1552 *page, offset,
1553 VIRTIO_XDP_HEADROOM,
1554 len);
1555 if (!xdp_page)
1556 return NULL;
1557 } else {
1558 xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1559 sizeof(struct skb_shared_info));
1560 if (*len + xdp_room > PAGE_SIZE)
1561 return NULL;
1562
1563 xdp_page = alloc_page(GFP_ATOMIC);
1564 if (!xdp_page)
1565 return NULL;
1566
1567 memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1568 page_address(*page) + offset, *len);
1569 }
1570
1571 *frame_sz = PAGE_SIZE;
1572
1573 put_page(*page);
1574
1575 *page = xdp_page;
1576
1577 return page_address(*page) + VIRTIO_XDP_HEADROOM;
1578}
1579
1580static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1581 struct virtnet_info *vi,
1582 struct receive_queue *rq,
1583 struct bpf_prog *xdp_prog,
1584 void *buf,
1585 void *ctx,
1586 unsigned int len,
1587 unsigned int *xdp_xmit,
1588 struct virtnet_rq_stats *stats)
1589{
1590 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1591 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1592 struct page *page = virt_to_head_page(buf);
1593 int offset = buf - page_address(page);
1594 unsigned int xdp_frags_truesz = 0;
1595 struct sk_buff *head_skb;
1596 unsigned int frame_sz;
1597 struct xdp_buff xdp;
1598 void *data;
1599 u32 act;
1600 int err;
1601
1602 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1603 offset, &len, hdr);
1604 if (unlikely(!data))
1605 goto err_xdp;
1606
1607 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1608 &num_buf, &xdp_frags_truesz, stats);
1609 if (unlikely(err))
1610 goto err_xdp;
1611
1612 act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1613
1614 switch (act) {
1615 case XDP_PASS:
1616 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1617 if (unlikely(!head_skb))
1618 break;
1619 return head_skb;
1620
1621 case XDP_TX:
1622 case XDP_REDIRECT:
1623 return NULL;
1624
1625 default:
1626 break;
1627 }
1628
1629 put_xdp_frags(&xdp);
1630
1631err_xdp:
1632 put_page(page);
1633 mergeable_buf_free(rq, num_buf, dev, stats);
1634
1635 u64_stats_inc(&stats->xdp_drops);
1636 u64_stats_inc(&stats->drops);
1637 return NULL;
1638}
1639
1640static struct sk_buff *receive_mergeable(struct net_device *dev,
1641 struct virtnet_info *vi,
1642 struct receive_queue *rq,
1643 void *buf,
1644 void *ctx,
1645 unsigned int len,
1646 unsigned int *xdp_xmit,
1647 struct virtnet_rq_stats *stats)
1648{
1649 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1650 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1651 struct page *page = virt_to_head_page(buf);
1652 int offset = buf - page_address(page);
1653 struct sk_buff *head_skb, *curr_skb;
1654 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1655 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1656 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1657 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1658
1659 head_skb = NULL;
1660 u64_stats_add(&stats->bytes, len - vi->hdr_len);
1661
1662 if (unlikely(len > truesize - room)) {
1663 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1664 dev->name, len, (unsigned long)(truesize - room));
1665 DEV_STATS_INC(dev, rx_length_errors);
1666 goto err_skb;
1667 }
1668
1669 if (unlikely(vi->xdp_enabled)) {
1670 struct bpf_prog *xdp_prog;
1671
1672 rcu_read_lock();
1673 xdp_prog = rcu_dereference(rq->xdp_prog);
1674 if (xdp_prog) {
1675 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1676 len, xdp_xmit, stats);
1677 rcu_read_unlock();
1678 return head_skb;
1679 }
1680 rcu_read_unlock();
1681 }
1682
1683 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1684 curr_skb = head_skb;
1685
1686 if (unlikely(!curr_skb))
1687 goto err_skb;
1688 while (--num_buf) {
1689 int num_skb_frags;
1690
1691 buf = virtnet_rq_get_buf(rq, &len, &ctx);
1692 if (unlikely(!buf)) {
1693 pr_debug("%s: rx error: %d buffers out of %d missing\n",
1694 dev->name, num_buf,
1695 virtio16_to_cpu(vi->vdev,
1696 hdr->num_buffers));
1697 DEV_STATS_INC(dev, rx_length_errors);
1698 goto err_buf;
1699 }
1700
1701 u64_stats_add(&stats->bytes, len);
1702 page = virt_to_head_page(buf);
1703
1704 truesize = mergeable_ctx_to_truesize(ctx);
1705 headroom = mergeable_ctx_to_headroom(ctx);
1706 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1707 room = SKB_DATA_ALIGN(headroom + tailroom);
1708 if (unlikely(len > truesize - room)) {
1709 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1710 dev->name, len, (unsigned long)(truesize - room));
1711 DEV_STATS_INC(dev, rx_length_errors);
1712 goto err_skb;
1713 }
1714
1715 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
1716 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1717 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
1718
1719 if (unlikely(!nskb))
1720 goto err_skb;
1721 if (curr_skb == head_skb)
1722 skb_shinfo(curr_skb)->frag_list = nskb;
1723 else
1724 curr_skb->next = nskb;
1725 curr_skb = nskb;
1726 head_skb->truesize += nskb->truesize;
1727 num_skb_frags = 0;
1728 }
1729 if (curr_skb != head_skb) {
1730 head_skb->data_len += len;
1731 head_skb->len += len;
1732 head_skb->truesize += truesize;
1733 }
1734 offset = buf - page_address(page);
1735 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1736 put_page(page);
1737 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1738 len, truesize);
1739 } else {
1740 skb_add_rx_frag(curr_skb, num_skb_frags, page,
1741 offset, len, truesize);
1742 }
1743 }
1744
1745 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
1746 return head_skb;
1747
1748err_skb:
1749 put_page(page);
1750 mergeable_buf_free(rq, num_buf, dev, stats);
1751
1752err_buf:
1753 u64_stats_inc(&stats->drops);
1754 dev_kfree_skb(head_skb);
1755 return NULL;
1756}
1757
1758static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
1759 struct sk_buff *skb)
1760{
1761 enum pkt_hash_types rss_hash_type;
1762
1763 if (!hdr_hash || !skb)
1764 return;
1765
1766 switch (__le16_to_cpu(hdr_hash->hash_report)) {
1767 case VIRTIO_NET_HASH_REPORT_TCPv4:
1768 case VIRTIO_NET_HASH_REPORT_UDPv4:
1769 case VIRTIO_NET_HASH_REPORT_TCPv6:
1770 case VIRTIO_NET_HASH_REPORT_UDPv6:
1771 case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
1772 case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
1773 rss_hash_type = PKT_HASH_TYPE_L4;
1774 break;
1775 case VIRTIO_NET_HASH_REPORT_IPv4:
1776 case VIRTIO_NET_HASH_REPORT_IPv6:
1777 case VIRTIO_NET_HASH_REPORT_IPv6_EX:
1778 rss_hash_type = PKT_HASH_TYPE_L3;
1779 break;
1780 case VIRTIO_NET_HASH_REPORT_NONE:
1781 default:
1782 rss_hash_type = PKT_HASH_TYPE_NONE;
1783 }
1784 skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
1785}
1786
1787static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1788 void *buf, unsigned int len, void **ctx,
1789 unsigned int *xdp_xmit,
1790 struct virtnet_rq_stats *stats)
1791{
1792 struct net_device *dev = vi->dev;
1793 struct sk_buff *skb;
1794 struct virtio_net_common_hdr *hdr;
1795
1796 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1797 pr_debug("%s: short packet %i\n", dev->name, len);
1798 DEV_STATS_INC(dev, rx_length_errors);
1799 virtnet_rq_free_buf(vi, rq, buf);
1800 return;
1801 }
1802
1803 if (vi->mergeable_rx_bufs)
1804 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1805 stats);
1806 else if (vi->big_packets)
1807 skb = receive_big(dev, vi, rq, buf, len, stats);
1808 else
1809 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1810
1811 if (unlikely(!skb))
1812 return;
1813
1814 hdr = skb_vnet_common_hdr(skb);
1815 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1816 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
1817
1818 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
1819 skb->ip_summed = CHECKSUM_UNNECESSARY;
1820
1821 if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1822 virtio_is_little_endian(vi->vdev))) {
1823 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1824 dev->name, hdr->hdr.gso_type,
1825 hdr->hdr.gso_size);
1826 goto frame_err;
1827 }
1828
1829 skb_record_rx_queue(skb, vq2rxq(rq->vq));
1830 skb->protocol = eth_type_trans(skb, dev);
1831 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1832 ntohs(skb->protocol), skb->len, skb->pkt_type);
1833
1834 napi_gro_receive(&rq->napi, skb);
1835 return;
1836
1837frame_err:
1838 DEV_STATS_INC(dev, rx_frame_errors);
1839 dev_kfree_skb(skb);
1840}
1841
1842/* Unlike mergeable buffers, all buffers are allocated with the
1843 * same size, except for the headroom. For this reason we do
1844 * not need to use mergeable_len_to_ctx here - it is enough
1845 * to store the headroom as the context ignoring the truesize.
1846 */
1847static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1848 gfp_t gfp)
1849{
1850 char *buf;
1851 unsigned int xdp_headroom = virtnet_get_headroom(vi);
1852 void *ctx = (void *)(unsigned long)xdp_headroom;
1853 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
1854 int err;
1855
1856 len = SKB_DATA_ALIGN(len) +
1857 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1858
1859 buf = virtnet_rq_alloc(rq, len, gfp);
1860 if (unlikely(!buf))
1861 return -ENOMEM;
1862
1863 virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
1864 vi->hdr_len + GOOD_PACKET_LEN);
1865
1866 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1867 if (err < 0) {
1868 if (rq->do_dma)
1869 virtnet_rq_unmap(rq, buf, 0);
1870 put_page(virt_to_head_page(buf));
1871 }
1872
1873 return err;
1874}
1875
1876static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1877 gfp_t gfp)
1878{
1879 struct page *first, *list = NULL;
1880 char *p;
1881 int i, err, offset;
1882
1883 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
1884
1885 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
1886 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
1887 first = get_a_page(rq, gfp);
1888 if (!first) {
1889 if (list)
1890 give_pages(rq, list);
1891 return -ENOMEM;
1892 }
1893 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
1894
1895 /* chain new page in list head to match sg */
1896 first->private = (unsigned long)list;
1897 list = first;
1898 }
1899
1900 first = get_a_page(rq, gfp);
1901 if (!first) {
1902 give_pages(rq, list);
1903 return -ENOMEM;
1904 }
1905 p = page_address(first);
1906
1907 /* rq->sg[0], rq->sg[1] share the same page */
1908 /* a separated rq->sg[0] for header - required in case !any_header_sg */
1909 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
1910
1911 /* rq->sg[1] for data packet, from offset */
1912 offset = sizeof(struct padded_vnet_hdr);
1913 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
1914
1915 /* chain first in list head */
1916 first->private = (unsigned long)list;
1917 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
1918 first, gfp);
1919 if (err < 0)
1920 give_pages(rq, first);
1921
1922 return err;
1923}
1924
1925static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
1926 struct ewma_pkt_len *avg_pkt_len,
1927 unsigned int room)
1928{
1929 struct virtnet_info *vi = rq->vq->vdev->priv;
1930 const size_t hdr_len = vi->hdr_len;
1931 unsigned int len;
1932
1933 if (room)
1934 return PAGE_SIZE - room;
1935
1936 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1937 rq->min_buf_len, PAGE_SIZE - hdr_len);
1938
1939 return ALIGN(len, L1_CACHE_BYTES);
1940}
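/* Illustrative example (assumed values): with hdr_len 12, min_buf_len 256 and
 * an EWMA average of 1500 bytes, the helper above returns
 * ALIGN(12 + 1500, L1_CACHE_BYTES), i.e. 1536 with 64-byte cache lines.
 * When XDP headroom is in use (room != 0) it simply returns PAGE_SIZE - room.
 */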
1941
1942static int add_recvbuf_mergeable(struct virtnet_info *vi,
1943 struct receive_queue *rq, gfp_t gfp)
1944{
1945 struct page_frag *alloc_frag = &rq->alloc_frag;
1946 unsigned int headroom = virtnet_get_headroom(vi);
1947 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1948 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1949 unsigned int len, hole;
1950 void *ctx;
1951 char *buf;
1952 int err;
1953
1954 /* Extra tailroom is needed to satisfy XDP's assumption. This
1955	 * means rx frag coalescing won't work, but considering we've
1956 * disabled GSO for XDP, it won't be a big issue.
1957 */
1958 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1959
1960 buf = virtnet_rq_alloc(rq, len + room, gfp);
1961 if (unlikely(!buf))
1962 return -ENOMEM;
1963
1964 buf += headroom; /* advance address leaving hole at front of pkt */
1965 hole = alloc_frag->size - alloc_frag->offset;
1966 if (hole < len + room) {
1967 /* To avoid internal fragmentation, if there is very likely not
1968 * enough space for another buffer, add the remaining space to
1969 * the current buffer.
1970 * XDP core assumes that frame_size of xdp_buff and the length
1971 * of the frag are PAGE_SIZE, so we disable the hole mechanism.
1972 */
1973 if (!headroom)
1974 len += hole;
1975 alloc_frag->offset += hole;
1976 }
1977
1978 virtnet_rq_init_one_sg(rq, buf, len);
1979
1980 ctx = mergeable_len_to_ctx(len + room, headroom);
1981 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1982 if (err < 0) {
1983 if (rq->do_dma)
1984 virtnet_rq_unmap(rq, buf, 0);
1985 put_page(virt_to_head_page(buf));
1986 }
1987
1988 return err;
1989}
1990
1991/*
1992 * Returns false if we couldn't fill entirely (OOM).
1993 *
1994 * Normally run in the receive path, but can also be run from ndo_open
1995 * before we're receiving packets, or from refill_work which is
1996 * careful to disable receiving (using napi_disable).
1997 */
1998static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1999 gfp_t gfp)
2000{
2001 int err;
2002 bool oom;
2003
2004 do {
2005 if (vi->mergeable_rx_bufs)
2006 err = add_recvbuf_mergeable(vi, rq, gfp);
2007 else if (vi->big_packets)
2008 err = add_recvbuf_big(vi, rq, gfp);
2009 else
2010 err = add_recvbuf_small(vi, rq, gfp);
2011
2012 oom = err == -ENOMEM;
2013 if (err)
2014 break;
2015 } while (rq->vq->num_free);
2016 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2017 unsigned long flags;
2018
2019 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2020 u64_stats_inc(&rq->stats.kicks);
2021 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2022 }
2023
2024 return !oom;
2025}
2026
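/* Receive virtqueue callback: count the interrupt for dim and schedule NAPI. */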
2027static void skb_recv_done(struct virtqueue *rvq)
2028{
2029 struct virtnet_info *vi = rvq->vdev->priv;
2030 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2031
2032 rq->calls++;
2033 virtqueue_napi_schedule(&rq->napi, rvq);
2034}
2035
2036static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
2037{
2038 napi_enable(napi);
2039
2040	/* If all buffers were filled by the other side before we enabled NAPI, we
2041	 * won't get another interrupt, so process any outstanding packets now.
2042	 * Call local_bh_enable afterwards to trigger softIRQ processing.
2043 */
2044 local_bh_disable();
2045 virtqueue_napi_schedule(napi, vq);
2046 local_bh_enable();
2047}
2048
2049static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2050 struct virtqueue *vq,
2051 struct napi_struct *napi)
2052{
2053 if (!napi->weight)
2054 return;
2055
2056 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2057 * enable the feature if this is likely affine with the transmit path.
2058 */
2059 if (!vi->affinity_hint_set) {
2060 napi->weight = 0;
2061 return;
2062 }
2063
2064 return virtnet_napi_enable(vq, napi);
2065}
2066
2067static void virtnet_napi_tx_disable(struct napi_struct *napi)
2068{
2069 if (napi->weight)
2070 napi_disable(napi);
2071}
2072
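/* Delayed work used when try_fill_recv() hit OOM: retry filling each RX
 * queue with NAPI disabled, and reschedule ourselves if a ring is still
 * empty afterwards.
 */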
2073static void refill_work(struct work_struct *work)
2074{
2075 struct virtnet_info *vi =
2076 container_of(work, struct virtnet_info, refill.work);
2077 bool still_empty;
2078 int i;
2079
2080 for (i = 0; i < vi->curr_queue_pairs; i++) {
2081 struct receive_queue *rq = &vi->rq[i];
2082
2083 napi_disable(&rq->napi);
2084 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2085 virtnet_napi_enable(rq->vq, &rq->napi);
2086
2087		/* In theory, this can happen: if we don't get any buffers in,
2088		 * we will *never* try to fill again.
2089 */
2090 if (still_empty)
2091 schedule_delayed_work(&vi->refill, HZ/2);
2092 }
2093}
2094
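/* Receive up to @budget completed buffers from the RX virtqueue and hand
 * them to receive_buf(). Refill the ring inline (or defer to refill_work
 * on OOM) when it is running low, then fold the local stats into the
 * per-queue counters.
 */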
2095static int virtnet_receive(struct receive_queue *rq, int budget,
2096 unsigned int *xdp_xmit)
2097{
2098 struct virtnet_info *vi = rq->vq->vdev->priv;
2099 struct virtnet_rq_stats stats = {};
2100 unsigned int len;
2101 int packets = 0;
2102 void *buf;
2103 int i;
2104
2105 if (!vi->big_packets || vi->mergeable_rx_bufs) {
2106 void *ctx;
2107
2108 while (packets < budget &&
2109 (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2110 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
2111 packets++;
2112 }
2113 } else {
2114 while (packets < budget &&
2115 (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
2116 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
2117 packets++;
2118 }
2119 }
2120
2121 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
2122 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2123 spin_lock(&vi->refill_lock);
2124 if (vi->refill_enabled)
2125 schedule_delayed_work(&vi->refill, 0);
2126 spin_unlock(&vi->refill_lock);
2127 }
2128 }
2129
2130 u64_stats_set(&stats.packets, packets);
2131 u64_stats_update_begin(&rq->stats.syncp);
2132 for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
2133 size_t offset = virtnet_rq_stats_desc[i].offset;
2134 u64_stats_t *item, *src;
2135
2136 item = (u64_stats_t *)((u8 *)&rq->stats + offset);
2137 src = (u64_stats_t *)((u8 *)&stats + offset);
2138 u64_stats_add(item, u64_stats_read(src));
2139 }
2140 u64_stats_update_end(&rq->stats.syncp);
2141
2142 return packets;
2143}
2144
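/* Opportunistically reclaim completed TX buffers for the send queue paired
 * with this RX queue while the RX NAPI runs, and wake the netdev queue if
 * enough descriptors became free. Skipped for XDP queues and when TX NAPI
 * is disabled.
 */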
2145static void virtnet_poll_cleantx(struct receive_queue *rq)
2146{
2147 struct virtnet_info *vi = rq->vq->vdev->priv;
2148 unsigned int index = vq2rxq(rq->vq);
2149 struct send_queue *sq = &vi->sq[index];
2150 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
2151
2152 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2153 return;
2154
2155 if (__netif_tx_trylock(txq)) {
2156 if (sq->reset) {
2157 __netif_tx_unlock(txq);
2158 return;
2159 }
2160
2161 do {
2162 virtqueue_disable_cb(sq->vq);
2163 free_old_xmit_skbs(sq, true);
2164 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2165
2166 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2167 netif_tx_wake_queue(txq);
2168
2169 __netif_tx_unlock(txq);
2170 }
2171}
2172
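/* Feed the accumulated interrupt/packet/byte counts of this RX queue to
 * net_dim() so it can pick a new interrupt moderation profile.
 */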
2173static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
2174{
2175 struct dim_sample cur_sample = {};
2176
2177 if (!rq->packets_in_napi)
2178 return;
2179
2180 u64_stats_update_begin(&rq->stats.syncp);
2181 dim_update_sample(rq->calls,
2182 u64_stats_read(&rq->stats.packets),
2183 u64_stats_read(&rq->stats.bytes),
2184 &cur_sample);
2185 u64_stats_update_end(&rq->stats.syncp);
2186
2187 net_dim(&rq->dim, cur_sample);
2188 rq->packets_in_napi = 0;
2189}
2190
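/* RX NAPI poll handler: clean the paired TX queue, receive up to @budget
 * packets, flush any XDP redirects and kick the XDP TX queue if needed,
 * and complete NAPI (re-enabling callbacks) when the budget was not
 * exhausted.
 */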
2191static int virtnet_poll(struct napi_struct *napi, int budget)
2192{
2193 struct receive_queue *rq =
2194 container_of(napi, struct receive_queue, napi);
2195 struct virtnet_info *vi = rq->vq->vdev->priv;
2196 struct send_queue *sq;
2197 unsigned int received;
2198 unsigned int xdp_xmit = 0;
2199 bool napi_complete;
2200
2201 virtnet_poll_cleantx(rq);
2202
2203 received = virtnet_receive(rq, budget, &xdp_xmit);
2204 rq->packets_in_napi += received;
2205
2206 if (xdp_xmit & VIRTIO_XDP_REDIR)
2207 xdp_do_flush();
2208
2209 /* Out of packets? */
2210 if (received < budget) {
2211 napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
2212 if (napi_complete && rq->dim_enabled)
2213 virtnet_rx_dim_update(vi, rq);
2214 }
2215
2216 if (xdp_xmit & VIRTIO_XDP_TX) {
2217 sq = virtnet_xdp_get_sq(vi);
2218 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2219 u64_stats_update_begin(&sq->stats.syncp);
2220 u64_stats_inc(&sq->stats.kicks);
2221 u64_stats_update_end(&sq->stats.syncp);
2222 }
2223 virtnet_xdp_put_sq(vi, sq);
2224 }
2225
2226 return received;
2227}
2228
2229static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
2230{
2231 virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
2232 napi_disable(&vi->rq[qp_index].napi);
2233 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2234}
2235
2236static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
2237{
2238 struct net_device *dev = vi->dev;
2239 int err;
2240
2241 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
2242 vi->rq[qp_index].napi.napi_id);
2243 if (err < 0)
2244 return err;
2245
2246 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
2247 MEM_TYPE_PAGE_SHARED, NULL);
2248 if (err < 0)
2249 goto err_xdp_reg_mem_model;
2250
2251 virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
2252 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
2253
2254 return 0;
2255
2256err_xdp_reg_mem_model:
2257 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2258 return err;
2259}
2260
2261static int virtnet_open(struct net_device *dev)
2262{
2263 struct virtnet_info *vi = netdev_priv(dev);
2264 int i, err;
2265
2266 enable_delayed_refill(vi);
2267
2268 for (i = 0; i < vi->max_queue_pairs; i++) {
2269 if (i < vi->curr_queue_pairs)
2270 /* Make sure we have some buffers: if oom use wq. */
2271 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2272 schedule_delayed_work(&vi->refill, 0);
2273
2274 err = virtnet_enable_queue_pair(vi, i);
2275 if (err < 0)
2276 goto err_enable_qp;
2277 }
2278
2279 return 0;
2280
2281err_enable_qp:
2282 disable_delayed_refill(vi);
2283 cancel_delayed_work_sync(&vi->refill);
2284
2285 for (i--; i >= 0; i--) {
2286 virtnet_disable_queue_pair(vi, i);
2287 cancel_work_sync(&vi->rq[i].dim.work);
2288 }
2289
2290 return err;
2291}
2292
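/* TX NAPI poll handler: free completed skbs under the tx lock, wake the
 * queue when there is room again, and only complete NAPI once callbacks
 * can be re-enabled without missing a notification. XDP queues complete
 * immediately since they don't use callbacks.
 */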
2293static int virtnet_poll_tx(struct napi_struct *napi, int budget)
2294{
2295 struct send_queue *sq = container_of(napi, struct send_queue, napi);
2296 struct virtnet_info *vi = sq->vq->vdev->priv;
2297 unsigned int index = vq2txq(sq->vq);
2298 struct netdev_queue *txq;
2299 int opaque;
2300 bool done;
2301
2302 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2303 /* We don't need to enable cb for XDP */
2304 napi_complete_done(napi, 0);
2305 return 0;
2306 }
2307
2308 txq = netdev_get_tx_queue(vi->dev, index);
2309 __netif_tx_lock(txq, raw_smp_processor_id());
2310 virtqueue_disable_cb(sq->vq);
2311 free_old_xmit_skbs(sq, true);
2312
2313 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
2314 netif_tx_wake_queue(txq);
2315
2316 opaque = virtqueue_enable_cb_prepare(sq->vq);
2317
2318 done = napi_complete_done(napi, 0);
2319
2320 if (!done)
2321 virtqueue_disable_cb(sq->vq);
2322
2323 __netif_tx_unlock(txq);
2324
2325 if (done) {
2326 if (unlikely(virtqueue_poll(sq->vq, opaque))) {
2327 if (napi_schedule_prep(napi)) {
2328 __netif_tx_lock(txq, raw_smp_processor_id());
2329 virtqueue_disable_cb(sq->vq);
2330 __netif_tx_unlock(txq);
2331 __napi_schedule(napi);
2332 }
2333 }
2334 }
2335
2336 return 0;
2337}
2338
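/* Build the virtio-net header and scatterlist for @skb and add it to the
 * send virtqueue. If the device accepts an arbitrary header layout
 * (any_header_sg) and there is headroom, the header is written into the
 * skb headroom so it shares the first sg entry with the linear data.
 */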
2339static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2340{
2341 struct virtio_net_hdr_mrg_rxbuf *hdr;
2342 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
2343 struct virtnet_info *vi = sq->vq->vdev->priv;
2344 int num_sg;
2345 unsigned hdr_len = vi->hdr_len;
2346 bool can_push;
2347
2348 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2349
2350 can_push = vi->any_header_sg &&
2351 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
2352 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
2353 /* Even if we can, don't push here yet as this would skew
2354 * csum_start offset below. */
2355 if (can_push)
2356 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
2357 else
2358 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
2359
2360 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
2361 virtio_is_little_endian(vi->vdev), false,
2362 0))
2363 return -EPROTO;
2364
2365 if (vi->mergeable_rx_bufs)
2366 hdr->num_buffers = 0;
2367
2368 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2369 if (can_push) {
2370 __skb_push(skb, hdr_len);
2371 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2372 if (unlikely(num_sg < 0))
2373 return num_sg;
2374 /* Pull header back to avoid skew in tx bytes calculations. */
2375 __skb_pull(skb, hdr_len);
2376 } else {
2377 sg_set_buf(sq->sg, hdr, hdr_len);
2378 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2379 if (unlikely(num_sg < 0))
2380 return num_sg;
2381 num_sg++;
2382 }
2383 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
2384}
2385
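/* ndo_start_xmit handler: reclaim completed buffers, hand the skb to
 * xmit_skb(), stop the queue if the ring is nearly full, and notify the
 * device unless more packets are about to follow (xmit_more).
 */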
2386static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
2387{
2388 struct virtnet_info *vi = netdev_priv(dev);
2389 int qnum = skb_get_queue_mapping(skb);
2390 struct send_queue *sq = &vi->sq[qnum];
2391 int err;
2392 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
2393 bool kick = !netdev_xmit_more();
2394 bool use_napi = sq->napi.weight;
2395
2396 /* Free up any pending old buffers before queueing new ones. */
2397 do {
2398 if (use_napi)
2399 virtqueue_disable_cb(sq->vq);
2400
2401 free_old_xmit_skbs(sq, false);
2402
2403 } while (use_napi && kick &&
2404 unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2405
2406 /* timestamp packet in software */
2407 skb_tx_timestamp(skb);
2408
2409 /* Try to transmit */
2410 err = xmit_skb(sq, skb);
2411
2412 /* This should not happen! */
2413 if (unlikely(err)) {
2414 DEV_STATS_INC(dev, tx_fifo_errors);
2415 if (net_ratelimit())
2416 dev_warn(&dev->dev,
2417 "Unexpected TXQ (%d) queue failure: %d\n",
2418 qnum, err);
2419 DEV_STATS_INC(dev, tx_dropped);
2420 dev_kfree_skb_any(skb);
2421 return NETDEV_TX_OK;
2422 }
2423
2424 /* Don't wait up for transmitted skbs to be freed. */
2425 if (!use_napi) {
2426 skb_orphan(skb);
2427 nf_reset_ct(skb);
2428 }
2429
2430 check_sq_full_and_disable(vi, dev, sq);
2431
2432 if (kick || netif_xmit_stopped(txq)) {
2433 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2434 u64_stats_update_begin(&sq->stats.syncp);
2435 u64_stats_inc(&sq->stats.kicks);
2436 u64_stats_update_end(&sq->stats.syncp);
2437 }
2438 }
2439
2440 return NETDEV_TX_OK;
2441}
2442
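/* Resize an RX virtqueue to @ring_num entries: quiesce NAPI and the dim
 * work, resize the ring, then refill it and re-enable NAPI.
 */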
2443static int virtnet_rx_resize(struct virtnet_info *vi,
2444 struct receive_queue *rq, u32 ring_num)
2445{
2446 bool running = netif_running(vi->dev);
2447 int err, qindex;
2448
2449 qindex = rq - vi->rq;
2450
2451 if (running) {
2452 napi_disable(&rq->napi);
2453 cancel_work_sync(&rq->dim.work);
2454 }
2455
2456 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
2457 if (err)
2458 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
2459
2460 if (!try_fill_recv(vi, rq, GFP_KERNEL))
2461 schedule_delayed_work(&vi->refill, 0);
2462
2463 if (running)
2464 virtnet_napi_enable(rq->vq, &rq->napi);
2465 return err;
2466}
2467
2468static int virtnet_tx_resize(struct virtnet_info *vi,
2469 struct send_queue *sq, u32 ring_num)
2470{
2471 bool running = netif_running(vi->dev);
2472 struct netdev_queue *txq;
2473 int err, qindex;
2474
2475 qindex = sq - vi->sq;
2476
2477 if (running)
2478 virtnet_napi_tx_disable(&sq->napi);
2479
2480 txq = netdev_get_tx_queue(vi->dev, qindex);
2481
2482	/* 1. wait for all xmit to complete
2483 * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
2484 */
2485 __netif_tx_lock_bh(txq);
2486
2487 /* Prevent rx poll from accessing sq. */
2488 sq->reset = true;
2489
2490 /* Prevent the upper layer from trying to send packets. */
2491 netif_stop_subqueue(vi->dev, qindex);
2492
2493 __netif_tx_unlock_bh(txq);
2494
2495 err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2496 if (err)
2497 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
2498
2499 __netif_tx_lock_bh(txq);
2500 sq->reset = false;
2501 netif_tx_wake_queue(txq);
2502 __netif_tx_unlock_bh(txq);
2503
2504 if (running)
2505 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2506 return err;
2507}
2508
2509/*
2510 * Send command via the control virtqueue and check status. Commands
2511 * supported by the hypervisor, as indicated by feature bits, should
2512 * never fail unless improperly formatted.
2513 */
2514static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2515 struct scatterlist *out)
2516{
2517 struct scatterlist *sgs[4], hdr, stat;
2518 unsigned out_num = 0, tmp;
2519 int ret;
2520
2521 /* Caller should know better */
2522 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
2523
2524 vi->ctrl->status = ~0;
2525 vi->ctrl->hdr.class = class;
2526 vi->ctrl->hdr.cmd = cmd;
2527 /* Add header */
2528 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2529 sgs[out_num++] = &hdr;
2530
2531 if (out)
2532 sgs[out_num++] = out;
2533
2534 /* Add return status. */
2535 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2536 sgs[out_num] = &stat;
2537
2538 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
2539 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
2540 if (ret < 0) {
2541 dev_warn(&vi->vdev->dev,
2542			 "Failed to add sgs for command vq: %d\n", ret);
2543 return false;
2544 }
2545
2546 if (unlikely(!virtqueue_kick(vi->cvq)))
2547 return vi->ctrl->status == VIRTIO_NET_OK;
2548
2549	/* Spin for a response; the kick causes an ioport write, trapping
2550 * into the hypervisor, so the request should be handled immediately.
2551 */
2552 while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2553 !virtqueue_is_broken(vi->cvq))
2554 cpu_relax();
2555
2556 return vi->ctrl->status == VIRTIO_NET_OK;
2557}
2558
2559static int virtnet_set_mac_address(struct net_device *dev, void *p)
2560{
2561 struct virtnet_info *vi = netdev_priv(dev);
2562 struct virtio_device *vdev = vi->vdev;
2563 int ret;
2564 struct sockaddr *addr;
2565 struct scatterlist sg;
2566
2567 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2568 return -EOPNOTSUPP;
2569
2570 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
2571 if (!addr)
2572 return -ENOMEM;
2573
2574 ret = eth_prepare_mac_addr_change(dev, addr);
2575 if (ret)
2576 goto out;
2577
2578 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
2579 sg_init_one(&sg, addr->sa_data, dev->addr_len);
2580 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2581 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
2582 dev_warn(&vdev->dev,
2583 "Failed to set mac address by vq command.\n");
2584 ret = -EINVAL;
2585 goto out;
2586 }
2587 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
2588 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2589 unsigned int i;
2590
2591 /* Naturally, this has an atomicity problem. */
2592 for (i = 0; i < dev->addr_len; i++)
2593 virtio_cwrite8(vdev,
2594 offsetof(struct virtio_net_config, mac) +
2595 i, addr->sa_data[i]);
2596 }
2597
2598 eth_commit_mac_addr_change(dev, p);
2599 ret = 0;
2600
2601out:
2602 kfree(addr);
2603 return ret;
2604}
2605
2606static void virtnet_stats(struct net_device *dev,
2607 struct rtnl_link_stats64 *tot)
2608{
2609 struct virtnet_info *vi = netdev_priv(dev);
2610 unsigned int start;
2611 int i;
2612
2613 for (i = 0; i < vi->max_queue_pairs; i++) {
2614 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
2615 struct receive_queue *rq = &vi->rq[i];
2616 struct send_queue *sq = &vi->sq[i];
2617
2618 do {
2619 start = u64_stats_fetch_begin(&sq->stats.syncp);
2620 tpackets = u64_stats_read(&sq->stats.packets);
2621 tbytes = u64_stats_read(&sq->stats.bytes);
2622 terrors = u64_stats_read(&sq->stats.tx_timeouts);
2623 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
2624
2625 do {
2626 start = u64_stats_fetch_begin(&rq->stats.syncp);
2627 rpackets = u64_stats_read(&rq->stats.packets);
2628 rbytes = u64_stats_read(&rq->stats.bytes);
2629 rdrops = u64_stats_read(&rq->stats.drops);
2630 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
2631
2632 tot->rx_packets += rpackets;
2633 tot->tx_packets += tpackets;
2634 tot->rx_bytes += rbytes;
2635 tot->tx_bytes += tbytes;
2636 tot->rx_dropped += rdrops;
2637 tot->tx_errors += terrors;
2638 }
2639
2640 tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
2641 tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
2642 tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
2643 tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
2644}
2645
2646static void virtnet_ack_link_announce(struct virtnet_info *vi)
2647{
2648 rtnl_lock();
2649 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2650 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
2651 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2652 rtnl_unlock();
2653}
2654
2655static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2656{
2657 struct scatterlist sg;
2658 struct net_device *dev = vi->dev;
2659
2660 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2661 return 0;
2662
2663 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
2664 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
2665
2666 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2667 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2668		dev_warn(&dev->dev, "Failed to set num of queue pairs to %d\n",
2669 queue_pairs);
2670 return -EINVAL;
2671 } else {
2672 vi->curr_queue_pairs = queue_pairs;
2673		/* virtnet_open() will refill when the device goes up. */
2674 if (dev->flags & IFF_UP)
2675 schedule_delayed_work(&vi->refill, 0);
2676 }
2677
2678 return 0;
2679}
2680
2681static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2682{
2683 int err;
2684
2685 rtnl_lock();
2686 err = _virtnet_set_queues(vi, queue_pairs);
2687 rtnl_unlock();
2688 return err;
2689}
2690
2691static int virtnet_close(struct net_device *dev)
2692{
2693 struct virtnet_info *vi = netdev_priv(dev);
2694 int i;
2695
2696 /* Make sure NAPI doesn't schedule refill work */
2697 disable_delayed_refill(vi);
2698 /* Make sure refill_work doesn't re-enable napi! */
2699 cancel_delayed_work_sync(&vi->refill);
2700
2701 for (i = 0; i < vi->max_queue_pairs; i++) {
2702 virtnet_disable_queue_pair(vi, i);
2703 cancel_work_sync(&vi->rq[i].dim.work);
2704 }
2705
2706 return 0;
2707}
2708
2709static void virtnet_set_rx_mode(struct net_device *dev)
2710{
2711 struct virtnet_info *vi = netdev_priv(dev);
2712 struct scatterlist sg[2];
2713 struct virtio_net_ctrl_mac *mac_data;
2714 struct netdev_hw_addr *ha;
2715 int uc_count;
2716 int mc_count;
2717 void *buf;
2718 int i;
2719
2720 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
2721 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
2722 return;
2723
2724 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
2725 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2726
2727 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
2728
2729 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2730 VIRTIO_NET_CTRL_RX_PROMISC, sg))
2731 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
2732 vi->ctrl->promisc ? "en" : "dis");
2733
2734 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
2735
2736 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2737 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2738 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
2739 vi->ctrl->allmulti ? "en" : "dis");
2740
2741 uc_count = netdev_uc_count(dev);
2742 mc_count = netdev_mc_count(dev);
2743 /* MAC filter - use one buffer for both lists */
2744 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2745 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
2746 mac_data = buf;
2747 if (!buf)
2748 return;
2749
2750 sg_init_table(sg, 2);
2751
2752 /* Store the unicast list and count in the front of the buffer */
2753 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2754 i = 0;
2755 netdev_for_each_uc_addr(ha, dev)
2756 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2757
2758 sg_set_buf(&sg[0], mac_data,
2759 sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2760
2761 /* multicast list and count fill the end */
2762 mac_data = (void *)&mac_data->macs[uc_count][0];
2763
2764 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2765 i = 0;
2766 netdev_for_each_mc_addr(ha, dev)
2767 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2768
2769 sg_set_buf(&sg[1], mac_data,
2770 sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2771
2772 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2773 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
2774 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2775
2776 kfree(buf);
2777}
2778
2779static int virtnet_vlan_rx_add_vid(struct net_device *dev,
2780 __be16 proto, u16 vid)
2781{
2782 struct virtnet_info *vi = netdev_priv(dev);
2783 struct scatterlist sg;
2784
2785 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2786 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2787
2788 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2789 VIRTIO_NET_CTRL_VLAN_ADD, &sg))
2790 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
2791 return 0;
2792}
2793
2794static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
2795 __be16 proto, u16 vid)
2796{
2797 struct virtnet_info *vi = netdev_priv(dev);
2798 struct scatterlist sg;
2799
2800 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2801 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2802
2803 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2804 VIRTIO_NET_CTRL_VLAN_DEL, &sg))
2805 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
2806 return 0;
2807}
2808
2809static void virtnet_clean_affinity(struct virtnet_info *vi)
2810{
2811 int i;
2812
2813 if (vi->affinity_hint_set) {
2814 for (i = 0; i < vi->max_queue_pairs; i++) {
2815 virtqueue_set_affinity(vi->rq[i].vq, NULL);
2816 virtqueue_set_affinity(vi->sq[i].vq, NULL);
2817 }
2818
2819 vi->affinity_hint_set = false;
2820 }
2821}
2822
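/* Spread the currently used queue pairs across the online CPUs, group by
 * group, and program the resulting masks both as virtqueue affinity hints
 * and as XPS maps for the transmit queues.
 */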
2823static void virtnet_set_affinity(struct virtnet_info *vi)
2824{
2825 cpumask_var_t mask;
2826 int stragglers;
2827 int group_size;
2828 int i, j, cpu;
2829 int num_cpu;
2830 int stride;
2831
2832 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2833 virtnet_clean_affinity(vi);
2834 return;
2835 }
2836
2837 num_cpu = num_online_cpus();
2838 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2839 stragglers = num_cpu >= vi->curr_queue_pairs ?
2840 num_cpu % vi->curr_queue_pairs :
2841 0;
2842 cpu = cpumask_first(cpu_online_mask);
2843
2844 for (i = 0; i < vi->curr_queue_pairs; i++) {
2845 group_size = stride + (i < stragglers ? 1 : 0);
2846
2847 for (j = 0; j < group_size; j++) {
2848 cpumask_set_cpu(cpu, mask);
2849 cpu = cpumask_next_wrap(cpu, cpu_online_mask,
2850 nr_cpu_ids, false);
2851 }
2852 virtqueue_set_affinity(vi->rq[i].vq, mask);
2853 virtqueue_set_affinity(vi->sq[i].vq, mask);
2854 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2855 cpumask_clear(mask);
2856 }
2857
2858 vi->affinity_hint_set = true;
2859 free_cpumask_var(mask);
2860}
2861
2862static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
2863{
2864 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2865 node);
2866 virtnet_set_affinity(vi);
2867 return 0;
2868}
2869
2870static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
2871{
2872 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2873 node_dead);
2874 virtnet_set_affinity(vi);
2875 return 0;
2876}
2877
2878static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
2879{
2880 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2881 node);
2882
2883 virtnet_clean_affinity(vi);
2884 return 0;
2885}
2886
2887static enum cpuhp_state virtionet_online;
2888
2889static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2890{
2891 int ret;
2892
2893 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2894 if (ret)
2895 return ret;
2896 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2897 &vi->node_dead);
2898 if (!ret)
2899 return ret;
2900 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2901 return ret;
2902}
2903
2904static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2905{
2906 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2907 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2908 &vi->node_dead);
2909}
2910
2911static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2912 u16 vqn, u32 max_usecs, u32 max_packets)
2913{
2914 struct scatterlist sgs;
2915
2916 vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
2917 vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
2918 vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
2919 sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
2920
2921 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
2922 VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
2923 &sgs))
2924 return -EINVAL;
2925
2926 return 0;
2927}
2928
2929static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2930 u16 queue, u32 max_usecs,
2931 u32 max_packets)
2932{
2933 int err;
2934
2935 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
2936 max_usecs, max_packets);
2937 if (err)
2938 return err;
2939
2940 vi->rq[queue].intr_coal.max_usecs = max_usecs;
2941 vi->rq[queue].intr_coal.max_packets = max_packets;
2942
2943 return 0;
2944}
2945
2946static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2947 u16 queue, u32 max_usecs,
2948 u32 max_packets)
2949{
2950 int err;
2951
2952 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
2953 max_usecs, max_packets);
2954 if (err)
2955 return err;
2956
2957 vi->sq[queue].intr_coal.max_usecs = max_usecs;
2958 vi->sq[queue].intr_coal.max_packets = max_packets;
2959
2960 return 0;
2961}
2962
2963static void virtnet_get_ringparam(struct net_device *dev,
2964 struct ethtool_ringparam *ring,
2965 struct kernel_ethtool_ringparam *kernel_ring,
2966 struct netlink_ext_ack *extack)
2967{
2968 struct virtnet_info *vi = netdev_priv(dev);
2969
2970 ring->rx_max_pending = vi->rq[0].vq->num_max;
2971 ring->tx_max_pending = vi->sq[0].vq->num_max;
2972 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2973 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2974}
2975
2976static int virtnet_set_ringparam(struct net_device *dev,
2977 struct ethtool_ringparam *ring,
2978 struct kernel_ethtool_ringparam *kernel_ring,
2979 struct netlink_ext_ack *extack)
2980{
2981 struct virtnet_info *vi = netdev_priv(dev);
2982 u32 rx_pending, tx_pending;
2983 struct receive_queue *rq;
2984 struct send_queue *sq;
2985 int i, err;
2986
2987 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2988 return -EINVAL;
2989
2990 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2991 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2992
2993 if (ring->rx_pending == rx_pending &&
2994 ring->tx_pending == tx_pending)
2995 return 0;
2996
2997 if (ring->rx_pending > vi->rq[0].vq->num_max)
2998 return -EINVAL;
2999
3000 if (ring->tx_pending > vi->sq[0].vq->num_max)
3001 return -EINVAL;
3002
3003 for (i = 0; i < vi->max_queue_pairs; i++) {
3004 rq = vi->rq + i;
3005 sq = vi->sq + i;
3006
3007 if (ring->tx_pending != tx_pending) {
3008 err = virtnet_tx_resize(vi, sq, ring->tx_pending);
3009 if (err)
3010 return err;
3011
3012 /* Upon disabling and re-enabling a transmit virtqueue, the device must
3013 * set the coalescing parameters of the virtqueue to those configured
3014 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
3015 * did not set any TX coalescing parameters, to 0.
3016 */
3017 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
3018 vi->intr_coal_tx.max_usecs,
3019 vi->intr_coal_tx.max_packets);
3020 if (err)
3021 return err;
3022 }
3023
3024 if (ring->rx_pending != rx_pending) {
3025 err = virtnet_rx_resize(vi, rq, ring->rx_pending);
3026 if (err)
3027 return err;
3028
3029			/* The reason is the same as for the transmit virtqueue reset */
3030 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
3031 vi->intr_coal_rx.max_usecs,
3032 vi->intr_coal_rx.max_packets);
3033 if (err)
3034 return err;
3035 }
3036 }
3037
3038 return 0;
3039}
3040
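/* Push the current RSS/hash configuration (hash types, indirection table,
 * max tx vq and key) to the device through the control virtqueue.
 */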
3041static bool virtnet_commit_rss_command(struct virtnet_info *vi)
3042{
3043 struct net_device *dev = vi->dev;
3044 struct scatterlist sgs[4];
3045 unsigned int sg_buf_size;
3046
3047 /* prepare sgs */
3048 sg_init_table(sgs, 4);
3049
3050 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
3051 sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
3052
3053 sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
3054 sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
3055
3056 sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
3057 - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
3058 sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
3059
3060 sg_buf_size = vi->rss_key_size;
3061 sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
3062
3063 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
3064 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
3065 : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
3066 dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
3067 return false;
3068 }
3069 return true;
3070}
3071
3072static void virtnet_init_default_rss(struct virtnet_info *vi)
3073{
3074 u32 indir_val = 0;
3075 int i = 0;
3076
3077 vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
3078 vi->rss_hash_types_saved = vi->rss_hash_types_supported;
3079 vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
3080 ? vi->rss_indir_table_size - 1 : 0;
3081 vi->ctrl->rss.unclassified_queue = 0;
3082
3083 for (; i < vi->rss_indir_table_size; ++i) {
3084 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
3085 vi->ctrl->rss.indirection_table[i] = indir_val;
3086 }
3087
3088 vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
3089 vi->ctrl->rss.hash_key_length = vi->rss_key_size;
3090
3091 netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
3092}
3093
3094static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
3095{
3096 info->data = 0;
3097 switch (info->flow_type) {
3098 case TCP_V4_FLOW:
3099 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
3100 info->data = RXH_IP_SRC | RXH_IP_DST |
3101 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3102 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3103 info->data = RXH_IP_SRC | RXH_IP_DST;
3104 }
3105 break;
3106 case TCP_V6_FLOW:
3107 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
3108 info->data = RXH_IP_SRC | RXH_IP_DST |
3109 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3110 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3111 info->data = RXH_IP_SRC | RXH_IP_DST;
3112 }
3113 break;
3114 case UDP_V4_FLOW:
3115 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
3116 info->data = RXH_IP_SRC | RXH_IP_DST |
3117 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3118 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3119 info->data = RXH_IP_SRC | RXH_IP_DST;
3120 }
3121 break;
3122 case UDP_V6_FLOW:
3123 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
3124 info->data = RXH_IP_SRC | RXH_IP_DST |
3125 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3126 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3127 info->data = RXH_IP_SRC | RXH_IP_DST;
3128 }
3129 break;
3130 case IPV4_FLOW:
3131 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
3132 info->data = RXH_IP_SRC | RXH_IP_DST;
3133
3134 break;
3135 case IPV6_FLOW:
3136 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
3137 info->data = RXH_IP_SRC | RXH_IP_DST;
3138
3139 break;
3140 default:
3141 info->data = 0;
3142 break;
3143 }
3144}
3145
3146static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
3147{
3148 u32 new_hashtypes = vi->rss_hash_types_saved;
3149 bool is_disable = info->data & RXH_DISCARD;
3150 bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
3151
3152 /* supports only 'sd', 'sdfn' and 'r' */
3153 if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
3154 return false;
3155
3156 switch (info->flow_type) {
3157 case TCP_V4_FLOW:
3158 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
3159 if (!is_disable)
3160 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3161 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
3162 break;
3163 case UDP_V4_FLOW:
3164 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
3165 if (!is_disable)
3166 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3167 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
3168 break;
3169 case IPV4_FLOW:
3170 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3171 if (!is_disable)
3172 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3173 break;
3174 case TCP_V6_FLOW:
3175 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
3176 if (!is_disable)
3177 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3178 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
3179 break;
3180 case UDP_V6_FLOW:
3181 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
3182 if (!is_disable)
3183 new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3184 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
3185 break;
3186 case IPV6_FLOW:
3187 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3188 if (!is_disable)
3189 new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3190 break;
3191 default:
3192 /* unsupported flow */
3193 return false;
3194 }
3195
3196 /* if unsupported hashtype was set */
3197 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
3198 return false;
3199
3200 if (new_hashtypes != vi->rss_hash_types_saved) {
3201 vi->rss_hash_types_saved = new_hashtypes;
3202 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3203 if (vi->dev->features & NETIF_F_RXHASH)
3204 return virtnet_commit_rss_command(vi);
3205 }
3206
3207 return true;
3208}
3209
3210static void virtnet_get_drvinfo(struct net_device *dev,
3211 struct ethtool_drvinfo *info)
3212{
3213 struct virtnet_info *vi = netdev_priv(dev);
3214 struct virtio_device *vdev = vi->vdev;
3215
3216 strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
3217 strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
3218 strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
3219
3220}
3221
3222/* TODO: Eliminate OOO packets during switching */
3223static int virtnet_set_channels(struct net_device *dev,
3224 struct ethtool_channels *channels)
3225{
3226 struct virtnet_info *vi = netdev_priv(dev);
3227 u16 queue_pairs = channels->combined_count;
3228 int err;
3229
3230 /* We don't support separate rx/tx channels.
3231 * We don't allow setting 'other' channels.
3232 */
3233 if (channels->rx_count || channels->tx_count || channels->other_count)
3234 return -EINVAL;
3235
3236 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
3237 return -EINVAL;
3238
3239	/* For now we don't support modifying channels while XDP is loaded.
3240	 * Also, when XDP is loaded all RX queues have XDP programs, so we
3241	 * only need to check a single RX queue.
3242 */
3243 if (vi->rq[0].xdp_prog)
3244 return -EINVAL;
3245
3246 cpus_read_lock();
3247 err = _virtnet_set_queues(vi, queue_pairs);
3248 if (err) {
3249 cpus_read_unlock();
3250 goto err;
3251 }
3252 virtnet_set_affinity(vi);
3253 cpus_read_unlock();
3254
3255 netif_set_real_num_tx_queues(dev, queue_pairs);
3256 netif_set_real_num_rx_queues(dev, queue_pairs);
3257 err:
3258 return err;
3259}
3260
3261static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3262{
3263 struct virtnet_info *vi = netdev_priv(dev);
3264 unsigned int i, j;
3265 u8 *p = data;
3266
3267 switch (stringset) {
3268 case ETH_SS_STATS:
3269 for (i = 0; i < vi->curr_queue_pairs; i++) {
3270 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
3271 ethtool_sprintf(&p, "rx_queue_%u_%s", i,
3272 virtnet_rq_stats_desc[j].desc);
3273 }
3274
3275 for (i = 0; i < vi->curr_queue_pairs; i++) {
3276 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
3277 ethtool_sprintf(&p, "tx_queue_%u_%s", i,
3278 virtnet_sq_stats_desc[j].desc);
3279 }
3280 break;
3281 }
3282}
3283
3284static int virtnet_get_sset_count(struct net_device *dev, int sset)
3285{
3286 struct virtnet_info *vi = netdev_priv(dev);
3287
3288 switch (sset) {
3289 case ETH_SS_STATS:
3290 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
3291 VIRTNET_SQ_STATS_LEN);
3292 default:
3293 return -EOPNOTSUPP;
3294 }
3295}
3296
3297static void virtnet_get_ethtool_stats(struct net_device *dev,
3298 struct ethtool_stats *stats, u64 *data)
3299{
3300 struct virtnet_info *vi = netdev_priv(dev);
3301 unsigned int idx = 0, start, i, j;
3302 const u8 *stats_base;
3303 const u64_stats_t *p;
3304 size_t offset;
3305
3306 for (i = 0; i < vi->curr_queue_pairs; i++) {
3307 struct receive_queue *rq = &vi->rq[i];
3308
3309 stats_base = (const u8 *)&rq->stats;
3310 do {
3311 start = u64_stats_fetch_begin(&rq->stats.syncp);
3312 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
3313 offset = virtnet_rq_stats_desc[j].offset;
3314 p = (const u64_stats_t *)(stats_base + offset);
3315 data[idx + j] = u64_stats_read(p);
3316 }
3317 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
3318 idx += VIRTNET_RQ_STATS_LEN;
3319 }
3320
3321 for (i = 0; i < vi->curr_queue_pairs; i++) {
3322 struct send_queue *sq = &vi->sq[i];
3323
3324 stats_base = (const u8 *)&sq->stats;
3325 do {
3326 start = u64_stats_fetch_begin(&sq->stats.syncp);
3327 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
3328 offset = virtnet_sq_stats_desc[j].offset;
3329 p = (const u64_stats_t *)(stats_base + offset);
3330 data[idx + j] = u64_stats_read(p);
3331 }
3332 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
3333 idx += VIRTNET_SQ_STATS_LEN;
3334 }
3335}
3336
3337static void virtnet_get_channels(struct net_device *dev,
3338 struct ethtool_channels *channels)
3339{
3340 struct virtnet_info *vi = netdev_priv(dev);
3341
3342 channels->combined_count = vi->curr_queue_pairs;
3343 channels->max_combined = vi->max_queue_pairs;
3344 channels->max_other = 0;
3345 channels->rx_count = 0;
3346 channels->tx_count = 0;
3347 channels->other_count = 0;
3348}
3349
3350static int virtnet_set_link_ksettings(struct net_device *dev,
3351 const struct ethtool_link_ksettings *cmd)
3352{
3353 struct virtnet_info *vi = netdev_priv(dev);
3354
3355 return ethtool_virtdev_set_link_ksettings(dev, cmd,
3356 &vi->speed, &vi->duplex);
3357}
3358
3359static int virtnet_get_link_ksettings(struct net_device *dev,
3360 struct ethtool_link_ksettings *cmd)
3361{
3362 struct virtnet_info *vi = netdev_priv(dev);
3363
3364 cmd->base.speed = vi->speed;
3365 cmd->base.duplex = vi->duplex;
3366 cmd->base.port = PORT_OTHER;
3367
3368 return 0;
3369}
3370
3371static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
3372 struct ethtool_coalesce *ec)
3373{
3374 struct scatterlist sgs_tx;
3375 int i;
3376
3377 vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
3378 vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
3379 sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
3380
3381 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3382 VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
3383 &sgs_tx))
3384 return -EINVAL;
3385
3386 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
3387 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
3388 for (i = 0; i < vi->max_queue_pairs; i++) {
3389 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3390 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3391 }
3392
3393 return 0;
3394}
3395
3396static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
3397 struct ethtool_coalesce *ec)
3398{
3399 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
3400 struct scatterlist sgs_rx;
3401 int i;
3402
3403 if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3404 return -EOPNOTSUPP;
3405
3406 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
3407 ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
3408 return -EINVAL;
3409
3410 if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
3411 vi->rx_dim_enabled = true;
3412 for (i = 0; i < vi->max_queue_pairs; i++)
3413 vi->rq[i].dim_enabled = true;
3414 return 0;
3415 }
3416
3417 if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
3418 vi->rx_dim_enabled = false;
3419 for (i = 0; i < vi->max_queue_pairs; i++)
3420 vi->rq[i].dim_enabled = false;
3421 }
3422
3423 /* Since the per-queue coalescing params can be set,
3424	 * we need to apply the new global params even if they
3425	 * are not updated.
3426 */
3427 vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
3428 vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
3429 sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
3430
3431 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3432 VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
3433 &sgs_rx))
3434 return -EINVAL;
3435
3436 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
3437 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
3438 for (i = 0; i < vi->max_queue_pairs; i++) {
3439 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3440 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3441 }
3442
3443 return 0;
3444}
3445
3446static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
3447 struct ethtool_coalesce *ec)
3448{
3449 int err;
3450
3451 err = virtnet_send_tx_notf_coal_cmds(vi, ec);
3452 if (err)
3453 return err;
3454
3455 err = virtnet_send_rx_notf_coal_cmds(vi, ec);
3456 if (err)
3457 return err;
3458
3459 return 0;
3460}
3461
3462static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
3463 struct ethtool_coalesce *ec,
3464 u16 queue)
3465{
3466 bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
3467 bool cur_rx_dim = vi->rq[queue].dim_enabled;
3468 u32 max_usecs, max_packets;
3469 int err;
3470
3471 max_usecs = vi->rq[queue].intr_coal.max_usecs;
3472 max_packets = vi->rq[queue].intr_coal.max_packets;
3473
3474 if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
3475 ec->rx_max_coalesced_frames != max_packets))
3476 return -EINVAL;
3477
3478 if (rx_ctrl_dim_on && !cur_rx_dim) {
3479 vi->rq[queue].dim_enabled = true;
3480 return 0;
3481 }
3482
3483 if (!rx_ctrl_dim_on && cur_rx_dim)
3484 vi->rq[queue].dim_enabled = false;
3485
3486 /* If no params are updated, userspace ethtool will
3487 * reject the modification.
3488 */
3489 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
3490 ec->rx_coalesce_usecs,
3491 ec->rx_max_coalesced_frames);
3492 if (err)
3493 return err;
3494
3495 return 0;
3496}
3497
3498static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
3499 struct ethtool_coalesce *ec,
3500 u16 queue)
3501{
3502 int err;
3503
3504 err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
3505 if (err)
3506 return err;
3507
3508 err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
3509 ec->tx_coalesce_usecs,
3510 ec->tx_max_coalesced_frames);
3511 if (err)
3512 return err;
3513
3514 return 0;
3515}
3516
3517static void virtnet_rx_dim_work(struct work_struct *work)
3518{
3519 struct dim *dim = container_of(work, struct dim, work);
3520 struct receive_queue *rq = container_of(dim,
3521 struct receive_queue, dim);
3522 struct virtnet_info *vi = rq->vq->vdev->priv;
3523 struct net_device *dev = vi->dev;
3524 struct dim_cq_moder update_moder;
3525 int i, qnum, err;
3526
3527 if (!rtnl_trylock())
3528 return;
3529
3530 /* Each rxq's work is queued by "net_dim()->schedule_work()"
3531 * in response to NAPI traffic changes. Note that dim->profile_ix
3532 * for each rxq is updated prior to the queuing action.
3533 * So we only need to traverse and update profiles for all rxqs
3534 * in the work which is holding rtnl_lock.
3535 */
3536 for (i = 0; i < vi->curr_queue_pairs; i++) {
3537 rq = &vi->rq[i];
3538 dim = &rq->dim;
3539 qnum = rq - vi->rq;
3540
3541 if (!rq->dim_enabled)
3542 continue;
3543
3544 update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
3545 if (update_moder.usec != rq->intr_coal.max_usecs ||
3546 update_moder.pkts != rq->intr_coal.max_packets) {
3547 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
3548 update_moder.usec,
3549 update_moder.pkts);
3550 if (err)
3551 pr_debug("%s: Failed to send dim parameters on rxq%d\n",
3552 dev->name, qnum);
3553 dim->state = DIM_START_MEASURE;
3554 }
3555 }
3556
3557 rtnl_unlock();
3558}
3559
3560static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
3561{
3562 /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
3563 * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
3564 */
3565 if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
3566 return -EOPNOTSUPP;
3567
3568 if (ec->tx_max_coalesced_frames > 1 ||
3569 ec->rx_max_coalesced_frames != 1)
3570 return -EINVAL;
3571
3572 return 0;
3573}
3574
3575static int virtnet_should_update_vq_weight(int dev_flags, int weight,
3576 int vq_weight, bool *should_update)
3577{
3578 if (weight ^ vq_weight) {
3579 if (dev_flags & IFF_UP)
3580 return -EBUSY;
3581 *should_update = true;
3582 }
3583
3584 return 0;
3585}
3586
3587static int virtnet_set_coalesce(struct net_device *dev,
3588 struct ethtool_coalesce *ec,
3589 struct kernel_ethtool_coalesce *kernel_coal,
3590 struct netlink_ext_ack *extack)
3591{
3592 struct virtnet_info *vi = netdev_priv(dev);
3593 int ret, queue_number, napi_weight;
3594 bool update_napi = false;
3595
3596 /* Can't change NAPI weight if the link is up */
3597 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3598 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
3599 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3600 vi->sq[queue_number].napi.weight,
3601 &update_napi);
3602 if (ret)
3603 return ret;
3604
3605 if (update_napi) {
3606 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be
3607 * updated for the sake of simplicity, which might not be necessary
3608 */
3609 break;
3610 }
3611 }
3612
3613 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
3614 ret = virtnet_send_notf_coal_cmds(vi, ec);
3615 else
3616 ret = virtnet_coal_params_supported(ec);
3617
3618 if (ret)
3619 return ret;
3620
3621 if (update_napi) {
3622 for (; queue_number < vi->max_queue_pairs; queue_number++)
3623 vi->sq[queue_number].napi.weight = napi_weight;
3624 }
3625
3626 return ret;
3627}
3628
3629static int virtnet_get_coalesce(struct net_device *dev,
3630 struct ethtool_coalesce *ec,
3631 struct kernel_ethtool_coalesce *kernel_coal,
3632 struct netlink_ext_ack *extack)
3633{
3634 struct virtnet_info *vi = netdev_priv(dev);
3635
3636 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
3637 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
3638 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
3639 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
3640 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
3641 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
3642 } else {
3643 ec->rx_max_coalesced_frames = 1;
3644
3645 if (vi->sq[0].napi.weight)
3646 ec->tx_max_coalesced_frames = 1;
3647 }
3648
3649 return 0;
3650}
3651
3652static int virtnet_set_per_queue_coalesce(struct net_device *dev,
3653 u32 queue,
3654 struct ethtool_coalesce *ec)
3655{
3656 struct virtnet_info *vi = netdev_priv(dev);
3657 int ret, napi_weight;
3658 bool update_napi = false;
3659
3660 if (queue >= vi->max_queue_pairs)
3661 return -EINVAL;
3662
3663 /* Can't change NAPI weight if the link is up */
3664 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
3665 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
3666 vi->sq[queue].napi.weight,
3667 &update_napi);
3668 if (ret)
3669 return ret;
3670
3671 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3672 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
3673 else
3674 ret = virtnet_coal_params_supported(ec);
3675
3676 if (ret)
3677 return ret;
3678
3679 if (update_napi)
3680 vi->sq[queue].napi.weight = napi_weight;
3681
3682 return 0;
3683}
3684
3685static int virtnet_get_per_queue_coalesce(struct net_device *dev,
3686 u32 queue,
3687 struct ethtool_coalesce *ec)
3688{
3689 struct virtnet_info *vi = netdev_priv(dev);
3690
3691 if (queue >= vi->max_queue_pairs)
3692 return -EINVAL;
3693
3694 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
3695 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
3696 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
3697 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
3698 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
3699 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
3700 } else {
3701 ec->rx_max_coalesced_frames = 1;
3702
3703 if (vi->sq[queue].napi.weight)
3704 ec->tx_max_coalesced_frames = 1;
3705 }
3706
3707 return 0;
3708}
3709
3710static void virtnet_init_settings(struct net_device *dev)
3711{
3712 struct virtnet_info *vi = netdev_priv(dev);
3713
3714 vi->speed = SPEED_UNKNOWN;
3715 vi->duplex = DUPLEX_UNKNOWN;
3716}
3717
3718static void virtnet_update_settings(struct virtnet_info *vi)
3719{
3720 u32 speed;
3721 u8 duplex;
3722
3723 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3724 return;
3725
3726 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3727
3728 if (ethtool_validate_speed(speed))
3729 vi->speed = speed;
3730
3731 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3732
3733 if (ethtool_validate_duplex(duplex))
3734 vi->duplex = duplex;
3735}
3736
3737static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
3738{
3739 return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
3740}
3741
3742static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
3743{
3744 return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
3745}
3746
3747static int virtnet_get_rxfh(struct net_device *dev,
3748 struct ethtool_rxfh_param *rxfh)
3749{
3750 struct virtnet_info *vi = netdev_priv(dev);
3751 int i;
3752
3753 if (rxfh->indir) {
3754 for (i = 0; i < vi->rss_indir_table_size; ++i)
3755 rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
3756 }
3757
3758 if (rxfh->key)
3759 memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
3760
3761 rxfh->hfunc = ETH_RSS_HASH_TOP;
3762
3763 return 0;
3764}
3765
3766static int virtnet_set_rxfh(struct net_device *dev,
3767 struct ethtool_rxfh_param *rxfh,
3768 struct netlink_ext_ack *extack)
3769{
3770 struct virtnet_info *vi = netdev_priv(dev);
3771 int i;
3772
3773 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
3774 rxfh->hfunc != ETH_RSS_HASH_TOP)
3775 return -EOPNOTSUPP;
3776
3777 if (rxfh->indir) {
3778 for (i = 0; i < vi->rss_indir_table_size; ++i)
3779 vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
3780 }
3781 if (rxfh->key)
3782 memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
3783
3784 virtnet_commit_rss_command(vi);
3785
3786 return 0;
3787}
3788
3789static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
3790{
3791 struct virtnet_info *vi = netdev_priv(dev);
3792 int rc = 0;
3793
3794 switch (info->cmd) {
3795 case ETHTOOL_GRXRINGS:
3796 info->data = vi->curr_queue_pairs;
3797 break;
3798 case ETHTOOL_GRXFH:
3799 virtnet_get_hashflow(vi, info);
3800 break;
3801 default:
3802 rc = -EOPNOTSUPP;
3803 }
3804
3805 return rc;
3806}
3807
3808static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3809{
3810 struct virtnet_info *vi = netdev_priv(dev);
3811 int rc = 0;
3812
3813 switch (info->cmd) {
3814 case ETHTOOL_SRXFH:
3815 if (!virtnet_set_hashflow(vi, info))
3816 rc = -EINVAL;
3817
3818 break;
3819 default:
3820 rc = -EOPNOTSUPP;
3821 }
3822
3823 return rc;
3824}
3825
3826static const struct ethtool_ops virtnet_ethtool_ops = {
3827 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
3828 ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
3829 .get_drvinfo = virtnet_get_drvinfo,
3830 .get_link = ethtool_op_get_link,
3831 .get_ringparam = virtnet_get_ringparam,
3832 .set_ringparam = virtnet_set_ringparam,
3833 .get_strings = virtnet_get_strings,
3834 .get_sset_count = virtnet_get_sset_count,
3835 .get_ethtool_stats = virtnet_get_ethtool_stats,
3836 .set_channels = virtnet_set_channels,
3837 .get_channels = virtnet_get_channels,
3838 .get_ts_info = ethtool_op_get_ts_info,
3839 .get_link_ksettings = virtnet_get_link_ksettings,
3840 .set_link_ksettings = virtnet_set_link_ksettings,
3841 .set_coalesce = virtnet_set_coalesce,
3842 .get_coalesce = virtnet_get_coalesce,
3843 .set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
3844 .get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
3845 .get_rxfh_key_size = virtnet_get_rxfh_key_size,
3846 .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
3847 .get_rxfh = virtnet_get_rxfh,
3848 .set_rxfh = virtnet_set_rxfh,
3849 .get_rxnfc = virtnet_get_rxnfc,
3850 .set_rxnfc = virtnet_set_rxnfc,
3851};
3852
3853static void virtnet_freeze_down(struct virtio_device *vdev)
3854{
3855 struct virtnet_info *vi = vdev->priv;
3856
3857 /* Make sure no work handler is accessing the device */
3858 flush_work(&vi->config_work);
3859
3860 netif_tx_lock_bh(vi->dev);
3861 netif_device_detach(vi->dev);
3862 netif_tx_unlock_bh(vi->dev);
3863 if (netif_running(vi->dev))
3864 virtnet_close(vi->dev);
3865}
3866
3867static int init_vqs(struct virtnet_info *vi);
3868
3869static int virtnet_restore_up(struct virtio_device *vdev)
3870{
3871 struct virtnet_info *vi = vdev->priv;
3872 int err;
3873
3874 err = init_vqs(vi);
3875 if (err)
3876 return err;
3877
3878 virtio_device_ready(vdev);
3879
3880 enable_delayed_refill(vi);
3881
3882 if (netif_running(vi->dev)) {
3883 err = virtnet_open(vi->dev);
3884 if (err)
3885 return err;
3886 }
3887
3888 netif_tx_lock_bh(vi->dev);
3889 netif_device_attach(vi->dev);
3890 netif_tx_unlock_bh(vi->dev);
3891 return err;
3892}
3893
3894static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
3895{
3896 struct scatterlist sg;
3897 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3898
3899 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3900
3901 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
3902 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
3903		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
3904 return -EINVAL;
3905 }
3906
3907 return 0;
3908}
3909
3910static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
3911{
3912 u64 offloads = 0;
3913
3914 if (!vi->guest_offloads)
3915 return 0;
3916
3917 return virtnet_set_guest_offloads(vi, offloads);
3918}
3919
3920static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
3921{
3922 u64 offloads = vi->guest_offloads;
3923
3924 if (!vi->guest_offloads)
3925 return 0;
3926
3927 return virtnet_set_guest_offloads(vi, offloads);
3928}
3929
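/* Attach or detach an XDP program. Validates that host GRO/CSUM offloads and
 * the MTU are compatible with XDP, tries to reserve one extra TX queue per
 * CPU for XDP_TX, and swaps the per-queue program pointers with NAPI disabled.
 */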
3930static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
3931 struct netlink_ext_ack *extack)
3932{
3933 unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
3934 sizeof(struct skb_shared_info));
3935 unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
3936 struct virtnet_info *vi = netdev_priv(dev);
3937 struct bpf_prog *old_prog;
3938 u16 xdp_qp = 0, curr_qp;
3939 int i, err;
3940
3941 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
3942 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3943 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3944 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
3945 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
3946 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
3947 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
3948 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
3949 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
3950 return -EOPNOTSUPP;
3951 }
3952
3953 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
3954 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
3955 return -EINVAL;
3956 }
3957
3958 if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
3959 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
3960 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
3961 return -EINVAL;
3962 }
3963
3964 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3965 if (prog)
3966 xdp_qp = nr_cpu_ids;
3967
3968 /* XDP requires extra queues for XDP_TX */
3969 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
3970		netdev_warn_once(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
3971 curr_qp + xdp_qp, vi->max_queue_pairs);
3972 xdp_qp = 0;
3973 }
3974
3975 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
3976 if (!prog && !old_prog)
3977 return 0;
3978
3979 if (prog)
3980 bpf_prog_add(prog, vi->max_queue_pairs - 1);
3981
3982 /* Make sure NAPI is not using any XDP TX queues for RX. */
3983 if (netif_running(dev)) {
3984 for (i = 0; i < vi->max_queue_pairs; i++) {
3985 napi_disable(&vi->rq[i].napi);
3986 virtnet_napi_tx_disable(&vi->sq[i].napi);
3987 }
3988 }
3989
3990 if (!prog) {
3991 for (i = 0; i < vi->max_queue_pairs; i++) {
3992 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3993 if (i == 0)
3994 virtnet_restore_guest_offloads(vi);
3995 }
3996 synchronize_net();
3997 }
3998
3999 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
4000 if (err)
4001 goto err;
4002 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
4003 vi->xdp_queue_pairs = xdp_qp;
4004
4005 if (prog) {
4006 vi->xdp_enabled = true;
4007 for (i = 0; i < vi->max_queue_pairs; i++) {
4008 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
4009 if (i == 0 && !old_prog)
4010 virtnet_clear_guest_offloads(vi);
4011 }
4012 if (!old_prog)
4013 xdp_features_set_redirect_target(dev, true);
4014 } else {
4015 xdp_features_clear_redirect_target(dev);
4016 vi->xdp_enabled = false;
4017 }
4018
4019 for (i = 0; i < vi->max_queue_pairs; i++) {
4020 if (old_prog)
4021 bpf_prog_put(old_prog);
4022 if (netif_running(dev)) {
4023 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
4024 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
4025 &vi->sq[i].napi);
4026 }
4027 }
4028
4029 return 0;
4030
4031err:
4032 if (!prog) {
4033 virtnet_clear_guest_offloads(vi);
4034 for (i = 0; i < vi->max_queue_pairs; i++)
4035 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
4036 }
4037
4038 if (netif_running(dev)) {
4039 for (i = 0; i < vi->max_queue_pairs; i++) {
4040 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
4041 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
4042 &vi->sq[i].napi);
4043 }
4044 }
4045 if (prog)
4046 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
4047 return err;
4048}
4049
4050static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4051{
4052 switch (xdp->command) {
4053 case XDP_SETUP_PROG:
4054 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
4055 default:
4056 return -EINVAL;
4057 }
4058}
4059
4060static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
4061 size_t len)
4062{
4063 struct virtnet_info *vi = netdev_priv(dev);
4064 int ret;
4065
4066 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
4067 return -EOPNOTSUPP;
4068
4069 ret = snprintf(buf, len, "sby");
4070 if (ret >= len)
4071 return -EOPNOTSUPP;
4072
4073 return 0;
4074}
4075
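/* ndo_set_features: toggling NETIF_F_GRO_HW reprograms the guest offloads
 * (rejected while XDP is enabled), and toggling NETIF_F_RXHASH updates the
 * RSS hash types through the control virtqueue.
 */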
4076static int virtnet_set_features(struct net_device *dev,
4077 netdev_features_t features)
4078{
4079 struct virtnet_info *vi = netdev_priv(dev);
4080 u64 offloads;
4081 int err;
4082
4083 if ((dev->features ^ features) & NETIF_F_GRO_HW) {
4084 if (vi->xdp_enabled)
4085 return -EBUSY;
4086
4087 if (features & NETIF_F_GRO_HW)
4088 offloads = vi->guest_offloads_capable;
4089 else
4090 offloads = vi->guest_offloads_capable &
4091 ~GUEST_OFFLOAD_GRO_HW_MASK;
4092
4093 err = virtnet_set_guest_offloads(vi, offloads);
4094 if (err)
4095 return err;
4096 vi->guest_offloads = offloads;
4097 }
4098
4099 if ((dev->features ^ features) & NETIF_F_RXHASH) {
4100 if (features & NETIF_F_RXHASH)
4101 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
4102 else
4103 vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
4104
4105 if (!virtnet_commit_rss_command(vi))
4106 return -EINVAL;
4107 }
4108
4109 return 0;
4110}
4111
4112static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
4113{
4114 struct virtnet_info *priv = netdev_priv(dev);
4115 struct send_queue *sq = &priv->sq[txqueue];
4116 struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
4117
4118 u64_stats_update_begin(&sq->stats.syncp);
4119 u64_stats_inc(&sq->stats.tx_timeouts);
4120 u64_stats_update_end(&sq->stats.syncp);
4121
4122 netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
4123 txqueue, sq->name, sq->vq->index, sq->vq->name,
4124 jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
4125}
4126
4127static const struct net_device_ops virtnet_netdev = {
4128 .ndo_open = virtnet_open,
4129 .ndo_stop = virtnet_close,
4130 .ndo_start_xmit = start_xmit,
4131 .ndo_validate_addr = eth_validate_addr,
4132 .ndo_set_mac_address = virtnet_set_mac_address,
4133 .ndo_set_rx_mode = virtnet_set_rx_mode,
4134 .ndo_get_stats64 = virtnet_stats,
4135 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
4136 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
4137 .ndo_bpf = virtnet_xdp,
4138 .ndo_xdp_xmit = virtnet_xdp_xmit,
4139 .ndo_features_check = passthru_features_check,
4140 .ndo_get_phys_port_name = virtnet_get_phys_port_name,
4141 .ndo_set_features = virtnet_set_features,
4142 .ndo_tx_timeout = virtnet_tx_timeout,
4143};
4144
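/* Handle a config space change: acknowledge link announcements and propagate
 * the device's link state to the netdev carrier and TX queues.
 */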
4145static void virtnet_config_changed_work(struct work_struct *work)
4146{
4147 struct virtnet_info *vi =
4148 container_of(work, struct virtnet_info, config_work);
4149 u16 v;
4150
4151 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
4152 struct virtio_net_config, status, &v) < 0)
4153 return;
4154
4155 if (v & VIRTIO_NET_S_ANNOUNCE) {
4156 netdev_notify_peers(vi->dev);
4157 virtnet_ack_link_announce(vi);
4158 }
4159
4160 /* Ignore unknown (future) status bits */
4161 v &= VIRTIO_NET_S_LINK_UP;
4162
4163 if (vi->status == v)
4164 return;
4165
4166 vi->status = v;
4167
4168 if (vi->status & VIRTIO_NET_S_LINK_UP) {
4169 virtnet_update_settings(vi);
4170 netif_carrier_on(vi->dev);
4171 netif_tx_wake_all_queues(vi->dev);
4172 } else {
4173 netif_carrier_off(vi->dev);
4174 netif_tx_stop_all_queues(vi->dev);
4175 }
4176}
4177
4178static void virtnet_config_changed(struct virtio_device *vdev)
4179{
4180 struct virtnet_info *vi = vdev->priv;
4181
4182 schedule_work(&vi->config_work);
4183}
4184
4185static void virtnet_free_queues(struct virtnet_info *vi)
4186{
4187 int i;
4188
4189 for (i = 0; i < vi->max_queue_pairs; i++) {
4190 __netif_napi_del(&vi->rq[i].napi);
4191 __netif_napi_del(&vi->sq[i].napi);
4192 }
4193
4194	/* We called __netif_napi_del(), so we must respect an RCU grace
4195	 * period before freeing vi->rq.
4196 */
4197 synchronize_net();
4198
4199 kfree(vi->rq);
4200 kfree(vi->sq);
4201 kfree(vi->ctrl);
4202}
4203
4204static void _free_receive_bufs(struct virtnet_info *vi)
4205{
4206 struct bpf_prog *old_prog;
4207 int i;
4208
4209 for (i = 0; i < vi->max_queue_pairs; i++) {
4210 while (vi->rq[i].pages)
4211 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
4212
4213 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
4214 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
4215 if (old_prog)
4216 bpf_prog_put(old_prog);
4217 }
4218}
4219
4220static void free_receive_bufs(struct virtnet_info *vi)
4221{
4222 rtnl_lock();
4223 _free_receive_bufs(vi);
4224 rtnl_unlock();
4225}
4226
4227static void free_receive_page_frags(struct virtnet_info *vi)
4228{
4229 int i;
4230 for (i = 0; i < vi->max_queue_pairs; i++)
4231 if (vi->rq[i].alloc_frag.page) {
4232 if (vi->rq[i].do_dma && vi->rq[i].last_dma)
4233 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
4234 put_page(vi->rq[i].alloc_frag.page);
4235 }
4236}
4237
4238static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
4239{
4240 if (!is_xdp_frame(buf))
4241 dev_kfree_skb(buf);
4242 else
4243 xdp_return_frame(ptr_to_xdp(buf));
4244}
4245
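/* Give back any buffers still sitting in the virtqueues (e.g. on remove or
 * freeze): skbs/XDP frames on the send queues, receive buffers on the
 * receive queues.
 */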
4246static void free_unused_bufs(struct virtnet_info *vi)
4247{
4248 void *buf;
4249 int i;
4250
4251 for (i = 0; i < vi->max_queue_pairs; i++) {
4252 struct virtqueue *vq = vi->sq[i].vq;
4253 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4254 virtnet_sq_free_unused_buf(vq, buf);
4255 cond_resched();
4256 }
4257
4258 for (i = 0; i < vi->max_queue_pairs; i++) {
4259 struct virtqueue *vq = vi->rq[i].vq;
4260
4261 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
4262 virtnet_rq_unmap_free_buf(vq, buf);
4263 cond_resched();
4264 }
4265}
4266
4267static void virtnet_del_vqs(struct virtnet_info *vi)
4268{
4269 struct virtio_device *vdev = vi->vdev;
4270
4271 virtnet_clean_affinity(vi);
4272
4273 vdev->config->del_vqs(vdev);
4274
4275 virtnet_free_queues(vi);
4276}
4277
4278/* How large should a single buffer be so a queue full of these can fit at
4279 * least one full packet?
4280 * Logic below assumes the mergeable buffer header is used.
4281 */
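/* Worked example (assuming a 12-byte mergeable header, a 256-entry ring and
 * a 65535-byte max packet): buf_len = 12 + 14 + 4 + 65535 = 65565, so
 * min_buf_len = DIV_ROUND_UP(65565, 256) = 257 and the result is
 * max(257 - 12, GOOD_PACKET_LEN) = GOOD_PACKET_LEN = 1518 bytes.
 */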
4282static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
4283{
4284 const unsigned int hdr_len = vi->hdr_len;
4285 unsigned int rq_size = virtqueue_get_vring_size(vq);
4286 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
4287 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
4288 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
4289
4290 return max(max(min_buf_len, hdr_len) - hdr_len,
4291 (unsigned int)GOOD_PACKET_LEN);
4292}
4293
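/* Ask the transport for all virtqueues (one RX/TX pair per queue pair plus
 * an optional control vq) and wire the results into vi->rq, vi->sq and
 * vi->cvq.
 */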
4294static int virtnet_find_vqs(struct virtnet_info *vi)
4295{
4296 vq_callback_t **callbacks;
4297 struct virtqueue **vqs;
4298 const char **names;
4299 int ret = -ENOMEM;
4300 int total_vqs;
4301 bool *ctx;
4302 u16 i;
4303
4304 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
4305	 * up to N-1 more RX/TX queue pairs used in multiqueue mode, followed by
4306	 * an optional control vq.
4307 */
4308 total_vqs = vi->max_queue_pairs * 2 +
4309 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
4310
4311 /* Allocate space for find_vqs parameters */
4312 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
4313 if (!vqs)
4314 goto err_vq;
4315 callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
4316 if (!callbacks)
4317 goto err_callback;
4318 names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
4319 if (!names)
4320 goto err_names;
4321 if (!vi->big_packets || vi->mergeable_rx_bufs) {
4322 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
4323 if (!ctx)
4324 goto err_ctx;
4325 } else {
4326 ctx = NULL;
4327 }
4328
4329 /* Parameters for control virtqueue, if any */
4330 if (vi->has_cvq) {
4331 callbacks[total_vqs - 1] = NULL;
4332 names[total_vqs - 1] = "control";
4333 }
4334
4335 /* Allocate/initialize parameters for send/receive virtqueues */
4336 for (i = 0; i < vi->max_queue_pairs; i++) {
4337 callbacks[rxq2vq(i)] = skb_recv_done;
4338 callbacks[txq2vq(i)] = skb_xmit_done;
4339 sprintf(vi->rq[i].name, "input.%u", i);
4340 sprintf(vi->sq[i].name, "output.%u", i);
4341 names[rxq2vq(i)] = vi->rq[i].name;
4342 names[txq2vq(i)] = vi->sq[i].name;
4343 if (ctx)
4344 ctx[rxq2vq(i)] = true;
4345 }
4346
4347 ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
4348 names, ctx, NULL);
4349 if (ret)
4350 goto err_find;
4351
4352 if (vi->has_cvq) {
4353 vi->cvq = vqs[total_vqs - 1];
4354 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
4355 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4356 }
4357
4358 for (i = 0; i < vi->max_queue_pairs; i++) {
4359 vi->rq[i].vq = vqs[rxq2vq(i)];
4360 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
4361 vi->sq[i].vq = vqs[txq2vq(i)];
4362 }
4363
4364	/* Success: ret == 0 here; fall through to free the temporary arrays. */
4365
4367err_find:
4368 kfree(ctx);
4369err_ctx:
4370 kfree(names);
4371err_names:
4372 kfree(callbacks);
4373err_callback:
4374 kfree(vqs);
4375err_vq:
4376 return ret;
4377}
4378
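/* Allocate the control buffer and the per-queue-pair send/receive state:
 * NAPI contexts, DIM state, scatterlists and per-queue statistics.
 */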
4379static int virtnet_alloc_queues(struct virtnet_info *vi)
4380{
4381 int i;
4382
4383 if (vi->has_cvq) {
4384 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
4385 if (!vi->ctrl)
4386 goto err_ctrl;
4387 } else {
4388 vi->ctrl = NULL;
4389 }
4390 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
4391 if (!vi->sq)
4392 goto err_sq;
4393 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
4394 if (!vi->rq)
4395 goto err_rq;
4396
4397 INIT_DELAYED_WORK(&vi->refill, refill_work);
4398 for (i = 0; i < vi->max_queue_pairs; i++) {
4399 vi->rq[i].pages = NULL;
4400 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
4401 napi_weight);
4402 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
4403 virtnet_poll_tx,
4404 napi_tx ? napi_weight : 0);
4405
4406 INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
4407 vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4408
4409 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
4410 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
4411 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
4412
4413 u64_stats_init(&vi->rq[i].stats.syncp);
4414 u64_stats_init(&vi->sq[i].stats.syncp);
4415 }
4416
4417 return 0;
4418
4419err_rq:
4420 kfree(vi->sq);
4421err_sq:
4422 kfree(vi->ctrl);
4423err_ctrl:
4424 return -ENOMEM;
4425}
4426
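/* Allocate queue state, discover the virtqueues, switch the RX queues to
 * premapped DMA buffers and set the virtqueue CPU affinity hints.
 */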
4427static int init_vqs(struct virtnet_info *vi)
4428{
4429 int ret;
4430
4431 /* Allocate send & receive queues */
4432 ret = virtnet_alloc_queues(vi);
4433 if (ret)
4434 goto err;
4435
4436 ret = virtnet_find_vqs(vi);
4437 if (ret)
4438 goto err_free;
4439
4440 virtnet_rq_set_premapped(vi);
4441
4442 cpus_read_lock();
4443 virtnet_set_affinity(vi);
4444 cpus_read_unlock();
4445
4446 return 0;
4447
4448err_free:
4449 virtnet_free_queues(vi);
4450err:
4451 return ret;
4452}
4453
4454#ifdef CONFIG_SYSFS
4455static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
4456 char *buf)
4457{
4458 struct virtnet_info *vi = netdev_priv(queue->dev);
4459 unsigned int queue_index = get_netdev_rx_queue_index(queue);
4460 unsigned int headroom = virtnet_get_headroom(vi);
4461 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
4462 struct ewma_pkt_len *avg;
4463
4464 BUG_ON(queue_index >= vi->max_queue_pairs);
4465 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
4466 return sprintf(buf, "%u\n",
4467 get_mergeable_buf_len(&vi->rq[queue_index], avg,
4468 SKB_DATA_ALIGN(headroom + tailroom)));
4469}
4470
4471static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
4472 __ATTR_RO(mergeable_rx_buffer_size);
4473
4474static struct attribute *virtio_net_mrg_rx_attrs[] = {
4475 &mergeable_rx_buffer_size_attribute.attr,
4476 NULL
4477};
4478
4479static const struct attribute_group virtio_net_mrg_rx_group = {
4480 .name = "virtio_net",
4481 .attrs = virtio_net_mrg_rx_attrs
4482};
4483#endif
4484
4485static bool virtnet_fail_on_feature(struct virtio_device *vdev,
4486 unsigned int fbit,
4487 const char *fname, const char *dname)
4488{
4489 if (!virtio_has_feature(vdev, fbit))
4490 return false;
4491
4492 dev_err(&vdev->dev, "device advertises feature %s but not %s",
4493 fname, dname);
4494
4495 return true;
4496}
4497
4498#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
4499 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
4500
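/* Every control-plane feature (RX filtering, VLAN, announce, MQ, MAC, RSS,
 * hash report, coalescing) depends on VIRTIO_NET_F_CTRL_VQ; reject devices
 * that advertise any of them without it.
 */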
4501static bool virtnet_validate_features(struct virtio_device *vdev)
4502{
4503 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
4504 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
4505 "VIRTIO_NET_F_CTRL_VQ") ||
4506 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
4507 "VIRTIO_NET_F_CTRL_VQ") ||
4508 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
4509 "VIRTIO_NET_F_CTRL_VQ") ||
4510 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
4511 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
4512 "VIRTIO_NET_F_CTRL_VQ") ||
4513 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
4514 "VIRTIO_NET_F_CTRL_VQ") ||
4515 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
4516 "VIRTIO_NET_F_CTRL_VQ") ||
4517 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
4518 "VIRTIO_NET_F_CTRL_VQ") ||
4519 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
4520 "VIRTIO_NET_F_CTRL_VQ"))) {
4521 return false;
4522 }
4523
4524 return true;
4525}
4526
4527#define MIN_MTU ETH_MIN_MTU
4528#define MAX_MTU ETH_MAX_MTU
4529
4530static int virtnet_validate(struct virtio_device *vdev)
4531{
4532 if (!vdev->config->get) {
4533 dev_err(&vdev->dev, "%s failure: config access disabled\n",
4534 __func__);
4535 return -EINVAL;
4536 }
4537
4538 if (!virtnet_validate_features(vdev))
4539 return -EINVAL;
4540
4541 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4542 int mtu = virtio_cread16(vdev,
4543 offsetof(struct virtio_net_config,
4544 mtu));
4545 if (mtu < MIN_MTU)
4546 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
4547 }
4548
4549 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
4550 !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4551 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
4552 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
4553 }
4554
4555 return 0;
4556}
4557
4558static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
4559{
4560 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4561 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
4562 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
4563 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4564 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
4565 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
4566}
4567
4568static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
4569{
4570 bool guest_gso = virtnet_check_guest_gso(vi);
4571
4572	/* If the device can receive ANY guest GSO packets, regardless of MTU,
4573	 * allocate buffers for the maximum packet size; otherwise limit them
4574	 * to the MTU.
4575 */
4576 if (mtu > ETH_DATA_LEN || guest_gso) {
4577 vi->big_packets = true;
4578 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
4579 }
4580}
4581
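/* Probe: size the queue pairs from the device config, allocate the netdev,
 * translate the negotiated virtio features into netdev features, set up the
 * virtqueues, register the netdev and mark the device ready.
 */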
4582static int virtnet_probe(struct virtio_device *vdev)
4583{
4584 int i, err = -ENOMEM;
4585 struct net_device *dev;
4586 struct virtnet_info *vi;
4587 u16 max_queue_pairs;
4588 int mtu = 0;
4589
4590	/* Find out whether the host supports a multiqueue/RSS virtio_net device */
4591 max_queue_pairs = 1;
4592 if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4593 max_queue_pairs =
4594 virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
4595
4596	/* We need at least 2 queues */
4597 if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
4598 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
4599 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4600 max_queue_pairs = 1;
4601
4602 /* Allocate ourselves a network device with room for our info */
4603 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
4604 if (!dev)
4605 return -ENOMEM;
4606
4607 /* Set up network device as normal. */
4608 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
4609 IFF_TX_SKB_NO_LINEAR;
4610 dev->netdev_ops = &virtnet_netdev;
4611 dev->features = NETIF_F_HIGHDMA;
4612
4613 dev->ethtool_ops = &virtnet_ethtool_ops;
4614 SET_NETDEV_DEV(dev, &vdev->dev);
4615
4616 /* Do we support "hardware" checksums? */
4617 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
4618 /* This opens up the world of extra features. */
4619 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4620 if (csum)
4621 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4622
4623 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
4624 dev->hw_features |= NETIF_F_TSO
4625 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
4626 }
4627 /* Individual feature bits: what can host handle? */
4628 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
4629 dev->hw_features |= NETIF_F_TSO;
4630 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
4631 dev->hw_features |= NETIF_F_TSO6;
4632 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
4633 dev->hw_features |= NETIF_F_TSO_ECN;
4634 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
4635 dev->hw_features |= NETIF_F_GSO_UDP_L4;
4636
4637 dev->features |= NETIF_F_GSO_ROBUST;
4638
4639 if (gso)
4640 dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
4641 /* (!csum && gso) case will be fixed by register_netdev() */
4642 }
4643 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
4644 dev->features |= NETIF_F_RXCSUM;
4645 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4646 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
4647 dev->features |= NETIF_F_GRO_HW;
4648 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
4649 dev->hw_features |= NETIF_F_GRO_HW;
4650
4651 dev->vlan_features = dev->features;
4652 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
4653
4654 /* MTU range: 68 - 65535 */
4655 dev->min_mtu = MIN_MTU;
4656 dev->max_mtu = MAX_MTU;
4657
4658 /* Configuration may specify what MAC to use. Otherwise random. */
4659 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
4660 u8 addr[ETH_ALEN];
4661
4662 virtio_cread_bytes(vdev,
4663 offsetof(struct virtio_net_config, mac),
4664 addr, ETH_ALEN);
4665 eth_hw_addr_set(dev, addr);
4666 } else {
4667 eth_hw_addr_random(dev);
4668 dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
4669 dev->dev_addr);
4670 }
4671
4672 /* Set up our device-specific information */
4673 vi = netdev_priv(dev);
4674 vi->dev = dev;
4675 vi->vdev = vdev;
4676 vdev->priv = vi;
4677
4678 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
4679 spin_lock_init(&vi->refill_lock);
4680
4681 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
4682 vi->mergeable_rx_bufs = true;
4683 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
4684 }
4685
4686 if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
4687 vi->has_rss_hash_report = true;
4688
4689 if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
4690 vi->has_rss = true;
4691
4692 if (vi->has_rss || vi->has_rss_hash_report) {
4693 vi->rss_indir_table_size =
4694 virtio_cread16(vdev, offsetof(struct virtio_net_config,
4695 rss_max_indirection_table_length));
4696 vi->rss_key_size =
4697 virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
4698
4699 vi->rss_hash_types_supported =
4700 virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
4701 vi->rss_hash_types_supported &=
4702 ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
4703 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
4704 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
4705
4706 dev->hw_features |= NETIF_F_RXHASH;
4707 }
4708
4709 if (vi->has_rss_hash_report)
4710 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
4711 else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
4712 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4713 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
4714 else
4715 vi->hdr_len = sizeof(struct virtio_net_hdr);
4716
4717 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
4718 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
4719 vi->any_header_sg = true;
4720
4721 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
4722 vi->has_cvq = true;
4723
4724 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
4725 mtu = virtio_cread16(vdev,
4726 offsetof(struct virtio_net_config,
4727 mtu));
4728 if (mtu < dev->min_mtu) {
4729 /* Should never trigger: MTU was previously validated
4730 * in virtnet_validate.
4731 */
4732 dev_err(&vdev->dev,
4733				"device MTU appears to have changed, it is now %d < %d",
4734 mtu, dev->min_mtu);
4735 err = -EINVAL;
4736 goto free;
4737 }
4738
4739 dev->mtu = mtu;
4740 dev->max_mtu = mtu;
4741 }
4742
4743 virtnet_set_big_packets(vi, mtu);
4744
4745 if (vi->any_header_sg)
4746 dev->needed_headroom = vi->hdr_len;
4747
4748 /* Enable multiqueue by default */
4749 if (num_online_cpus() >= max_queue_pairs)
4750 vi->curr_queue_pairs = max_queue_pairs;
4751 else
4752 vi->curr_queue_pairs = num_online_cpus();
4753 vi->max_queue_pairs = max_queue_pairs;
4754
4755 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
4756 err = init_vqs(vi);
4757 if (err)
4758 goto free;
4759
4760 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4761 vi->intr_coal_rx.max_usecs = 0;
4762 vi->intr_coal_tx.max_usecs = 0;
4763 vi->intr_coal_rx.max_packets = 0;
4764
4765 /* Keep the default values of the coalescing parameters
4766 * aligned with the default napi_tx state.
4767 */
4768 if (vi->sq[0].napi.weight)
4769 vi->intr_coal_tx.max_packets = 1;
4770 else
4771 vi->intr_coal_tx.max_packets = 0;
4772 }
4773
4774 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
4775 /* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
4776 for (i = 0; i < vi->max_queue_pairs; i++)
4777 if (vi->sq[i].napi.weight)
4778 vi->sq[i].intr_coal.max_packets = 1;
4779 }
4780
4781#ifdef CONFIG_SYSFS
4782 if (vi->mergeable_rx_bufs)
4783 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
4784#endif
4785 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
4786 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
4787
4788 virtnet_init_settings(dev);
4789
4790 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
4791 vi->failover = net_failover_create(vi->dev);
4792 if (IS_ERR(vi->failover)) {
4793 err = PTR_ERR(vi->failover);
4794 goto free_vqs;
4795 }
4796 }
4797
4798 if (vi->has_rss || vi->has_rss_hash_report)
4799 virtnet_init_default_rss(vi);
4800
4801 /* serialize netdev register + virtio_device_ready() with ndo_open() */
4802 rtnl_lock();
4803
4804 err = register_netdevice(dev);
4805 if (err) {
4806 pr_debug("virtio_net: registering device failed\n");
4807 rtnl_unlock();
4808 goto free_failover;
4809 }
4810
4811 virtio_device_ready(vdev);
4812
4813 _virtnet_set_queues(vi, vi->curr_queue_pairs);
4814
4815	/* A random MAC address has been assigned, notify the device.
4816	 * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
4817	 * because many devices work fine without the MAC being set explicitly.
4818 */
4819 if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
4820 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
4821 struct scatterlist sg;
4822
4823 sg_init_one(&sg, dev->dev_addr, dev->addr_len);
4824 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
4825 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
4826 pr_debug("virtio_net: setting MAC address failed\n");
4827 rtnl_unlock();
4828 err = -EINVAL;
4829 goto free_unregister_netdev;
4830 }
4831 }
4832
4833 rtnl_unlock();
4834
4835 err = virtnet_cpu_notif_add(vi);
4836 if (err) {
4837 pr_debug("virtio_net: registering cpu notifier failed\n");
4838 goto free_unregister_netdev;
4839 }
4840
4841 /* Assume link up if device can't report link status,
4842	 * otherwise get link status from config. */
4843 netif_carrier_off(dev);
4844 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
4845 schedule_work(&vi->config_work);
4846 } else {
4847 vi->status = VIRTIO_NET_S_LINK_UP;
4848 virtnet_update_settings(vi);
4849 netif_carrier_on(dev);
4850 }
4851
4852 for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
4853 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
4854 set_bit(guest_offloads[i], &vi->guest_offloads);
4855 vi->guest_offloads_capable = vi->guest_offloads;
4856
4857	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
4858 dev->name, max_queue_pairs);
4859
4860 return 0;
4861
4862free_unregister_netdev:
4863 unregister_netdev(dev);
4864free_failover:
4865 net_failover_destroy(vi->failover);
4866free_vqs:
4867 virtio_reset_device(vdev);
4868 cancel_delayed_work_sync(&vi->refill);
4869 free_receive_page_frags(vi);
4870 virtnet_del_vqs(vi);
4871free:
4872 free_netdev(dev);
4873 return err;
4874}
4875
4876static void remove_vq_common(struct virtnet_info *vi)
4877{
4878 virtio_reset_device(vi->vdev);
4879
4880 /* Free unused buffers in both send and recv, if any. */
4881 free_unused_bufs(vi);
4882
4883 free_receive_bufs(vi);
4884
4885 free_receive_page_frags(vi);
4886
4887 virtnet_del_vqs(vi);
4888}
4889
4890static void virtnet_remove(struct virtio_device *vdev)
4891{
4892 struct virtnet_info *vi = vdev->priv;
4893
4894 virtnet_cpu_notif_remove(vi);
4895
4896 /* Make sure no work handler is accessing the device. */
4897 flush_work(&vi->config_work);
4898
4899 unregister_netdev(vi->dev);
4900
4901 net_failover_destroy(vi->failover);
4902
4903 remove_vq_common(vi);
4904
4905 free_netdev(vi->dev);
4906}
4907
4908static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
4909{
4910 struct virtnet_info *vi = vdev->priv;
4911
4912 virtnet_cpu_notif_remove(vi);
4913 virtnet_freeze_down(vdev);
4914 remove_vq_common(vi);
4915
4916 return 0;
4917}
4918
4919static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
4920{
4921 struct virtnet_info *vi = vdev->priv;
4922 int err;
4923
4924 err = virtnet_restore_up(vdev);
4925 if (err)
4926 return err;
4927 virtnet_set_queues(vi, vi->curr_queue_pairs);
4928
4929 err = virtnet_cpu_notif_add(vi);
4930 if (err) {
4931 virtnet_freeze_down(vdev);
4932 remove_vq_common(vi);
4933 return err;
4934 }
4935
4936 return 0;
4937}
4938
4939static struct virtio_device_id id_table[] = {
4940 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
4941 { 0 },
4942};
4943
4944#define VIRTNET_FEATURES \
4945 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
4946 VIRTIO_NET_F_MAC, \
4947 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
4948 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
4949 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
4950 VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
4951 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
4952 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
4953 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
4954 VIRTIO_NET_F_CTRL_MAC_ADDR, \
4955 VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
4956 VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
4957 VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
4958 VIRTIO_NET_F_VQ_NOTF_COAL, \
4959 VIRTIO_NET_F_GUEST_HDRLEN
4960
4961static unsigned int features[] = {
4962 VIRTNET_FEATURES,
4963};
4964
4965static unsigned int features_legacy[] = {
4966 VIRTNET_FEATURES,
4967 VIRTIO_NET_F_GSO,
4968 VIRTIO_F_ANY_LAYOUT,
4969};
4970
4971static struct virtio_driver virtio_net_driver = {
4972 .feature_table = features,
4973 .feature_table_size = ARRAY_SIZE(features),
4974 .feature_table_legacy = features_legacy,
4975 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
4976 .driver.name = KBUILD_MODNAME,
4977 .driver.owner = THIS_MODULE,
4978 .id_table = id_table,
4979 .validate = virtnet_validate,
4980 .probe = virtnet_probe,
4981 .remove = virtnet_remove,
4982 .config_changed = virtnet_config_changed,
4983#ifdef CONFIG_PM_SLEEP
4984 .freeze = virtnet_freeze,
4985 .restore = virtnet_restore,
4986#endif
4987};
4988
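/* Register the CPU hotplug callbacks used for virtqueue affinity before the
 * driver itself, and unwind them in reverse order on failure.
 */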
4989static __init int virtio_net_driver_init(void)
4990{
4991 int ret;
4992
4993 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
4994 virtnet_cpu_online,
4995 virtnet_cpu_down_prep);
4996 if (ret < 0)
4997 goto out;
4998 virtionet_online = ret;
4999 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
5000 NULL, virtnet_cpu_dead);
5001 if (ret)
5002 goto err_dead;
5003 ret = register_virtio_driver(&virtio_net_driver);
5004 if (ret)
5005 goto err_virtio;
5006 return 0;
5007err_virtio:
5008 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
5009err_dead:
5010 cpuhp_remove_multi_state(virtionet_online);
5011out:
5012 return ret;
5013}
5014module_init(virtio_net_driver_init);
5015
5016static __exit void virtio_net_driver_exit(void)
5017{
5018 unregister_virtio_driver(&virtio_net_driver);
5019 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
5020 cpuhp_remove_multi_state(virtionet_online);
5021}
5022module_exit(virtio_net_driver_exit);
5023
5024MODULE_DEVICE_TABLE(virtio, id_table);
5025MODULE_DESCRIPTION("Virtio network driver");
5026MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/* A network driver using virtio.
3 *
4 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
5 */
6//#define DEBUG
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
9#include <linux/ethtool.h>
10#include <linux/module.h>
11#include <linux/virtio.h>
12#include <linux/virtio_net.h>
13#include <linux/bpf.h>
14#include <linux/bpf_trace.h>
15#include <linux/scatterlist.h>
16#include <linux/if_vlan.h>
17#include <linux/slab.h>
18#include <linux/cpu.h>
19#include <linux/average.h>
20#include <linux/filter.h>
21#include <linux/kernel.h>
22#include <net/route.h>
23#include <net/xdp.h>
24#include <net/net_failover.h>
25
26static int napi_weight = NAPI_POLL_WEIGHT;
27module_param(napi_weight, int, 0444);
28
29static bool csum = true, gso = true, napi_tx = true;
30module_param(csum, bool, 0444);
31module_param(gso, bool, 0444);
32module_param(napi_tx, bool, 0644);
33
34/* FIXME: MTU in config. */
35#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
36#define GOOD_COPY_LEN 128
37
38#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
39
40/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
41#define VIRTIO_XDP_HEADROOM 256
42
43/* Separating two types of XDP xmit */
44#define VIRTIO_XDP_TX BIT(0)
45#define VIRTIO_XDP_REDIR BIT(1)
46
47#define VIRTIO_XDP_FLAG BIT(0)
48
49/* RX packet size EWMA. The average packet size is used to determine the packet
50 * buffer size when refilling RX rings. As the entire RX ring may be refilled
51 * at once, the weight is chosen so that the EWMA will be insensitive to short-
52 * term, transient changes in packet size.
53 */
54DECLARE_EWMA(pkt_len, 0, 64)
55
56#define VIRTNET_DRIVER_VERSION "1.0.0"
57
58static const unsigned long guest_offloads[] = {
59 VIRTIO_NET_F_GUEST_TSO4,
60 VIRTIO_NET_F_GUEST_TSO6,
61 VIRTIO_NET_F_GUEST_ECN,
62 VIRTIO_NET_F_GUEST_UFO,
63 VIRTIO_NET_F_GUEST_CSUM
64};
65
66#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
67 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
68 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
69 (1ULL << VIRTIO_NET_F_GUEST_UFO))
70
71struct virtnet_stat_desc {
72 char desc[ETH_GSTRING_LEN];
73 size_t offset;
74};
75
76struct virtnet_sq_stats {
77 struct u64_stats_sync syncp;
78 u64 packets;
79 u64 bytes;
80 u64 xdp_tx;
81 u64 xdp_tx_drops;
82 u64 kicks;
83};
84
85struct virtnet_rq_stats {
86 struct u64_stats_sync syncp;
87 u64 packets;
88 u64 bytes;
89 u64 drops;
90 u64 xdp_packets;
91 u64 xdp_tx;
92 u64 xdp_redirects;
93 u64 xdp_drops;
94 u64 kicks;
95};
96
97#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
98#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
99
100static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
101 { "packets", VIRTNET_SQ_STAT(packets) },
102 { "bytes", VIRTNET_SQ_STAT(bytes) },
103 { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
104 { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
105 { "kicks", VIRTNET_SQ_STAT(kicks) },
106};
107
108static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
109 { "packets", VIRTNET_RQ_STAT(packets) },
110 { "bytes", VIRTNET_RQ_STAT(bytes) },
111 { "drops", VIRTNET_RQ_STAT(drops) },
112 { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
113 { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
114 { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
115 { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
116 { "kicks", VIRTNET_RQ_STAT(kicks) },
117};
118
119#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
120#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
121
122/* Internal representation of a send virtqueue */
123struct send_queue {
124 /* Virtqueue associated with this send _queue */
125 struct virtqueue *vq;
126
127 /* TX: fragments + linear part + virtio header */
128 struct scatterlist sg[MAX_SKB_FRAGS + 2];
129
130 /* Name of the send queue: output.$index */
131 char name[40];
132
133 struct virtnet_sq_stats stats;
134
135 struct napi_struct napi;
136};
137
138/* Internal representation of a receive virtqueue */
139struct receive_queue {
140 /* Virtqueue associated with this receive_queue */
141 struct virtqueue *vq;
142
143 struct napi_struct napi;
144
145 struct bpf_prog __rcu *xdp_prog;
146
147 struct virtnet_rq_stats stats;
148
149 /* Chain pages by the private ptr. */
150 struct page *pages;
151
152 /* Average packet length for mergeable receive buffers. */
153 struct ewma_pkt_len mrg_avg_pkt_len;
154
155 /* Page frag for packet buffer allocation. */
156 struct page_frag alloc_frag;
157
158 /* RX: fragments + linear part + virtio header */
159 struct scatterlist sg[MAX_SKB_FRAGS + 2];
160
161 /* Min single buffer size for mergeable buffers case. */
162 unsigned int min_buf_len;
163
164 /* Name of this receive queue: input.$index */
165 char name[40];
166
167 struct xdp_rxq_info xdp_rxq;
168};
169
170/* Control VQ buffers: protected by the rtnl lock */
171struct control_buf {
172 struct virtio_net_ctrl_hdr hdr;
173 virtio_net_ctrl_ack status;
174 struct virtio_net_ctrl_mq mq;
175 u8 promisc;
176 u8 allmulti;
177 __virtio16 vid;
178 __virtio64 offloads;
179};
180
181struct virtnet_info {
182 struct virtio_device *vdev;
183 struct virtqueue *cvq;
184 struct net_device *dev;
185 struct send_queue *sq;
186 struct receive_queue *rq;
187 unsigned int status;
188
189 /* Max # of queue pairs supported by the device */
190 u16 max_queue_pairs;
191
192 /* # of queue pairs currently used by the driver */
193 u16 curr_queue_pairs;
194
195 /* # of XDP queue pairs currently used by the driver */
196 u16 xdp_queue_pairs;
197
198 /* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
199 bool xdp_enabled;
200
201 /* I like... big packets and I cannot lie! */
202 bool big_packets;
203
204 /* Host will merge rx buffers for big packets (shake it! shake it!) */
205 bool mergeable_rx_bufs;
206
207 /* Has control virtqueue */
208 bool has_cvq;
209
210 /* Host can handle any s/g split between our header and packet data */
211 bool any_header_sg;
212
213 /* Packet virtio header size */
214 u8 hdr_len;
215
216 /* Work struct for refilling if we run low on memory. */
217 struct delayed_work refill;
218
219 /* Work struct for config space updates */
220 struct work_struct config_work;
221
222 /* Does the affinity hint is set for virtqueues? */
223 bool affinity_hint_set;
224
225 /* CPU hotplug instances for online & dead */
226 struct hlist_node node;
227 struct hlist_node node_dead;
228
229 struct control_buf *ctrl;
230
231 /* Ethtool settings */
232 u8 duplex;
233 u32 speed;
234
235 unsigned long guest_offloads;
236 unsigned long guest_offloads_capable;
237
238 /* failover when STANDBY feature enabled */
239 struct failover *failover;
240};
241
242struct padded_vnet_hdr {
243 struct virtio_net_hdr_mrg_rxbuf hdr;
244 /*
245 * hdr is in a separate sg buffer, and data sg buffer shares same page
246 * with this header sg. This padding makes next sg 16 byte aligned
247 * after the header.
248 */
249 char padding[4];
250};
251
252static bool is_xdp_frame(void *ptr)
253{
254 return (unsigned long)ptr & VIRTIO_XDP_FLAG;
255}
256
257static void *xdp_to_ptr(struct xdp_frame *ptr)
258{
259 return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
260}
261
262static struct xdp_frame *ptr_to_xdp(void *ptr)
263{
264 return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
265}
266
267/* Converting between virtqueue no. and kernel tx/rx queue no.
268 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
269 */
270static int vq2txq(struct virtqueue *vq)
271{
272 return (vq->index - 1) / 2;
273}
274
275static int txq2vq(int txq)
276{
277 return txq * 2 + 1;
278}
279
280static int vq2rxq(struct virtqueue *vq)
281{
282 return vq->index / 2;
283}
284
285static int rxq2vq(int rxq)
286{
287 return rxq * 2;
288}
289
290static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
291{
292 return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
293}
294
295/*
296 * private is used to chain pages for big packets, put the whole
297 * most recent used list in the beginning for reuse
298 */
299static void give_pages(struct receive_queue *rq, struct page *page)
300{
301 struct page *end;
302
303 /* Find end of list, sew whole thing into vi->rq.pages. */
304 for (end = page; end->private; end = (struct page *)end->private);
305 end->private = (unsigned long)rq->pages;
306 rq->pages = page;
307}
308
309static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
310{
311 struct page *p = rq->pages;
312
313 if (p) {
314 rq->pages = (struct page *)p->private;
315 /* clear private here, it is used to chain pages */
316 p->private = 0;
317 } else
318 p = alloc_page(gfp_mask);
319 return p;
320}
321
322static void virtqueue_napi_schedule(struct napi_struct *napi,
323 struct virtqueue *vq)
324{
325 if (napi_schedule_prep(napi)) {
326 virtqueue_disable_cb(vq);
327 __napi_schedule(napi);
328 }
329}
330
331static void virtqueue_napi_complete(struct napi_struct *napi,
332 struct virtqueue *vq, int processed)
333{
334 int opaque;
335
336 opaque = virtqueue_enable_cb_prepare(vq);
337 if (napi_complete_done(napi, processed)) {
338 if (unlikely(virtqueue_poll(vq, opaque)))
339 virtqueue_napi_schedule(napi, vq);
340 } else {
341 virtqueue_disable_cb(vq);
342 }
343}
344
345static void skb_xmit_done(struct virtqueue *vq)
346{
347 struct virtnet_info *vi = vq->vdev->priv;
348 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
349
350 /* Suppress further interrupts. */
351 virtqueue_disable_cb(vq);
352
353 if (napi->weight)
354 virtqueue_napi_schedule(napi, vq);
355 else
356 /* We were probably waiting for more output buffers. */
357 netif_wake_subqueue(vi->dev, vq2txq(vq));
358}
359
360#define MRG_CTX_HEADER_SHIFT 22
361static void *mergeable_len_to_ctx(unsigned int truesize,
362 unsigned int headroom)
363{
364 return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
365}
366
367static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
368{
369 return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
370}
371
372static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
373{
374 return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
375}
376
377/* Called from bottom half context */
378static struct sk_buff *page_to_skb(struct virtnet_info *vi,
379 struct receive_queue *rq,
380 struct page *page, unsigned int offset,
381 unsigned int len, unsigned int truesize,
382 bool hdr_valid, unsigned int metasize,
383 bool whole_page)
384{
385 struct sk_buff *skb;
386 struct virtio_net_hdr_mrg_rxbuf *hdr;
387 unsigned int copy, hdr_len, hdr_padded_len;
388 struct page *page_to_free = NULL;
389 int tailroom, shinfo_size;
390 char *p, *hdr_p, *buf;
391
392 p = page_address(page) + offset;
393 hdr_p = p;
394
395 hdr_len = vi->hdr_len;
396 if (vi->mergeable_rx_bufs)
397 hdr_padded_len = sizeof(*hdr);
398 else
399 hdr_padded_len = sizeof(struct padded_vnet_hdr);
400
401 /* If whole_page, there is an offset between the beginning of the
402 * data and the allocated space, otherwise the data and the allocated
403 * space are aligned.
404 *
405 * Buffers with headroom use PAGE_SIZE as alloc size, see
406 * add_recvbuf_mergeable() + get_mergeable_buf_len()
407 */
408 if (whole_page) {
409 /* Buffers with whole_page use PAGE_SIZE as alloc size,
410 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
411 */
412 truesize = PAGE_SIZE;
413
414 /* page maybe head page, so we should get the buf by p, not the
415 * page
416 */
417 tailroom = truesize - len - offset_in_page(p);
418 buf = (char *)((unsigned long)p & PAGE_MASK);
419 } else {
420 tailroom = truesize - len;
421 buf = p;
422 }
423
424 len -= hdr_len;
425 offset += hdr_padded_len;
426 p += hdr_padded_len;
427
428 shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
429
430 /* copy small packet so we can reuse these pages */
431 if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
432 skb = build_skb(buf, truesize);
433 if (unlikely(!skb))
434 return NULL;
435
436 skb_reserve(skb, p - buf);
437 skb_put(skb, len);
438
439 page = (struct page *)page->private;
440 if (page)
441 give_pages(rq, page);
442 goto ok;
443 }
444
445 /* copy small packet so we can reuse these pages for small data */
446 skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
447 if (unlikely(!skb))
448 return NULL;
449
450 /* Copy all frame if it fits skb->head, otherwise
451 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
452 */
453 if (len <= skb_tailroom(skb))
454 copy = len;
455 else
456 copy = ETH_HLEN + metasize;
457 skb_put_data(skb, p, copy);
458
459 len -= copy;
460 offset += copy;
461
462 if (vi->mergeable_rx_bufs) {
463 if (len)
464 skb_add_rx_frag(skb, 0, page, offset, len, truesize);
465 else
466 page_to_free = page;
467 goto ok;
468 }
469
470 /*
471 * Verify that we can indeed put this data into a skb.
472 * This is here to handle cases when the device erroneously
473 * tries to receive more than is possible. This is usually
474 * the case of a broken device.
475 */
476 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
477 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
478 dev_kfree_skb(skb);
479 return NULL;
480 }
481 BUG_ON(offset >= PAGE_SIZE);
482 while (len) {
483 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
484 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
485 frag_size, truesize);
486 len -= frag_size;
487 page = (struct page *)page->private;
488 offset = 0;
489 }
490
491 if (page)
492 give_pages(rq, page);
493
494ok:
495 /* hdr_valid means no XDP, so we can copy the vnet header */
496 if (hdr_valid) {
497 hdr = skb_vnet_hdr(skb);
498 memcpy(hdr, hdr_p, hdr_len);
499 }
500 if (page_to_free)
501 put_page(page_to_free);
502
503 if (metasize) {
504 __skb_pull(skb, metasize);
505 skb_metadata_set(skb, metasize);
506 }
507
508 return skb;
509}
510
511static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
512 struct send_queue *sq,
513 struct xdp_frame *xdpf)
514{
515 struct virtio_net_hdr_mrg_rxbuf *hdr;
516 int err;
517
518 if (unlikely(xdpf->headroom < vi->hdr_len))
519 return -EOVERFLOW;
520
521 /* Make room for virtqueue hdr (also change xdpf->headroom?) */
522 xdpf->data -= vi->hdr_len;
523 /* Zero header and leave csum up to XDP layers */
524 hdr = xdpf->data;
525 memset(hdr, 0, vi->hdr_len);
526 xdpf->len += vi->hdr_len;
527
528 sg_init_one(sq->sg, xdpf->data, xdpf->len);
529
530 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
531 GFP_ATOMIC);
532 if (unlikely(err))
533 return -ENOSPC; /* Caller handle free/refcnt */
534
535 return 0;
536}
537
538/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
539 * the current cpu, so it does not need to be locked.
540 *
541 * Here we use marco instead of inline functions because we have to deal with
542 * three issues at the same time: 1. the choice of sq. 2. judge and execute the
543 * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
544 * functions to perfectly solve these three problems at the same time.
545 */
546#define virtnet_xdp_get_sq(vi) ({ \
547 struct netdev_queue *txq; \
548 typeof(vi) v = (vi); \
549 unsigned int qp; \
550 \
551 if (v->curr_queue_pairs > nr_cpu_ids) { \
552 qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
553 qp += smp_processor_id(); \
554 txq = netdev_get_tx_queue(v->dev, qp); \
555 __netif_tx_acquire(txq); \
556 } else { \
557 qp = smp_processor_id() % v->curr_queue_pairs; \
558 txq = netdev_get_tx_queue(v->dev, qp); \
559 __netif_tx_lock(txq, raw_smp_processor_id()); \
560 } \
561 v->sq + qp; \
562})
563
564#define virtnet_xdp_put_sq(vi, q) { \
565 struct netdev_queue *txq; \
566 typeof(vi) v = (vi); \
567 \
568 txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
569 if (v->curr_queue_pairs > nr_cpu_ids) \
570 __netif_tx_release(txq); \
571 else \
572 __netif_tx_unlock(txq); \
573}
574
575static int virtnet_xdp_xmit(struct net_device *dev,
576 int n, struct xdp_frame **frames, u32 flags)
577{
578 struct virtnet_info *vi = netdev_priv(dev);
579 struct receive_queue *rq = vi->rq;
580 struct bpf_prog *xdp_prog;
581 struct send_queue *sq;
582 unsigned int len;
583 int packets = 0;
584 int bytes = 0;
585 int nxmit = 0;
586 int kicks = 0;
587 void *ptr;
588 int ret;
589 int i;
590
591 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
592 * indicate XDP resources have been successfully allocated.
593 */
594 xdp_prog = rcu_access_pointer(rq->xdp_prog);
595 if (!xdp_prog)
596 return -ENXIO;
597
598 sq = virtnet_xdp_get_sq(vi);
599
600 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
601 ret = -EINVAL;
602 goto out;
603 }
604
605 /* Free up any pending old buffers before queueing new ones. */
606 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
607 if (likely(is_xdp_frame(ptr))) {
608 struct xdp_frame *frame = ptr_to_xdp(ptr);
609
610 bytes += frame->len;
611 xdp_return_frame(frame);
612 } else {
613 struct sk_buff *skb = ptr;
614
615 bytes += skb->len;
616 napi_consume_skb(skb, false);
617 }
618 packets++;
619 }
620
621 for (i = 0; i < n; i++) {
622 struct xdp_frame *xdpf = frames[i];
623
624 if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
625 break;
626 nxmit++;
627 }
628 ret = nxmit;
629
630 if (flags & XDP_XMIT_FLUSH) {
631 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
632 kicks = 1;
633 }
634out:
635 u64_stats_update_begin(&sq->stats.syncp);
636 sq->stats.bytes += bytes;
637 sq->stats.packets += packets;
638 sq->stats.xdp_tx += n;
639 sq->stats.xdp_tx_drops += n - nxmit;
640 sq->stats.kicks += kicks;
641 u64_stats_update_end(&sq->stats.syncp);
642
643 virtnet_xdp_put_sq(vi, sq);
644 return ret;
645}
646
647static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
648{
649 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
650}
651
652/* We copy the packet for XDP in the following cases:
653 *
654 * 1) Packet is scattered across multiple rx buffers.
655 * 2) Headroom space is insufficient.
656 *
657 * This is inefficient but it's a temporary condition that
658 * we hit right after XDP is enabled and until queue is refilled
659 * with large buffers with sufficient headroom - so it should affect
660 * at most queue size packets.
661 * Afterwards, the conditions to enable
662 * XDP should preclude the underlying device from sending packets
663 * across multiple buffers (num_buf > 1), and we make sure buffers
664 * have enough headroom.
665 */
666static struct page *xdp_linearize_page(struct receive_queue *rq,
667 u16 *num_buf,
668 struct page *p,
669 int offset,
670 int page_off,
671 unsigned int *len)
672{
673 struct page *page = alloc_page(GFP_ATOMIC);
674
675 if (!page)
676 return NULL;
677
678 memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
679 page_off += *len;
680
681 while (--*num_buf) {
682 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
683 unsigned int buflen;
684 void *buf;
685 int off;
686
687 buf = virtqueue_get_buf(rq->vq, &buflen);
688 if (unlikely(!buf))
689 goto err_buf;
690
691 p = virt_to_head_page(buf);
692 off = buf - page_address(p);
693
694 /* guard against a misconfigured or uncooperative backend that
695 * is sending packet larger than the MTU.
696 */
697 if ((page_off + buflen + tailroom) > PAGE_SIZE) {
698 put_page(p);
699 goto err_buf;
700 }
701
702 memcpy(page_address(page) + page_off,
703 page_address(p) + off, buflen);
704 page_off += buflen;
705 put_page(p);
706 }
707
708 /* Headroom does not contribute to packet length */
709 *len = page_off - VIRTIO_XDP_HEADROOM;
710 return page;
711err_buf:
712 __free_pages(page, 0);
713 return NULL;
714}
715
716static struct sk_buff *receive_small(struct net_device *dev,
717 struct virtnet_info *vi,
718 struct receive_queue *rq,
719 void *buf, void *ctx,
720 unsigned int len,
721 unsigned int *xdp_xmit,
722 struct virtnet_rq_stats *stats)
723{
724 struct sk_buff *skb;
725 struct bpf_prog *xdp_prog;
726 unsigned int xdp_headroom = (unsigned long)ctx;
727 unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
728 unsigned int headroom = vi->hdr_len + header_offset;
729 unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
730 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
731 struct page *page = virt_to_head_page(buf);
732 unsigned int delta = 0;
733 struct page *xdp_page;
734 int err;
735 unsigned int metasize = 0;
736
737 len -= vi->hdr_len;
738 stats->bytes += len;
739
740 if (unlikely(len > GOOD_PACKET_LEN)) {
741 pr_debug("%s: rx error: len %u exceeds max size %d\n",
742 dev->name, len, GOOD_PACKET_LEN);
743 dev->stats.rx_length_errors++;
744 goto err_len;
745 }
746 rcu_read_lock();
747 xdp_prog = rcu_dereference(rq->xdp_prog);
748 if (xdp_prog) {
749 struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
750 struct xdp_frame *xdpf;
751 struct xdp_buff xdp;
752 void *orig_data;
753 u32 act;
754
755 if (unlikely(hdr->hdr.gso_type))
756 goto err_xdp;
757
758 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
759 int offset = buf - page_address(page) + header_offset;
760 unsigned int tlen = len + vi->hdr_len;
761 u16 num_buf = 1;
762
763 xdp_headroom = virtnet_get_headroom(vi);
764 header_offset = VIRTNET_RX_PAD + xdp_headroom;
765 headroom = vi->hdr_len + header_offset;
766 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
767 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
768 xdp_page = xdp_linearize_page(rq, &num_buf, page,
769 offset, header_offset,
770 &tlen);
771 if (!xdp_page)
772 goto err_xdp;
773
774 buf = page_address(xdp_page);
775 put_page(page);
776 page = xdp_page;
777 }
778
779 xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
780 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
781 xdp_headroom, len, true);
782 orig_data = xdp.data;
783 act = bpf_prog_run_xdp(xdp_prog, &xdp);
784 stats->xdp_packets++;
785
786 switch (act) {
787 case XDP_PASS:
788 /* Recalculate length in case bpf program changed it */
789 delta = orig_data - xdp.data;
790 len = xdp.data_end - xdp.data;
791 metasize = xdp.data - xdp.data_meta;
792 break;
793 case XDP_TX:
794 stats->xdp_tx++;
795 xdpf = xdp_convert_buff_to_frame(&xdp);
796 if (unlikely(!xdpf))
797 goto err_xdp;
798 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
799 if (unlikely(!err)) {
800 xdp_return_frame_rx_napi(xdpf);
801 } else if (unlikely(err < 0)) {
802 trace_xdp_exception(vi->dev, xdp_prog, act);
803 goto err_xdp;
804 }
805 *xdp_xmit |= VIRTIO_XDP_TX;
806 rcu_read_unlock();
807 goto xdp_xmit;
808 case XDP_REDIRECT:
809 stats->xdp_redirects++;
810 err = xdp_do_redirect(dev, &xdp, xdp_prog);
811 if (err)
812 goto err_xdp;
813 *xdp_xmit |= VIRTIO_XDP_REDIR;
814 rcu_read_unlock();
815 goto xdp_xmit;
816 default:
817 bpf_warn_invalid_xdp_action(act);
818 fallthrough;
819 case XDP_ABORTED:
820 trace_xdp_exception(vi->dev, xdp_prog, act);
821 goto err_xdp;
822 case XDP_DROP:
823 goto err_xdp;
824 }
825 }
826 rcu_read_unlock();
827
828 skb = build_skb(buf, buflen);
829 if (!skb) {
830 put_page(page);
831 goto err;
832 }
833 skb_reserve(skb, headroom - delta);
834 skb_put(skb, len);
835 if (!xdp_prog) {
836 buf += header_offset;
837 memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
838 } /* keep zeroed vnet hdr since XDP is loaded */
839
840 if (metasize)
841 skb_metadata_set(skb, metasize);
842
843err:
844 return skb;
845
846err_xdp:
847 rcu_read_unlock();
848 stats->xdp_drops++;
849err_len:
850 stats->drops++;
851 put_page(page);
852xdp_xmit:
853 return NULL;
854}
855
856static struct sk_buff *receive_big(struct net_device *dev,
857 struct virtnet_info *vi,
858 struct receive_queue *rq,
859 void *buf,
860 unsigned int len,
861 struct virtnet_rq_stats *stats)
862{
863 struct page *page = buf;
864 struct sk_buff *skb =
865 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);
866
867 stats->bytes += len - vi->hdr_len;
868 if (unlikely(!skb))
869 goto err;
870
871 return skb;
872
873err:
874 stats->drops++;
875 give_pages(rq, page);
876 return NULL;
877}
878
879static struct sk_buff *receive_mergeable(struct net_device *dev,
880 struct virtnet_info *vi,
881 struct receive_queue *rq,
882 void *buf,
883 void *ctx,
884 unsigned int len,
885 unsigned int *xdp_xmit,
886 struct virtnet_rq_stats *stats)
887{
888 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
889 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
890 struct page *page = virt_to_head_page(buf);
891 int offset = buf - page_address(page);
892 struct sk_buff *head_skb, *curr_skb;
893 struct bpf_prog *xdp_prog;
894 unsigned int truesize = mergeable_ctx_to_truesize(ctx);
895 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
896 unsigned int metasize = 0;
897 unsigned int frame_sz;
898 int err;
899
900 head_skb = NULL;
901 stats->bytes += len - vi->hdr_len;
902
903 if (unlikely(len > truesize)) {
904 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
905 dev->name, len, (unsigned long)ctx);
906 dev->stats.rx_length_errors++;
907 goto err_skb;
908 }
909 rcu_read_lock();
910 xdp_prog = rcu_dereference(rq->xdp_prog);
911 if (xdp_prog) {
912 struct xdp_frame *xdpf;
913 struct page *xdp_page;
914 struct xdp_buff xdp;
915 void *data;
916 u32 act;
917
918 /* Transient failure which in theory could occur if
919 * in-flight packets from before XDP was enabled reach
920 * the receive path after XDP is loaded.
921 */
922 if (unlikely(hdr->hdr.gso_type))
923 goto err_xdp;
924
925 /* Buffers with headroom use PAGE_SIZE as alloc size,
926 * see add_recvbuf_mergeable() + get_mergeable_buf_len()
927 */
928 frame_sz = headroom ? PAGE_SIZE : truesize;
929
930 /* This happens when rx buffer size is underestimated
931			 * or headroom is not enough because the buffer
932			 * was refilled before XDP was set. This should only
933 * happen for the first several packets, so we don't
934 * care much about its performance.
935 */
936 if (unlikely(num_buf > 1 ||
937 headroom < virtnet_get_headroom(vi))) {
938 /* linearize data for XDP */
939 xdp_page = xdp_linearize_page(rq, &num_buf,
940 page, offset,
941 VIRTIO_XDP_HEADROOM,
942 &len);
943 frame_sz = PAGE_SIZE;
944
945 if (!xdp_page)
946 goto err_xdp;
947 offset = VIRTIO_XDP_HEADROOM;
948 } else {
949 xdp_page = page;
950 }
951
952 /* Allow consuming headroom but reserve enough space to push
953 * the descriptor on if we get an XDP_TX return code.
954 */
955 data = page_address(xdp_page) + offset;
956 xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
957 xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
958 VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
959
960 act = bpf_prog_run_xdp(xdp_prog, &xdp);
961 stats->xdp_packets++;
962
963 switch (act) {
964 case XDP_PASS:
965 metasize = xdp.data - xdp.data_meta;
966
967			/* Recalculate offset to account for any header
968			 * adjustments, and subtract the metasize so the
969			 * metadata gets copied in page_to_skb(). Note that the
970			 * other cases do not build an skb and avoid using offset.
971			 */
972 offset = xdp.data - page_address(xdp_page) -
973 vi->hdr_len - metasize;
974
975 /* recalculate len if xdp.data, xdp.data_end or
976 * xdp.data_meta were adjusted
977 */
978 len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
979 /* We can only create skb based on xdp_page. */
980 if (unlikely(xdp_page != page)) {
981 rcu_read_unlock();
982 put_page(page);
983 head_skb = page_to_skb(vi, rq, xdp_page, offset,
984 len, PAGE_SIZE, false,
985 metasize, true);
986 return head_skb;
987 }
988 break;
989 case XDP_TX:
990 stats->xdp_tx++;
991 xdpf = xdp_convert_buff_to_frame(&xdp);
992 if (unlikely(!xdpf))
993 goto err_xdp;
994 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
995 if (unlikely(!err)) {
996 xdp_return_frame_rx_napi(xdpf);
997 } else if (unlikely(err < 0)) {
998 trace_xdp_exception(vi->dev, xdp_prog, act);
999 if (unlikely(xdp_page != page))
1000 put_page(xdp_page);
1001 goto err_xdp;
1002 }
1003 *xdp_xmit |= VIRTIO_XDP_TX;
1004 if (unlikely(xdp_page != page))
1005 put_page(page);
1006 rcu_read_unlock();
1007 goto xdp_xmit;
1008 case XDP_REDIRECT:
1009 stats->xdp_redirects++;
1010 err = xdp_do_redirect(dev, &xdp, xdp_prog);
1011 if (err) {
1012 if (unlikely(xdp_page != page))
1013 put_page(xdp_page);
1014 goto err_xdp;
1015 }
1016 *xdp_xmit |= VIRTIO_XDP_REDIR;
1017 if (unlikely(xdp_page != page))
1018 put_page(page);
1019 rcu_read_unlock();
1020 goto xdp_xmit;
1021 default:
1022 bpf_warn_invalid_xdp_action(act);
1023 fallthrough;
1024 case XDP_ABORTED:
1025 trace_xdp_exception(vi->dev, xdp_prog, act);
1026 fallthrough;
1027 case XDP_DROP:
1028 if (unlikely(xdp_page != page))
1029 __free_pages(xdp_page, 0);
1030 goto err_xdp;
1031 }
1032 }
1033 rcu_read_unlock();
1034
1035 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
1036 metasize, !!headroom);
1037 curr_skb = head_skb;
1038
1039 if (unlikely(!curr_skb))
1040 goto err_skb;
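	/* The remaining buffers of this mergeable packet are popped off the
	 * ring and appended to head_skb as page fragments; once a skb runs
	 * out of frag slots, a fresh zero-length skb is chained via
	 * frag_list and filling continues there. Fragments contiguous
	 * within a page may be coalesced below.
	 */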
1041 while (--num_buf) {
1042 int num_skb_frags;
1043
1044 buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
1045 if (unlikely(!buf)) {
1046 pr_debug("%s: rx error: %d buffers out of %d missing\n",
1047 dev->name, num_buf,
1048 virtio16_to_cpu(vi->vdev,
1049 hdr->num_buffers));
1050 dev->stats.rx_length_errors++;
1051 goto err_buf;
1052 }
1053
1054 stats->bytes += len;
1055 page = virt_to_head_page(buf);
1056
1057 truesize = mergeable_ctx_to_truesize(ctx);
1058 if (unlikely(len > truesize)) {
1059 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1060 dev->name, len, (unsigned long)ctx);
1061 dev->stats.rx_length_errors++;
1062 goto err_skb;
1063 }
1064
1065 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
1066 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1067 struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
1068
1069 if (unlikely(!nskb))
1070 goto err_skb;
1071 if (curr_skb == head_skb)
1072 skb_shinfo(curr_skb)->frag_list = nskb;
1073 else
1074 curr_skb->next = nskb;
1075 curr_skb = nskb;
1076 head_skb->truesize += nskb->truesize;
1077 num_skb_frags = 0;
1078 }
1079 if (curr_skb != head_skb) {
1080 head_skb->data_len += len;
1081 head_skb->len += len;
1082 head_skb->truesize += truesize;
1083 }
1084 offset = buf - page_address(page);
1085 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1086 put_page(page);
1087 skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1088 len, truesize);
1089 } else {
1090 skb_add_rx_frag(curr_skb, num_skb_frags, page,
1091 offset, len, truesize);
1092 }
1093 }
1094
1095 ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
1096 return head_skb;
1097
1098err_xdp:
1099 rcu_read_unlock();
1100 stats->xdp_drops++;
1101err_skb:
1102 put_page(page);
1103 while (num_buf-- > 1) {
1104 buf = virtqueue_get_buf(rq->vq, &len);
1105 if (unlikely(!buf)) {
1106 pr_debug("%s: rx error: %d buffers missing\n",
1107 dev->name, num_buf);
1108 dev->stats.rx_length_errors++;
1109 break;
1110 }
1111 stats->bytes += len;
1112 page = virt_to_head_page(buf);
1113 put_page(page);
1114 }
1115err_buf:
1116 stats->drops++;
1117 dev_kfree_skb(head_skb);
1118xdp_xmit:
1119 return NULL;
1120}
1121
1122static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1123 void *buf, unsigned int len, void **ctx,
1124 unsigned int *xdp_xmit,
1125 struct virtnet_rq_stats *stats)
1126{
1127 struct net_device *dev = vi->dev;
1128 struct sk_buff *skb;
1129 struct virtio_net_hdr_mrg_rxbuf *hdr;
1130
1131 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1132 pr_debug("%s: short packet %i\n", dev->name, len);
1133 dev->stats.rx_length_errors++;
1134 if (vi->mergeable_rx_bufs) {
1135 put_page(virt_to_head_page(buf));
1136 } else if (vi->big_packets) {
1137 give_pages(rq, buf);
1138 } else {
1139 put_page(virt_to_head_page(buf));
1140 }
1141 return;
1142 }
1143
1144 if (vi->mergeable_rx_bufs)
1145 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1146 stats);
1147 else if (vi->big_packets)
1148 skb = receive_big(dev, vi, rq, buf, len, stats);
1149 else
1150 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1151
1152 if (unlikely(!skb))
1153 return;
1154
1155 hdr = skb_vnet_hdr(skb);
1156
1157 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
1158 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159
1160 if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1161 virtio_is_little_endian(vi->vdev))) {
1162 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1163 dev->name, hdr->hdr.gso_type,
1164 hdr->hdr.gso_size);
1165 goto frame_err;
1166 }
1167
1168 skb_record_rx_queue(skb, vq2rxq(rq->vq));
1169 skb->protocol = eth_type_trans(skb, dev);
1170 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1171 ntohs(skb->protocol), skb->len, skb->pkt_type);
1172
1173 napi_gro_receive(&rq->napi, skb);
1174 return;
1175
1176frame_err:
1177 dev->stats.rx_frame_errors++;
1178 dev_kfree_skb(skb);
1179}
1180
1181/* Unlike mergeable buffers, all buffers are allocated with the
1182 * same size, except for the headroom. For this reason we do
1183 * not need to use mergeable_len_to_ctx here - it is enough
1184 * to store the headroom as the context ignoring the truesize.
1185 */
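/* The buffer posted to the ring below is laid out as
 * [VIRTNET_RX_PAD][xdp headroom][virtio-net hdr][up to GOOD_PACKET_LEN of
 * data], with the allocation rounded up and padded for skb_shared_info so
 * that receive_small() can hand it straight to build_skb().
 */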
1186static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1187 gfp_t gfp)
1188{
1189 struct page_frag *alloc_frag = &rq->alloc_frag;
1190 char *buf;
1191 unsigned int xdp_headroom = virtnet_get_headroom(vi);
1192 void *ctx = (void *)(unsigned long)xdp_headroom;
1193 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
1194 int err;
1195
1196 len = SKB_DATA_ALIGN(len) +
1197 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1198 if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
1199 return -ENOMEM;
1200
1201 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1202 get_page(alloc_frag->page);
1203 alloc_frag->offset += len;
1204 sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
1205 vi->hdr_len + GOOD_PACKET_LEN);
1206 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1207 if (err < 0)
1208 put_page(virt_to_head_page(buf));
1209 return err;
1210}
1211
1212static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1213 gfp_t gfp)
1214{
1215 struct page *first, *list = NULL;
1216 char *p;
1217 int i, err, offset;
1218
1219 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
1220
1221 /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
1222 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
1223 first = get_a_page(rq, gfp);
1224 if (!first) {
1225 if (list)
1226 give_pages(rq, list);
1227 return -ENOMEM;
1228 }
1229 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
1230
1231 /* chain new page in list head to match sg */
1232 first->private = (unsigned long)list;
1233 list = first;
1234 }
1235
1236 first = get_a_page(rq, gfp);
1237 if (!first) {
1238 give_pages(rq, list);
1239 return -ENOMEM;
1240 }
1241 p = page_address(first);
1242
1243 /* rq->sg[0], rq->sg[1] share the same page */
1244	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
1245 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
1246
1247 /* rq->sg[1] for data packet, from offset */
1248 offset = sizeof(struct padded_vnet_hdr);
1249 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
1250
1251 /* chain first in list head */
1252 first->private = (unsigned long)list;
1253 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
1254 first, gfp);
1255 if (err < 0)
1256 give_pages(rq, first);
1257
1258 return err;
1259}
1260
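/* Size the next mergeable receive buffer from the EWMA of recent packet
 * lengths, clamped to [min_buf_len, PAGE_SIZE - hdr_len]. Illustrative
 * numbers (not from the code): with hdr_len = 12 and an EWMA of 1500,
 * len = 12 + 1500 = 1512, aligned up to 1536 with 64-byte cache lines.
 * When XDP headroom is in use (room != 0), the whole PAGE_SIZE - room is
 * used instead.
 */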
1261static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
1262 struct ewma_pkt_len *avg_pkt_len,
1263 unsigned int room)
1264{
1265 const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1266 unsigned int len;
1267
1268 if (room)
1269 return PAGE_SIZE - room;
1270
1271 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
1272 rq->min_buf_len, PAGE_SIZE - hdr_len);
1273
1274 return ALIGN(len, L1_CACHE_BYTES);
1275}
1276
1277static int add_recvbuf_mergeable(struct virtnet_info *vi,
1278 struct receive_queue *rq, gfp_t gfp)
1279{
1280 struct page_frag *alloc_frag = &rq->alloc_frag;
1281 unsigned int headroom = virtnet_get_headroom(vi);
1282 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1283 unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1284 char *buf;
1285 void *ctx;
1286 int err;
1287 unsigned int len, hole;
1288
1289	/* Extra tailroom is needed to satisfy XDP's assumptions. This
1290	 * means rx frag coalescing won't work, but since we've
1291	 * disabled GSO for XDP, it won't be a big issue.
1292	 */
1293 len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
1294 if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
1295 return -ENOMEM;
1296
1297 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1298 buf += headroom; /* advance address leaving hole at front of pkt */
1299 get_page(alloc_frag->page);
1300 alloc_frag->offset += len + room;
1301 hole = alloc_frag->size - alloc_frag->offset;
1302 if (hole < len + room) {
1303 /* To avoid internal fragmentation, if there is very likely not
1304 * enough space for another buffer, add the remaining space to
1305 * the current buffer.
1306 */
1307 len += hole;
1308 alloc_frag->offset += hole;
1309 }
1310
1311 sg_init_one(rq->sg, buf, len);
1312 ctx = mergeable_len_to_ctx(len, headroom);
1313 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
1314 if (err < 0)
1315 put_page(virt_to_head_page(buf));
1316
1317 return err;
1318}
1319
1320/*
1321 * Returns false if we couldn't fill entirely (OOM).
1322 *
1323 * Normally run in the receive path, but can also be run from ndo_open
1324 * before we're receiving packets, or from refill_work which is
1325 * careful to disable receiving (using napi_disable).
1326 */
1327static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1328 gfp_t gfp)
1329{
1330 int err;
1331 bool oom;
1332
1333 do {
1334 if (vi->mergeable_rx_bufs)
1335 err = add_recvbuf_mergeable(vi, rq, gfp);
1336 else if (vi->big_packets)
1337 err = add_recvbuf_big(vi, rq, gfp);
1338 else
1339 err = add_recvbuf_small(vi, rq, gfp);
1340
1341 oom = err == -ENOMEM;
1342 if (err)
1343 break;
1344 } while (rq->vq->num_free);
1345 if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
1346 unsigned long flags;
1347
1348 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
1349 rq->stats.kicks++;
1350 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
1351 }
1352
1353 return !oom;
1354}
1355
1356static void skb_recv_done(struct virtqueue *rvq)
1357{
1358 struct virtnet_info *vi = rvq->vdev->priv;
1359 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
1360
1361 virtqueue_napi_schedule(&rq->napi, rvq);
1362}
1363
1364static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
1365{
1366 napi_enable(napi);
1367
1368	/* If all buffers were filled by the other side before we enabled NAPI,
1369	 * we won't get another interrupt, so process any outstanding packets
1370	 * now. Calling local_bh_enable() afterwards triggers softIRQ processing.
1371 */
1372 local_bh_disable();
1373 virtqueue_napi_schedule(napi, vq);
1374 local_bh_enable();
1375}
1376
1377static void virtnet_napi_tx_enable(struct virtnet_info *vi,
1378 struct virtqueue *vq,
1379 struct napi_struct *napi)
1380{
1381 if (!napi->weight)
1382 return;
1383
1384 /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
1385 * enable the feature if this is likely affine with the transmit path.
1386 */
1387 if (!vi->affinity_hint_set) {
1388 napi->weight = 0;
1389 return;
1390 }
1391
1392 return virtnet_napi_enable(vq, napi);
1393}
1394
1395static void virtnet_napi_tx_disable(struct napi_struct *napi)
1396{
1397 if (napi->weight)
1398 napi_disable(napi);
1399}
1400
1401static void refill_work(struct work_struct *work)
1402{
1403 struct virtnet_info *vi =
1404 container_of(work, struct virtnet_info, refill.work);
1405 bool still_empty;
1406 int i;
1407
1408 for (i = 0; i < vi->curr_queue_pairs; i++) {
1409 struct receive_queue *rq = &vi->rq[i];
1410
1411 napi_disable(&rq->napi);
1412 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
1413 virtnet_napi_enable(rq->vq, &rq->napi);
1414
1415		/* In theory, this can happen: if we don't get any buffers in,
1416		 * we will *never* try to fill again.
1417 */
1418 if (still_empty)
1419 schedule_delayed_work(&vi->refill, HZ/2);
1420 }
1421}
1422
1423static int virtnet_receive(struct receive_queue *rq, int budget,
1424 unsigned int *xdp_xmit)
1425{
1426 struct virtnet_info *vi = rq->vq->vdev->priv;
1427 struct virtnet_rq_stats stats = {};
1428 unsigned int len;
1429 void *buf;
1430 int i;
1431
1432 if (!vi->big_packets || vi->mergeable_rx_bufs) {
1433 void *ctx;
1434
1435 while (stats.packets < budget &&
1436 (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
1437 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
1438 stats.packets++;
1439 }
1440 } else {
1441 while (stats.packets < budget &&
1442 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
1443 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
1444 stats.packets++;
1445 }
1446 }
1447
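	/* Opportunistically refill once more than half of
	 * min(budget, ring size) descriptors are free; if the atomic
	 * allocation fails, fall back to the refill workqueue.
	 */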
1448 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
1449 if (!try_fill_recv(vi, rq, GFP_ATOMIC))
1450 schedule_delayed_work(&vi->refill, 0);
1451 }
1452
1453 u64_stats_update_begin(&rq->stats.syncp);
1454 for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
1455 size_t offset = virtnet_rq_stats_desc[i].offset;
1456 u64 *item;
1457
1458 item = (u64 *)((u8 *)&rq->stats + offset);
1459 *item += *(u64 *)((u8 *)&stats + offset);
1460 }
1461 u64_stats_update_end(&rq->stats.syncp);
1462
1463 return stats.packets;
1464}
1465
1466static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
1467{
1468 unsigned int len;
1469 unsigned int packets = 0;
1470 unsigned int bytes = 0;
1471 void *ptr;
1472
1473 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
1474 if (likely(!is_xdp_frame(ptr))) {
1475 struct sk_buff *skb = ptr;
1476
1477 pr_debug("Sent skb %p\n", skb);
1478
1479 bytes += skb->len;
1480 napi_consume_skb(skb, in_napi);
1481 } else {
1482 struct xdp_frame *frame = ptr_to_xdp(ptr);
1483
1484 bytes += frame->len;
1485 xdp_return_frame(frame);
1486 }
1487 packets++;
1488 }
1489
1490	/* Avoid overhead when no packets have been processed; this
1491	 * happens when called speculatively from start_xmit().
1492 */
1493 if (!packets)
1494 return;
1495
1496 u64_stats_update_begin(&sq->stats.syncp);
1497 sq->stats.bytes += bytes;
1498 sq->stats.packets += packets;
1499 u64_stats_update_end(&sq->stats.syncp);
1500}
1501
1502static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1503{
1504 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1505 return false;
1506 else if (q < vi->curr_queue_pairs)
1507 return true;
1508 else
1509 return false;
1510}
1511
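/* Called from the rx NAPI handler: when tx NAPI is in use and the queue is
 * not reserved for XDP, opportunistically reclaim completed tx buffers for
 * the send queue with the same index and wake the tx queue if enough
 * descriptors have been freed.
 */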
1512static void virtnet_poll_cleantx(struct receive_queue *rq)
1513{
1514 struct virtnet_info *vi = rq->vq->vdev->priv;
1515 unsigned int index = vq2rxq(rq->vq);
1516 struct send_queue *sq = &vi->sq[index];
1517 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
1518
1519 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
1520 return;
1521
1522 if (__netif_tx_trylock(txq)) {
1523 do {
1524 virtqueue_disable_cb(sq->vq);
1525 free_old_xmit_skbs(sq, true);
1526 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
1527
1528 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
1529 netif_tx_wake_queue(txq);
1530
1531 __netif_tx_unlock(txq);
1532 }
1533}
1534
1535static int virtnet_poll(struct napi_struct *napi, int budget)
1536{
1537 struct receive_queue *rq =
1538 container_of(napi, struct receive_queue, napi);
1539 struct virtnet_info *vi = rq->vq->vdev->priv;
1540 struct send_queue *sq;
1541 unsigned int received;
1542 unsigned int xdp_xmit = 0;
1543
1544 virtnet_poll_cleantx(rq);
1545
1546 received = virtnet_receive(rq, budget, &xdp_xmit);
1547
1548 /* Out of packets? */
1549 if (received < budget)
1550 virtqueue_napi_complete(napi, rq->vq, received);
1551
1552 if (xdp_xmit & VIRTIO_XDP_REDIR)
1553 xdp_do_flush();
1554
1555 if (xdp_xmit & VIRTIO_XDP_TX) {
1556 sq = virtnet_xdp_get_sq(vi);
1557 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
1558 u64_stats_update_begin(&sq->stats.syncp);
1559 sq->stats.kicks++;
1560 u64_stats_update_end(&sq->stats.syncp);
1561 }
1562 virtnet_xdp_put_sq(vi, sq);
1563 }
1564
1565 return received;
1566}
1567
1568static int virtnet_open(struct net_device *dev)
1569{
1570 struct virtnet_info *vi = netdev_priv(dev);
1571 int i, err;
1572
1573 for (i = 0; i < vi->max_queue_pairs; i++) {
1574 if (i < vi->curr_queue_pairs)
1575 /* Make sure we have some buffers: if oom use wq. */
1576 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
1577 schedule_delayed_work(&vi->refill, 0);
1578
1579 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
1580 if (err < 0)
1581 return err;
1582
1583 err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
1584 MEM_TYPE_PAGE_SHARED, NULL);
1585 if (err < 0) {
1586 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
1587 return err;
1588 }
1589
1590 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
1591 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
1592 }
1593
1594 return 0;
1595}
1596
1597static int virtnet_poll_tx(struct napi_struct *napi, int budget)
1598{
1599 struct send_queue *sq = container_of(napi, struct send_queue, napi);
1600 struct virtnet_info *vi = sq->vq->vdev->priv;
1601 unsigned int index = vq2txq(sq->vq);
1602 struct netdev_queue *txq;
1603 int opaque;
1604 bool done;
1605
1606 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
1607 /* We don't need to enable cb for XDP */
1608 napi_complete_done(napi, 0);
1609 return 0;
1610 }
1611
1612 txq = netdev_get_tx_queue(vi->dev, index);
1613 __netif_tx_lock(txq, raw_smp_processor_id());
1614 virtqueue_disable_cb(sq->vq);
1615 free_old_xmit_skbs(sq, true);
1616
1617 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
1618 netif_tx_wake_queue(txq);
1619
1620 opaque = virtqueue_enable_cb_prepare(sq->vq);
1621
1622 done = napi_complete_done(napi, 0);
1623
1624 if (!done)
1625 virtqueue_disable_cb(sq->vq);
1626
1627 __netif_tx_unlock(txq);
1628
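	/* If napi was completed, callbacks were re-armed by
	 * virtqueue_enable_cb_prepare() above; virtqueue_poll() then checks
	 * whether more used buffers became available in the meantime and,
	 * if so, NAPI is rescheduled so those completions are not missed.
	 */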
1629 if (done) {
1630 if (unlikely(virtqueue_poll(sq->vq, opaque))) {
1631 if (napi_schedule_prep(napi)) {
1632 __netif_tx_lock(txq, raw_smp_processor_id());
1633 virtqueue_disable_cb(sq->vq);
1634 __netif_tx_unlock(txq);
1635 __napi_schedule(napi);
1636 }
1637 }
1638 }
1639
1640 return 0;
1641}
1642
1643static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
1644{
1645 struct virtio_net_hdr_mrg_rxbuf *hdr;
1646 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
1647 struct virtnet_info *vi = sq->vq->vdev->priv;
1648 int num_sg;
1649 unsigned hdr_len = vi->hdr_len;
1650 bool can_push;
1651
1652 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
1653
1654 can_push = vi->any_header_sg &&
1655 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
1656 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
1657 /* Even if we can, don't push here yet as this would skew
1658 * csum_start offset below. */
1659 if (can_push)
1660 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
1661 else
1662 hdr = skb_vnet_hdr(skb);
1663
1664 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
1665 virtio_is_little_endian(vi->vdev), false,
1666 0))
1667 return -EPROTO;
1668
1669 if (vi->mergeable_rx_bufs)
1670 hdr->num_buffers = 0;
1671
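	/* Build the scatterlist: with can_push the header is pushed into the
	 * skb headroom so a single skb_to_sgvec() covers header plus data;
	 * otherwise sg[0] points at the out-of-line header and the skb data
	 * follows from sg[1].
	 */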
1672 sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
1673 if (can_push) {
1674 __skb_push(skb, hdr_len);
1675 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
1676 if (unlikely(num_sg < 0))
1677 return num_sg;
1678 /* Pull header back to avoid skew in tx bytes calculations. */
1679 __skb_pull(skb, hdr_len);
1680 } else {
1681 sg_set_buf(sq->sg, hdr, hdr_len);
1682 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
1683 if (unlikely(num_sg < 0))
1684 return num_sg;
1685 num_sg++;
1686 }
1687 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
1688}
1689
1690static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1691{
1692 struct virtnet_info *vi = netdev_priv(dev);
1693 int qnum = skb_get_queue_mapping(skb);
1694 struct send_queue *sq = &vi->sq[qnum];
1695 int err;
1696 struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
1697 bool kick = !netdev_xmit_more();
1698 bool use_napi = sq->napi.weight;
1699
1700 /* Free up any pending old buffers before queueing new ones. */
1701 do {
1702 if (use_napi)
1703 virtqueue_disable_cb(sq->vq);
1704
1705 free_old_xmit_skbs(sq, false);
1706
1707 } while (use_napi && kick &&
1708 unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
1709
1710 /* timestamp packet in software */
1711 skb_tx_timestamp(skb);
1712
1713 /* Try to transmit */
1714 err = xmit_skb(sq, skb);
1715
1716 /* This should not happen! */
1717 if (unlikely(err)) {
1718 dev->stats.tx_fifo_errors++;
1719 if (net_ratelimit())
1720 dev_warn(&dev->dev,
1721 "Unexpected TXQ (%d) queue failure: %d\n",
1722 qnum, err);
1723 dev->stats.tx_dropped++;
1724 dev_kfree_skb_any(skb);
1725 return NETDEV_TX_OK;
1726 }
1727
1728 /* Don't wait up for transmitted skbs to be freed. */
1729 if (!use_napi) {
1730 skb_orphan(skb);
1731 nf_reset_ct(skb);
1732 }
1733
1734 /* If running out of space, stop queue to avoid getting packets that we
1735 * are then unable to transmit.
1736 * An alternative would be to force queuing layer to requeue the skb by
1737 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
1738 * returned in a normal path of operation: it means that driver is not
1739 * maintaining the TX queue stop/start state properly, and causes
1740 * the stack to do a non-trivial amount of useless work.
1741 * Since most packets only take 1 or 2 ring slots, stopping the queue
1742 * early means 16 slots are typically wasted.
1743 */
1744 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
1745 netif_stop_subqueue(dev, qnum);
1746 if (!use_napi &&
1747 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1748 /* More just got used, free them then recheck. */
1749 free_old_xmit_skbs(sq, false);
1750 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1751 netif_start_subqueue(dev, qnum);
1752 virtqueue_disable_cb(sq->vq);
1753 }
1754 }
1755 }
1756
1757 if (kick || netif_xmit_stopped(txq)) {
1758 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
1759 u64_stats_update_begin(&sq->stats.syncp);
1760 sq->stats.kicks++;
1761 u64_stats_update_end(&sq->stats.syncp);
1762 }
1763 }
1764
1765 return NETDEV_TX_OK;
1766}
1767
1768/*
1769 * Send command via the control virtqueue and check status. Commands
1770 * supported by the hypervisor, as indicated by feature bits, should
1771 * never fail unless improperly formatted.
1772 */
1773static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1774 struct scatterlist *out)
1775{
1776 struct scatterlist *sgs[4], hdr, stat;
1777 unsigned out_num = 0, tmp;
1778 int ret;
1779
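	/* The command below is assembled as: sg[0] = header (class/cmd), an
	 * optional sg for command-specific payload, and a final
	 * device-writable sg carrying the one-byte status that the device
	 * fills in.
	 */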
1780 /* Caller should know better */
1781 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
1782
1783 vi->ctrl->status = ~0;
1784 vi->ctrl->hdr.class = class;
1785 vi->ctrl->hdr.cmd = cmd;
1786 /* Add header */
1787 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
1788 sgs[out_num++] = &hdr;
1789
1790 if (out)
1791 sgs[out_num++] = out;
1792
1793 /* Add return status. */
1794 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
1795 sgs[out_num] = &stat;
1796
1797 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1798 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1799 if (ret < 0) {
1800 dev_warn(&vi->vdev->dev,
1801			 "Failed to add sgs for command vq: %d.\n", ret);
1802 return false;
1803 }
1804
1805 if (unlikely(!virtqueue_kick(vi->cvq)))
1806 return vi->ctrl->status == VIRTIO_NET_OK;
1807
1808	/* Spin for a response; the kick causes an ioport write, trapping
1809 * into the hypervisor, so the request should be handled immediately.
1810 */
1811 while (!virtqueue_get_buf(vi->cvq, &tmp) &&
1812 !virtqueue_is_broken(vi->cvq))
1813 cpu_relax();
1814
1815 return vi->ctrl->status == VIRTIO_NET_OK;
1816}
1817
1818static int virtnet_set_mac_address(struct net_device *dev, void *p)
1819{
1820 struct virtnet_info *vi = netdev_priv(dev);
1821 struct virtio_device *vdev = vi->vdev;
1822 int ret;
1823 struct sockaddr *addr;
1824 struct scatterlist sg;
1825
1826 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
1827 return -EOPNOTSUPP;
1828
1829 addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
1830 if (!addr)
1831 return -ENOMEM;
1832
1833 ret = eth_prepare_mac_addr_change(dev, addr);
1834 if (ret)
1835 goto out;
1836
1837 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1838 sg_init_one(&sg, addr->sa_data, dev->addr_len);
1839 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1840 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
1841 dev_warn(&vdev->dev,
1842 "Failed to set mac address by vq command.\n");
1843 ret = -EINVAL;
1844 goto out;
1845 }
1846 } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
1847 !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1848 unsigned int i;
1849
1850 /* Naturally, this has an atomicity problem. */
1851 for (i = 0; i < dev->addr_len; i++)
1852 virtio_cwrite8(vdev,
1853 offsetof(struct virtio_net_config, mac) +
1854 i, addr->sa_data[i]);
1855 }
1856
1857 eth_commit_mac_addr_change(dev, p);
1858 ret = 0;
1859
1860out:
1861 kfree(addr);
1862 return ret;
1863}
1864
1865static void virtnet_stats(struct net_device *dev,
1866 struct rtnl_link_stats64 *tot)
1867{
1868 struct virtnet_info *vi = netdev_priv(dev);
1869 unsigned int start;
1870 int i;
1871
1872 for (i = 0; i < vi->max_queue_pairs; i++) {
1873 u64 tpackets, tbytes, rpackets, rbytes, rdrops;
1874 struct receive_queue *rq = &vi->rq[i];
1875 struct send_queue *sq = &vi->sq[i];
1876
1877 do {
1878 start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
1879 tpackets = sq->stats.packets;
1880 tbytes = sq->stats.bytes;
1881 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
1882
1883 do {
1884 start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
1885 rpackets = rq->stats.packets;
1886 rbytes = rq->stats.bytes;
1887 rdrops = rq->stats.drops;
1888 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
1889
1890 tot->rx_packets += rpackets;
1891 tot->tx_packets += tpackets;
1892 tot->rx_bytes += rbytes;
1893 tot->tx_bytes += tbytes;
1894 tot->rx_dropped += rdrops;
1895 }
1896
1897 tot->tx_dropped = dev->stats.tx_dropped;
1898 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
1899 tot->rx_length_errors = dev->stats.rx_length_errors;
1900 tot->rx_frame_errors = dev->stats.rx_frame_errors;
1901}
1902
1903static void virtnet_ack_link_announce(struct virtnet_info *vi)
1904{
1905 rtnl_lock();
1906 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
1907 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
1908 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1909 rtnl_unlock();
1910}
1911
1912static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1913{
1914 struct scatterlist sg;
1915 struct net_device *dev = vi->dev;
1916
1917 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1918 return 0;
1919
1920 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1921 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
1922
1923 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1924 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
1925		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
1926 queue_pairs);
1927 return -EINVAL;
1928 } else {
1929 vi->curr_queue_pairs = queue_pairs;
1930		/* virtnet_open() will refill when the device goes up. */
1931 if (dev->flags & IFF_UP)
1932 schedule_delayed_work(&vi->refill, 0);
1933 }
1934
1935 return 0;
1936}
1937
1938static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1939{
1940 int err;
1941
1942 rtnl_lock();
1943 err = _virtnet_set_queues(vi, queue_pairs);
1944 rtnl_unlock();
1945 return err;
1946}
1947
1948static int virtnet_close(struct net_device *dev)
1949{
1950 struct virtnet_info *vi = netdev_priv(dev);
1951 int i;
1952
1953 /* Make sure refill_work doesn't re-enable napi! */
1954 cancel_delayed_work_sync(&vi->refill);
1955
1956 for (i = 0; i < vi->max_queue_pairs; i++) {
1957 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
1958 napi_disable(&vi->rq[i].napi);
1959 virtnet_napi_tx_disable(&vi->sq[i].napi);
1960 }
1961
1962 return 0;
1963}
1964
1965static void virtnet_set_rx_mode(struct net_device *dev)
1966{
1967 struct virtnet_info *vi = netdev_priv(dev);
1968 struct scatterlist sg[2];
1969 struct virtio_net_ctrl_mac *mac_data;
1970 struct netdev_hw_addr *ha;
1971 int uc_count;
1972 int mc_count;
1973 void *buf;
1974 int i;
1975
1976 /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
1977 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1978 return;
1979
1980 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
1981 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1982
1983 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
1984
1985 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1986 VIRTIO_NET_CTRL_RX_PROMISC, sg))
1987 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1988 vi->ctrl->promisc ? "en" : "dis");
1989
1990 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
1991
1992 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1993 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1994 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1995 vi->ctrl->allmulti ? "en" : "dis");
1996
1997 uc_count = netdev_uc_count(dev);
1998 mc_count = netdev_mc_count(dev);
1999 /* MAC filter - use one buffer for both lists */
2000 buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2001 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
2002 mac_data = buf;
2003 if (!buf)
2004 return;
2005
2006 sg_init_table(sg, 2);
2007
2008 /* Store the unicast list and count in the front of the buffer */
2009 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2010 i = 0;
2011 netdev_for_each_uc_addr(ha, dev)
2012 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2013
2014 sg_set_buf(&sg[0], mac_data,
2015 sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2016
2017 /* multicast list and count fill the end */
2018 mac_data = (void *)&mac_data->macs[uc_count][0];
2019
2020 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2021 i = 0;
2022 netdev_for_each_mc_addr(ha, dev)
2023 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2024
2025 sg_set_buf(&sg[1], mac_data,
2026 sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2027
2028 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2029 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
2030 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2031
2032 kfree(buf);
2033}
2034
2035static int virtnet_vlan_rx_add_vid(struct net_device *dev,
2036 __be16 proto, u16 vid)
2037{
2038 struct virtnet_info *vi = netdev_priv(dev);
2039 struct scatterlist sg;
2040
2041 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2042 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2043
2044 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2045 VIRTIO_NET_CTRL_VLAN_ADD, &sg))
2046 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
2047 return 0;
2048}
2049
2050static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
2051 __be16 proto, u16 vid)
2052{
2053 struct virtnet_info *vi = netdev_priv(dev);
2054 struct scatterlist sg;
2055
2056 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2057 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2058
2059 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2060 VIRTIO_NET_CTRL_VLAN_DEL, &sg))
2061 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
2062 return 0;
2063}
2064
2065static void virtnet_clean_affinity(struct virtnet_info *vi)
2066{
2067 int i;
2068
2069 if (vi->affinity_hint_set) {
2070 for (i = 0; i < vi->max_queue_pairs; i++) {
2071 virtqueue_set_affinity(vi->rq[i].vq, NULL);
2072 virtqueue_set_affinity(vi->sq[i].vq, NULL);
2073 }
2074
2075 vi->affinity_hint_set = false;
2076 }
2077}
2078
2079static void virtnet_set_affinity(struct virtnet_info *vi)
2080{
2081 cpumask_var_t mask;
2082 int stragglers;
2083 int group_size;
2084 int i, j, cpu;
2085 int num_cpu;
2086 int stride;
2087
2088 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2089 virtnet_clean_affinity(vi);
2090 return;
2091 }
2092
2093 num_cpu = num_online_cpus();
2094 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2095 stragglers = num_cpu >= vi->curr_queue_pairs ?
2096 num_cpu % vi->curr_queue_pairs :
2097 0;
2098 cpu = cpumask_next(-1, cpu_online_mask);
2099
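	/* Spread online CPUs over the queue pairs: each pair gets a
	 * contiguous group of 'stride' CPUs, the first 'stragglers' pairs
	 * take one extra CPU, and the resulting mask is used both as the
	 * virtqueue interrupt affinity hint and as the XPS map for that
	 * tx queue.
	 */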
2100 for (i = 0; i < vi->curr_queue_pairs; i++) {
2101 group_size = stride + (i < stragglers ? 1 : 0);
2102
2103 for (j = 0; j < group_size; j++) {
2104 cpumask_set_cpu(cpu, mask);
2105 cpu = cpumask_next_wrap(cpu, cpu_online_mask,
2106 nr_cpu_ids, false);
2107 }
2108 virtqueue_set_affinity(vi->rq[i].vq, mask);
2109 virtqueue_set_affinity(vi->sq[i].vq, mask);
2110 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2111 cpumask_clear(mask);
2112 }
2113
2114 vi->affinity_hint_set = true;
2115 free_cpumask_var(mask);
2116}
2117
2118static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
2119{
2120 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2121 node);
2122 virtnet_set_affinity(vi);
2123 return 0;
2124}
2125
2126static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
2127{
2128 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2129 node_dead);
2130 virtnet_set_affinity(vi);
2131 return 0;
2132}
2133
2134static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
2135{
2136 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2137 node);
2138
2139 virtnet_clean_affinity(vi);
2140 return 0;
2141}
2142
2143static enum cpuhp_state virtionet_online;
2144
2145static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2146{
2147 int ret;
2148
2149 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2150 if (ret)
2151 return ret;
2152 ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2153 &vi->node_dead);
2154 if (!ret)
2155 return ret;
2156 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2157 return ret;
2158}
2159
2160static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2161{
2162 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2163 cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
2164 &vi->node_dead);
2165}
2166
2167static void virtnet_get_ringparam(struct net_device *dev,
2168 struct ethtool_ringparam *ring)
2169{
2170 struct virtnet_info *vi = netdev_priv(dev);
2171
2172 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2173 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2174 ring->rx_pending = ring->rx_max_pending;
2175 ring->tx_pending = ring->tx_max_pending;
2176}
2177
2178
2179static void virtnet_get_drvinfo(struct net_device *dev,
2180 struct ethtool_drvinfo *info)
2181{
2182 struct virtnet_info *vi = netdev_priv(dev);
2183 struct virtio_device *vdev = vi->vdev;
2184
2185 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
2186 strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
2187 strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
2188
2189}
2190
2191/* TODO: Eliminate OOO packets during switching */
2192static int virtnet_set_channels(struct net_device *dev,
2193 struct ethtool_channels *channels)
2194{
2195 struct virtnet_info *vi = netdev_priv(dev);
2196 u16 queue_pairs = channels->combined_count;
2197 int err;
2198
2199 /* We don't support separate rx/tx channels.
2200 * We don't allow setting 'other' channels.
2201 */
2202 if (channels->rx_count || channels->tx_count || channels->other_count)
2203 return -EINVAL;
2204
2205 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
2206 return -EINVAL;
2207
2208	/* For now we don't support modifying channels while XDP is loaded.
2209	 * Also, when XDP is loaded all RX queues have XDP programs, so we
2210	 * only need to check a single RX queue.
2211 */
2212 if (vi->rq[0].xdp_prog)
2213 return -EINVAL;
2214
2215 get_online_cpus();
2216 err = _virtnet_set_queues(vi, queue_pairs);
2217 if (err) {
2218 put_online_cpus();
2219 goto err;
2220 }
2221 virtnet_set_affinity(vi);
2222 put_online_cpus();
2223
2224 netif_set_real_num_tx_queues(dev, queue_pairs);
2225 netif_set_real_num_rx_queues(dev, queue_pairs);
2226 err:
2227 return err;
2228}
2229
2230static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2231{
2232 struct virtnet_info *vi = netdev_priv(dev);
2233 unsigned int i, j;
2234 u8 *p = data;
2235
2236 switch (stringset) {
2237 case ETH_SS_STATS:
2238 for (i = 0; i < vi->curr_queue_pairs; i++) {
2239 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
2240 ethtool_sprintf(&p, "rx_queue_%u_%s", i,
2241 virtnet_rq_stats_desc[j].desc);
2242 }
2243
2244 for (i = 0; i < vi->curr_queue_pairs; i++) {
2245 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
2246 ethtool_sprintf(&p, "tx_queue_%u_%s", i,
2247 virtnet_sq_stats_desc[j].desc);
2248 }
2249 break;
2250 }
2251}
2252
2253static int virtnet_get_sset_count(struct net_device *dev, int sset)
2254{
2255 struct virtnet_info *vi = netdev_priv(dev);
2256
2257 switch (sset) {
2258 case ETH_SS_STATS:
2259 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
2260 VIRTNET_SQ_STATS_LEN);
2261 default:
2262 return -EOPNOTSUPP;
2263 }
2264}
2265
2266static void virtnet_get_ethtool_stats(struct net_device *dev,
2267 struct ethtool_stats *stats, u64 *data)
2268{
2269 struct virtnet_info *vi = netdev_priv(dev);
2270 unsigned int idx = 0, start, i, j;
2271 const u8 *stats_base;
2272 size_t offset;
2273
2274 for (i = 0; i < vi->curr_queue_pairs; i++) {
2275 struct receive_queue *rq = &vi->rq[i];
2276
2277 stats_base = (u8 *)&rq->stats;
2278 do {
2279 start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
2280 for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
2281 offset = virtnet_rq_stats_desc[j].offset;
2282 data[idx + j] = *(u64 *)(stats_base + offset);
2283 }
2284 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
2285 idx += VIRTNET_RQ_STATS_LEN;
2286 }
2287
2288 for (i = 0; i < vi->curr_queue_pairs; i++) {
2289 struct send_queue *sq = &vi->sq[i];
2290
2291 stats_base = (u8 *)&sq->stats;
2292 do {
2293 start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
2294 for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
2295 offset = virtnet_sq_stats_desc[j].offset;
2296 data[idx + j] = *(u64 *)(stats_base + offset);
2297 }
2298 } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
2299 idx += VIRTNET_SQ_STATS_LEN;
2300 }
2301}
2302
2303static void virtnet_get_channels(struct net_device *dev,
2304 struct ethtool_channels *channels)
2305{
2306 struct virtnet_info *vi = netdev_priv(dev);
2307
2308 channels->combined_count = vi->curr_queue_pairs;
2309 channels->max_combined = vi->max_queue_pairs;
2310 channels->max_other = 0;
2311 channels->rx_count = 0;
2312 channels->tx_count = 0;
2313 channels->other_count = 0;
2314}
2315
2316static int virtnet_set_link_ksettings(struct net_device *dev,
2317 const struct ethtool_link_ksettings *cmd)
2318{
2319 struct virtnet_info *vi = netdev_priv(dev);
2320
2321 return ethtool_virtdev_set_link_ksettings(dev, cmd,
2322 &vi->speed, &vi->duplex);
2323}
2324
2325static int virtnet_get_link_ksettings(struct net_device *dev,
2326 struct ethtool_link_ksettings *cmd)
2327{
2328 struct virtnet_info *vi = netdev_priv(dev);
2329
2330 cmd->base.speed = vi->speed;
2331 cmd->base.duplex = vi->duplex;
2332 cmd->base.port = PORT_OTHER;
2333
2334 return 0;
2335}
2336
2337static int virtnet_set_coalesce(struct net_device *dev,
2338 struct ethtool_coalesce *ec)
2339{
2340 struct virtnet_info *vi = netdev_priv(dev);
2341 int i, napi_weight;
2342
2343 if (ec->tx_max_coalesced_frames > 1 ||
2344 ec->rx_max_coalesced_frames != 1)
2345 return -EINVAL;
2346
2347 napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
2348 if (napi_weight ^ vi->sq[0].napi.weight) {
2349 if (dev->flags & IFF_UP)
2350 return -EBUSY;
2351 for (i = 0; i < vi->max_queue_pairs; i++)
2352 vi->sq[i].napi.weight = napi_weight;
2353 }
2354
2355 return 0;
2356}
2357
2358static int virtnet_get_coalesce(struct net_device *dev,
2359 struct ethtool_coalesce *ec)
2360{
2361 struct ethtool_coalesce ec_default = {
2362 .cmd = ETHTOOL_GCOALESCE,
2363 .rx_max_coalesced_frames = 1,
2364 };
2365 struct virtnet_info *vi = netdev_priv(dev);
2366
2367 memcpy(ec, &ec_default, sizeof(ec_default));
2368
2369 if (vi->sq[0].napi.weight)
2370 ec->tx_max_coalesced_frames = 1;
2371
2372 return 0;
2373}
2374
2375static void virtnet_init_settings(struct net_device *dev)
2376{
2377 struct virtnet_info *vi = netdev_priv(dev);
2378
2379 vi->speed = SPEED_UNKNOWN;
2380 vi->duplex = DUPLEX_UNKNOWN;
2381}
2382
2383static void virtnet_update_settings(struct virtnet_info *vi)
2384{
2385 u32 speed;
2386 u8 duplex;
2387
2388 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
2389 return;
2390
2391 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
2392
2393 if (ethtool_validate_speed(speed))
2394 vi->speed = speed;
2395
2396 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
2397
2398 if (ethtool_validate_duplex(duplex))
2399 vi->duplex = duplex;
2400}
2401
2402static const struct ethtool_ops virtnet_ethtool_ops = {
2403 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
2404 .get_drvinfo = virtnet_get_drvinfo,
2405 .get_link = ethtool_op_get_link,
2406 .get_ringparam = virtnet_get_ringparam,
2407 .get_strings = virtnet_get_strings,
2408 .get_sset_count = virtnet_get_sset_count,
2409 .get_ethtool_stats = virtnet_get_ethtool_stats,
2410 .set_channels = virtnet_set_channels,
2411 .get_channels = virtnet_get_channels,
2412 .get_ts_info = ethtool_op_get_ts_info,
2413 .get_link_ksettings = virtnet_get_link_ksettings,
2414 .set_link_ksettings = virtnet_set_link_ksettings,
2415 .set_coalesce = virtnet_set_coalesce,
2416 .get_coalesce = virtnet_get_coalesce,
2417};
2418
2419static void virtnet_freeze_down(struct virtio_device *vdev)
2420{
2421 struct virtnet_info *vi = vdev->priv;
2422 int i;
2423
2424 /* Make sure no work handler is accessing the device */
2425 flush_work(&vi->config_work);
2426
2427 netif_tx_lock_bh(vi->dev);
2428 netif_device_detach(vi->dev);
2429 netif_tx_unlock_bh(vi->dev);
2430 cancel_delayed_work_sync(&vi->refill);
2431
2432 if (netif_running(vi->dev)) {
2433 for (i = 0; i < vi->max_queue_pairs; i++) {
2434 napi_disable(&vi->rq[i].napi);
2435 virtnet_napi_tx_disable(&vi->sq[i].napi);
2436 }
2437 }
2438}
2439
2440static int init_vqs(struct virtnet_info *vi);
2441
2442static int virtnet_restore_up(struct virtio_device *vdev)
2443{
2444 struct virtnet_info *vi = vdev->priv;
2445 int err, i;
2446
2447 err = init_vqs(vi);
2448 if (err)
2449 return err;
2450
2451 virtio_device_ready(vdev);
2452
2453 if (netif_running(vi->dev)) {
2454 for (i = 0; i < vi->curr_queue_pairs; i++)
2455 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2456 schedule_delayed_work(&vi->refill, 0);
2457
2458 for (i = 0; i < vi->max_queue_pairs; i++) {
2459 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2460 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2461 &vi->sq[i].napi);
2462 }
2463 }
2464
2465 netif_tx_lock_bh(vi->dev);
2466 netif_device_attach(vi->dev);
2467 netif_tx_unlock_bh(vi->dev);
2468 return err;
2469}
2470
2471static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
2472{
2473 struct scatterlist sg;
2474 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
2475
2476 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
2477
2478 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
2479 VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
2480		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
2481 return -EINVAL;
2482 }
2483
2484 return 0;
2485}
2486
2487static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
2488{
2489 u64 offloads = 0;
2490
2491 if (!vi->guest_offloads)
2492 return 0;
2493
2494 return virtnet_set_guest_offloads(vi, offloads);
2495}
2496
2497static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
2498{
2499 u64 offloads = vi->guest_offloads;
2500
2501 if (!vi->guest_offloads)
2502 return 0;
2503
2504 return virtnet_set_guest_offloads(vi, offloads);
2505}
2506
2507static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2508 struct netlink_ext_ack *extack)
2509{
2510 unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
2511 struct virtnet_info *vi = netdev_priv(dev);
2512 struct bpf_prog *old_prog;
2513 u16 xdp_qp = 0, curr_qp;
2514 int i, err;
2515
2516 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
2517 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2518 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2519 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
2520 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
2521 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
2522 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
2523 return -EOPNOTSUPP;
2524 }
2525
2526 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
2527 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
2528 return -EINVAL;
2529 }
2530
2531 if (dev->mtu > max_sz) {
2532 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
2533 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
2534 return -EINVAL;
2535 }
2536
2537 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
2538 if (prog)
2539 xdp_qp = nr_cpu_ids;
2540
2541 /* XDP requires extra queues for XDP_TX */
2542 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
2543		netdev_warn(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
2544 curr_qp + xdp_qp, vi->max_queue_pairs);
2545 xdp_qp = 0;
2546 }
2547
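	/* Note on offloads: guest offloads that would deliver GSO'd frames
	 * are cleared below while a program is attached (the receive paths
	 * reject buffers with hdr->hdr.gso_type set) and restored when the
	 * program is removed.
	 */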
2548 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
2549 if (!prog && !old_prog)
2550 return 0;
2551
2552 if (prog)
2553 bpf_prog_add(prog, vi->max_queue_pairs - 1);
2554
2555 /* Make sure NAPI is not using any XDP TX queues for RX. */
2556 if (netif_running(dev)) {
2557 for (i = 0; i < vi->max_queue_pairs; i++) {
2558 napi_disable(&vi->rq[i].napi);
2559 virtnet_napi_tx_disable(&vi->sq[i].napi);
2560 }
2561 }
2562
2563 if (!prog) {
2564 for (i = 0; i < vi->max_queue_pairs; i++) {
2565 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2566 if (i == 0)
2567 virtnet_restore_guest_offloads(vi);
2568 }
2569 synchronize_net();
2570 }
2571
2572 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2573 if (err)
2574 goto err;
2575 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2576 vi->xdp_queue_pairs = xdp_qp;
2577
2578 if (prog) {
2579 vi->xdp_enabled = true;
2580 for (i = 0; i < vi->max_queue_pairs; i++) {
2581 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2582 if (i == 0 && !old_prog)
2583 virtnet_clear_guest_offloads(vi);
2584 }
2585 } else {
2586 vi->xdp_enabled = false;
2587 }
2588
2589 for (i = 0; i < vi->max_queue_pairs; i++) {
2590 if (old_prog)
2591 bpf_prog_put(old_prog);
2592 if (netif_running(dev)) {
2593 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2594 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2595 &vi->sq[i].napi);
2596 }
2597 }
2598
2599 return 0;
2600
2601err:
2602 if (!prog) {
2603 virtnet_clear_guest_offloads(vi);
2604 for (i = 0; i < vi->max_queue_pairs; i++)
2605 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
2606 }
2607
2608 if (netif_running(dev)) {
2609 for (i = 0; i < vi->max_queue_pairs; i++) {
2610 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2611 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2612 &vi->sq[i].napi);
2613 }
2614 }
2615 if (prog)
2616 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
2617 return err;
2618}
2619
2620static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2621{
2622 switch (xdp->command) {
2623 case XDP_SETUP_PROG:
2624 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
2625 default:
2626 return -EINVAL;
2627 }
2628}
2629
2630static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
2631 size_t len)
2632{
2633 struct virtnet_info *vi = netdev_priv(dev);
2634 int ret;
2635
2636 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2637 return -EOPNOTSUPP;
2638
2639 ret = snprintf(buf, len, "sby");
2640 if (ret >= len)
2641 return -EOPNOTSUPP;
2642
2643 return 0;
2644}
2645
2646static int virtnet_set_features(struct net_device *dev,
2647 netdev_features_t features)
2648{
2649 struct virtnet_info *vi = netdev_priv(dev);
2650 u64 offloads;
2651 int err;
2652
2653 if ((dev->features ^ features) & NETIF_F_GRO_HW) {
2654 if (vi->xdp_enabled)
2655 return -EBUSY;
2656
2657 if (features & NETIF_F_GRO_HW)
2658 offloads = vi->guest_offloads_capable;
2659 else
2660 offloads = vi->guest_offloads_capable &
2661 ~GUEST_OFFLOAD_GRO_HW_MASK;
2662
2663 err = virtnet_set_guest_offloads(vi, offloads);
2664 if (err)
2665 return err;
2666 vi->guest_offloads = offloads;
2667 }
2668
2669 return 0;
2670}
2671
2672static const struct net_device_ops virtnet_netdev = {
2673 .ndo_open = virtnet_open,
2674 .ndo_stop = virtnet_close,
2675 .ndo_start_xmit = start_xmit,
2676 .ndo_validate_addr = eth_validate_addr,
2677 .ndo_set_mac_address = virtnet_set_mac_address,
2678 .ndo_set_rx_mode = virtnet_set_rx_mode,
2679 .ndo_get_stats64 = virtnet_stats,
2680 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
2681 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
2682 .ndo_bpf = virtnet_xdp,
2683 .ndo_xdp_xmit = virtnet_xdp_xmit,
2684 .ndo_features_check = passthru_features_check,
2685 .ndo_get_phys_port_name = virtnet_get_phys_port_name,
2686 .ndo_set_features = virtnet_set_features,
2687};
2688
2689static void virtnet_config_changed_work(struct work_struct *work)
2690{
2691 struct virtnet_info *vi =
2692 container_of(work, struct virtnet_info, config_work);
2693 u16 v;
2694
2695 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
2696 struct virtio_net_config, status, &v) < 0)
2697 return;
2698
2699 if (v & VIRTIO_NET_S_ANNOUNCE) {
2700 netdev_notify_peers(vi->dev);
2701 virtnet_ack_link_announce(vi);
2702 }
2703
2704 /* Ignore unknown (future) status bits */
2705 v &= VIRTIO_NET_S_LINK_UP;
2706
2707 if (vi->status == v)
2708 return;
2709
2710 vi->status = v;
2711
2712 if (vi->status & VIRTIO_NET_S_LINK_UP) {
2713 virtnet_update_settings(vi);
2714 netif_carrier_on(vi->dev);
2715 netif_tx_wake_all_queues(vi->dev);
2716 } else {
2717 netif_carrier_off(vi->dev);
2718 netif_tx_stop_all_queues(vi->dev);
2719 }
2720}
2721
2722static void virtnet_config_changed(struct virtio_device *vdev)
2723{
2724 struct virtnet_info *vi = vdev->priv;
2725
2726 schedule_work(&vi->config_work);
2727}
2728
2729static void virtnet_free_queues(struct virtnet_info *vi)
2730{
2731 int i;
2732
2733 for (i = 0; i < vi->max_queue_pairs; i++) {
2734 __netif_napi_del(&vi->rq[i].napi);
2735 __netif_napi_del(&vi->sq[i].napi);
2736 }
2737
2738 /* We called __netif_napi_del(),
2739 * we need to respect an RCU grace period before freeing vi->rq
2740 */
2741 synchronize_net();
2742
2743 kfree(vi->rq);
2744 kfree(vi->sq);
2745 kfree(vi->ctrl);
2746}
2747
2748static void _free_receive_bufs(struct virtnet_info *vi)
2749{
2750 struct bpf_prog *old_prog;
2751 int i;
2752
2753 for (i = 0; i < vi->max_queue_pairs; i++) {
2754 while (vi->rq[i].pages)
2755 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
2756
2757 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2758 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
2759 if (old_prog)
2760 bpf_prog_put(old_prog);
2761 }
2762}
2763
2764static void free_receive_bufs(struct virtnet_info *vi)
2765{
2766 rtnl_lock();
2767 _free_receive_bufs(vi);
2768 rtnl_unlock();
2769}
2770
2771static void free_receive_page_frags(struct virtnet_info *vi)
2772{
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++)
2775 if (vi->rq[i].alloc_frag.page)
2776 put_page(vi->rq[i].alloc_frag.page);
2777}
2778
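/* Drain buffers still sitting in the virtqueues at teardown time: unused TX
 * entries are either skbs or XDP frames (told apart by the pointer tag bit),
 * unused RX entries are pages or page fragments depending on the receive
 * mode in use.
 */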
2779static void free_unused_bufs(struct virtnet_info *vi)
2780{
2781 void *buf;
2782 int i;
2783
2784 for (i = 0; i < vi->max_queue_pairs; i++) {
		struct virtqueue *vq = vi->sq[i].vq;

		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2787 if (!is_xdp_frame(buf))
2788 dev_kfree_skb(buf);
2789 else
2790 xdp_return_frame(ptr_to_xdp(buf));
2791 }
2792 }
2793
2794 for (i = 0; i < vi->max_queue_pairs; i++) {
2795 struct virtqueue *vq = vi->rq[i].vq;
2796
2797 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2798 if (vi->mergeable_rx_bufs) {
2799 put_page(virt_to_head_page(buf));
2800 } else if (vi->big_packets) {
2801 give_pages(&vi->rq[i], buf);
2802 } else {
2803 put_page(virt_to_head_page(buf));
2804 }
2805 }
2806 }
2807}
2808
2809static void virtnet_del_vqs(struct virtnet_info *vi)
2810{
2811 struct virtio_device *vdev = vi->vdev;
2812
2813 virtnet_clean_affinity(vi);
2814
2815 vdev->config->del_vqs(vdev);
2816
2817 virtnet_free_queues(vi);
2818}
2819
2820/* How large should a single buffer be so a queue full of these can fit at
2821 * least one full packet?
2822 * Logic below assumes the mergeable buffer header is used.
2823 */
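/* Illustrative arithmetic (hypothetical ring size, not read from a device):
 * with a 16-entry ring, big_packets set and the 12-byte mergeable header,
 * buf_len = 12 + 14 + 4 + 65535 = 65565, min_buf_len = DIV_ROUND_UP(65565, 16)
 * = 4098, and the function returns max(4098 - 12, GOOD_PACKET_LEN) = 4086.
 */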
2824static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
2825{
2826 const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2827 unsigned int rq_size = virtqueue_get_vring_size(vq);
2828 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
2829 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
2830 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
2831
2832 return max(max(min_buf_len, hdr_len) - hdr_len,
2833 (unsigned int)GOOD_PACKET_LEN);
2834}
2835
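/* Build the find_vqs() parameter arrays (callbacks, names and per-vq ctx
 * flags), call virtio_find_vqs_ctx(), then wire the returned virtqueues into
 * vi->rq[], vi->sq[] and, when present, vi->cvq.  The parameter arrays are
 * temporary and are freed again before returning.
 */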
2836static int virtnet_find_vqs(struct virtnet_info *vi)
2837{
2838 vq_callback_t **callbacks;
2839 struct virtqueue **vqs;
2840 int ret = -ENOMEM;
2841 int i, total_vqs;
2842 const char **names;
2843 bool *ctx;
2844
	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
	 * possibly N-1 further RX/TX queue pairs used in multiqueue mode,
	 * followed by a possible control vq.
	 */
2849 total_vqs = vi->max_queue_pairs * 2 +
2850 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
2851
2852 /* Allocate space for find_vqs parameters */
2853 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
2854 if (!vqs)
2855 goto err_vq;
2856 callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
2857 if (!callbacks)
2858 goto err_callback;
2859 names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
2860 if (!names)
2861 goto err_names;
2862 if (!vi->big_packets || vi->mergeable_rx_bufs) {
2863 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
2864 if (!ctx)
2865 goto err_ctx;
2866 } else {
2867 ctx = NULL;
2868 }
2869
2870 /* Parameters for control virtqueue, if any */
2871 if (vi->has_cvq) {
2872 callbacks[total_vqs - 1] = NULL;
2873 names[total_vqs - 1] = "control";
2874 }
2875
2876 /* Allocate/initialize parameters for send/receive virtqueues */
2877 for (i = 0; i < vi->max_queue_pairs; i++) {
2878 callbacks[rxq2vq(i)] = skb_recv_done;
2879 callbacks[txq2vq(i)] = skb_xmit_done;
2880 sprintf(vi->rq[i].name, "input.%d", i);
2881 sprintf(vi->sq[i].name, "output.%d", i);
2882 names[rxq2vq(i)] = vi->rq[i].name;
2883 names[txq2vq(i)] = vi->sq[i].name;
2884 if (ctx)
2885 ctx[rxq2vq(i)] = true;
2886 }
2887
2888 ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
2889 names, ctx, NULL);
2890 if (ret)
2891 goto err_find;
2892
2893 if (vi->has_cvq) {
2894 vi->cvq = vqs[total_vqs - 1];
2895 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
2896 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2897 }
2898
2899 for (i = 0; i < vi->max_queue_pairs; i++) {
2900 vi->rq[i].vq = vqs[rxq2vq(i)];
2901 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
2902 vi->sq[i].vq = vqs[txq2vq(i)];
2903 }
2904
	/* Success: ret == 0.  Fall through to free the temporary arrays that
	 * were only needed as find_vqs() parameters; the virtqueues
	 * themselves stay allocated.
	 */

2908err_find:
2909 kfree(ctx);
2910err_ctx:
2911 kfree(names);
2912err_names:
2913 kfree(callbacks);
2914err_callback:
2915 kfree(vqs);
2916err_vq:
2917 return ret;
2918}
2919
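/* Allocate the per-queue-pair rq/sq arrays and the control buffer, set up
 * the delayed refill work, register RX and TX NAPI contexts, and initialise
 * the scatterlists, packet-length EWMA and statistics of every queue pair.
 */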
2920static int virtnet_alloc_queues(struct virtnet_info *vi)
2921{
2922 int i;
2923
2924 if (vi->has_cvq) {
2925 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
2926 if (!vi->ctrl)
2927 goto err_ctrl;
2928 } else {
2929 vi->ctrl = NULL;
2930 }
2931 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
2932 if (!vi->sq)
2933 goto err_sq;
2934 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
2935 if (!vi->rq)
2936 goto err_rq;
2937
2938 INIT_DELAYED_WORK(&vi->refill, refill_work);
2939 for (i = 0; i < vi->max_queue_pairs; i++) {
2940 vi->rq[i].pages = NULL;
2941 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
2942 napi_weight);
2943 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
2944 napi_tx ? napi_weight : 0);
2945
2946 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
2947 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
2948 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
2949
2950 u64_stats_init(&vi->rq[i].stats.syncp);
2951 u64_stats_init(&vi->sq[i].stats.syncp);
2952 }
2953
2954 return 0;
2955
2956err_rq:
2957 kfree(vi->sq);
2958err_sq:
2959 kfree(vi->ctrl);
2960err_ctrl:
2961 return -ENOMEM;
2962}
2963
2964static int init_vqs(struct virtnet_info *vi)
2965{
2966 int ret;
2967
2968 /* Allocate send & receive queues */
2969 ret = virtnet_alloc_queues(vi);
2970 if (ret)
2971 goto err;
2972
2973 ret = virtnet_find_vqs(vi);
2974 if (ret)
2975 goto err_free;
2976
2977 get_online_cpus();
2978 virtnet_set_affinity(vi);
2979 put_online_cpus();
2980
2981 return 0;
2982
2983err_free:
2984 virtnet_free_queues(vi);
2985err:
2986 return ret;
2987}
2988
2989#ifdef CONFIG_SYSFS
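/* Per-RX-queue sysfs attribute: reports the buffer size that would currently
 * be allocated for this queue, derived from the EWMA of received packet
 * lengths plus the required headroom and tailroom.
 */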
2990static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
2991 char *buf)
2992{
2993 struct virtnet_info *vi = netdev_priv(queue->dev);
2994 unsigned int queue_index = get_netdev_rx_queue_index(queue);
2995 unsigned int headroom = virtnet_get_headroom(vi);
2996 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2997 struct ewma_pkt_len *avg;
2998
2999 BUG_ON(queue_index >= vi->max_queue_pairs);
3000 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
3001 return sprintf(buf, "%u\n",
3002 get_mergeable_buf_len(&vi->rq[queue_index], avg,
3003 SKB_DATA_ALIGN(headroom + tailroom)));
3004}
3005
3006static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
3007 __ATTR_RO(mergeable_rx_buffer_size);
3008
3009static struct attribute *virtio_net_mrg_rx_attrs[] = {
3010 &mergeable_rx_buffer_size_attribute.attr,
3011 NULL
3012};
3013
3014static const struct attribute_group virtio_net_mrg_rx_group = {
3015 .name = "virtio_net",
3016 .attrs = virtio_net_mrg_rx_attrs
3017};
3018#endif
3019
3020static bool virtnet_fail_on_feature(struct virtio_device *vdev,
3021 unsigned int fbit,
3022 const char *fname, const char *dname)
3023{
3024 if (!virtio_has_feature(vdev, fbit))
3025 return false;
3026
	dev_err(&vdev->dev, "device advertises feature %s but not %s\n",
3028 fname, dname);
3029
3030 return true;
3031}
3032
3033#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
3034 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
3035
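/* Reject devices that advertise features depending on the control virtqueue
 * (RX filtering, VLAN filtering, link announce, multiqueue, MAC setting)
 * without also offering VIRTIO_NET_F_CTRL_VQ.
 */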
3036static bool virtnet_validate_features(struct virtio_device *vdev)
3037{
3038 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
3039 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
3040 "VIRTIO_NET_F_CTRL_VQ") ||
3041 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
3042 "VIRTIO_NET_F_CTRL_VQ") ||
3043 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
3044 "VIRTIO_NET_F_CTRL_VQ") ||
3045 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
3046 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
3047 "VIRTIO_NET_F_CTRL_VQ"))) {
3048 return false;
3049 }
3050
3051 return true;
3052}
3053
3054#define MIN_MTU ETH_MIN_MTU
3055#define MAX_MTU ETH_MAX_MTU
3056
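/* Early validation callback, run before feature negotiation is finalised:
 * require config space access, check feature dependencies, and clear
 * VIRTIO_NET_F_MTU if the advertised MTU is below the legal minimum.
 */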
3057static int virtnet_validate(struct virtio_device *vdev)
3058{
3059 if (!vdev->config->get) {
3060 dev_err(&vdev->dev, "%s failure: config access disabled\n",
3061 __func__);
3062 return -EINVAL;
3063 }
3064
3065 if (!virtnet_validate_features(vdev))
3066 return -EINVAL;
3067
3068 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
3069 int mtu = virtio_cread16(vdev,
3070 offsetof(struct virtio_net_config,
3071 mtu));
3072 if (mtu < MIN_MTU)
3073 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
3074 }
3075
3076 return 0;
3077}
3078
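/* Device probe: size the netdev from the advertised features, choose the
 * receive strategy (mergeable buffers vs. big packets), allocate and find
 * the virtqueues, set up optional standby/failover, then register the net
 * device and the CPU hotplug notifier.
 */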
3079static int virtnet_probe(struct virtio_device *vdev)
3080{
3081 int i, err = -ENOMEM;
3082 struct net_device *dev;
3083 struct virtnet_info *vi;
3084 u16 max_queue_pairs;
3085 int mtu;
3086
3087 /* Find if host supports multiqueue virtio_net device */
3088 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
3089 struct virtio_net_config,
3090 max_virtqueue_pairs, &max_queue_pairs);
3091
	/* We need at least 2 queues */
3093 if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
3094 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
3095 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
3096 max_queue_pairs = 1;
3097
3098 /* Allocate ourselves a network device with room for our info */
3099 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
3100 if (!dev)
3101 return -ENOMEM;
3102
3103 /* Set up network device as normal. */
3104 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
3105 IFF_TX_SKB_NO_LINEAR;
3106 dev->netdev_ops = &virtnet_netdev;
3107 dev->features = NETIF_F_HIGHDMA;
3108
3109 dev->ethtool_ops = &virtnet_ethtool_ops;
3110 SET_NETDEV_DEV(dev, &vdev->dev);
3111
3112 /* Do we support "hardware" checksums? */
3113 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
3114 /* This opens up the world of extra features. */
3115 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
3116 if (csum)
3117 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
3118
3119 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
3120 dev->hw_features |= NETIF_F_TSO
3121 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
3122 }
3123 /* Individual feature bits: what can host handle? */
3124 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
3125 dev->hw_features |= NETIF_F_TSO;
3126 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
3127 dev->hw_features |= NETIF_F_TSO6;
3128 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
3129 dev->hw_features |= NETIF_F_TSO_ECN;
3130
3131 dev->features |= NETIF_F_GSO_ROBUST;
3132
3133 if (gso)
3134 dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
3135 /* (!csum && gso) case will be fixed by register_netdev() */
3136 }
3137 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
3138 dev->features |= NETIF_F_RXCSUM;
3139 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3140 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
3141 dev->features |= NETIF_F_GRO_HW;
3142 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
3143 dev->hw_features |= NETIF_F_GRO_HW;
3144
3145 dev->vlan_features = dev->features;
3146
3147 /* MTU range: 68 - 65535 */
3148 dev->min_mtu = MIN_MTU;
3149 dev->max_mtu = MAX_MTU;
3150
3151 /* Configuration may specify what MAC to use. Otherwise random. */
3152 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
3153 virtio_cread_bytes(vdev,
3154 offsetof(struct virtio_net_config, mac),
3155 dev->dev_addr, dev->addr_len);
3156 else
3157 eth_hw_addr_random(dev);
3158
3159 /* Set up our device-specific information */
3160 vi = netdev_priv(dev);
3161 vi->dev = dev;
3162 vi->vdev = vdev;
3163 vdev->priv = vi;
3164
3165 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
3166
3167 /* If we can receive ANY GSO packets, we must allocate large ones. */
3168 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3169 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3170 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
3171 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
3172 vi->big_packets = true;
3173
3174 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
3175 vi->mergeable_rx_bufs = true;
3176
3177 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
3178 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
3179 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
3180 else
3181 vi->hdr_len = sizeof(struct virtio_net_hdr);
3182
3183 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
3184 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
3185 vi->any_header_sg = true;
3186
3187 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
3188 vi->has_cvq = true;
3189
3190 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
3191 mtu = virtio_cread16(vdev,
3192 offsetof(struct virtio_net_config,
3193 mtu));
3194 if (mtu < dev->min_mtu) {
3195 /* Should never trigger: MTU was previously validated
3196 * in virtnet_validate.
3197 */
3198 dev_err(&vdev->dev,
				"device MTU appears to have changed, it is now %d < %d\n",
3200 mtu, dev->min_mtu);
3201 err = -EINVAL;
3202 goto free;
3203 }
3204
3205 dev->mtu = mtu;
3206 dev->max_mtu = mtu;
3207
3208 /* TODO: size buffers correctly in this case. */
3209 if (dev->mtu > ETH_DATA_LEN)
3210 vi->big_packets = true;
3211 }
3212
3213 if (vi->any_header_sg)
3214 dev->needed_headroom = vi->hdr_len;
3215
3216 /* Enable multiqueue by default */
3217 if (num_online_cpus() >= max_queue_pairs)
3218 vi->curr_queue_pairs = max_queue_pairs;
3219 else
3220 vi->curr_queue_pairs = num_online_cpus();
3221 vi->max_queue_pairs = max_queue_pairs;
3222
3223 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
3224 err = init_vqs(vi);
3225 if (err)
3226 goto free;
3227
3228#ifdef CONFIG_SYSFS
3229 if (vi->mergeable_rx_bufs)
3230 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
3231#endif
3232 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
3233 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
3234
3235 virtnet_init_settings(dev);
3236
3237 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
3238 vi->failover = net_failover_create(vi->dev);
3239 if (IS_ERR(vi->failover)) {
3240 err = PTR_ERR(vi->failover);
3241 goto free_vqs;
3242 }
3243 }
3244
3245 err = register_netdev(dev);
3246 if (err) {
3247 pr_debug("virtio_net: registering device failed\n");
3248 goto free_failover;
3249 }
3250
3251 virtio_device_ready(vdev);
3252
3253 err = virtnet_cpu_notif_add(vi);
3254 if (err) {
3255 pr_debug("virtio_net: registering cpu notifier failed\n");
3256 goto free_unregister_netdev;
3257 }
3258
3259 virtnet_set_queues(vi, vi->curr_queue_pairs);
3260
	/* Assume link up if device can't report link status,
	 * otherwise get link status from config.
	 */
3263 netif_carrier_off(dev);
3264 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
3265 schedule_work(&vi->config_work);
3266 } else {
3267 vi->status = VIRTIO_NET_S_LINK_UP;
3268 virtnet_update_settings(vi);
3269 netif_carrier_on(dev);
3270 }
3271
3272 for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
3273 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
3274 set_bit(guest_offloads[i], &vi->guest_offloads);
3275 vi->guest_offloads_capable = vi->guest_offloads;
3276
	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
3278 dev->name, max_queue_pairs);
3279
3280 return 0;
3281
3282free_unregister_netdev:
3283 vi->vdev->config->reset(vdev);
3284
3285 unregister_netdev(dev);
3286free_failover:
3287 net_failover_destroy(vi->failover);
3288free_vqs:
3289 cancel_delayed_work_sync(&vi->refill);
3290 free_receive_page_frags(vi);
3291 virtnet_del_vqs(vi);
3292free:
3293 free_netdev(dev);
3294 return err;
3295}
3296
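/* Common teardown used by the remove, freeze and restore-error paths: reset
 * the device so it stops using the rings, free any buffers still queued,
 * and delete the virtqueues.
 */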
3297static void remove_vq_common(struct virtnet_info *vi)
3298{
3299 vi->vdev->config->reset(vi->vdev);
3300
3301 /* Free unused buffers in both send and recv, if any. */
3302 free_unused_bufs(vi);
3303
3304 free_receive_bufs(vi);
3305
3306 free_receive_page_frags(vi);
3307
3308 virtnet_del_vqs(vi);
3309}
3310
3311static void virtnet_remove(struct virtio_device *vdev)
3312{
3313 struct virtnet_info *vi = vdev->priv;
3314
3315 virtnet_cpu_notif_remove(vi);
3316
3317 /* Make sure no work handler is accessing the device. */
3318 flush_work(&vi->config_work);
3319
3320 unregister_netdev(vi->dev);
3321
3322 net_failover_destroy(vi->failover);
3323
3324 remove_vq_common(vi);
3325
3326 free_netdev(vi->dev);
3327}
3328
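/* PM hooks: freeze tears the virtqueues down completely across suspend;
 * restore rebuilds them, restores the queue pair count and re-registers the
 * CPU hotplug notifier.
 */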
3329static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
3330{
3331 struct virtnet_info *vi = vdev->priv;
3332
3333 virtnet_cpu_notif_remove(vi);
3334 virtnet_freeze_down(vdev);
3335 remove_vq_common(vi);
3336
3337 return 0;
3338}
3339
3340static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
3341{
3342 struct virtnet_info *vi = vdev->priv;
3343 int err;
3344
3345 err = virtnet_restore_up(vdev);
3346 if (err)
3347 return err;
3348 virtnet_set_queues(vi, vi->curr_queue_pairs);
3349
3350 err = virtnet_cpu_notif_add(vi);
3351 if (err) {
3352 virtnet_freeze_down(vdev);
3353 remove_vq_common(vi);
3354 return err;
3355 }
3356
3357 return 0;
3358}
3359
3360static struct virtio_device_id id_table[] = {
3361 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
3362 { 0 },
3363};
3364
3365#define VIRTNET_FEATURES \
3366 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
3367 VIRTIO_NET_F_MAC, \
3368 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
3369 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
3370 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
3371 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
3372 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
3373 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
3374 VIRTIO_NET_F_CTRL_MAC_ADDR, \
3375 VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
3376 VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY
3377
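/* The virtio core negotiates from "features" for modern devices and from
 * "features_legacy" (which additionally allows VIRTIO_NET_F_GSO and
 * VIRTIO_F_ANY_LAYOUT) for legacy devices.
 */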
3378static unsigned int features[] = {
3379 VIRTNET_FEATURES,
3380};
3381
3382static unsigned int features_legacy[] = {
3383 VIRTNET_FEATURES,
3384 VIRTIO_NET_F_GSO,
3385 VIRTIO_F_ANY_LAYOUT,
3386};
3387
3388static struct virtio_driver virtio_net_driver = {
3389 .feature_table = features,
3390 .feature_table_size = ARRAY_SIZE(features),
3391 .feature_table_legacy = features_legacy,
3392 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
3393 .driver.name = KBUILD_MODNAME,
3394 .driver.owner = THIS_MODULE,
3395 .id_table = id_table,
3396 .validate = virtnet_validate,
3397 .probe = virtnet_probe,
3398 .remove = virtnet_remove,
3399 .config_changed = virtnet_config_changed,
3400#ifdef CONFIG_PM_SLEEP
3401 .freeze = virtnet_freeze,
3402 .restore = virtnet_restore,
3403#endif
3404};
3405
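/* Module init: register one dynamic "online" CPU hotplug state and one
 * "dead" state so queue affinity is rebalanced as CPUs come and go, then
 * register the virtio driver itself.
 */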
3406static __init int virtio_net_driver_init(void)
3407{
3408 int ret;
3409
3410 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
3411 virtnet_cpu_online,
3412 virtnet_cpu_down_prep);
3413 if (ret < 0)
3414 goto out;
3415 virtionet_online = ret;
3416 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
3417 NULL, virtnet_cpu_dead);
3418 if (ret)
3419 goto err_dead;
3420
3421 ret = register_virtio_driver(&virtio_net_driver);
3422 if (ret)
3423 goto err_virtio;
3424 return 0;
3425err_virtio:
3426 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
3427err_dead:
3428 cpuhp_remove_multi_state(virtionet_online);
3429out:
3430 return ret;
3431}
3432module_init(virtio_net_driver_init);
3433
3434static __exit void virtio_net_driver_exit(void)
3435{
3436 unregister_virtio_driver(&virtio_net_driver);
3437 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
3438 cpuhp_remove_multi_state(virtionet_online);
3439}
3440module_exit(virtio_net_driver_exit);
3441
3442MODULE_DEVICE_TABLE(virtio, id_table);
3443MODULE_DESCRIPTION("Virtio network driver");
3444MODULE_LICENSE("GPL");