// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/xdp.h>

#include "vhost.h"

static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		 " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)

enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			     (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			     (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			     (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			     (1ULL << VIRTIO_F_RING_RESET)
};

enum {
	VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

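/* Per-queue cache of pointers already consumed from the tap ptr_ring.
 * vhost_net_buf_produce() refills queue[] with up to VHOST_NET_BATCH
 * entries in a single ptr_ring_consume_batched() call; head and tail
 * delimit the not-yet-consumed window, so ring locking is amortized
 * across a whole batch of packets.
 */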
#define VHOST_NET_BATCH 64
struct vhost_net_buf {
	void **queue;
	int tail;
	int head;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* For TX, first used idx for DMA done zerocopy buffers
	 * For RX, number of batched heads
	 */
	int done_idx;
	/* Number of XDP frames batched */
	int batched_xdp;
	/* an array of userspace buffers info */
	struct ubuf_info_msgzc *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
	struct ptr_ring *rx_ring;
	struct vhost_net_buf rxq;
	/* Batched XDP buffs */
	struct xdp_buff *xdp;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
	/* Private page frag cache */
	struct page_frag_cache pf_cache;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
	if (rxq->tail != rxq->head)
		return rxq->queue[rxq->head];
	else
		return NULL;
}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
	return rxq->tail - rxq->head;
}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
	return rxq->tail == rxq->head;
}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
	void *ret = vhost_net_buf_get_ptr(rxq);
	++rxq->head;
	return ret;
}

static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	rxq->head = 0;
	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
					     VHOST_NET_BATCH);
	return rxq->tail;
}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
				   vhost_net_buf_get_size(rxq),
				   tun_ptr_free);
		rxq->head = rxq->tail = 0;
	}
}

static int vhost_net_buf_peek_len(void *ptr)
{
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		return xdpf->len;
	}

	return __skb_array_len_with_tag(ptr);
}

static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (!vhost_net_buf_is_empty(rxq))
		goto out;

	if (!vhost_net_buf_produce(nvq))
		return 0;

out:
	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
	rxq->head = rxq->tail = 0;
}

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info =
			kmalloc_array(UIO_MAXIOV,
				      sizeof(*n->vqs[i].ubuf_info),
				      GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

static bool vhost_sock_xdp(struct socket *sock)
{
	return sock_flag(sock->sk, SOCK_XDP);
}

/* In case of DMA done not in order in lower device driver for some reason.
 * upend_idx is used to track end of used idx, done_idx is used to track head
 * of used idx. Once lower device DMA done contiguously, we will signal KVM
 * guest used idx.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

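/* Completion callback installed on zerocopy skbs via vhost_ubuf_ops.
 * The lower device invokes it once it no longer references the guest
 * pages; the outcome is recorded in the used ring len field so that
 * vhost_zerocopy_signal_used() can later report it to the guest.
 */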
static void vhost_zerocopy_complete(struct sk_buff *skb,
				    struct ubuf_info *ubuf_base, bool success)
{
	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

static const struct ubuf_info_ops vhost_ubuf_ops = {
	.complete = vhost_zerocopy_complete,
};

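/* local_clock() returns nanoseconds; shifting right by 10 divides by
 * 1024, giving roughly microseconds to match the unit of the
 * userspace-configured busyloop_timeout without a division.
 */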
static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(unsigned long endtime)
{
	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
		      !signal_pending(current));
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vhost_vq_get_backend(vq))
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
			       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vhost_vq_get_backend(vq);
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}

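/* Flush the batched XDP frames with a single sendmsg(): the frame array
 * is passed to the tap backend through a TUN_MSG_PTR tun_msg_ctl in
 * msg_control, then the corresponding used heads are signalled to the
 * guest. On error the frame pages are released one by one instead.
 */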
static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = nvq->batched_xdp,
		.ptr = nvq->xdp,
	};
	int i, err;

	if (nvq->batched_xdp == 0)
		goto signal_used;

	msghdr->msg_control = &ctl;
	msghdr->msg_controllen = sizeof(ctl);
	err = sock->ops->sendmsg(sock, msghdr, 0);
	if (unlikely(err < 0)) {
		vq_err(&nvq->vq, "Fail to batch sending packets\n");

		/* free pages owned by XDP; since this is an unlikely error path,
		 * keep it simple and avoid more complex bulk update for the
		 * used pages
		 */
		for (i = 0; i < nvq->batched_xdp; ++i)
			put_page(virt_to_head_page(nvq->xdp[i].data));
		nvq->batched_xdp = 0;
		nvq->done_idx = 0;
		return;
	}

signal_used:
	vhost_net_signal_used(nvq);
	nvq->batched_xdp = 0;
}

static int sock_has_rx_data(struct socket *sock)
{
	if (unlikely(!sock))
		return 0;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return !skb_queue_empty(&sock->sk->sk_receive_queue);
}

static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
					  struct vhost_virtqueue *vq)
{
	if (!vhost_vq_avail_empty(&net->dev, vq)) {
		vhost_poll_queue(&vq->poll);
	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
		vhost_disable_notify(&net->dev, vq);
		vhost_poll_queue(&vq->poll);
	}
}

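/* Busy poll on behalf of one direction while watching the other: the
 * caller holds its own vq mutex and we trylock the paired vq (a plain
 * mutex_lock() could invert the lock order). We spin until the timeout
 * expires, other work appears, or either avail ring has descriptors,
 * then queue the paired handler if it has something to do.
 */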
static void vhost_net_busy_poll(struct vhost_net *net,
				struct vhost_virtqueue *rvq,
				struct vhost_virtqueue *tvq,
				bool *busyloop_intr,
				bool poll_rx)
{
	unsigned long busyloop_timeout;
	unsigned long endtime;
	struct socket *sock;
	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;

	/* Try to hold the vq mutex of the paired virtqueue. We can't
	 * use mutex_lock() here since we can't guarantee a
	 * consistent lock ordering.
	 */
	if (!mutex_trylock(&vq->mutex))
		return;

	vhost_disable_notify(&net->dev, vq);
	sock = vhost_vq_get_backend(rvq);

	busyloop_timeout = poll_rx ? rvq->busyloop_timeout:
				     tvq->busyloop_timeout;

	preempt_disable();
	endtime = busy_clock() + busyloop_timeout;

	while (vhost_can_busy_poll(endtime)) {
		if (vhost_vq_has_work(vq)) {
			*busyloop_intr = true;
			break;
		}

		if ((sock_has_rx_data(sock) &&
		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
		    !vhost_vq_avail_empty(&net->dev, tvq))
			break;

		cpu_relax();
	}

	preempt_enable();

	if (poll_rx || sock_has_rx_data(sock))
		vhost_net_busy_poll_try_queue(net, vq);
	else if (!poll_rx) /* On tx here, sock has no rx data. */
		vhost_enable_notify(&net->dev, rvq);

	mutex_unlock(&vq->mutex);
}

static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_net_virtqueue *tnvq,
				    unsigned int *out_num, unsigned int *in_num,
				    struct msghdr *msghdr, bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;

	int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == tvq->num && tvq->busyloop_timeout) {
		/* Flush batched packets first */
		if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
			vhost_tx_batch(net, tnvq,
				       vhost_vq_get_backend(tvq),
				       msghdr);

		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);

		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}

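/* Limit outstanding zerocopy TX buffers to min(VHOST_MAX_PEND,
 * vq->num / 4). upend_idx and done_idx wrap over UIO_MAXIOV slots,
 * hence the modular distance computed below.
 */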
static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}

static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
			    size_t hdr_size, int out)
{
	/* Skip header. TODO: support TSO. */
	size_t len = iov_length(vq->iov, out);

	iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
	iov_iter_advance(iter, hdr_size);

	return iov_iter_count(iter);
}

static int get_tx_bufs(struct vhost_net *net,
		       struct vhost_net_virtqueue *nvq,
		       struct msghdr *msg,
		       unsigned int *out, unsigned int *in,
		       size_t *len, bool *busyloop_intr)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	int ret;

	ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);

	if (ret < 0 || ret == vq->num)
		return ret;

	if (*in) {
		vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
		       *out, *in);
		return -EFAULT;
	}

	/* Sanity check */
	*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
	if (*len == 0) {
		vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
		       *len, nvq->vhost_hlen);
		return -EFAULT;
	}

	return ret;
}

static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
{
	return total_len < VHOST_NET_WEIGHT &&
	       !vhost_vq_avail_empty(vq->dev, vq);
}

#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

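/* Copy one guest TX packet into a page-frag buffer laid out for the tap
 * XDP path: tun_xdp_hdr (including the vnet header), then alignment
 * padding and headroom, the packet data, and room for skb_shared_info.
 * Returns -ENOSPC if the packet cannot fit in one page, which sends the
 * caller down the ordinary sendmsg() path for this packet.
 */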
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
			       struct iov_iter *from)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
					     dev);
	struct socket *sock = vhost_vq_get_backend(vq);
	struct virtio_net_hdr *gso;
	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
	struct tun_xdp_hdr *hdr;
	size_t len = iov_iter_count(from);
	int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
	int sock_hlen = nvq->sock_hlen;
	void *buf;
	int copied;
	int ret;

	if (unlikely(len < nvq->sock_hlen))
		return -EFAULT;

	if (SKB_DATA_ALIGN(len + pad) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return -ENOSPC;

	buflen += SKB_DATA_ALIGN(len + pad);
	buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
				    SMP_CACHE_BYTES);
	if (unlikely(!buf))
		return -ENOMEM;

	copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso),
				sock_hlen, from);
	if (copied != sock_hlen) {
		ret = -EFAULT;
		goto err;
	}

	hdr = buf;
	gso = &hdr->gso;

	if (!sock_hlen)
		memset(buf, 0, pad);

	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    vhost16_to_cpu(vq, gso->csum_start) +
	    vhost16_to_cpu(vq, gso->csum_offset) + 2 >
	    vhost16_to_cpu(vq, gso->hdr_len)) {
		gso->hdr_len = cpu_to_vhost16(vq,
					      vhost16_to_cpu(vq, gso->csum_start) +
					      vhost16_to_cpu(vq, gso->csum_offset) + 2);

		if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
			ret = -EINVAL;
			goto err;
		}
	}

	len -= sock_hlen;
	copied = copy_from_iter(buf + pad, len, from);
	if (copied != len) {
		ret = -EFAULT;
		goto err;
	}

	xdp_init_buff(xdp, buflen, NULL);
	xdp_prepare_buff(xdp, buf, pad, len, true);
	hdr->buflen = buflen;

	++nvq->batched_xdp;

	return 0;

err:
	page_frag_free(buf);
	return ret;
}

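/* Copy-based TX path. If the backend socket has an unlimited send
 * buffer, packets are staged as XDP frames and flushed in batches of
 * VHOST_NET_BATCH by vhost_tx_batch(); otherwise each packet goes out
 * through sendmsg(), with MSG_MORE set while more descriptors wait.
 */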
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	int sent_pkts = 0;
	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);

	do {
		bool busyloop_intr = false;

		if (nvq->done_idx == VHOST_NET_BATCH)
			vhost_tx_batch(net, nvq, sock, &msg);

		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev,
								vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		total_len += len;

		/* For simplicity, TX batching is only enabled if
		 * sndbuf is unlimited.
		 */
		if (sock_can_batch) {
			err = vhost_net_build_xdp(nvq, &msg.msg_iter);
			if (!err) {
				goto done;
			} else if (unlikely(err != -ENOSPC)) {
				vhost_tx_batch(net, nvq, sock, &msg);
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}

			/* We can't build XDP buff, go for single
			 * packet path but let's flush batched
			 * packets.
			 */
			vhost_tx_batch(net, nvq, sock, &msg);
			msg.msg_control = NULL;
		} else {
			if (tx_can_batch(vq, total_len))
				msg.msg_flags |= MSG_MORE;
			else
				msg.msg_flags &= ~MSG_MORE;
		}

		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}
			pr_debug("Fail to send packet: err %d", err);
		} else if (unlikely(err != len))
			pr_debug("Truncated TX packet: len %d != %zd\n",
				 err, len);
done:
		vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
		vq->heads[nvq->done_idx].len = 0;
		++nvq->done_idx;
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));

	vhost_tx_batch(net, nvq, sock, &msg);
}

static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct tun_msg_ctl ctl;
	size_t len, total_len = 0;
	int err;
	struct vhost_net_ubuf_ref *ubufs;
	struct ubuf_info_msgzc *ubuf;
	bool zcopy_used;
	int sent_pkts = 0;

	do {
		bool busyloop_intr;

		/* Release DMAs done buffers first */
		vhost_zerocopy_signal_used(net, vq);

		busyloop_intr = false;
		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		zcopy_used = len >= VHOST_GOODCOPY_LEN
			     && !vhost_exceeds_maxpend(net)
			     && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			ubuf = nvq->ubuf_info + nvq->upend_idx;
			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			ubuf->ubuf.ops = &vhost_ubuf_ops;
			ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
			refcount_set(&ubuf->ubuf.refcnt, 1);
			msg.msg_control = &ctl;
			ctl.type = TUN_MSG_UBUF;
			ctl.ptr = &ubuf->ubuf;
			msg.msg_controllen = sizeof(ctl);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}
		total_len += len;
		if (tx_can_batch(vq, total_len) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;

			if (zcopy_used) {
				if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
					vhost_net_ubuf_put(ubufs);
				if (retry)
					nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
						% UIO_MAXIOV;
				else
					vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
			}
			if (retry) {
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}
			pr_debug("Fail to send packet: err %d", err);
		} else if (unlikely(err != len))
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	struct socket *sock;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	if (vhost_sock_zcopy(sock))
		handle_tx_zerocopy(net, sock);
	else
		handle_tx_copy(net, sock);

out:
	mutex_unlock(&vq->mutex);
}

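/* Length of the next pending RX packet: taken from the cached tap
 * ptr_ring when one is attached (tun/tap backends), otherwise by
 * peeking the socket receive queue under its lock, adding VLAN_HLEN
 * when a VLAN tag would be inserted on delivery.
 */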
static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_ring)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
				      bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;
	int len = peek_head_len(rnvq, sk);

	if (!len && rvq->busyloop_timeout) {
		/* Flush batched heads first */
		vhost_net_signal_used(rnvq);
		/* Both tx vq and rx socket were polled here */
		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);

		len = peek_head_len(rnvq, sk);
	}

	return len;
}

/* This is a multi-buffer version of vhost_get_desc, that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota	- headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 len;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

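/* RX path overview: each iteration sizes the receive against the next
 * pending packet, gathers enough guest buffers to hold it (up to
 * UIO_MAXIOV with VIRTIO_NET_F_MRG_RXBUF, exactly one otherwise), lets
 * recvmsg() fill them, then patches the vnet header and num_buffers
 * field in place through the saved "fixup" iterator.
 */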
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned in, log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	bool busyloop_intr = false;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;
	int recv_pkts = 0;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

	do {
		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
						      &busyloop_intr);
		if (!sock_len)
			break;
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
					vhost_len, &in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new? Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		busyloop_intr = false;
		if (nvq->rx_ring)
			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely((vhost_hlen))) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr "
				       "at addr %p\n", vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(mergeable) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		nvq->done_idx += headcount;
		if (nvq->done_idx > VHOST_NET_BATCH)
			vhost_net_signal_used(nvq);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len,
					vq->iov, in);
		total_len += vhost_len;
	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));

	if (unlikely(busyloop_intr))
		vhost_poll_queue(&vq->poll);
	else if (!sock_len)
		vhost_net_enable_vq(net, vq);
out:
	vhost_net_signal_used(nvq);
	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	void **queue;
	struct xdp_buff *xdp;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
			      GFP_KERNEL);
	if (!queue) {
		kfree(vqs);
		kvfree(n);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
	if (!xdp) {
		kfree(vqs);
		kvfree(n);
		kfree(queue);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_TX].xdp = xdp;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].batched_xdp = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		n->vqs[i].rx_ring = NULL;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
		       UIO_MAXIOV + VHOST_NET_BATCH,
		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
		       NULL);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev,
			vqs[VHOST_NET_VQ_TX]);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev,
			vqs[VHOST_NET_VQ_RX]);

	f->private_data = n;
	page_frag_cache_init(&n->pf_cache);

	return 0;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vhost_vq_get_backend(vq);
	vhost_net_disable_vq(n, vq);
	vhost_vq_set_backend(vq, NULL);
	vhost_net_buf_unproduce(nvq);
	nvq->rx_ring = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

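/* Flush pending work and, on the TX side, wait for all outstanding
 * zerocopy completions. Setting tx_flush stops new zerocopy DMAs from
 * being started while we drop our reference and wait for the refcount
 * to reach zero; the reference is then re-armed to 1 for further use.
 */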
static void vhost_net_flush(struct vhost_net *n)
{
	vhost_dev_flush(&n->dev);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
	kfree(n->dev.vqs);
	page_frag_cache_drain(&n->pf_cache);
	kvfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	int r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	if (sock->sk->sk_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
	struct ptr_ring *ring;
	ring = tun_get_tx_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = tap_get_ptr_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = NULL;
out:
	return ring;
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

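/* VHOST_NET_SET_BACKEND: attach a raw packet or tun/tap socket to one
 * virtqueue, or detach with fd == -1. The old socket keeps its
 * reference until every in-flight work item that may touch it has been
 * flushed.
 */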
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	if (fd == -1)
		vhost_clear_msg(&n->dev);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vhost_vq_get_backend(vq);
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vhost_vq_set_backend(vq, sock);
		vhost_net_buf_unproduce(nvq);
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;
		if (index == VHOST_NET_VQ_RX) {
			if (sock)
				nvq->rx_ring = get_tap_ptr_ring(sock->file);
			else
				nvq->rx_ring = NULL;
		}

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_dev_flush(&n->dev);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vhost_vq_set_backend(vq, oldsock);
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	if (sock)
		sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_iotlb *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}

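/* Pick the vnet header size: with VIRTIO_NET_F_MRG_RXBUF or
 * VIRTIO_F_VERSION_1 the header is virtio_net_hdr_mrg_rxbuf (it carries
 * num_buffers). VHOST_NET_F_VIRTIO_NET_HDR then decides whether vhost
 * or the backend socket supplies that header, which sets the
 * vhost_hlen/sock_hlen split used by the TX and RX paths.
 */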
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			       (1ULL << VIRTIO_F_VERSION_1))) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&n->dev))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_features = features;
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}

static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}

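/* Typical userspace bring-up, as a minimal sketch for orientation (not
 * part of this file; error handling and VHOST_SET_VRING_* ring setup
 * are omitted, and tap_fd stands for an already-configured tap fd):
 *
 *	int vhost = open("/dev/vhost-net", O_RDWR);
 *	uint64_t features;
 *
 *	ioctl(vhost, VHOST_SET_OWNER, NULL);
 *	ioctl(vhost, VHOST_GET_FEATURES, &features);
 *	ioctl(vhost, VHOST_SET_FEATURES, &features);
 *	struct vhost_vring_file backend = { .index = 0, .fd = tap_fd };
 *	ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);
 */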
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_NET_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_NET_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&n->dev, features);
		return 0;
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.read_iter      = vhost_net_chr_read_iter,
	.write_iter     = vhost_net_chr_write_iter,
	.poll           = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int __init vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void __exit vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");
1/* Copyright (C) 2009 Red Hat, Inc.
2 * Author: Michael S. Tsirkin <mst@redhat.com>
3 *
4 * This work is licensed under the terms of the GNU GPL, version 2.
5 *
6 * virtio-net server in host kernel.
7 */
8
9#include <linux/compat.h>
10#include <linux/eventfd.h>
11#include <linux/vhost.h>
12#include <linux/virtio_net.h>
13#include <linux/miscdevice.h>
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/mutex.h>
17#include <linux/workqueue.h>
18#include <linux/file.h>
19#include <linux/slab.h>
20#include <linux/sched/clock.h>
21#include <linux/sched/signal.h>
22#include <linux/vmalloc.h>
23
24#include <linux/net.h>
25#include <linux/if_packet.h>
26#include <linux/if_arp.h>
27#include <linux/if_tun.h>
28#include <linux/if_macvlan.h>
29#include <linux/if_tap.h>
30#include <linux/if_vlan.h>
31#include <linux/skb_array.h>
32#include <linux/skbuff.h>
33
34#include <net/sock.h>
35
36#include "vhost.h"
37
38static int experimental_zcopytx = 1;
39module_param(experimental_zcopytx, int, 0444);
40MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
41 " 1 -Enable; 0 - Disable");
42
43/* Max number of bytes transferred before requeueing the job.
44 * Using this limit prevents one virtqueue from starving others. */
45#define VHOST_NET_WEIGHT 0x80000
46
47/* Max number of packets transferred before requeueing the job.
48 * Using this limit prevents one virtqueue from starving rx. */
49#define VHOST_NET_PKT_WEIGHT(vq) ((vq)->num * 2)
50
51/* MAX number of TX used buffers for outstanding zerocopy */
52#define VHOST_MAX_PEND 128
53#define VHOST_GOODCOPY_LEN 256
54
55/*
56 * For transmit, used buffer len is unused; we override it to track buffer
57 * status internally; used for zerocopy tx only.
58 */
59/* Lower device DMA failed */
60#define VHOST_DMA_FAILED_LEN ((__force __virtio32)3)
61/* Lower device DMA done */
62#define VHOST_DMA_DONE_LEN ((__force __virtio32)2)
63/* Lower device DMA in progress */
64#define VHOST_DMA_IN_PROGRESS ((__force __virtio32)1)
65/* Buffer unused */
66#define VHOST_DMA_CLEAR_LEN ((__force __virtio32)0)
67
68#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
69
70enum {
71 VHOST_NET_FEATURES = VHOST_FEATURES |
72 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
73 (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
74 (1ULL << VIRTIO_F_IOMMU_PLATFORM)
75};
76
77enum {
78 VHOST_NET_VQ_RX = 0,
79 VHOST_NET_VQ_TX = 1,
80 VHOST_NET_VQ_MAX = 2,
81};
82
83struct vhost_net_ubuf_ref {
84 /* refcount follows semantics similar to kref:
85 * 0: object is released
86 * 1: no outstanding ubufs
87 * >1: outstanding ubufs
88 */
89 atomic_t refcount;
90 wait_queue_head_t wait;
91 struct vhost_virtqueue *vq;
92};
93
94#define VHOST_RX_BATCH 64
95struct vhost_net_buf {
96 void **queue;
97 int tail;
98 int head;
99};
100
101struct vhost_net_virtqueue {
102 struct vhost_virtqueue vq;
103 size_t vhost_hlen;
104 size_t sock_hlen;
105 /* vhost zerocopy support fields below: */
106 /* last used idx for outstanding DMA zerocopy buffers */
107 int upend_idx;
108 /* For TX, first used idx for DMA done zerocopy buffers
109 * For RX, number of batched heads
110 */
111 int done_idx;
112 /* an array of userspace buffers info */
113 struct ubuf_info *ubuf_info;
114 /* Reference counting for outstanding ubufs.
115 * Protected by vq mutex. Writers must also take device mutex. */
116 struct vhost_net_ubuf_ref *ubufs;
117 struct ptr_ring *rx_ring;
118 struct vhost_net_buf rxq;
119};
120
121struct vhost_net {
122 struct vhost_dev dev;
123 struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
124 struct vhost_poll poll[VHOST_NET_VQ_MAX];
125 /* Number of TX recently submitted.
126 * Protected by tx vq lock. */
127 unsigned tx_packets;
128 /* Number of times zerocopy TX recently failed.
129 * Protected by tx vq lock. */
130 unsigned tx_zcopy_err;
131 /* Flush in progress. Protected by tx vq lock. */
132 bool tx_flush;
133};
134
135static unsigned vhost_net_zcopy_mask __read_mostly;
136
137static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
138{
139 if (rxq->tail != rxq->head)
140 return rxq->queue[rxq->head];
141 else
142 return NULL;
143}
144
145static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
146{
147 return rxq->tail - rxq->head;
148}
149
150static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
151{
152 return rxq->tail == rxq->head;
153}
154
155static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
156{
157 void *ret = vhost_net_buf_get_ptr(rxq);
158 ++rxq->head;
159 return ret;
160}
161
162static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
163{
164 struct vhost_net_buf *rxq = &nvq->rxq;
165
166 rxq->head = 0;
167 rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
168 VHOST_RX_BATCH);
169 return rxq->tail;
170}
171
172static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
173{
174 struct vhost_net_buf *rxq = &nvq->rxq;
175
176 if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
177 ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
178 vhost_net_buf_get_size(rxq),
179 tun_ptr_free);
180 rxq->head = rxq->tail = 0;
181 }
182}
183
184static int vhost_net_buf_peek_len(void *ptr)
185{
186 if (tun_is_xdp_buff(ptr)) {
187 struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
188
189 return xdp->data_end - xdp->data;
190 }
191
192 return __skb_array_len_with_tag(ptr);
193}
194
195static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
196{
197 struct vhost_net_buf *rxq = &nvq->rxq;
198
199 if (!vhost_net_buf_is_empty(rxq))
200 goto out;
201
202 if (!vhost_net_buf_produce(nvq))
203 return 0;
204
205out:
206 return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
207}
208
209static void vhost_net_buf_init(struct vhost_net_buf *rxq)
210{
211 rxq->head = rxq->tail = 0;
212}
213
214static void vhost_net_enable_zcopy(int vq)
215{
216 vhost_net_zcopy_mask |= 0x1 << vq;
217}
218
219static struct vhost_net_ubuf_ref *
220vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
221{
222 struct vhost_net_ubuf_ref *ubufs;
223 /* No zero copy backend? Nothing to count. */
224 if (!zcopy)
225 return NULL;
226 ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
227 if (!ubufs)
228 return ERR_PTR(-ENOMEM);
229 atomic_set(&ubufs->refcount, 1);
230 init_waitqueue_head(&ubufs->wait);
231 ubufs->vq = vq;
232 return ubufs;
233}
234
235static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
236{
237 int r = atomic_sub_return(1, &ubufs->refcount);
238 if (unlikely(!r))
239 wake_up(&ubufs->wait);
240 return r;
241}
242
243static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
244{
245 vhost_net_ubuf_put(ubufs);
246 wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
247}
248
249static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
250{
251 vhost_net_ubuf_put_and_wait(ubufs);
252 kfree(ubufs);
253}
254
255static void vhost_net_clear_ubuf_info(struct vhost_net *n)
256{
257 int i;
258
259 for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
260 kfree(n->vqs[i].ubuf_info);
261 n->vqs[i].ubuf_info = NULL;
262 }
263}
264
265static int vhost_net_set_ubuf_info(struct vhost_net *n)
266{
267 bool zcopy;
268 int i;
269
270 for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
271 zcopy = vhost_net_zcopy_mask & (0x1 << i);
272 if (!zcopy)
273 continue;
274 n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
275 UIO_MAXIOV, GFP_KERNEL);
276 if (!n->vqs[i].ubuf_info)
277 goto err;
278 }
279 return 0;
280
281err:
282 vhost_net_clear_ubuf_info(n);
283 return -ENOMEM;
284}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}
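
/*
 * Worked example (illustrative): the counters reset every 1024 packets in
 * vhost_net_tx_packet(), so within each window zerocopy stays selected
 * while the error rate is at most roughly 1 in 64. E.g. with
 * tx_packets == 640 and tx_zcopy_err == 10, 640 / 64 == 10 >= 10 still
 * allows zerocopy; an 11th failed DMA in the same window flips the
 * heuristic to copying until the window rolls over.
 */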

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* The lower device may complete DMAs out of order. upend_idx tracks the
 * tail of the used-idx window (last submitted zerocopy buffer), done_idx
 * tracks its head. Once the lower device has completed DMA for a
 * contiguous run starting at done_idx, we signal the used idx to the
 * guest.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}
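
/*
 * Worked example (illustrative): with done_idx == 3 and upend_idx == 7,
 * entries 3..6 are outstanding. If heads[3] and heads[4] are DMA-done but
 * heads[5] is still VHOST_DMA_IN_PROGRESS, only the contiguous prefix
 * {3, 4} is reported to the guest and done_idx advances to 5; entry 6,
 * even if already done, waits until 5 completes. The while loop splits
 * the report in two only when the run wraps past UIO_MAXIOV.
 */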

static void vhost_zerocopy_callback(struct ubuf_info *ubuf_base, bool success)
{
	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* Set len to mark this descriptor's buffers as DMA done */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(struct vhost_dev *dev,
				unsigned long endtime)
{
	return likely(!need_resched()) &&
	       likely(!time_after(busy_clock(), endtime)) &&
	       likely(!signal_pending(current)) &&
	       !vhost_has_work(dev);
}
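
/*
 * Note (illustrative): local_clock() returns nanoseconds, so the >> 10
 * above divides by 1024, i.e. busy_clock() ticks in roughly microseconds.
 * A userspace-configured busyloop_timeout of 50 therefore spins for about
 * 50us: endtime = busy_clock() + 50, and vhost_can_busy_poll() bails out
 * as soon as that deadline passes, a signal is pending, a reschedule is
 * needed, or other vhost work is queued.
 */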

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);

	if (!vq->private_data)
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
			       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vq->private_data;
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_virtqueue *vq,
				    struct iovec iov[], unsigned int iov_size,
				    unsigned int *out_num, unsigned int *in_num)
{
	unsigned long endtime;
	int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == vq->num && vq->busyloop_timeout) {
		preempt_disable();
		endtime = busy_clock() + vq->busyloop_timeout;
		while (vhost_can_busy_poll(vq->dev, endtime) &&
		       vhost_vq_avail_empty(vq->dev, vq))
			cpu_relax();
		preempt_enable();
		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}

static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}
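
/*
 * Worked example (illustrative): the modular expression counts in-flight
 * zerocopy buffers even across wrap-around. With UIO_MAXIOV == 1024,
 * upend_idx == 5 and done_idx == 1020 give (5 + 1024 - 1020) % 1024 == 9
 * pending entries. The cap is the smaller of VHOST_MAX_PEND (128) and a
 * quarter of the ring, so a 256-entry vq allows at most 64 outstanding
 * DMAs before TX falls back to copying.
 */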

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_net_ubuf_ref *ubufs;
	bool zcopy, zcopy_used;
	int sent_pkts = 0;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	hdr_size = nvq->vhost_hlen;
	zcopy = nvq->ubufs;

	for (;;) {
		/* Release DMAs done buffers first */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
						ARRAY_SIZE(vq->iov),
						&out, &in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
			       out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		len = iov_length(vq->iov, out);
		iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
		iov_iter_advance(&msg.msg_iter, hdr_size);
		/* Sanity check */
		if (!msg_data_left(&msg)) {
			vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
			       len, hdr_size);
			break;
		}
		len = msg_data_left(&msg);

		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
			     && !vhost_exceeds_maxpend(net)
			     && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			struct ubuf_info_msgzc *ubuf;
			ubuf = nvq->ubuf_info + nvq->upend_idx;

			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->ubuf.callback = vhost_zerocopy_callback;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			refcount_set(&ubuf->ubuf.refcnt, 1);
			msg.msg_control = &ubuf->ubuf;
			msg.msg_controllen = sizeof(ubuf);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}

		total_len += len;
		if (total_len < VHOST_NET_WEIGHT &&
		    !vhost_vq_avail_empty(&net->dev, vq) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			vhost_net_enable_vq(net, vq);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: len %d != %zd\n",
				 err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
		    unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}
out:
	mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_ring)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int sk_has_rx_data(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return !skb_queue_empty(&sk->sk_receive_queue);
}

static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}

static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
{
	struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned long endtime;
	int len = peek_head_len(rvq, sk);

	if (!len && vq->busyloop_timeout) {
		/* Flush batched heads first */
		vhost_rx_signal_used(rvq);
		/* Both tx vq and rx socket were polled here */
		mutex_lock_nested(&vq->mutex, 1);
		vhost_disable_notify(&net->dev, vq);

		preempt_disable();
		endtime = busy_clock() + vq->busyloop_timeout;

		while (vhost_can_busy_poll(&net->dev, endtime) &&
		       !sk_has_rx_data(sk) &&
		       vhost_vq_avail_empty(&net->dev, vq))
			cpu_relax();

		preempt_enable();

		if (!vhost_vq_avail_empty(&net->dev, vq))
			vhost_poll_queue(&vq->poll);
		else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
			vhost_disable_notify(&net->dev, vq);
			vhost_poll_queue(&vq->poll);
		}

		mutex_unlock(&vq->mutex);

		len = peek_head_len(rvq, sk);
	}

	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc(), which works when
 * the vq has device-writable (in) descriptors only.
 * @vq - the relevant virtqueue
 * @datalen - data length we'll be reading
 * @iovcount - returned count of io vectors we fill
 * @log - vhost log
 * @log_num - log offset
 * @quota - headcount quota, 1 for big buffer
 * returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 len;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: out %d, in %d\n",
			       out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}
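
/*
 * Worked example (illustrative): with mergeable rx buffers, suppose
 * datalen == 3000 and the guest posted 1500-byte buffers. The loop takes
 * two heads: the first consumes 1500 (datalen -> 1500), the second another
 * 1500 (datalen -> 0), so headcount == 2. The final fixup sets the last
 * head's len to len + datalen; when the last buffer is larger than the
 * remaining data, datalen is negative there, so only the bytes actually
 * used are recorded. datalen still > 0 after the quota is exhausted means
 * the packet cannot fit, and UIO_MAXIOV + 1 signals truncate-and-discard
 * to the caller.
 */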

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned in, log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;

	mutex_lock_nested(&vq->mutex, 0);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
					vhost_len, &in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new? Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		if (nvq->rx_ring)
			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely(vhost_hlen)) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: len %d, expected %zd\n",
				 err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
				       vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(mergeable) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		nvq->done_idx += headcount;
		if (nvq->done_idx > VHOST_NET_BATCH)
			vhost_rx_signal_used(nvq);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			goto out;
		}
	}
	vhost_net_enable_vq(net, vq);
out:
	vhost_rx_signal_used(nvq);
	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	void **queue;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
			      GFP_KERNEL);
	if (!queue) {
		kfree(vqs);
		kvfree(n);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		n->vqs[i].rx_ring = NULL;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);

	f->private_data = n;

	return 0;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	vhost_net_disable_vq(n, vq);
	vq->private_data = NULL;
	vhost_net_buf_unproduce(nvq);
	nvq->rx_ring = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->vqs[index].vq.poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}
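
/*
 * Note (illustrative): tx_flush is the bridge between this function and
 * vhost_net_tx_select_zcopy(). The sequence is roughly:
 *
 *	n->tx_flush = true;            // no new zerocopy DMAs are started
 *	vhost_net_ubuf_put_and_wait    // drop the base ref, sleep until the
 *	                               // refcount reaches 0 (all DMAs done)
 *	n->tx_flush = false;
 *	atomic_set(..., 1);            // re-arm the base reference so the
 *	                               // ubufs object can be used again
 *
 * Taking the TX vq mutex around the flag flips orders them against a
 * concurrently running handle_tx().
 */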

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu_bh();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->dev.vqs);
	kvfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char buf[MAX_ADDR_LEN];
	} uaddr;
	int r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
	if (r < 0)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

static struct ptr_ring *get_tap_ptr_ring(int fd)
{
	struct ptr_ring *ring;
	struct file *file = fget(fd);

	if (!file)
		return NULL;
	ring = tun_get_tx_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = tap_get_ptr_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = NULL;
out:
	fput(file);
	return ring;
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}
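
/*
 * Note (illustrative): the backend fd handed to VHOST_NET_SET_BACKEND is
 * probed in order: first as a raw AF_PACKET socket, then as a tun fd,
 * then as a tap fd; anything else yields -ENOTSOCK. Only tun/tap backends
 * additionally expose a ptr_ring, which is why rx_ring is set from
 * get_tap_ptr_ring() below and stays NULL for raw sockets.
 */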

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vq->private_data;
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vq->private_data = sock;
		vhost_net_buf_unproduce(nvq);
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;
		if (index == VHOST_NET_VQ_RX)
			nvq->rx_ring = get_tap_ptr_ring(fd);

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vq->private_data = oldsock;
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_umem *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			       (1ULL << VIRTIO_F_VERSION_1))) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
		if (vhost_init_device_iotlb(&n->dev, true))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_features = features;
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}
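
/*
 * Worked example (illustrative): struct virtio_net_hdr is 10 bytes and
 * struct virtio_net_hdr_mrg_rxbuf adds the 2-byte num_buffers field, for
 * 12 bytes total. A guest negotiating VIRTIO_NET_F_MRG_RXBUF together
 * with VHOST_NET_F_VIRTIO_NET_HDR therefore gets vhost_hlen == 12 and
 * sock_hlen == 0: vhost itself supplies and strips the header. Without
 * VHOST_NET_F_VIRTIO_NET_HDR the backend socket produces the header, so
 * the same 12 bytes move to sock_hlen instead.
 */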

static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}
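
/*
 * Usage sketch (illustrative, userspace side, error handling omitted):
 * a minimal client sets up one vhost-net device and attaches a tap fd as
 * the TX backend. tap_fd here is a stand-in for whatever backend fd the
 * caller already opened:
 *
 *	int vhost = open("/dev/vhost-net", O_RDWR);
 *	struct vhost_vring_file backend = {
 *		.index = VHOST_NET_VQ_TX,
 *		.fd = tap_fd,
 *	};
 *	ioctl(vhost, VHOST_SET_OWNER, NULL);
 *	ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);
 *
 * Ring setup (VHOST_SET_VRING_NUM/ADDR/KICK/CALL) goes through the
 * default branch above into vhost_vring_ioctl() and must happen before
 * the backend attach, since vhost_net_set_backend() checks
 * vhost_vq_access_ok() first.
 */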

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_net_fops = {
	.owner = THIS_MODULE,
	.release = vhost_net_release,
	.read_iter = vhost_net_chr_read_iter,
	.write_iter = vhost_net_chr_write_iter,
	.poll = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vhost_net_compat_ioctl,
#endif
	.open = vhost_net_open,
	.llseek = noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");