1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2009, Microsoft Corporation.
4 *
5 * Authors:
6 * Haiyang Zhang <haiyangz@microsoft.com>
7 * Hank Janssen <hjanssen@microsoft.com>
8 */
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/wait.h>
14#include <linux/mm.h>
15#include <linux/delay.h>
16#include <linux/io.h>
17#include <linux/slab.h>
18#include <linux/netdevice.h>
19#include <linux/if_ether.h>
20#include <linux/vmalloc.h>
21#include <linux/rtnetlink.h>
22#include <linux/prefetch.h>
23#include <linux/filter.h>
24
25#include <asm/sync_bitops.h>
26#include <asm/mshyperv.h>
27
28#include "hyperv_net.h"
29#include "netvsc_trace.h"
30
31/*
32 * Switch the data path from the synthetic interface to the VF
33 * interface.
34 */
35int netvsc_switch_datapath(struct net_device *ndev, bool vf)
36{
37 struct net_device_context *net_device_ctx = netdev_priv(ndev);
38 struct hv_device *dev = net_device_ctx->device_ctx;
39 struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
40 struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
41 int ret, retry = 0;
42
43 /* Block sending traffic to VF if it's about to be gone */
44 if (!vf)
45 net_device_ctx->data_path_is_vf = vf;
46
47 memset(init_pkt, 0, sizeof(struct nvsp_message));
48 init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
49 if (vf)
50 init_pkt->msg.v4_msg.active_dp.active_datapath =
51 NVSP_DATAPATH_VF;
52 else
53 init_pkt->msg.v4_msg.active_dp.active_datapath =
54 NVSP_DATAPATH_SYNTHETIC;
55
56again:
57 trace_nvsp_send(ndev, init_pkt);
58
59 ret = vmbus_sendpacket(dev->channel, init_pkt,
60 sizeof(struct nvsp_message),
61 (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
62 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
63
64	/* If we failed to switch to/from the VF, leave data_path_is_vf false
65	 * so that the synthetic path is used to send data.
66	 */
67 if (ret) {
68 if (ret != -EAGAIN) {
69 netdev_err(ndev,
70 "Unable to send sw datapath msg, err: %d\n",
71 ret);
72 return ret;
73 }
74
75 if (retry++ < RETRY_MAX) {
76 usleep_range(RETRY_US_LO, RETRY_US_HI);
77 goto again;
78 } else {
79 netdev_err(
80 ndev,
81 "Retry failed to send sw datapath msg, err: %d\n",
82 ret);
83 return ret;
84 }
85 }
86
87 wait_for_completion(&nv_dev->channel_init_wait);
88 net_device_ctx->data_path_is_vf = vf;
89
90 return 0;
91}
92
93/* Worker to set up sub-channels during initial setup.
94 * The initial hotplug event occurs in softirq context
95 * and cannot wait for the channels.
96 */
97static void netvsc_subchan_work(struct work_struct *w)
98{
99 struct netvsc_device *nvdev =
100 container_of(w, struct netvsc_device, subchan_work);
101 struct rndis_device *rdev;
102 int i, ret;
103
104 /* Avoid deadlock with device removal already under RTNL */
105 if (!rtnl_trylock()) {
106 schedule_work(w);
107 return;
108 }
109
110 rdev = nvdev->extension;
111 if (rdev) {
112 ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
113 if (ret == 0) {
114 netif_device_attach(rdev->ndev);
115 } else {
116 /* fallback to only primary channel */
117 for (i = 1; i < nvdev->num_chn; i++)
118 netif_napi_del(&nvdev->chan_table[i].napi);
119
120 nvdev->max_chn = 1;
121 nvdev->num_chn = 1;
122 }
123 }
124
125 rtnl_unlock();
126}
127
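/* Allocate and initialize a netvsc_device: drain wait queue, channel init
 * completion, sub-channel open wait queue and the sub-channel setup work.
 */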
128static struct netvsc_device *alloc_net_device(void)
129{
130 struct netvsc_device *net_device;
131
132 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
133 if (!net_device)
134 return NULL;
135
136 init_waitqueue_head(&net_device->wait_drain);
137 net_device->destroy = false;
138 net_device->tx_disable = true;
139
140 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
141 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
142
143 init_completion(&net_device->channel_init_wait);
144 init_waitqueue_head(&net_device->subchan_open);
145 INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
146
147 return net_device;
148}
149
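/* RCU callback that releases the RNDIS extension, the receive and send
 * buffers (unless still marked decrypted), the send section bitmap,
 * per-channel XDP/receive state and the device itself.
 */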
150static void free_netvsc_device(struct rcu_head *head)
151{
152 struct netvsc_device *nvdev
153 = container_of(head, struct netvsc_device, rcu);
154 int i;
155
156 kfree(nvdev->extension);
157
158 if (!nvdev->recv_buf_gpadl_handle.decrypted)
159 vfree(nvdev->recv_buf);
160 if (!nvdev->send_buf_gpadl_handle.decrypted)
161 vfree(nvdev->send_buf);
162 bitmap_free(nvdev->send_section_map);
163
164 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
165 xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
166 kfree(nvdev->chan_table[i].recv_buf);
167 vfree(nvdev->chan_table[i].mrc.slots);
168 }
169
170 kfree(nvdev);
171}
172
173static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
174{
175 call_rcu(&nvdev->rcu, free_netvsc_device);
176}
177
178static void netvsc_revoke_recv_buf(struct hv_device *device,
179 struct netvsc_device *net_device,
180 struct net_device *ndev)
181{
182 struct nvsp_message *revoke_packet;
183 int ret;
184
185	/*
186	 * If we got a section count, it means we received a
187	 * SendReceiveBufferComplete msg (i.e. we sent a
188	 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
189	 * to send a revoke msg here.
190	 */
191 if (net_device->recv_section_cnt) {
192 /* Send the revoke receive buffer */
193 revoke_packet = &net_device->revoke_packet;
194 memset(revoke_packet, 0, sizeof(struct nvsp_message));
195
196 revoke_packet->hdr.msg_type =
197 NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
198 revoke_packet->msg.v1_msg.
199 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
200
201 trace_nvsp_send(ndev, revoke_packet);
202
203 ret = vmbus_sendpacket(device->channel,
204 revoke_packet,
205 sizeof(struct nvsp_message),
206 VMBUS_RQST_ID_NO_RESPONSE,
207 VM_PKT_DATA_INBAND, 0);
208		/* If the failure is because the channel is rescinded,
209		 * ignore the failure since we cannot send on a rescinded
210		 * channel. This allows us to properly clean up
211		 * even when the channel is rescinded.
212		 */
213 if (device->channel->rescind)
214 ret = 0;
215 /*
216 * If we failed here, we might as well return and
217		 * have a leak rather than continue and risk a bugcheck.
218 */
219 if (ret != 0) {
220 netdev_err(ndev, "unable to send "
221 "revoke receive buffer to netvsp\n");
222 return;
223 }
224 net_device->recv_section_cnt = 0;
225 }
226}
227
228static void netvsc_revoke_send_buf(struct hv_device *device,
229 struct netvsc_device *net_device,
230 struct net_device *ndev)
231{
232 struct nvsp_message *revoke_packet;
233 int ret;
234
235	/* Deal with the send buffer we may have set up.
236	 * If we got a send section size, it means we received a
237	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
238	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
239	 * to send a revoke msg here.
240	 */
241 if (net_device->send_section_cnt) {
242		/* Send the revoke send buffer */
243 revoke_packet = &net_device->revoke_packet;
244 memset(revoke_packet, 0, sizeof(struct nvsp_message));
245
246 revoke_packet->hdr.msg_type =
247 NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
248 revoke_packet->msg.v1_msg.revoke_send_buf.id =
249 NETVSC_SEND_BUFFER_ID;
250
251 trace_nvsp_send(ndev, revoke_packet);
252
253 ret = vmbus_sendpacket(device->channel,
254 revoke_packet,
255 sizeof(struct nvsp_message),
256 VMBUS_RQST_ID_NO_RESPONSE,
257 VM_PKT_DATA_INBAND, 0);
258
259		/* If the failure is because the channel is rescinded,
260		 * ignore the failure since we cannot send on a rescinded
261		 * channel. This allows us to properly clean up
262		 * even when the channel is rescinded.
263		 */
264 if (device->channel->rescind)
265 ret = 0;
266
267 /* If we failed here, we might as well return and
268		 * have a leak rather than continue and risk a bugcheck.
269 */
270 if (ret != 0) {
271 netdev_err(ndev, "unable to send "
272 "revoke send buffer to netvsp\n");
273 return;
274 }
275 net_device->send_section_cnt = 0;
276 }
277}
278
279static void netvsc_teardown_recv_gpadl(struct hv_device *device,
280 struct netvsc_device *net_device,
281 struct net_device *ndev)
282{
283 int ret;
284
285 if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
286 ret = vmbus_teardown_gpadl(device->channel,
287 &net_device->recv_buf_gpadl_handle);
288
289 /* If we failed here, we might as well return and have a leak
290		 * rather than continue and risk a bugcheck.
291 */
292 if (ret != 0) {
293 netdev_err(ndev,
294 "unable to teardown receive buffer's gpadl\n");
295 return;
296 }
297 }
298}
299
300static void netvsc_teardown_send_gpadl(struct hv_device *device,
301 struct netvsc_device *net_device,
302 struct net_device *ndev)
303{
304 int ret;
305
306 if (net_device->send_buf_gpadl_handle.gpadl_handle) {
307 ret = vmbus_teardown_gpadl(device->channel,
308 &net_device->send_buf_gpadl_handle);
309
310 /* If we failed here, we might as well return and have a leak
311		 * rather than continue and risk a bugcheck.
312 */
313 if (ret != 0) {
314 netdev_err(ndev,
315 "unable to teardown send buffer's gpadl\n");
316 return;
317 }
318 }
319}
320
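/* Allocate the receive completion slot ring for a queue, preferring memory
 * on the NUMA node of the channel's target CPU and falling back to any node.
 */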
321int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
322{
323 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
324 int node = cpu_to_node(nvchan->channel->target_cpu);
325 size_t size;
326
327 size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
328 nvchan->mrc.slots = vzalloc_node(size, node);
329 if (!nvchan->mrc.slots)
330 nvchan->mrc.slots = vzalloc(size);
331
332 return nvchan->mrc.slots ? 0 : -ENOMEM;
333}
334
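/* Allocate the receive and send buffers, establish their GPADLs on the
 * primary channel, notify the NetVSP of each handle and parse the responses
 * to set up section sizes, per-channel staging buffers, the receive
 * completion ring and the send section bitmap. On failure the buffers are
 * revoked and the GPADLs torn down.
 */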
335static int netvsc_init_buf(struct hv_device *device,
336 struct netvsc_device *net_device,
337 const struct netvsc_device_info *device_info)
338{
339 struct nvsp_1_message_send_receive_buffer_complete *resp;
340 struct net_device *ndev = hv_get_drvdata(device);
341 struct nvsp_message *init_packet;
342 unsigned int buf_size;
343 int i, ret = 0;
344
345 /* Get receive buffer area. */
346 buf_size = device_info->recv_sections * device_info->recv_section_size;
347 buf_size = roundup(buf_size, PAGE_SIZE);
348
349 /* Legacy hosts only allow smaller receive buffer */
350 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
351 buf_size = min_t(unsigned int, buf_size,
352 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
353
354 net_device->recv_buf = vzalloc(buf_size);
355 if (!net_device->recv_buf) {
356 netdev_err(ndev,
357 "unable to allocate receive buffer of size %u\n",
358 buf_size);
359 ret = -ENOMEM;
360 goto cleanup;
361 }
362
363 net_device->recv_buf_size = buf_size;
364
365 /*
366 * Establish the gpadl handle for this buffer on this
367 * channel. Note: This call uses the vmbus connection rather
368 * than the channel to establish the gpadl handle.
369 */
370 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
371 buf_size,
372 &net_device->recv_buf_gpadl_handle);
373 if (ret != 0) {
374 netdev_err(ndev,
375 "unable to establish receive buffer's gpadl\n");
376 goto cleanup;
377 }
378
379 /* Notify the NetVsp of the gpadl handle */
380 init_packet = &net_device->channel_init_pkt;
381 memset(init_packet, 0, sizeof(struct nvsp_message));
382 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
383 init_packet->msg.v1_msg.send_recv_buf.
384 gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
385 init_packet->msg.v1_msg.
386 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
387
388 trace_nvsp_send(ndev, init_packet);
389
390 /* Send the gpadl notification request */
391 ret = vmbus_sendpacket(device->channel, init_packet,
392 sizeof(struct nvsp_message),
393 (unsigned long)init_packet,
394 VM_PKT_DATA_INBAND,
395 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
396 if (ret != 0) {
397 netdev_err(ndev,
398 "unable to send receive buffer's gpadl to netvsp\n");
399 goto cleanup;
400 }
401
402 wait_for_completion(&net_device->channel_init_wait);
403
404 /* Check the response */
405 resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
406 if (resp->status != NVSP_STAT_SUCCESS) {
407 netdev_err(ndev,
408 "Unable to complete receive buffer initialization with NetVsp - status %d\n",
409 resp->status);
410 ret = -EINVAL;
411 goto cleanup;
412 }
413
414 /* Parse the response */
415 netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
416 resp->num_sections, resp->sections[0].sub_alloc_size,
417 resp->sections[0].num_sub_allocs);
418
419 /* There should only be one section for the entire receive buffer */
420 if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
421 ret = -EINVAL;
422 goto cleanup;
423 }
424
425 net_device->recv_section_size = resp->sections[0].sub_alloc_size;
426 net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
427
428 /* Ensure buffer will not overflow */
429 if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
430 (u64)net_device->recv_section_cnt > (u64)buf_size) {
431 netdev_err(ndev, "invalid recv_section_size %u\n",
432 net_device->recv_section_size);
433 ret = -EINVAL;
434 goto cleanup;
435 }
436
437 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
438 struct netvsc_channel *nvchan = &net_device->chan_table[i];
439
440 nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
441 if (nvchan->recv_buf == NULL) {
442 ret = -ENOMEM;
443 goto cleanup;
444 }
445 }
446
447 /* Setup receive completion ring.
448 * Add 1 to the recv_section_cnt because at least one entry in a
449 * ring buffer has to be empty.
450 */
451 net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
452 ret = netvsc_alloc_recv_comp_ring(net_device, 0);
453 if (ret)
454 goto cleanup;
455
456 /* Now setup the send buffer. */
457 buf_size = device_info->send_sections * device_info->send_section_size;
458 buf_size = round_up(buf_size, PAGE_SIZE);
459
460 net_device->send_buf = vzalloc(buf_size);
461 if (!net_device->send_buf) {
462 netdev_err(ndev, "unable to allocate send buffer of size %u\n",
463 buf_size);
464 ret = -ENOMEM;
465 goto cleanup;
466 }
467 net_device->send_buf_size = buf_size;
468
469 /* Establish the gpadl handle for this buffer on this
470 * channel. Note: This call uses the vmbus connection rather
471 * than the channel to establish the gpadl handle.
472 */
473 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
474 buf_size,
475 &net_device->send_buf_gpadl_handle);
476 if (ret != 0) {
477 netdev_err(ndev,
478 "unable to establish send buffer's gpadl\n");
479 goto cleanup;
480 }
481
482 /* Notify the NetVsp of the gpadl handle */
483 init_packet = &net_device->channel_init_pkt;
484 memset(init_packet, 0, sizeof(struct nvsp_message));
485 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
486 init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
487 net_device->send_buf_gpadl_handle.gpadl_handle;
488 init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
489
490 trace_nvsp_send(ndev, init_packet);
491
492 /* Send the gpadl notification request */
493 ret = vmbus_sendpacket(device->channel, init_packet,
494 sizeof(struct nvsp_message),
495 (unsigned long)init_packet,
496 VM_PKT_DATA_INBAND,
497 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
498 if (ret != 0) {
499 netdev_err(ndev,
500 "unable to send send buffer's gpadl to netvsp\n");
501 goto cleanup;
502 }
503
504 wait_for_completion(&net_device->channel_init_wait);
505
506 /* Check the response */
507 if (init_packet->msg.v1_msg.
508 send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
509 netdev_err(ndev, "Unable to complete send buffer "
510 "initialization with NetVsp - status %d\n",
511 init_packet->msg.v1_msg.
512 send_send_buf_complete.status);
513 ret = -EINVAL;
514 goto cleanup;
515 }
516
517 /* Parse the response */
518 net_device->send_section_size = init_packet->msg.
519 v1_msg.send_send_buf_complete.section_size;
520 if (net_device->send_section_size < NETVSC_MTU_MIN) {
521 netdev_err(ndev, "invalid send_section_size %u\n",
522 net_device->send_section_size);
523 ret = -EINVAL;
524 goto cleanup;
525 }
526
527 /* Section count is simply the size divided by the section size. */
528 net_device->send_section_cnt = buf_size / net_device->send_section_size;
529
530 netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
531 net_device->send_section_size, net_device->send_section_cnt);
532
533 /* Setup state for managing the send buffer. */
534 net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
535 GFP_KERNEL);
536 if (!net_device->send_section_map) {
537 ret = -ENOMEM;
538 goto cleanup;
539 }
540
541 goto exit;
542
543cleanup:
544 netvsc_revoke_recv_buf(device, net_device, ndev);
545 netvsc_revoke_send_buf(device, net_device, ndev);
546 netvsc_teardown_recv_gpadl(device, net_device, ndev);
547 netvsc_teardown_send_gpadl(device, net_device, ndev);
548
549exit:
550 return ret;
551}
552
553/* Negotiate NVSP protocol version */
554static int negotiate_nvsp_ver(struct hv_device *device,
555 struct netvsc_device *net_device,
556 struct nvsp_message *init_packet,
557 u32 nvsp_ver)
558{
559 struct net_device *ndev = hv_get_drvdata(device);
560 int ret;
561
562 memset(init_packet, 0, sizeof(struct nvsp_message));
563 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
564 init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
565 init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
566 trace_nvsp_send(ndev, init_packet);
567
568 /* Send the init request */
569 ret = vmbus_sendpacket(device->channel, init_packet,
570 sizeof(struct nvsp_message),
571 (unsigned long)init_packet,
572 VM_PKT_DATA_INBAND,
573 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
574
575 if (ret != 0)
576 return ret;
577
578 wait_for_completion(&net_device->channel_init_wait);
579
580 if (init_packet->msg.init_msg.init_complete.status !=
581 NVSP_STAT_SUCCESS)
582 return -EINVAL;
583
584 if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
585 return 0;
586
587 /* NVSPv2 or later: Send NDIS config */
588 memset(init_packet, 0, sizeof(struct nvsp_message));
589 init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
590 init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
591 init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
592
593 if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
594 if (hv_is_isolation_supported())
595 netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
596 else
597 init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
598
599 /* Teaming bit is needed to receive link speed updates */
600 init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
601 }
602
603 if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
604 init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
605
606 trace_nvsp_send(ndev, init_packet);
607
608 ret = vmbus_sendpacket(device->channel, init_packet,
609 sizeof(struct nvsp_message),
610 VMBUS_RQST_ID_NO_RESPONSE,
611 VM_PKT_DATA_INBAND, 0);
612
613 return ret;
614}
615
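/* Negotiate the highest NVSP protocol version supported by both ends,
 * send the NDIS version to the host and then set up the receive and send
 * buffers.
 */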
616static int netvsc_connect_vsp(struct hv_device *device,
617 struct netvsc_device *net_device,
618 const struct netvsc_device_info *device_info)
619{
620 struct net_device *ndev = hv_get_drvdata(device);
621 static const u32 ver_list[] = {
622 NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
623 NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
624 NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
625 };
626 struct nvsp_message *init_packet;
627 int ndis_version, i, ret;
628
629 init_packet = &net_device->channel_init_pkt;
630
631 /* Negotiate the latest NVSP protocol supported */
632 for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
633 if (negotiate_nvsp_ver(device, net_device, init_packet,
634 ver_list[i]) == 0) {
635 net_device->nvsp_version = ver_list[i];
636 break;
637 }
638
639 if (i < 0) {
640 ret = -EPROTO;
641 goto cleanup;
642 }
643
644 if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
645 netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
646 net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
647 ret = -EPROTO;
648 goto cleanup;
649 }
650
651 pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
652
653 /* Send the ndis version */
654 memset(init_packet, 0, sizeof(struct nvsp_message));
655
656 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
657 ndis_version = 0x00060001;
658 else
659 ndis_version = 0x0006001e;
660
661 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
662 init_packet->msg.v1_msg.
663 send_ndis_ver.ndis_major_ver =
664 (ndis_version & 0xFFFF0000) >> 16;
665 init_packet->msg.v1_msg.
666 send_ndis_ver.ndis_minor_ver =
667 ndis_version & 0xFFFF;
668
669 trace_nvsp_send(ndev, init_packet);
670
671 /* Send the init request */
672 ret = vmbus_sendpacket(device->channel, init_packet,
673 sizeof(struct nvsp_message),
674 VMBUS_RQST_ID_NO_RESPONSE,
675 VM_PKT_DATA_INBAND, 0);
676 if (ret != 0)
677 goto cleanup;
678
679
680 ret = netvsc_init_buf(device, net_device, device_info);
681
682cleanup:
683 return ret;
684}
685
686/*
687 * netvsc_device_remove - Callback when the root bus device is removed
688 */
689void netvsc_device_remove(struct hv_device *device)
690{
691 struct net_device *ndev = hv_get_drvdata(device);
692 struct net_device_context *net_device_ctx = netdev_priv(ndev);
693 struct netvsc_device *net_device
694 = rtnl_dereference(net_device_ctx->nvdev);
695 int i;
696
697 /*
698 * Revoke receive buffer. If host is pre-Win2016 then tear down
699 * receive buffer GPADL. Do the same for send buffer.
700 */
701 netvsc_revoke_recv_buf(device, net_device, ndev);
702 if (vmbus_proto_version < VERSION_WIN10)
703 netvsc_teardown_recv_gpadl(device, net_device, ndev);
704
705 netvsc_revoke_send_buf(device, net_device, ndev);
706 if (vmbus_proto_version < VERSION_WIN10)
707 netvsc_teardown_send_gpadl(device, net_device, ndev);
708
709 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
710
711 /* Disable NAPI and disassociate its context from the device. */
712 for (i = 0; i < net_device->num_chn; i++) {
713 /* See also vmbus_reset_channel_cb(). */
714 /* only disable enabled NAPI channel */
715 if (i < ndev->real_num_rx_queues) {
716 netif_queue_set_napi(ndev, i, NETDEV_QUEUE_TYPE_TX,
717 NULL);
718 netif_queue_set_napi(ndev, i, NETDEV_QUEUE_TYPE_RX,
719 NULL);
720 napi_disable(&net_device->chan_table[i].napi);
721 }
722
723 netif_napi_del(&net_device->chan_table[i].napi);
724 }
725
726 /*
727 * At this point, no one should be accessing net_device
728 * except in here
729 */
730 netdev_dbg(ndev, "net device safe to remove\n");
731
732 /* Now, we can close the channel safely */
733 vmbus_close(device->channel);
734
735 /*
736 * If host is Win2016 or higher then we do the GPADL tear down
737 * here after VMBus is closed.
738 */
739 if (vmbus_proto_version >= VERSION_WIN10) {
740 netvsc_teardown_recv_gpadl(device, net_device, ndev);
741 netvsc_teardown_send_gpadl(device, net_device, ndev);
742 }
743
744 /* Release all resources */
745 free_netvsc_device_rcu(net_device);
746}
747
748#define RING_AVAIL_PERCENT_HIWATER 20
749#define RING_AVAIL_PERCENT_LOWATER 10
750
751static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
752 u32 index)
753{
754 sync_change_bit(index, net_device->send_section_map);
755}
756
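/* Completion handler for an RNDIS data packet: release the send buffer
 * section (if any), update per-queue transmit statistics, unmap any DMA
 * ranges, free the skb and wake the transmit queue when ring space allows.
 */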
757static void netvsc_send_tx_complete(struct net_device *ndev,
758 struct netvsc_device *net_device,
759 struct vmbus_channel *channel,
760 const struct vmpacket_descriptor *desc,
761 int budget)
762{
763 struct net_device_context *ndev_ctx = netdev_priv(ndev);
764 struct sk_buff *skb;
765 u16 q_idx = 0;
766 int queue_sends;
767 u64 cmd_rqst;
768
769 cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
770 if (cmd_rqst == VMBUS_RQST_ERROR) {
771 netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
772 return;
773 }
774
775 skb = (struct sk_buff *)(unsigned long)cmd_rqst;
776
777 /* Notify the layer above us */
778 if (likely(skb)) {
779 struct hv_netvsc_packet *packet
780 = (struct hv_netvsc_packet *)skb->cb;
781 u32 send_index = packet->send_buf_index;
782 struct netvsc_stats_tx *tx_stats;
783
784 if (send_index != NETVSC_INVALID_INDEX)
785 netvsc_free_send_slot(net_device, send_index);
786 q_idx = packet->q_idx;
787
788 tx_stats = &net_device->chan_table[q_idx].tx_stats;
789
790 u64_stats_update_begin(&tx_stats->syncp);
791 tx_stats->packets += packet->total_packets;
792 tx_stats->bytes += packet->total_bytes;
793 u64_stats_update_end(&tx_stats->syncp);
794
795 netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
796 napi_consume_skb(skb, budget);
797 }
798
799 queue_sends =
800 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
801
802 if (unlikely(net_device->destroy)) {
803 if (queue_sends == 0)
804 wake_up(&net_device->wait_drain);
805 } else {
806 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
807
808 if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
809 (hv_get_avail_to_write_percent(&channel->outbound) >
810 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
811 netif_tx_wake_queue(txq);
812 ndev_ctx->eth_stats.wake_queue++;
813 }
814 }
815}
816
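/* Dispatch a completion packet from the host: handle data-less completions
 * (e.g. switch data path), validate the message length for each completion
 * type, hand RNDIS packet completions to netvsc_send_tx_complete(), and for
 * the remaining types copy the response and complete channel_init_wait.
 */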
817static void netvsc_send_completion(struct net_device *ndev,
818 struct netvsc_device *net_device,
819 struct vmbus_channel *incoming_channel,
820 const struct vmpacket_descriptor *desc,
821 int budget)
822{
823 const struct nvsp_message *nvsp_packet;
824 u32 msglen = hv_pkt_datalen(desc);
825 struct nvsp_message *pkt_rqst;
826 u64 cmd_rqst;
827 u32 status;
828
829 /* First check if this is a VMBUS completion without data payload */
830 if (!msglen) {
831 cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
832 desc->trans_id);
833 if (cmd_rqst == VMBUS_RQST_ERROR) {
834 netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
835 return;
836 }
837
838 pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
839 switch (pkt_rqst->hdr.msg_type) {
840 case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
841 complete(&net_device->channel_init_wait);
842 break;
843
844 default:
845 netdev_err(ndev, "Unexpected VMBUS completion!!\n");
846 }
847 return;
848 }
849
850 /* Ensure packet is big enough to read header fields */
851 if (msglen < sizeof(struct nvsp_message_header)) {
852 netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
853 return;
854 }
855
856 nvsp_packet = hv_pkt_data(desc);
857 switch (nvsp_packet->hdr.msg_type) {
858 case NVSP_MSG_TYPE_INIT_COMPLETE:
859 if (msglen < sizeof(struct nvsp_message_header) +
860 sizeof(struct nvsp_message_init_complete)) {
861 netdev_err(ndev, "nvsp_msg length too small: %u\n",
862 msglen);
863 return;
864 }
865 break;
866
867 case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
868 if (msglen < sizeof(struct nvsp_message_header) +
869 sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
870 netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
871 msglen);
872 return;
873 }
874 break;
875
876 case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
877 if (msglen < sizeof(struct nvsp_message_header) +
878 sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
879 netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
880 msglen);
881 return;
882 }
883 break;
884
885 case NVSP_MSG5_TYPE_SUBCHANNEL:
886 if (msglen < sizeof(struct nvsp_message_header) +
887 sizeof(struct nvsp_5_subchannel_complete)) {
888 netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
889 msglen);
890 return;
891 }
892 break;
893
894 case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
895 if (msglen < sizeof(struct nvsp_message_header) +
896 sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
897 if (net_ratelimit())
898 netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
899 msglen);
900 return;
901 }
902
903 /* If status indicates an error, output a message so we know
904 * there's a problem. But process the completion anyway so the
905 * resources are released.
906 */
907 status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
908 if (status != NVSP_STAT_SUCCESS && net_ratelimit())
909 netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
910 status);
911
912 netvsc_send_tx_complete(ndev, net_device, incoming_channel,
913 desc, budget);
914 return;
915
916 default:
917 netdev_err(ndev,
918 "Unknown send completion type %d received!!\n",
919 nvsp_packet->hdr.msg_type);
920 return;
921 }
922
923 /* Copy the response back */
924 memcpy(&net_device->channel_init_pkt, nvsp_packet,
925 sizeof(struct nvsp_message));
926 complete(&net_device->channel_init_wait);
927}
928
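/* Find and atomically claim a free send buffer section; returns
 * NETVSC_INVALID_INDEX when all sections are in use.
 */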
929static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
930{
931 unsigned long *map_addr = net_device->send_section_map;
932 unsigned int i;
933
934 for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
935 if (sync_test_and_set_bit(i, map_addr) == 0)
936 return i;
937 }
938
939 return NETVSC_INVALID_INDEX;
940}
941
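/* Copy the packet's page buffers into the claimed send buffer section at
 * the given pending offset, padding the tail to pkt_align when more packets
 * are expected (xmit_more).
 */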
942static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
943 unsigned int section_index,
944 u32 pend_size,
945 struct hv_netvsc_packet *packet,
946 struct rndis_message *rndis_msg,
947 struct hv_page_buffer *pb,
948 bool xmit_more)
949{
950 char *start = net_device->send_buf;
951 char *dest = start + (section_index * net_device->send_section_size)
952 + pend_size;
953 int i;
954 u32 padding = 0;
955 u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
956 packet->page_buf_cnt;
957 u32 remain;
958
959 /* Add padding */
960 remain = packet->total_data_buflen & (net_device->pkt_align - 1);
961 if (xmit_more && remain) {
962 padding = net_device->pkt_align - remain;
963 rndis_msg->msg_len += padding;
964 packet->total_data_buflen += padding;
965 }
966
967 for (i = 0; i < page_count; i++) {
968 char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
969 u32 offset = pb[i].offset;
970 u32 len = pb[i].len;
971
972 memcpy(dest, (src + offset), len);
973 dest += len;
974 }
975
976 if (padding)
977 memset(dest, 0, padding);
978}
979
980void netvsc_dma_unmap(struct hv_device *hv_dev,
981 struct hv_netvsc_packet *packet)
982{
983 int i;
984
985 if (!hv_is_isolation_supported())
986 return;
987
988 if (!packet->dma_range)
989 return;
990
991 for (i = 0; i < packet->page_buf_cnt; i++)
992 dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
993 packet->dma_range[i].mapping_size,
994 DMA_TO_DEVICE);
995
996 kfree(packet->dma_range);
997}
998
999/* netvsc_dma_map - Map swiotlb bounce buffer with data page of
1000 * packet sent by vmbus_sendpacket_pagebuffer() in the Isolation
1001 * VM.
1002 *
1003 * In isolation VM, netvsc send buffer has been marked visible to
1004 * host and so the data copied to send buffer doesn't need to use
1005 * bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer()
1006 * may not be copied to send buffer and so these pages need to be
1007 * mapped with swiotlb bounce buffer. netvsc_dma_map() is to do
1008 * that. The pfns in the struct hv_page_buffer need to be converted
1009 * to bounce buffer's pfn. The loop here is necessary because the
1010 * entries in the page buffer array are not necessarily full
1011 * pages of data. Each entry in the array has a separate offset and
1012 * len that may be non-zero, even for entries in the middle of the
1013 * array. And the entries are not physically contiguous. So each
1014 * entry must be individually mapped rather than as a contiguous unit.
1015 * So not use dma_map_sg() here.
1016 */
1017static int netvsc_dma_map(struct hv_device *hv_dev,
1018 struct hv_netvsc_packet *packet,
1019 struct hv_page_buffer *pb)
1020{
1021 u32 page_count = packet->page_buf_cnt;
1022 dma_addr_t dma;
1023 int i;
1024
1025 if (!hv_is_isolation_supported())
1026 return 0;
1027
1028 packet->dma_range = kcalloc(page_count,
1029 sizeof(*packet->dma_range),
1030 GFP_ATOMIC);
1031 if (!packet->dma_range)
1032 return -ENOMEM;
1033
1034 for (i = 0; i < page_count; i++) {
1035 char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
1036 + pb[i].offset);
1037 u32 len = pb[i].len;
1038
1039 dma = dma_map_single(&hv_dev->device, src, len,
1040 DMA_TO_DEVICE);
1041 if (dma_mapping_error(&hv_dev->device, dma)) {
1042 kfree(packet->dma_range);
1043 return -ENOMEM;
1044 }
1045
1046 /* pb[].offset and pb[].len are not changed during dma mapping
1047		 * and so need not be reassigned.
1048 */
1049 packet->dma_range[i].dma = dma;
1050 packet->dma_range[i].mapping_size = len;
1051 pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
1052 }
1053
1054 return 0;
1055}
1056
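/* Build the NVSP SEND_RNDIS_PKT message for one packet and send it on the
 * channel, either with page buffers (DMA-mapped in isolation VMs) or as a
 * plain inband packet, and apply transmit queue flow control based on the
 * available ring space.
 */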
1057static inline int netvsc_send_pkt(
1058 struct hv_device *device,
1059 struct hv_netvsc_packet *packet,
1060 struct netvsc_device *net_device,
1061 struct hv_page_buffer *pb,
1062 struct sk_buff *skb)
1063{
1064 struct nvsp_message nvmsg;
1065 struct nvsp_1_message_send_rndis_packet *rpkt =
1066 &nvmsg.msg.v1_msg.send_rndis_pkt;
1067 struct netvsc_channel * const nvchan =
1068 &net_device->chan_table[packet->q_idx];
1069 struct vmbus_channel *out_channel = nvchan->channel;
1070 struct net_device *ndev = hv_get_drvdata(device);
1071 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1072 struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
1073 u64 req_id;
1074 int ret;
1075 u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
1076
1077 memset(&nvmsg, 0, sizeof(struct nvsp_message));
1078 nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
1079 if (skb)
1080 rpkt->channel_type = 0; /* 0 is RMC_DATA */
1081 else
1082 rpkt->channel_type = 1; /* 1 is RMC_CONTROL */
1083
1084 rpkt->send_buf_section_index = packet->send_buf_index;
1085 if (packet->send_buf_index == NETVSC_INVALID_INDEX)
1086 rpkt->send_buf_section_size = 0;
1087 else
1088 rpkt->send_buf_section_size = packet->total_data_buflen;
1089
1090 req_id = (ulong)skb;
1091
1092 if (out_channel->rescind)
1093 return -ENODEV;
1094
1095 trace_nvsp_send_pkt(ndev, out_channel, rpkt);
1096
1097 packet->dma_range = NULL;
1098 if (packet->page_buf_cnt) {
1099 if (packet->cp_partial)
1100 pb += packet->rmsg_pgcnt;
1101
1102 ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
1103 if (ret) {
1104 ret = -EAGAIN;
1105 goto exit;
1106 }
1107
1108 ret = vmbus_sendpacket_pagebuffer(out_channel,
1109 pb, packet->page_buf_cnt,
1110 &nvmsg, sizeof(nvmsg),
1111 req_id);
1112
1113 if (ret)
1114 netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
1115 } else {
1116 ret = vmbus_sendpacket(out_channel,
1117 &nvmsg, sizeof(nvmsg),
1118 req_id, VM_PKT_DATA_INBAND,
1119 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1120 }
1121
1122exit:
1123 if (ret == 0) {
1124 atomic_inc_return(&nvchan->queue_sends);
1125
1126 if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
1127 netif_tx_stop_queue(txq);
1128 ndev_ctx->eth_stats.stop_queue++;
1129 }
1130 } else if (ret == -EAGAIN) {
1131 netif_tx_stop_queue(txq);
1132 ndev_ctx->eth_stats.stop_queue++;
1133 } else {
1134 netdev_err(ndev,
1135 "Unable to send packet pages %u len %u, ret %d\n",
1136 packet->page_buf_cnt, packet->total_data_buflen,
1137 ret);
1138 }
1139
1140 if (netif_tx_queue_stopped(txq) &&
1141 atomic_read(&nvchan->queue_sends) < 1 &&
1142 !net_device->tx_disable) {
1143 netif_tx_wake_queue(txq);
1144 ndev_ctx->eth_stats.wake_queue++;
1145 if (ret == -EAGAIN)
1146 ret = -ENOSPC;
1147 }
1148
1149 return ret;
1150}
1151
1152/* Move packet out of multi send data (msd), and clear msd */
1153static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
1154 struct sk_buff **msd_skb,
1155 struct multi_send_data *msdp)
1156{
1157 *msd_skb = msdp->skb;
1158 *msd_send = msdp->pkt;
1159 msdp->skb = NULL;
1160 msdp->pkt = NULL;
1161 msdp->count = 0;
1162}
1163
1164/* RCU already held by caller */
1165/* Batching/bouncing logic is designed to attempt to optimize
1166 * performance.
1167 *
1168 * For small, non-LSO packets we copy the packet to a send buffer
1169 * which is pre-registered with the Hyper-V side. This enables the
1170 * hypervisor to avoid remapping the aperture to access the packet
1171 * descriptor and data.
1172 *
1173 * If we already started using a buffer and the netdev is transmitting
1174 * a burst of packets, keep on copying into the buffer until it is
1175 * full or we are done collecting a burst. If there is an existing
1176 * buffer with space for the RNDIS descriptor but not the packet, copy
1177 * the RNDIS descriptor to the buffer, keeping the packet in place.
1178 *
1179 * If we do batching and send more than one packet using a single
1180 * NetVSC message, free the SKBs of the packets copied, except for the
1181 * last packet. This is done to streamline the handling of the case
1182 * where the last packet only had the RNDIS descriptor copied to the
1183 * send buffer, with the data pointers included in the NetVSC message.
1184 */
1185int netvsc_send(struct net_device *ndev,
1186 struct hv_netvsc_packet *packet,
1187 struct rndis_message *rndis_msg,
1188 struct hv_page_buffer *pb,
1189 struct sk_buff *skb,
1190 bool xdp_tx)
1191{
1192 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1193 struct netvsc_device *net_device
1194 = rcu_dereference_bh(ndev_ctx->nvdev);
1195 struct hv_device *device = ndev_ctx->device_ctx;
1196 int ret = 0;
1197 struct netvsc_channel *nvchan;
1198 u32 pktlen = packet->total_data_buflen, msd_len = 0;
1199 unsigned int section_index = NETVSC_INVALID_INDEX;
1200 struct multi_send_data *msdp;
1201 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
1202 struct sk_buff *msd_skb = NULL;
1203 bool try_batch, xmit_more;
1204
1205 /* If device is rescinded, return error and packet will get dropped. */
1206 if (unlikely(!net_device || net_device->destroy))
1207 return -ENODEV;
1208
1209 nvchan = &net_device->chan_table[packet->q_idx];
1210 packet->send_buf_index = NETVSC_INVALID_INDEX;
1211 packet->cp_partial = false;
1212
1213 /* Send a control message or XDP packet directly without accessing
1214 * msd (Multi-Send Data) field which may be changed during data packet
1215 * processing.
1216 */
1217 if (!skb || xdp_tx)
1218 return netvsc_send_pkt(device, packet, net_device, pb, skb);
1219
1220 /* batch packets in send buffer if possible */
1221 msdp = &nvchan->msd;
1222 if (msdp->pkt)
1223 msd_len = msdp->pkt->total_data_buflen;
1224
1225 try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
1226 if (try_batch && msd_len + pktlen + net_device->pkt_align <
1227 net_device->send_section_size) {
1228 section_index = msdp->pkt->send_buf_index;
1229
1230 } else if (try_batch && msd_len + packet->rmsg_size <
1231 net_device->send_section_size) {
1232 section_index = msdp->pkt->send_buf_index;
1233 packet->cp_partial = true;
1234
1235 } else if (pktlen + net_device->pkt_align <
1236 net_device->send_section_size) {
1237 section_index = netvsc_get_next_send_section(net_device);
1238 if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
1239 ++ndev_ctx->eth_stats.tx_send_full;
1240 } else {
1241 move_pkt_msd(&msd_send, &msd_skb, msdp);
1242 msd_len = 0;
1243 }
1244 }
1245
1246 /* Keep aggregating only if stack says more data is coming
1247	 * and we are not doing a mixed-mode send and the flow is not blocked.
1248 */
1249 xmit_more = netdev_xmit_more() &&
1250 !packet->cp_partial &&
1251 !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
1252
1253 if (section_index != NETVSC_INVALID_INDEX) {
1254 netvsc_copy_to_send_buf(net_device,
1255 section_index, msd_len,
1256 packet, rndis_msg, pb, xmit_more);
1257
1258 packet->send_buf_index = section_index;
1259
1260 if (packet->cp_partial) {
1261 packet->page_buf_cnt -= packet->rmsg_pgcnt;
1262 packet->total_data_buflen = msd_len + packet->rmsg_size;
1263 } else {
1264 packet->page_buf_cnt = 0;
1265 packet->total_data_buflen += msd_len;
1266 }
1267
1268 if (msdp->pkt) {
1269 packet->total_packets += msdp->pkt->total_packets;
1270 packet->total_bytes += msdp->pkt->total_bytes;
1271 }
1272
1273 if (msdp->skb)
1274 dev_consume_skb_any(msdp->skb);
1275
1276 if (xmit_more) {
1277 msdp->skb = skb;
1278 msdp->pkt = packet;
1279 msdp->count++;
1280 } else {
1281 cur_send = packet;
1282 msdp->skb = NULL;
1283 msdp->pkt = NULL;
1284 msdp->count = 0;
1285 }
1286 } else {
1287 move_pkt_msd(&msd_send, &msd_skb, msdp);
1288 cur_send = packet;
1289 }
1290
1291 if (msd_send) {
1292 int m_ret = netvsc_send_pkt(device, msd_send, net_device,
1293 NULL, msd_skb);
1294
1295 if (m_ret != 0) {
1296 netvsc_free_send_slot(net_device,
1297 msd_send->send_buf_index);
1298 dev_kfree_skb_any(msd_skb);
1299 }
1300 }
1301
1302 if (cur_send)
1303 ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
1304
1305 if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
1306 netvsc_free_send_slot(net_device, section_index);
1307
1308 return ret;
1309}
1310
1311/* Send pending recv completions */
1312static int send_recv_completions(struct net_device *ndev,
1313 struct netvsc_device *nvdev,
1314 struct netvsc_channel *nvchan)
1315{
1316 struct multi_recv_comp *mrc = &nvchan->mrc;
1317 struct recv_comp_msg {
1318 struct nvsp_message_header hdr;
1319 u32 status;
1320 } __packed;
1321 struct recv_comp_msg msg = {
1322 .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
1323 };
1324 int ret;
1325
1326 while (mrc->first != mrc->next) {
1327 const struct recv_comp_data *rcd
1328 = mrc->slots + mrc->first;
1329
1330 msg.status = rcd->status;
1331 ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
1332 rcd->tid, VM_PKT_COMP, 0);
1333 if (unlikely(ret)) {
1334 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1335
1336 ++ndev_ctx->eth_stats.rx_comp_busy;
1337 return ret;
1338 }
1339
1340 if (++mrc->first == nvdev->recv_completion_cnt)
1341 mrc->first = 0;
1342 }
1343
1344 /* receive completion ring has been emptied */
1345 if (unlikely(nvdev->destroy))
1346 wake_up(&nvdev->wait_drain);
1347
1348 return 0;
1349}
1350
1351/* Count how many receive completions are outstanding */
1352static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
1353 const struct multi_recv_comp *mrc,
1354 u32 *filled, u32 *avail)
1355{
1356 u32 count = nvdev->recv_completion_cnt;
1357
1358 if (mrc->next >= mrc->first)
1359 *filled = mrc->next - mrc->first;
1360 else
1361 *filled = (count - mrc->first) + mrc->next;
1362
1363 *avail = count - *filled - 1;
1364}
1365
1366/* Add receive complete to ring to send to host. */
1367static void enq_receive_complete(struct net_device *ndev,
1368 struct netvsc_device *nvdev, u16 q_idx,
1369 u64 tid, u32 status)
1370{
1371 struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
1372 struct multi_recv_comp *mrc = &nvchan->mrc;
1373 struct recv_comp_data *rcd;
1374 u32 filled, avail;
1375
1376 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1377
1378 if (unlikely(filled > NAPI_POLL_WEIGHT)) {
1379 send_recv_completions(ndev, nvdev, nvchan);
1380 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1381 }
1382
1383 if (unlikely(!avail)) {
1384 netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1385 q_idx, tid);
1386 return;
1387 }
1388
1389 rcd = mrc->slots + mrc->next;
1390 rcd->tid = tid;
1391 rcd->status = status;
1392
1393 if (++mrc->next == nvdev->recv_completion_cnt)
1394 mrc->next = 0;
1395}
1396
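/* Handle a transfer-page data packet: validate the header and each range
 * against the receive buffer, pass every RNDIS packet to the RNDIS filter
 * and queue a single receive completion for the transaction.
 */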
1397static int netvsc_receive(struct net_device *ndev,
1398 struct netvsc_device *net_device,
1399 struct netvsc_channel *nvchan,
1400 const struct vmpacket_descriptor *desc)
1401{
1402 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1403 struct vmbus_channel *channel = nvchan->channel;
1404 const struct vmtransfer_page_packet_header *vmxferpage_packet
1405 = container_of(desc, const struct vmtransfer_page_packet_header, d);
1406 const struct nvsp_message *nvsp = hv_pkt_data(desc);
1407 u32 msglen = hv_pkt_datalen(desc);
1408 u16 q_idx = channel->offermsg.offer.sub_channel_index;
1409 char *recv_buf = net_device->recv_buf;
1410 u32 status = NVSP_STAT_SUCCESS;
1411 int i;
1412 int count = 0;
1413
1414 /* Ensure packet is big enough to read header fields */
1415 if (msglen < sizeof(struct nvsp_message_header)) {
1416 netif_err(net_device_ctx, rx_err, ndev,
1417 "invalid nvsp header, length too small: %u\n",
1418 msglen);
1419 return 0;
1420 }
1421
1422 /* Make sure this is a valid nvsp packet */
1423 if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1424 netif_err(net_device_ctx, rx_err, ndev,
1425 "Unknown nvsp packet type received %u\n",
1426 nvsp->hdr.msg_type);
1427 return 0;
1428 }
1429
1430 /* Validate xfer page pkt header */
1431 if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
1432 netif_err(net_device_ctx, rx_err, ndev,
1433 "Invalid xfer page pkt, offset too small: %u\n",
1434 desc->offset8 << 3);
1435 return 0;
1436 }
1437
1438 if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1439 netif_err(net_device_ctx, rx_err, ndev,
1440 "Invalid xfer page set id - expecting %x got %x\n",
1441 NETVSC_RECEIVE_BUFFER_ID,
1442 vmxferpage_packet->xfer_pageset_id);
1443 return 0;
1444 }
1445
1446 count = vmxferpage_packet->range_cnt;
1447
1448 /* Check count for a valid value */
1449 if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
1450 netif_err(net_device_ctx, rx_err, ndev,
1451 "Range count is not valid: %d\n",
1452 count);
1453 return 0;
1454 }
1455
1456 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1457 for (i = 0; i < count; i++) {
1458 u32 offset = vmxferpage_packet->ranges[i].byte_offset;
1459 u32 buflen = vmxferpage_packet->ranges[i].byte_count;
1460 void *data;
1461 int ret;
1462
1463 if (unlikely(offset > net_device->recv_buf_size ||
1464 buflen > net_device->recv_buf_size - offset)) {
1465 nvchan->rsc.cnt = 0;
1466 status = NVSP_STAT_FAIL;
1467 netif_err(net_device_ctx, rx_err, ndev,
1468 "Packet offset:%u + len:%u too big\n",
1469 offset, buflen);
1470
1471 continue;
1472 }
1473
1474 /* We're going to copy (sections of) the packet into nvchan->recv_buf;
1475 * make sure that nvchan->recv_buf is large enough to hold the packet.
1476 */
1477 if (unlikely(buflen > net_device->recv_section_size)) {
1478 nvchan->rsc.cnt = 0;
1479 status = NVSP_STAT_FAIL;
1480 netif_err(net_device_ctx, rx_err, ndev,
1481 "Packet too big: buflen=%u recv_section_size=%u\n",
1482 buflen, net_device->recv_section_size);
1483
1484 continue;
1485 }
1486
1487 data = recv_buf + offset;
1488
1489 nvchan->rsc.is_last = (i == count - 1);
1490
1491 trace_rndis_recv(ndev, q_idx, data);
1492
1493 /* Pass it to the upper layer */
1494 ret = rndis_filter_receive(ndev, net_device,
1495 nvchan, data, buflen);
1496
1497 if (unlikely(ret != NVSP_STAT_SUCCESS)) {
1498 /* Drop incomplete packet */
1499 nvchan->rsc.cnt = 0;
1500 status = NVSP_STAT_FAIL;
1501 }
1502 }
1503
1504 enq_receive_complete(ndev, net_device, q_idx,
1505 vmxferpage_packet->d.trans_id, status);
1506
1507 return count;
1508}
1509
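/* Parse the send indirection table message from the host (working around a
 * bad offset reported by NVSP <= 6 hosts) and copy it into tx_table.
 */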
1510static void netvsc_send_table(struct net_device *ndev,
1511 struct netvsc_device *nvscdev,
1512 const struct nvsp_message *nvmsg,
1513 u32 msglen)
1514{
1515 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1516 u32 count, offset, *tab;
1517 int i;
1518
1519 /* Ensure packet is big enough to read send_table fields */
1520 if (msglen < sizeof(struct nvsp_message_header) +
1521 sizeof(struct nvsp_5_send_indirect_table)) {
1522 netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
1523 return;
1524 }
1525
1526 count = nvmsg->msg.v5_msg.send_table.count;
1527 offset = nvmsg->msg.v5_msg.send_table.offset;
1528
1529 if (count != VRSS_SEND_TAB_SIZE) {
1530 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1531 return;
1532 }
1533
1534 /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
1535 * wrong due to a host bug. So fix the offset here.
1536 */
1537 if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
1538 msglen >= sizeof(struct nvsp_message_header) +
1539 sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
1540 offset = sizeof(struct nvsp_message_header) +
1541 sizeof(union nvsp_6_message_uber);
1542
1543 /* Boundary check for all versions */
1544 if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
1545 netdev_err(ndev, "Received send-table offset too big:%u\n",
1546 offset);
1547 return;
1548 }
1549
1550 tab = (void *)nvmsg + offset;
1551
1552 for (i = 0; i < count; i++)
1553 net_device_ctx->tx_table[i] = tab[i];
1554}
1555
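/* Record the VF association (slot allocation and serial number) announced
 * by the host and complete any waiter for VF addition.
 */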
1556static void netvsc_send_vf(struct net_device *ndev,
1557 const struct nvsp_message *nvmsg,
1558 u32 msglen)
1559{
1560 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1561
1562 /* Ensure packet is big enough to read its fields */
1563 if (msglen < sizeof(struct nvsp_message_header) +
1564 sizeof(struct nvsp_4_send_vf_association)) {
1565 netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
1566 return;
1567 }
1568
1569 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1570 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1571
1572 if (net_device_ctx->vf_alloc)
1573 complete(&net_device_ctx->vf_add);
1574
1575 netdev_info(ndev, "VF slot %u %s\n",
1576 net_device_ctx->vf_serial,
1577 net_device_ctx->vf_alloc ? "added" : "removed");
1578}
1579
1580static void netvsc_receive_inband(struct net_device *ndev,
1581 struct netvsc_device *nvscdev,
1582 const struct vmpacket_descriptor *desc)
1583{
1584 const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1585 u32 msglen = hv_pkt_datalen(desc);
1586
1587 /* Ensure packet is big enough to read header fields */
1588 if (msglen < sizeof(struct nvsp_message_header)) {
1589 netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
1590 return;
1591 }
1592
1593 switch (nvmsg->hdr.msg_type) {
1594 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1595 netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
1596 break;
1597
1598 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1599 if (hv_is_isolation_supported())
1600 netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
1601 else
1602 netvsc_send_vf(ndev, nvmsg, msglen);
1603 break;
1604 }
1605}
1606
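/* Dispatch one VMBus descriptor by packet type: completions, transfer-page
 * data packets (returns the number of ranges processed) or inband messages.
 */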
1607static int netvsc_process_raw_pkt(struct hv_device *device,
1608 struct netvsc_channel *nvchan,
1609 struct netvsc_device *net_device,
1610 struct net_device *ndev,
1611 const struct vmpacket_descriptor *desc,
1612 int budget)
1613{
1614 struct vmbus_channel *channel = nvchan->channel;
1615 const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1616
1617 trace_nvsp_recv(ndev, channel, nvmsg);
1618
1619 switch (desc->type) {
1620 case VM_PKT_COMP:
1621 netvsc_send_completion(ndev, net_device, channel, desc, budget);
1622 break;
1623
1624 case VM_PKT_DATA_USING_XFER_PAGES:
1625 return netvsc_receive(ndev, net_device, nvchan, desc);
1626
1627 case VM_PKT_DATA_INBAND:
1628 netvsc_receive_inband(ndev, net_device, desc);
1629 break;
1630
1631 default:
1632 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1633 desc->type, desc->trans_id);
1634 break;
1635 }
1636
1637 return 0;
1638}
1639
1640static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1641{
1642 struct vmbus_channel *primary = channel->primary_channel;
1643
1644 return primary ? primary->device_obj : channel->device_obj;
1645}
1646
1647/* Network processing softirq
1648 * Process data in incoming ring buffer from host
1649 * Stops when ring is empty or budget is met or exceeded.
1650 */
1651int netvsc_poll(struct napi_struct *napi, int budget)
1652{
1653 struct netvsc_channel *nvchan
1654 = container_of(napi, struct netvsc_channel, napi);
1655 struct netvsc_device *net_device = nvchan->net_device;
1656 struct vmbus_channel *channel = nvchan->channel;
1657 struct hv_device *device = netvsc_channel_to_device(channel);
1658 struct net_device *ndev = hv_get_drvdata(device);
1659 int work_done = 0;
1660 int ret;
1661
1662 /* If starting a new interval */
1663 if (!nvchan->desc)
1664 nvchan->desc = hv_pkt_iter_first(channel);
1665
1666 nvchan->xdp_flush = false;
1667
1668 while (nvchan->desc && work_done < budget) {
1669 work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
1670 ndev, nvchan->desc, budget);
1671 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1672 }
1673
1674 if (nvchan->xdp_flush)
1675 xdp_do_flush();
1676
1677 /* Send any pending receive completions */
1678 ret = send_recv_completions(ndev, net_device, nvchan);
1679
1680 /* If it did not exhaust NAPI budget this time
1681 * and not doing busy poll
1682 * then re-enable host interrupts
1683 * and reschedule if ring is not empty
1684 * or sending receive completion failed.
1685 */
1686 if (work_done < budget &&
1687 napi_complete_done(napi, work_done) &&
1688 (ret || hv_end_read(&channel->inbound)) &&
1689 napi_schedule_prep(napi)) {
1690 hv_begin_read(&channel->inbound);
1691 __napi_schedule(napi);
1692 }
1693
1694 /* Driver may overshoot since multiple packets per descriptor */
1695 return min(work_done, budget);
1696}
1697
1698/* Call back when data is available in host ring buffer.
1699 * Processing is deferred until network softirq (NAPI)
1700 */
1701void netvsc_channel_cb(void *context)
1702{
1703 struct netvsc_channel *nvchan = context;
1704 struct vmbus_channel *channel = nvchan->channel;
1705 struct hv_ring_buffer_info *rbi = &channel->inbound;
1706
1707 /* preload first vmpacket descriptor */
1708 prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
1709
1710 if (napi_schedule_prep(&nvchan->napi)) {
1711 /* disable interrupts from host */
1712 hv_begin_read(rbi);
1713
1714 __napi_schedule_irqoff(&nvchan->napi);
1715 }
1716}
1717
1718/*
1719 * netvsc_device_add - Callback when the device belonging to this
1720 * driver is added
1721 */
1722struct netvsc_device *netvsc_device_add(struct hv_device *device,
1723 const struct netvsc_device_info *device_info)
1724{
1725 int i, ret = 0;
1726 struct netvsc_device *net_device;
1727 struct net_device *ndev = hv_get_drvdata(device);
1728 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1729
1730 net_device = alloc_net_device();
1731 if (!net_device)
1732 return ERR_PTR(-ENOMEM);
1733
1734 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1735 net_device_ctx->tx_table[i] = 0;
1736
1737 /* Because the device uses NAPI, all the interrupt batching and
1738 * control is done via Net softirq, not the channel handling
1739 */
1740 set_channel_read_mode(device->channel, HV_CALL_ISR);
1741
1742 /* If we're reopening the device we may have multiple queues, fill the
1743 * chn_table with the default channel to use it before subchannels are
1744 * opened.
1745 * Initialize the channel state before we open;
1746 * we can be interrupted as soon as we open the channel.
1747 */
1748
1749 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1750 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1751
1752 nvchan->channel = device->channel;
1753 nvchan->net_device = net_device;
1754 u64_stats_init(&nvchan->tx_stats.syncp);
1755 u64_stats_init(&nvchan->rx_stats.syncp);
1756
1757 ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
1758
1759 if (ret) {
1760 netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
1761 goto cleanup2;
1762 }
1763
1764 ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
1765 MEM_TYPE_PAGE_SHARED, NULL);
1766
1767 if (ret) {
1768 netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
1769 goto cleanup2;
1770 }
1771 }
1772
1773 /* Enable NAPI handler before init callbacks */
1774 netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
1775
1776 /* Open the channel */
1777 device->channel->next_request_id_callback = vmbus_next_request_id;
1778 device->channel->request_addr_callback = vmbus_request_addr;
1779 device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
1780 device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
1781
1782 ret = vmbus_open(device->channel, netvsc_ring_bytes,
1783 netvsc_ring_bytes, NULL, 0,
1784 netvsc_channel_cb, net_device->chan_table);
1785
1786 if (ret != 0) {
1787 netdev_err(ndev, "unable to open channel: %d\n", ret);
1788 goto cleanup;
1789 }
1790
1791 /* Channel is opened */
1792 netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
1793
1794 napi_enable(&net_device->chan_table[0].napi);
1795 netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
1796 &net_device->chan_table[0].napi);
1797 netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
1798 &net_device->chan_table[0].napi);
1799
1800 /* Connect with the NetVsp */
1801 ret = netvsc_connect_vsp(device, net_device, device_info);
1802 if (ret != 0) {
1803 netdev_err(ndev,
1804 "unable to connect to NetVSP - %d\n", ret);
1805 goto close;
1806 }
1807
1808 /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
1809 * populated.
1810 */
1811 rcu_assign_pointer(net_device_ctx->nvdev, net_device);
1812
1813 return net_device;
1814
1815close:
1816 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
1817 netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
1818 netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
1819 napi_disable(&net_device->chan_table[0].napi);
1820
1821 /* Now, we can close the channel safely */
1822 vmbus_close(device->channel);
1823
1824cleanup:
1825 netif_napi_del(&net_device->chan_table[0].napi);
1826
1827cleanup2:
1828 free_netvsc_device(&net_device->rcu);
1829
1830 return ERR_PTR(ret);
1831}
148
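/* RCU callback: runs once all readers are done with the netvsc_device.
 * Frees the host-visible receive/send buffers, the send-section bitmap,
 * the RNDIS extension and the per-channel receive state.
 */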
149static void free_netvsc_device(struct rcu_head *head)
150{
151 struct netvsc_device *nvdev
152 = container_of(head, struct netvsc_device, rcu);
153 int i;
154
155 kfree(nvdev->extension);
156 vfree(nvdev->recv_buf);
157 vfree(nvdev->send_buf);
158 kfree(nvdev->send_section_map);
159
160 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
161 xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
162 kfree(nvdev->chan_table[i].recv_buf);
163 vfree(nvdev->chan_table[i].mrc.slots);
164 }
165
166 kfree(nvdev);
167}
168
169static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
170{
171 call_rcu(&nvdev->rcu, free_netvsc_device);
172}
173
174static void netvsc_revoke_recv_buf(struct hv_device *device,
175 struct netvsc_device *net_device,
176 struct net_device *ndev)
177{
178 struct nvsp_message *revoke_packet;
179 int ret;
180
181 /*
182 * If we got a section count, it means we received a
183	 * SendReceiveBufferComplete msg (i.e. we sent a
184	 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
185	 * to send a revoke msg here.
186 */
187 if (net_device->recv_section_cnt) {
188 /* Send the revoke receive buffer */
189 revoke_packet = &net_device->revoke_packet;
190 memset(revoke_packet, 0, sizeof(struct nvsp_message));
191
192 revoke_packet->hdr.msg_type =
193 NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
194 revoke_packet->msg.v1_msg.
195 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
196
197 trace_nvsp_send(ndev, revoke_packet);
198
199 ret = vmbus_sendpacket(device->channel,
200 revoke_packet,
201 sizeof(struct nvsp_message),
202 VMBUS_RQST_ID_NO_RESPONSE,
203 VM_PKT_DATA_INBAND, 0);
204		/* If the failure is because the channel is rescinded,
205		 * ignore it since we cannot send on a rescinded
206		 * channel. This allows us to clean up properly
207		 * even when the channel is rescinded.
208		 */
209 if (device->channel->rescind)
210 ret = 0;
211		/*
212		 * If we failed here, we might as well return and
213		 * have a leak rather than continue and risk a bugcheck.
214		 */
215 if (ret != 0) {
216 netdev_err(ndev, "unable to send "
217 "revoke receive buffer to netvsp\n");
218 return;
219 }
220 net_device->recv_section_cnt = 0;
221 }
222}
223
224static void netvsc_revoke_send_buf(struct hv_device *device,
225 struct netvsc_device *net_device,
226 struct net_device *ndev)
227{
228 struct nvsp_message *revoke_packet;
229 int ret;
230
231	/* Deal with the send buffer we may have set up.
232	 * If we got a send section size, it means we received a
233	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
234	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
235	 * to send a revoke msg here.
236 */
237 if (net_device->send_section_cnt) {
238		/* Send the revoke send buffer */
239 revoke_packet = &net_device->revoke_packet;
240 memset(revoke_packet, 0, sizeof(struct nvsp_message));
241
242 revoke_packet->hdr.msg_type =
243 NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
244 revoke_packet->msg.v1_msg.revoke_send_buf.id =
245 NETVSC_SEND_BUFFER_ID;
246
247 trace_nvsp_send(ndev, revoke_packet);
248
249 ret = vmbus_sendpacket(device->channel,
250 revoke_packet,
251 sizeof(struct nvsp_message),
252 VMBUS_RQST_ID_NO_RESPONSE,
253 VM_PKT_DATA_INBAND, 0);
254
255		/* If the failure is because the channel is rescinded,
256		 * ignore it since we cannot send on a rescinded
257		 * channel. This allows us to clean up properly
258		 * even when the channel is rescinded.
259		 */
260 if (device->channel->rescind)
261 ret = 0;
262
263		/* If we failed here, we might as well return and
264		 * have a leak rather than continue and risk a bugcheck.
265		 */
266 if (ret != 0) {
267 netdev_err(ndev, "unable to send "
268 "revoke send buffer to netvsp\n");
269 return;
270 }
271 net_device->send_section_cnt = 0;
272 }
273}
274
275static void netvsc_teardown_recv_gpadl(struct hv_device *device,
276 struct netvsc_device *net_device,
277 struct net_device *ndev)
278{
279 int ret;
280
281 if (net_device->recv_buf_gpadl_handle) {
282 ret = vmbus_teardown_gpadl(device->channel,
283 net_device->recv_buf_gpadl_handle);
284
285		/* If we failed here, we might as well return and have a leak
286		 * rather than continue and risk a bugcheck.
287		 */
288 if (ret != 0) {
289 netdev_err(ndev,
290 "unable to teardown receive buffer's gpadl\n");
291 return;
292 }
293 net_device->recv_buf_gpadl_handle = 0;
294 }
295}
296
297static void netvsc_teardown_send_gpadl(struct hv_device *device,
298 struct netvsc_device *net_device,
299 struct net_device *ndev)
300{
301 int ret;
302
303 if (net_device->send_buf_gpadl_handle) {
304 ret = vmbus_teardown_gpadl(device->channel,
305 net_device->send_buf_gpadl_handle);
306
307		/* If we failed here, we might as well return and have a leak
308		 * rather than continue and risk a bugcheck.
309		 */
310 if (ret != 0) {
311 netdev_err(ndev,
312 "unable to teardown send buffer's gpadl\n");
313 return;
314 }
315 net_device->send_buf_gpadl_handle = 0;
316 }
317}
318
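/* Allocate the receive-completion ring for one channel; try the NUMA node of
 * the channel's target CPU first, then fall back to any node.
 */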
319int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
320{
321 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
322 int node = cpu_to_node(nvchan->channel->target_cpu);
323 size_t size;
324
325 size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
326 nvchan->mrc.slots = vzalloc_node(size, node);
327 if (!nvchan->mrc.slots)
328 nvchan->mrc.slots = vzalloc(size);
329
330 return nvchan->mrc.slots ? 0 : -ENOMEM;
331}
332
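/* Set up the buffers shared with the host: allocate the receive and send
 * buffers, establish a GPADL handle for each, and exchange NVSP messages so
 * the host carves them into sections of a negotiated size.
 */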
333static int netvsc_init_buf(struct hv_device *device,
334 struct netvsc_device *net_device,
335 const struct netvsc_device_info *device_info)
336{
337 struct nvsp_1_message_send_receive_buffer_complete *resp;
338 struct net_device *ndev = hv_get_drvdata(device);
339 struct nvsp_message *init_packet;
340 unsigned int buf_size;
341 size_t map_words;
342 int i, ret = 0;
343
344 /* Get receive buffer area. */
345 buf_size = device_info->recv_sections * device_info->recv_section_size;
346 buf_size = roundup(buf_size, PAGE_SIZE);
347
348 /* Legacy hosts only allow smaller receive buffer */
349 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
350 buf_size = min_t(unsigned int, buf_size,
351 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
352
353 net_device->recv_buf = vzalloc(buf_size);
354 if (!net_device->recv_buf) {
355 netdev_err(ndev,
356 "unable to allocate receive buffer of size %u\n",
357 buf_size);
358 ret = -ENOMEM;
359 goto cleanup;
360 }
361
362 net_device->recv_buf_size = buf_size;
363
364 /*
365 * Establish the gpadl handle for this buffer on this
366 * channel. Note: This call uses the vmbus connection rather
367 * than the channel to establish the gpadl handle.
368 */
369 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
370 buf_size,
371 &net_device->recv_buf_gpadl_handle);
372 if (ret != 0) {
373 netdev_err(ndev,
374 "unable to establish receive buffer's gpadl\n");
375 goto cleanup;
376 }
377
378 /* Notify the NetVsp of the gpadl handle */
379 init_packet = &net_device->channel_init_pkt;
380 memset(init_packet, 0, sizeof(struct nvsp_message));
381 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
382 init_packet->msg.v1_msg.send_recv_buf.
383 gpadl_handle = net_device->recv_buf_gpadl_handle;
384 init_packet->msg.v1_msg.
385 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
386
387 trace_nvsp_send(ndev, init_packet);
388
389 /* Send the gpadl notification request */
390 ret = vmbus_sendpacket(device->channel, init_packet,
391 sizeof(struct nvsp_message),
392 (unsigned long)init_packet,
393 VM_PKT_DATA_INBAND,
394 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
395 if (ret != 0) {
396 netdev_err(ndev,
397 "unable to send receive buffer's gpadl to netvsp\n");
398 goto cleanup;
399 }
400
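	/* The host replies with a SEND_RECV_BUF_COMPLETE message;
	 * netvsc_send_completion() copies it into channel_init_pkt and
	 * signals channel_init_wait.
	 */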
401 wait_for_completion(&net_device->channel_init_wait);
402
403 /* Check the response */
404 resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
405 if (resp->status != NVSP_STAT_SUCCESS) {
406 netdev_err(ndev,
407 "Unable to complete receive buffer initialization with NetVsp - status %d\n",
408 resp->status);
409 ret = -EINVAL;
410 goto cleanup;
411 }
412
413 /* Parse the response */
414 netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
415 resp->num_sections, resp->sections[0].sub_alloc_size,
416 resp->sections[0].num_sub_allocs);
417
418 /* There should only be one section for the entire receive buffer */
419 if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
420 ret = -EINVAL;
421 goto cleanup;
422 }
423
424 net_device->recv_section_size = resp->sections[0].sub_alloc_size;
425 net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
426
427 /* Ensure buffer will not overflow */
428 if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
429 (u64)net_device->recv_section_cnt > (u64)buf_size) {
430 netdev_err(ndev, "invalid recv_section_size %u\n",
431 net_device->recv_section_size);
432 ret = -EINVAL;
433 goto cleanup;
434 }
435
436 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
437 struct netvsc_channel *nvchan = &net_device->chan_table[i];
438
439 nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
440 if (nvchan->recv_buf == NULL) {
441 ret = -ENOMEM;
442 goto cleanup;
443 }
444 }
445
446 /* Setup receive completion ring.
447 * Add 1 to the recv_section_cnt because at least one entry in a
448 * ring buffer has to be empty.
449 */
450 net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
451 ret = netvsc_alloc_recv_comp_ring(net_device, 0);
452 if (ret)
453 goto cleanup;
454
455 /* Now setup the send buffer. */
456 buf_size = device_info->send_sections * device_info->send_section_size;
457 buf_size = round_up(buf_size, PAGE_SIZE);
458
459 net_device->send_buf = vzalloc(buf_size);
460 if (!net_device->send_buf) {
461 netdev_err(ndev, "unable to allocate send buffer of size %u\n",
462 buf_size);
463 ret = -ENOMEM;
464 goto cleanup;
465 }
466
467 /* Establish the gpadl handle for this buffer on this
468 * channel. Note: This call uses the vmbus connection rather
469 * than the channel to establish the gpadl handle.
470 */
471 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
472 buf_size,
473 &net_device->send_buf_gpadl_handle);
474 if (ret != 0) {
475 netdev_err(ndev,
476 "unable to establish send buffer's gpadl\n");
477 goto cleanup;
478 }
479
480 /* Notify the NetVsp of the gpadl handle */
481 init_packet = &net_device->channel_init_pkt;
482 memset(init_packet, 0, sizeof(struct nvsp_message));
483 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
484 init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
485 net_device->send_buf_gpadl_handle;
486 init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
487
488 trace_nvsp_send(ndev, init_packet);
489
490 /* Send the gpadl notification request */
491 ret = vmbus_sendpacket(device->channel, init_packet,
492 sizeof(struct nvsp_message),
493 (unsigned long)init_packet,
494 VM_PKT_DATA_INBAND,
495 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
496 if (ret != 0) {
497 netdev_err(ndev,
498 "unable to send send buffer's gpadl to netvsp\n");
499 goto cleanup;
500 }
501
502 wait_for_completion(&net_device->channel_init_wait);
503
504 /* Check the response */
505 if (init_packet->msg.v1_msg.
506 send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
507 netdev_err(ndev, "Unable to complete send buffer "
508 "initialization with NetVsp - status %d\n",
509 init_packet->msg.v1_msg.
510 send_send_buf_complete.status);
511 ret = -EINVAL;
512 goto cleanup;
513 }
514
515 /* Parse the response */
516 net_device->send_section_size = init_packet->msg.
517 v1_msg.send_send_buf_complete.section_size;
518 if (net_device->send_section_size < NETVSC_MTU_MIN) {
519 netdev_err(ndev, "invalid send_section_size %u\n",
520 net_device->send_section_size);
521 ret = -EINVAL;
522 goto cleanup;
523 }
524
525 /* Section count is simply the size divided by the section size. */
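	/* For example, a 1 MiB send buffer with 6144-byte sections yields 170 sections. */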
526 net_device->send_section_cnt = buf_size / net_device->send_section_size;
527
528 netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
529 net_device->send_section_size, net_device->send_section_cnt);
530
531 /* Setup state for managing the send buffer. */
532 map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);
533
534 net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
535 if (net_device->send_section_map == NULL) {
536 ret = -ENOMEM;
537 goto cleanup;
538 }
539
540 goto exit;
541
542cleanup:
543 netvsc_revoke_recv_buf(device, net_device, ndev);
544 netvsc_revoke_send_buf(device, net_device, ndev);
545 netvsc_teardown_recv_gpadl(device, net_device, ndev);
546 netvsc_teardown_send_gpadl(device, net_device, ndev);
547
548exit:
549 return ret;
550}
551
552/* Negotiate NVSP protocol version */
553static int negotiate_nvsp_ver(struct hv_device *device,
554 struct netvsc_device *net_device,
555 struct nvsp_message *init_packet,
556 u32 nvsp_ver)
557{
558 struct net_device *ndev = hv_get_drvdata(device);
559 int ret;
560
561 memset(init_packet, 0, sizeof(struct nvsp_message));
562 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
563 init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
564 init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
565 trace_nvsp_send(ndev, init_packet);
566
567 /* Send the init request */
568 ret = vmbus_sendpacket(device->channel, init_packet,
569 sizeof(struct nvsp_message),
570 (unsigned long)init_packet,
571 VM_PKT_DATA_INBAND,
572 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
573
574 if (ret != 0)
575 return ret;
576
577 wait_for_completion(&net_device->channel_init_wait);
578
579 if (init_packet->msg.init_msg.init_complete.status !=
580 NVSP_STAT_SUCCESS)
581 return -EINVAL;
582
583 if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
584 return 0;
585
586 /* NVSPv2 or later: Send NDIS config */
587 memset(init_packet, 0, sizeof(struct nvsp_message));
588 init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
589 init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
590 init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
591
592 if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
593 if (hv_is_isolation_supported())
594 netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
595 else
596 init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
597
598 /* Teaming bit is needed to receive link speed updates */
599 init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
600 }
601
602 if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
603 init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
604
605 trace_nvsp_send(ndev, init_packet);
606
607 ret = vmbus_sendpacket(device->channel, init_packet,
608 sizeof(struct nvsp_message),
609 VMBUS_RQST_ID_NO_RESPONSE,
610 VM_PKT_DATA_INBAND, 0);
611
612 return ret;
613}
614
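/* Negotiate the highest NVSP protocol version both ends support, send the
 * NDIS version, then set up the receive/send buffers via netvsc_init_buf().
 */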
615static int netvsc_connect_vsp(struct hv_device *device,
616 struct netvsc_device *net_device,
617 const struct netvsc_device_info *device_info)
618{
619 struct net_device *ndev = hv_get_drvdata(device);
620 static const u32 ver_list[] = {
621 NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
622 NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
623 NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
624 };
625 struct nvsp_message *init_packet;
626 int ndis_version, i, ret;
627
628 init_packet = &net_device->channel_init_pkt;
629
630 /* Negotiate the latest NVSP protocol supported */
631 for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
632 if (negotiate_nvsp_ver(device, net_device, init_packet,
633 ver_list[i]) == 0) {
634 net_device->nvsp_version = ver_list[i];
635 break;
636 }
637
638 if (i < 0) {
639 ret = -EPROTO;
640 goto cleanup;
641 }
642
643 if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
644 netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
645 net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
646 ret = -EPROTO;
647 goto cleanup;
648 }
649
650 pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
651
652 /* Send the ndis version */
653 memset(init_packet, 0, sizeof(struct nvsp_message));
654
655 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
656 ndis_version = 0x00060001;
657 else
658 ndis_version = 0x0006001e;
659
660 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
661 init_packet->msg.v1_msg.
662 send_ndis_ver.ndis_major_ver =
663 (ndis_version & 0xFFFF0000) >> 16;
664 init_packet->msg.v1_msg.
665 send_ndis_ver.ndis_minor_ver =
666 ndis_version & 0xFFFF;
667
668 trace_nvsp_send(ndev, init_packet);
669
670 /* Send the init request */
671 ret = vmbus_sendpacket(device->channel, init_packet,
672 sizeof(struct nvsp_message),
673 VMBUS_RQST_ID_NO_RESPONSE,
674 VM_PKT_DATA_INBAND, 0);
675 if (ret != 0)
676 goto cleanup;
677
678
679 ret = netvsc_init_buf(device, net_device, device_info);
680
681cleanup:
682 return ret;
683}
684
685/*
686 * netvsc_device_remove - Callback when the root bus device is removed
687 */
688void netvsc_device_remove(struct hv_device *device)
689{
690 struct net_device *ndev = hv_get_drvdata(device);
691 struct net_device_context *net_device_ctx = netdev_priv(ndev);
692 struct netvsc_device *net_device
693 = rtnl_dereference(net_device_ctx->nvdev);
694 int i;
695
696 /*
697 * Revoke receive buffer. If host is pre-Win2016 then tear down
698 * receive buffer GPADL. Do the same for send buffer.
699 */
700 netvsc_revoke_recv_buf(device, net_device, ndev);
701 if (vmbus_proto_version < VERSION_WIN10)
702 netvsc_teardown_recv_gpadl(device, net_device, ndev);
703
704 netvsc_revoke_send_buf(device, net_device, ndev);
705 if (vmbus_proto_version < VERSION_WIN10)
706 netvsc_teardown_send_gpadl(device, net_device, ndev);
707
708 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
709
710 /* Disable NAPI and disassociate its context from the device. */
711 for (i = 0; i < net_device->num_chn; i++) {
712 /* See also vmbus_reset_channel_cb(). */
713 napi_disable(&net_device->chan_table[i].napi);
714 netif_napi_del(&net_device->chan_table[i].napi);
715 }
716
717 /*
718 * At this point, no one should be accessing net_device
719 * except in here
720 */
721 netdev_dbg(ndev, "net device safe to remove\n");
722
723 /* Now, we can close the channel safely */
724 vmbus_close(device->channel);
725
726 /*
727 * If host is Win2016 or higher then we do the GPADL tear down
728 * here after VMBus is closed.
729 */
730 if (vmbus_proto_version >= VERSION_WIN10) {
731 netvsc_teardown_recv_gpadl(device, net_device, ndev);
732 netvsc_teardown_send_gpadl(device, net_device, ndev);
733 }
734
735 /* Release all resources */
736 free_netvsc_device_rcu(net_device);
737}
738
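/* Transmit flow-control watermarks, in percent of writable ring space: the
 * queue is stopped when less than LOWATER is left and woken again once more
 * than HIWATER is available (or the queue has drained). For example, with a
 * 16 KiB outbound ring this is roughly 1.6 KiB and 3.2 KiB of free space.
 */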
739#define RING_AVAIL_PERCENT_HIWATER 20
740#define RING_AVAIL_PERCENT_LOWATER 10
741
742static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
743 u32 index)
744{
745 sync_change_bit(index, net_device->send_section_map);
746}
747
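/* Completion handler for an RNDIS data packet: look up the originating skb
 * from the transaction id, release its send-buffer section, account the
 * transmit stats, and wake the queue if it was stopped and has drained enough.
 */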
748static void netvsc_send_tx_complete(struct net_device *ndev,
749 struct netvsc_device *net_device,
750 struct vmbus_channel *channel,
751 const struct vmpacket_descriptor *desc,
752 int budget)
753{
754 struct net_device_context *ndev_ctx = netdev_priv(ndev);
755 struct sk_buff *skb;
756 u16 q_idx = 0;
757 int queue_sends;
758 u64 cmd_rqst;
759
760 cmd_rqst = channel->request_addr_callback(channel, (u64)desc->trans_id);
761 if (cmd_rqst == VMBUS_RQST_ERROR) {
762 netdev_err(ndev, "Incorrect transaction id\n");
763 return;
764 }
765
766 skb = (struct sk_buff *)(unsigned long)cmd_rqst;
767
768 /* Notify the layer above us */
769 if (likely(skb)) {
770 const struct hv_netvsc_packet *packet
771 = (struct hv_netvsc_packet *)skb->cb;
772 u32 send_index = packet->send_buf_index;
773 struct netvsc_stats *tx_stats;
774
775 if (send_index != NETVSC_INVALID_INDEX)
776 netvsc_free_send_slot(net_device, send_index);
777 q_idx = packet->q_idx;
778
779 tx_stats = &net_device->chan_table[q_idx].tx_stats;
780
781 u64_stats_update_begin(&tx_stats->syncp);
782 tx_stats->packets += packet->total_packets;
783 tx_stats->bytes += packet->total_bytes;
784 u64_stats_update_end(&tx_stats->syncp);
785
786 napi_consume_skb(skb, budget);
787 }
788
789 queue_sends =
790 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
791
792 if (unlikely(net_device->destroy)) {
793 if (queue_sends == 0)
794 wake_up(&net_device->wait_drain);
795 } else {
796 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
797
798 if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
799 (hv_get_avail_to_write_percent(&channel->outbound) >
800 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
801 netif_tx_wake_queue(txq);
802 ndev_ctx->eth_stats.wake_queue++;
803 }
804 }
805}
806
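/* Handle VM_PKT_COMP packets: data-less completions (e.g. SWITCH_DATA_PATH),
 * init/buffer/subchannel responses (copied back for the waiter on
 * channel_init_wait), and RNDIS transmit completions.
 */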
807static void netvsc_send_completion(struct net_device *ndev,
808 struct netvsc_device *net_device,
809 struct vmbus_channel *incoming_channel,
810 const struct vmpacket_descriptor *desc,
811 int budget)
812{
813 const struct nvsp_message *nvsp_packet;
814 u32 msglen = hv_pkt_datalen(desc);
815 struct nvsp_message *pkt_rqst;
816 u64 cmd_rqst;
817
818 /* First check if this is a VMBUS completion without data payload */
819 if (!msglen) {
820 cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
821 (u64)desc->trans_id);
822 if (cmd_rqst == VMBUS_RQST_ERROR) {
823 netdev_err(ndev, "Invalid transaction id\n");
824 return;
825 }
826
827 pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
828 switch (pkt_rqst->hdr.msg_type) {
829 case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
830 complete(&net_device->channel_init_wait);
831 break;
832
833 default:
834 netdev_err(ndev, "Unexpected VMBUS completion!!\n");
835 }
836 return;
837 }
838
839 /* Ensure packet is big enough to read header fields */
840 if (msglen < sizeof(struct nvsp_message_header)) {
841 netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
842 return;
843 }
844
845 nvsp_packet = hv_pkt_data(desc);
846 switch (nvsp_packet->hdr.msg_type) {
847 case NVSP_MSG_TYPE_INIT_COMPLETE:
848 if (msglen < sizeof(struct nvsp_message_header) +
849 sizeof(struct nvsp_message_init_complete)) {
850 netdev_err(ndev, "nvsp_msg length too small: %u\n",
851 msglen);
852 return;
853 }
854 fallthrough;
855
856 case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
857 if (msglen < sizeof(struct nvsp_message_header) +
858 sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
859 netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
860 msglen);
861 return;
862 }
863 fallthrough;
864
865 case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
866 if (msglen < sizeof(struct nvsp_message_header) +
867 sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
868 netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
869 msglen);
870 return;
871 }
872 fallthrough;
873
874 case NVSP_MSG5_TYPE_SUBCHANNEL:
875 if (msglen < sizeof(struct nvsp_message_header) +
876 sizeof(struct nvsp_5_subchannel_complete)) {
877 netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
878 msglen);
879 return;
880 }
881 /* Copy the response back */
882 memcpy(&net_device->channel_init_pkt, nvsp_packet,
883 sizeof(struct nvsp_message));
884 complete(&net_device->channel_init_wait);
885 break;
886
887 case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
888 netvsc_send_tx_complete(ndev, net_device, incoming_channel,
889 desc, budget);
890 break;
891
892 default:
893 netdev_err(ndev,
894 "Unknown send completion type %d received!!\n",
895 nvsp_packet->hdr.msg_type);
896 }
897}
898
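/* Claim a free send-buffer section, or return NETVSC_INVALID_INDEX when all
 * sections are in use.
 */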
899static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
900{
901 unsigned long *map_addr = net_device->send_section_map;
902 unsigned int i;
903
904 for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
905 if (sync_test_and_set_bit(i, map_addr) == 0)
906 return i;
907 }
908
909 return NETVSC_INVALID_INDEX;
910}
911
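/* Copy the RNDIS message (and, unless this is a partial copy, the packet
 * data) into the chosen send-buffer section, zero-padding the tail to the
 * packet alignment when more packets will be aggregated behind it.
 */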
912static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
913 unsigned int section_index,
914 u32 pend_size,
915 struct hv_netvsc_packet *packet,
916 struct rndis_message *rndis_msg,
917 struct hv_page_buffer *pb,
918 bool xmit_more)
919{
920 char *start = net_device->send_buf;
921 char *dest = start + (section_index * net_device->send_section_size)
922 + pend_size;
923 int i;
924 u32 padding = 0;
925 u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
926 packet->page_buf_cnt;
927 u32 remain;
928
929 /* Add padding */
930 remain = packet->total_data_buflen & (net_device->pkt_align - 1);
931 if (xmit_more && remain) {
932 padding = net_device->pkt_align - remain;
933 rndis_msg->msg_len += padding;
934 packet->total_data_buflen += padding;
935 }
936
937 for (i = 0; i < page_count; i++) {
938 char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
939 u32 offset = pb[i].offset;
940 u32 len = pb[i].len;
941
942 memcpy(dest, (src + offset), len);
943 dest += len;
944 }
945
946 if (padding)
947 memset(dest, 0, padding);
948}
949
950static inline int netvsc_send_pkt(
951 struct hv_device *device,
952 struct hv_netvsc_packet *packet,
953 struct netvsc_device *net_device,
954 struct hv_page_buffer *pb,
955 struct sk_buff *skb)
956{
957 struct nvsp_message nvmsg;
958 struct nvsp_1_message_send_rndis_packet *rpkt =
959 &nvmsg.msg.v1_msg.send_rndis_pkt;
960 struct netvsc_channel * const nvchan =
961 &net_device->chan_table[packet->q_idx];
962 struct vmbus_channel *out_channel = nvchan->channel;
963 struct net_device *ndev = hv_get_drvdata(device);
964 struct net_device_context *ndev_ctx = netdev_priv(ndev);
965 struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
966 u64 req_id;
967 int ret;
968 u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
969
970 memset(&nvmsg, 0, sizeof(struct nvsp_message));
971 nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
972 if (skb)
973 rpkt->channel_type = 0; /* 0 is RMC_DATA */
974 else
975 rpkt->channel_type = 1; /* 1 is RMC_CONTROL */
976
977 rpkt->send_buf_section_index = packet->send_buf_index;
978 if (packet->send_buf_index == NETVSC_INVALID_INDEX)
979 rpkt->send_buf_section_size = 0;
980 else
981 rpkt->send_buf_section_size = packet->total_data_buflen;
982
983 req_id = (ulong)skb;
984
985 if (out_channel->rescind)
986 return -ENODEV;
987
988 trace_nvsp_send_pkt(ndev, out_channel, rpkt);
989
990 if (packet->page_buf_cnt) {
991 if (packet->cp_partial)
992 pb += packet->rmsg_pgcnt;
993
994 ret = vmbus_sendpacket_pagebuffer(out_channel,
995 pb, packet->page_buf_cnt,
996 &nvmsg, sizeof(nvmsg),
997 req_id);
998 } else {
999 ret = vmbus_sendpacket(out_channel,
1000 &nvmsg, sizeof(nvmsg),
1001 req_id, VM_PKT_DATA_INBAND,
1002 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1003 }
1004
1005 if (ret == 0) {
1006 atomic_inc_return(&nvchan->queue_sends);
1007
1008 if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
1009 netif_tx_stop_queue(txq);
1010 ndev_ctx->eth_stats.stop_queue++;
1011 }
1012 } else if (ret == -EAGAIN) {
1013 netif_tx_stop_queue(txq);
1014 ndev_ctx->eth_stats.stop_queue++;
1015 } else {
1016 netdev_err(ndev,
1017 "Unable to send packet pages %u len %u, ret %d\n",
1018 packet->page_buf_cnt, packet->total_data_buflen,
1019 ret);
1020 }
1021
1022 if (netif_tx_queue_stopped(txq) &&
1023 atomic_read(&nvchan->queue_sends) < 1 &&
1024 !net_device->tx_disable) {
1025 netif_tx_wake_queue(txq);
1026 ndev_ctx->eth_stats.wake_queue++;
1027 if (ret == -EAGAIN)
1028 ret = -ENOSPC;
1029 }
1030
1031 return ret;
1032}
1033
1034/* Move packet out of multi send data (msd), and clear msd */
1035static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
1036 struct sk_buff **msd_skb,
1037 struct multi_send_data *msdp)
1038{
1039 *msd_skb = msdp->skb;
1040 *msd_send = msdp->pkt;
1041 msdp->skb = NULL;
1042 msdp->pkt = NULL;
1043 msdp->count = 0;
1044}
1045
1046/* RCU already held by caller */
1047/* The batching/bouncing logic below attempts to optimize
1048 * performance.
1049 *
1050 * For small, non-LSO packets we copy the packet to a send buffer
1051 * which is pre-registered with the Hyper-V side. This enables the
1052 * hypervisor to avoid remapping the aperture to access the packet
1053 * descriptor and data.
1054 *
1055 * If we already started using a buffer and the netdev is transmitting
1056 * a burst of packets, keep on copying into the buffer until it is
1057 * full or we are done collecting a burst. If there is an existing
1058 * buffer with space for the RNDIS descriptor but not the packet, copy
1059 * the RNDIS descriptor to the buffer, keeping the packet in place.
1060 *
1061 * If we do batching and send more than one packet using a single
1062 * NetVSC message, free the SKBs of the packets copied, except for the
1063 * last packet. This is done to streamline the handling of the case
1064 * where the last packet only had the RNDIS descriptor copied to the
1065 * send buffer, with the data pointers included in the NetVSC message.
1066 */
1067int netvsc_send(struct net_device *ndev,
1068 struct hv_netvsc_packet *packet,
1069 struct rndis_message *rndis_msg,
1070 struct hv_page_buffer *pb,
1071 struct sk_buff *skb,
1072 bool xdp_tx)
1073{
1074 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1075 struct netvsc_device *net_device
1076 = rcu_dereference_bh(ndev_ctx->nvdev);
1077 struct hv_device *device = ndev_ctx->device_ctx;
1078 int ret = 0;
1079 struct netvsc_channel *nvchan;
1080 u32 pktlen = packet->total_data_buflen, msd_len = 0;
1081 unsigned int section_index = NETVSC_INVALID_INDEX;
1082 struct multi_send_data *msdp;
1083 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
1084 struct sk_buff *msd_skb = NULL;
1085 bool try_batch, xmit_more;
1086
1087 /* If device is rescinded, return error and packet will get dropped. */
1088 if (unlikely(!net_device || net_device->destroy))
1089 return -ENODEV;
1090
1091 nvchan = &net_device->chan_table[packet->q_idx];
1092 packet->send_buf_index = NETVSC_INVALID_INDEX;
1093 packet->cp_partial = false;
1094
1095 /* Send a control message or XDP packet directly without accessing
1096 * msd (Multi-Send Data) field which may be changed during data packet
1097 * processing.
1098 */
1099 if (!skb || xdp_tx)
1100 return netvsc_send_pkt(device, packet, net_device, pb, skb);
1101
1102 /* batch packets in send buffer if possible */
1103 msdp = &nvchan->msd;
1104 if (msdp->pkt)
1105 msd_len = msdp->pkt->total_data_buflen;
1106
1107 try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
1108 if (try_batch && msd_len + pktlen + net_device->pkt_align <
1109 net_device->send_section_size) {
1110 section_index = msdp->pkt->send_buf_index;
1111
1112 } else if (try_batch && msd_len + packet->rmsg_size <
1113 net_device->send_section_size) {
1114 section_index = msdp->pkt->send_buf_index;
1115 packet->cp_partial = true;
1116
1117 } else if (pktlen + net_device->pkt_align <
1118 net_device->send_section_size) {
1119 section_index = netvsc_get_next_send_section(net_device);
1120 if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
1121 ++ndev_ctx->eth_stats.tx_send_full;
1122 } else {
1123 move_pkt_msd(&msd_send, &msd_skb, msdp);
1124 msd_len = 0;
1125 }
1126 }
1127
1128	/* Keep aggregating only if the stack says more data is coming,
1129	 * this is not a mixed-mode (partial copy) send, and we are not flow blocked
1130	 */
1131 xmit_more = netdev_xmit_more() &&
1132 !packet->cp_partial &&
1133 !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
1134
1135 if (section_index != NETVSC_INVALID_INDEX) {
1136 netvsc_copy_to_send_buf(net_device,
1137 section_index, msd_len,
1138 packet, rndis_msg, pb, xmit_more);
1139
1140 packet->send_buf_index = section_index;
1141
1142 if (packet->cp_partial) {
1143 packet->page_buf_cnt -= packet->rmsg_pgcnt;
1144 packet->total_data_buflen = msd_len + packet->rmsg_size;
1145 } else {
1146 packet->page_buf_cnt = 0;
1147 packet->total_data_buflen += msd_len;
1148 }
1149
1150 if (msdp->pkt) {
1151 packet->total_packets += msdp->pkt->total_packets;
1152 packet->total_bytes += msdp->pkt->total_bytes;
1153 }
1154
1155 if (msdp->skb)
1156 dev_consume_skb_any(msdp->skb);
1157
1158 if (xmit_more) {
1159 msdp->skb = skb;
1160 msdp->pkt = packet;
1161 msdp->count++;
1162 } else {
1163 cur_send = packet;
1164 msdp->skb = NULL;
1165 msdp->pkt = NULL;
1166 msdp->count = 0;
1167 }
1168 } else {
1169 move_pkt_msd(&msd_send, &msd_skb, msdp);
1170 cur_send = packet;
1171 }
1172
1173 if (msd_send) {
1174 int m_ret = netvsc_send_pkt(device, msd_send, net_device,
1175 NULL, msd_skb);
1176
1177 if (m_ret != 0) {
1178 netvsc_free_send_slot(net_device,
1179 msd_send->send_buf_index);
1180 dev_kfree_skb_any(msd_skb);
1181 }
1182 }
1183
1184 if (cur_send)
1185 ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
1186
1187 if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
1188 netvsc_free_send_slot(net_device, section_index);
1189
1190 return ret;
1191}
1192
1193/* Send pending recv completions */
1194static int send_recv_completions(struct net_device *ndev,
1195 struct netvsc_device *nvdev,
1196 struct netvsc_channel *nvchan)
1197{
1198 struct multi_recv_comp *mrc = &nvchan->mrc;
1199 struct recv_comp_msg {
1200 struct nvsp_message_header hdr;
1201 u32 status;
1202 } __packed;
1203 struct recv_comp_msg msg = {
1204 .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
1205 };
1206 int ret;
1207
1208 while (mrc->first != mrc->next) {
1209 const struct recv_comp_data *rcd
1210 = mrc->slots + mrc->first;
1211
1212 msg.status = rcd->status;
1213 ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
1214 rcd->tid, VM_PKT_COMP, 0);
1215 if (unlikely(ret)) {
1216 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1217
1218 ++ndev_ctx->eth_stats.rx_comp_busy;
1219 return ret;
1220 }
1221
1222 if (++mrc->first == nvdev->recv_completion_cnt)
1223 mrc->first = 0;
1224 }
1225
1226 /* receive completion ring has been emptied */
1227 if (unlikely(nvdev->destroy))
1228 wake_up(&nvdev->wait_drain);
1229
1230 return 0;
1231}
1232
1233/* Count how many receive completion slots are filled and how many are free */
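/* For example, with recv_completion_cnt = 8, first = 6 and next = 2 (wrapped),
 * filled = (8 - 6) + 2 = 4 and avail = 8 - 4 - 1 = 3; one slot is always left
 * empty to distinguish a full ring from an empty one.
 */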
1234static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
1235 const struct multi_recv_comp *mrc,
1236 u32 *filled, u32 *avail)
1237{
1238 u32 count = nvdev->recv_completion_cnt;
1239
1240 if (mrc->next >= mrc->first)
1241 *filled = mrc->next - mrc->first;
1242 else
1243 *filled = (count - mrc->first) + mrc->next;
1244
1245 *avail = count - *filled - 1;
1246}
1247
1248/* Add a receive completion to the ring to send to the host. */
1249static void enq_receive_complete(struct net_device *ndev,
1250 struct netvsc_device *nvdev, u16 q_idx,
1251 u64 tid, u32 status)
1252{
1253 struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
1254 struct multi_recv_comp *mrc = &nvchan->mrc;
1255 struct recv_comp_data *rcd;
1256 u32 filled, avail;
1257
1258 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1259
1260 if (unlikely(filled > NAPI_POLL_WEIGHT)) {
1261 send_recv_completions(ndev, nvdev, nvchan);
1262 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1263 }
1264
1265 if (unlikely(!avail)) {
1266 netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1267 q_idx, tid);
1268 return;
1269 }
1270
1271 rcd = mrc->slots + mrc->next;
1272 rcd->tid = tid;
1273 rcd->status = status;
1274
1275 if (++mrc->next == nvdev->recv_completion_cnt)
1276 mrc->next = 0;
1277}
1278
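/* Handle a VM_PKT_DATA_USING_XFER_PAGES packet: validate the transfer-page
 * header, pass each contained RNDIS packet to the RNDIS filter, then queue
 * one receive completion for the host.
 */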
1279static int netvsc_receive(struct net_device *ndev,
1280 struct netvsc_device *net_device,
1281 struct netvsc_channel *nvchan,
1282 const struct vmpacket_descriptor *desc)
1283{
1284 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1285 struct vmbus_channel *channel = nvchan->channel;
1286 const struct vmtransfer_page_packet_header *vmxferpage_packet
1287 = container_of(desc, const struct vmtransfer_page_packet_header, d);
1288 const struct nvsp_message *nvsp = hv_pkt_data(desc);
1289 u32 msglen = hv_pkt_datalen(desc);
1290 u16 q_idx = channel->offermsg.offer.sub_channel_index;
1291 char *recv_buf = net_device->recv_buf;
1292 u32 status = NVSP_STAT_SUCCESS;
1293 int i;
1294 int count = 0;
1295
1296 /* Ensure packet is big enough to read header fields */
1297 if (msglen < sizeof(struct nvsp_message_header)) {
1298 netif_err(net_device_ctx, rx_err, ndev,
1299 "invalid nvsp header, length too small: %u\n",
1300 msglen);
1301 return 0;
1302 }
1303
1304 /* Make sure this is a valid nvsp packet */
1305 if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1306 netif_err(net_device_ctx, rx_err, ndev,
1307 "Unknown nvsp packet type received %u\n",
1308 nvsp->hdr.msg_type);
1309 return 0;
1310 }
1311
1312 /* Validate xfer page pkt header */
1313 if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
1314 netif_err(net_device_ctx, rx_err, ndev,
1315 "Invalid xfer page pkt, offset too small: %u\n",
1316 desc->offset8 << 3);
1317 return 0;
1318 }
1319
1320 if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1321 netif_err(net_device_ctx, rx_err, ndev,
1322 "Invalid xfer page set id - expecting %x got %x\n",
1323 NETVSC_RECEIVE_BUFFER_ID,
1324 vmxferpage_packet->xfer_pageset_id);
1325 return 0;
1326 }
1327
1328 count = vmxferpage_packet->range_cnt;
1329
1330 /* Check count for a valid value */
1331 if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
1332 netif_err(net_device_ctx, rx_err, ndev,
1333 "Range count is not valid: %d\n",
1334 count);
1335 return 0;
1336 }
1337
1338 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1339 for (i = 0; i < count; i++) {
1340 u32 offset = vmxferpage_packet->ranges[i].byte_offset;
1341 u32 buflen = vmxferpage_packet->ranges[i].byte_count;
1342 void *data;
1343 int ret;
1344
1345 if (unlikely(offset > net_device->recv_buf_size ||
1346 buflen > net_device->recv_buf_size - offset)) {
1347 nvchan->rsc.cnt = 0;
1348 status = NVSP_STAT_FAIL;
1349 netif_err(net_device_ctx, rx_err, ndev,
1350 "Packet offset:%u + len:%u too big\n",
1351 offset, buflen);
1352
1353 continue;
1354 }
1355
1356 /* We're going to copy (sections of) the packet into nvchan->recv_buf;
1357 * make sure that nvchan->recv_buf is large enough to hold the packet.
1358 */
1359 if (unlikely(buflen > net_device->recv_section_size)) {
1360 nvchan->rsc.cnt = 0;
1361 status = NVSP_STAT_FAIL;
1362 netif_err(net_device_ctx, rx_err, ndev,
1363 "Packet too big: buflen=%u recv_section_size=%u\n",
1364 buflen, net_device->recv_section_size);
1365
1366 continue;
1367 }
1368
1369 data = recv_buf + offset;
1370
1371 nvchan->rsc.is_last = (i == count - 1);
1372
1373 trace_rndis_recv(ndev, q_idx, data);
1374
1375 /* Pass it to the upper layer */
1376 ret = rndis_filter_receive(ndev, net_device,
1377 nvchan, data, buflen);
1378
1379 if (unlikely(ret != NVSP_STAT_SUCCESS)) {
1380 /* Drop incomplete packet */
1381 nvchan->rsc.cnt = 0;
1382 status = NVSP_STAT_FAIL;
1383 }
1384 }
1385
1386 enq_receive_complete(ndev, net_device, q_idx,
1387 vmxferpage_packet->d.trans_id, status);
1388
1389 return count;
1390}
1391
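/* Cache the send indirection table advertised by the host in tx_table,
 * validating the message length and offset first.
 */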
1392static void netvsc_send_table(struct net_device *ndev,
1393 struct netvsc_device *nvscdev,
1394 const struct nvsp_message *nvmsg,
1395 u32 msglen)
1396{
1397 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1398 u32 count, offset, *tab;
1399 int i;
1400
1401 /* Ensure packet is big enough to read send_table fields */
1402 if (msglen < sizeof(struct nvsp_message_header) +
1403 sizeof(struct nvsp_5_send_indirect_table)) {
1404 netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
1405 return;
1406 }
1407
1408 count = nvmsg->msg.v5_msg.send_table.count;
1409 offset = nvmsg->msg.v5_msg.send_table.offset;
1410
1411 if (count != VRSS_SEND_TAB_SIZE) {
1412 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1413 return;
1414 }
1415
1416 /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
1417 * wrong due to a host bug. So fix the offset here.
1418 */
1419 if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
1420 msglen >= sizeof(struct nvsp_message_header) +
1421 sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
1422 offset = sizeof(struct nvsp_message_header) +
1423 sizeof(union nvsp_6_message_uber);
1424
1425 /* Boundary check for all versions */
1426 if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
1427 netdev_err(ndev, "Received send-table offset too big:%u\n",
1428 offset);
1429 return;
1430 }
1431
1432 tab = (void *)nvmsg + offset;
1433
1434 for (i = 0; i < count; i++)
1435 net_device_ctx->tx_table[i] = tab[i];
1436}
1437
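/* Record the VF association (slot allocation and serial number) announced by
 * the host.
 */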
1438static void netvsc_send_vf(struct net_device *ndev,
1439 const struct nvsp_message *nvmsg,
1440 u32 msglen)
1441{
1442 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1443
1444 /* Ensure packet is big enough to read its fields */
1445 if (msglen < sizeof(struct nvsp_message_header) +
1446 sizeof(struct nvsp_4_send_vf_association)) {
1447 netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
1448 return;
1449 }
1450
1451 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1452 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1453 netdev_info(ndev, "VF slot %u %s\n",
1454 net_device_ctx->vf_serial,
1455 net_device_ctx->vf_alloc ? "added" : "removed");
1456}
1457
1458static void netvsc_receive_inband(struct net_device *ndev,
1459 struct netvsc_device *nvscdev,
1460 const struct vmpacket_descriptor *desc)
1461{
1462 const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1463 u32 msglen = hv_pkt_datalen(desc);
1464
1465 /* Ensure packet is big enough to read header fields */
1466 if (msglen < sizeof(struct nvsp_message_header)) {
1467 netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
1468 return;
1469 }
1470
1471 switch (nvmsg->hdr.msg_type) {
1472 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1473 netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
1474 break;
1475
1476 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1477 if (hv_is_isolation_supported())
1478 netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
1479 else
1480 netvsc_send_vf(ndev, nvmsg, msglen);
1481 break;
1482 }
1483}
1484
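/* Demultiplex one VMBus packet by type: completion, transfer-page data, or
 * in-band control. Returns the number of RNDIS packets processed for data
 * packets, otherwise 0.
 */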
1485static int netvsc_process_raw_pkt(struct hv_device *device,
1486 struct netvsc_channel *nvchan,
1487 struct netvsc_device *net_device,
1488 struct net_device *ndev,
1489 const struct vmpacket_descriptor *desc,
1490 int budget)
1491{
1492 struct vmbus_channel *channel = nvchan->channel;
1493 const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1494
1495 trace_nvsp_recv(ndev, channel, nvmsg);
1496
1497 switch (desc->type) {
1498 case VM_PKT_COMP:
1499 netvsc_send_completion(ndev, net_device, channel, desc, budget);
1500 break;
1501
1502	case VM_PKT_DATA_USING_XFER_PAGES:
1503		return netvsc_receive(ndev, net_device, nvchan, desc);
1505
1506 case VM_PKT_DATA_INBAND:
1507 netvsc_receive_inband(ndev, net_device, desc);
1508 break;
1509
1510 default:
1511 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1512 desc->type, desc->trans_id);
1513 break;
1514 }
1515
1516 return 0;
1517}
1518
1519static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1520{
1521 struct vmbus_channel *primary = channel->primary_channel;
1522
1523 return primary ? primary->device_obj : channel->device_obj;
1524}
1525
1526/* Network processing softirq
1527 * Process data in incoming ring buffer from host
1528 * Stops when ring is empty or budget is met or exceeded.
1529 */
1530int netvsc_poll(struct napi_struct *napi, int budget)
1531{
1532 struct netvsc_channel *nvchan
1533 = container_of(napi, struct netvsc_channel, napi);
1534 struct netvsc_device *net_device = nvchan->net_device;
1535 struct vmbus_channel *channel = nvchan->channel;
1536 struct hv_device *device = netvsc_channel_to_device(channel);
1537 struct net_device *ndev = hv_get_drvdata(device);
1538 int work_done = 0;
1539 int ret;
1540
1541 /* If starting a new interval */
1542 if (!nvchan->desc)
1543 nvchan->desc = hv_pkt_iter_first(channel);
1544
1545 while (nvchan->desc && work_done < budget) {
1546 work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
1547 ndev, nvchan->desc, budget);
1548 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1549 }
1550
1551 /* Send any pending receive completions */
1552 ret = send_recv_completions(ndev, net_device, nvchan);
1553
1554	/* If we did not exhaust the NAPI budget this time
1555	 * and we are not busy polling,
1556	 * then re-enable host interrupts
1557	 * and reschedule if the ring is not empty
1558	 * or sending a receive completion failed.
1559	 */
1560 if (work_done < budget &&
1561 napi_complete_done(napi, work_done) &&
1562 (ret || hv_end_read(&channel->inbound)) &&
1563 napi_schedule_prep(napi)) {
1564 hv_begin_read(&channel->inbound);
1565 __napi_schedule(napi);
1566 }
1567
1568	/* Driver may overshoot the budget since a descriptor can carry multiple packets */
1569 return min(work_done, budget);
1570}
1571
1572/* Call back when data is available in host ring buffer.
1573 * Processing is deferred until network softirq (NAPI)
1574 */
1575void netvsc_channel_cb(void *context)
1576{
1577 struct netvsc_channel *nvchan = context;
1578 struct vmbus_channel *channel = nvchan->channel;
1579 struct hv_ring_buffer_info *rbi = &channel->inbound;
1580
1581 /* preload first vmpacket descriptor */
1582 prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
1583
1584 if (napi_schedule_prep(&nvchan->napi)) {
1585 /* disable interrupts from host */
1586 hv_begin_read(rbi);
1587
1588 __napi_schedule_irqoff(&nvchan->napi);
1589 }
1590}
1591