1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2009, Microsoft Corporation.
4 *
5 * Authors:
6 * Haiyang Zhang <haiyangz@microsoft.com>
7 * Hank Janssen <hjanssen@microsoft.com>
8 */
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/wait.h>
14#include <linux/mm.h>
15#include <linux/delay.h>
16#include <linux/io.h>
17#include <linux/slab.h>
18#include <linux/netdevice.h>
19#include <linux/if_ether.h>
20#include <linux/vmalloc.h>
21#include <linux/rtnetlink.h>
22#include <linux/prefetch.h>
23#include <linux/filter.h>
24
25#include <asm/sync_bitops.h>
26#include <asm/mshyperv.h>
27
28#include "hyperv_net.h"
29#include "netvsc_trace.h"
30
31/*
32 * Switch the data path from the synthetic interface to the VF
33 * interface.
34 */
35int netvsc_switch_datapath(struct net_device *ndev, bool vf)
36{
37 struct net_device_context *net_device_ctx = netdev_priv(ndev);
38 struct hv_device *dev = net_device_ctx->device_ctx;
39 struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
40 struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
41 int ret, retry = 0;
42
43 /* Block sending traffic to the VF if it is about to be removed */
44 if (!vf)
45 net_device_ctx->data_path_is_vf = vf;
46
47 memset(init_pkt, 0, sizeof(struct nvsp_message));
48 init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
49 if (vf)
50 init_pkt->msg.v4_msg.active_dp.active_datapath =
51 NVSP_DATAPATH_VF;
52 else
53 init_pkt->msg.v4_msg.active_dp.active_datapath =
54 NVSP_DATAPATH_SYNTHETIC;
55
56again:
57 trace_nvsp_send(ndev, init_pkt);
58
59 ret = vmbus_sendpacket(dev->channel, init_pkt,
60 sizeof(struct nvsp_message),
61 (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
62 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
63
64 /* If we failed to switch to/from the VF, leave data_path_is_vf false
65 * so that the synthetic path is used to send data.
66 */
67 if (ret) {
68 if (ret != -EAGAIN) {
69 netdev_err(ndev,
70 "Unable to send sw datapath msg, err: %d\n",
71 ret);
72 return ret;
73 }
74
75 if (retry++ < RETRY_MAX) {
76 usleep_range(RETRY_US_LO, RETRY_US_HI);
77 goto again;
78 } else {
79 netdev_err(
80 ndev,
81 "Retry failed to send sw datapath msg, err: %d\n",
82 ret);
83 return ret;
84 }
85 }
86
87 wait_for_completion(&nv_dev->channel_init_wait);
88 net_device_ctx->data_path_is_vf = vf;
89
90 return 0;
91}
92
93/* Worker to set up subchannels during initial device setup.
94 * The initial hotplug event occurs in softirq context
95 * and cannot wait for the channels to be set up.
96 */
97static void netvsc_subchan_work(struct work_struct *w)
98{
99 struct netvsc_device *nvdev =
100 container_of(w, struct netvsc_device, subchan_work);
101 struct rndis_device *rdev;
102 int i, ret;
103
104 /* Avoid deadlock with device removal already under RTNL */
105 if (!rtnl_trylock()) {
106 schedule_work(w);
107 return;
108 }
109
110 rdev = nvdev->extension;
111 if (rdev) {
112 ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
113 if (ret == 0) {
114 netif_device_attach(rdev->ndev);
115 } else {
116 /* fallback to only primary channel */
117 for (i = 1; i < nvdev->num_chn; i++)
118 netif_napi_del(&nvdev->chan_table[i].napi);
119
120 nvdev->max_chn = 1;
121 nvdev->num_chn = 1;
122 }
123 }
124
125 rtnl_unlock();
126}
127
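/* Allocate and initialize the per-device netvsc state. Transmit starts
 * out disabled (tx_disable = true) until the device is fully set up.
 */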
128static struct netvsc_device *alloc_net_device(void)
129{
130 struct netvsc_device *net_device;
131
132 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
133 if (!net_device)
134 return NULL;
135
136 init_waitqueue_head(&net_device->wait_drain);
137 net_device->destroy = false;
138 net_device->tx_disable = true;
139
140 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
141 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
142
143 init_completion(&net_device->channel_init_wait);
144 init_waitqueue_head(&net_device->subchan_open);
145 INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
146
147 return net_device;
148}
149
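/* RCU callback that frees the netvsc device along with its buffers and
 * per-channel state once no readers remain.
 */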
150static void free_netvsc_device(struct rcu_head *head)
151{
152 struct netvsc_device *nvdev
153 = container_of(head, struct netvsc_device, rcu);
154 int i;
155
156 kfree(nvdev->extension);
157 vfree(nvdev->recv_buf);
158 vfree(nvdev->send_buf);
159 bitmap_free(nvdev->send_section_map);
160
161 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
162 xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
163 kfree(nvdev->chan_table[i].recv_buf);
164 vfree(nvdev->chan_table[i].mrc.slots);
165 }
166
167 kfree(nvdev);
168}
169
170static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
171{
172 call_rcu(&nvdev->rcu, free_netvsc_device);
173}
174
175static void netvsc_revoke_recv_buf(struct hv_device *device,
176 struct netvsc_device *net_device,
177 struct net_device *ndev)
178{
179 struct nvsp_message *revoke_packet;
180 int ret;
181
182 /*
183 * If we got a section count, it means we received a
184 * SendReceiveBufferComplete msg (i.e. we sent a
185 * NvspMessage1TypeSendReceiveBuffer msg), so we need to
186 * send a revoke msg here.
187 */
188 if (net_device->recv_section_cnt) {
189 /* Send the revoke receive buffer */
190 revoke_packet = &net_device->revoke_packet;
191 memset(revoke_packet, 0, sizeof(struct nvsp_message));
192
193 revoke_packet->hdr.msg_type =
194 NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
195 revoke_packet->msg.v1_msg.
196 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
197
198 trace_nvsp_send(ndev, revoke_packet);
199
200 ret = vmbus_sendpacket(device->channel,
201 revoke_packet,
202 sizeof(struct nvsp_message),
203 VMBUS_RQST_ID_NO_RESPONSE,
204 VM_PKT_DATA_INBAND, 0);
205 /* If the failure is because the channel has been rescinded,
206 * ignore it since we cannot send on a rescinded channel.
207 * This allows us to clean up properly even when the
208 * channel is rescinded.
209 */
210 if (device->channel->rescind)
211 ret = 0;
212 /*
213 * If we failed here, we might as well return and accept
214 * a leak rather than continue and risk a bugcheck.
215 */
216 if (ret != 0) {
217 netdev_err(ndev, "unable to send "
218 "revoke receive buffer to netvsp\n");
219 return;
220 }
221 net_device->recv_section_cnt = 0;
222 }
223}
224
225static void netvsc_revoke_send_buf(struct hv_device *device,
226 struct netvsc_device *net_device,
227 struct net_device *ndev)
228{
229 struct nvsp_message *revoke_packet;
230 int ret;
231
232 /* Deal with the send buffer we may have set up.
233 * If we got a send section size, it means we received a
234 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
235 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg), so we need to send a
236 * revoke msg here.
237 */
238 if (net_device->send_section_cnt) {
239 /* Send the revoke send buffer */
240 revoke_packet = &net_device->revoke_packet;
241 memset(revoke_packet, 0, sizeof(struct nvsp_message));
242
243 revoke_packet->hdr.msg_type =
244 NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
245 revoke_packet->msg.v1_msg.revoke_send_buf.id =
246 NETVSC_SEND_BUFFER_ID;
247
248 trace_nvsp_send(ndev, revoke_packet);
249
250 ret = vmbus_sendpacket(device->channel,
251 revoke_packet,
252 sizeof(struct nvsp_message),
253 VMBUS_RQST_ID_NO_RESPONSE,
254 VM_PKT_DATA_INBAND, 0);
255
256 /* If the failure is because the channel has been rescinded,
257 * ignore it since we cannot send on a rescinded channel.
258 * This allows us to clean up properly even when the
259 * channel is rescinded.
260 */
261 if (device->channel->rescind)
262 ret = 0;
263
264 /* If we failed here, we might as well return and accept a leak
265 * rather than continue and risk a bugcheck.
266 */
267 if (ret != 0) {
268 netdev_err(ndev, "unable to send "
269 "revoke send buffer to netvsp\n");
270 return;
271 }
272 net_device->send_section_cnt = 0;
273 }
274}
275
276static void netvsc_teardown_recv_gpadl(struct hv_device *device,
277 struct netvsc_device *net_device,
278 struct net_device *ndev)
279{
280 int ret;
281
282 if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
283 ret = vmbus_teardown_gpadl(device->channel,
284 &net_device->recv_buf_gpadl_handle);
285
286 /* If we failed here, we might as well return and have a leak
287 * rather than continue and risk a bugcheck.
288 */
289 if (ret != 0) {
290 netdev_err(ndev,
291 "unable to teardown receive buffer's gpadl\n");
292 return;
293 }
294 }
295}
296
297static void netvsc_teardown_send_gpadl(struct hv_device *device,
298 struct netvsc_device *net_device,
299 struct net_device *ndev)
300{
301 int ret;
302
303 if (net_device->send_buf_gpadl_handle.gpadl_handle) {
304 ret = vmbus_teardown_gpadl(device->channel,
305 &net_device->send_buf_gpadl_handle);
306
307 /* If we failed here, we might as well return and have a leak
308 * rather than continue and risk a bugcheck.
309 */
310 if (ret != 0) {
311 netdev_err(ndev,
312 "unable to teardown send buffer's gpadl\n");
313 return;
314 }
315 }
316}
317
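/* Allocate the receive completion ring for a channel, preferring memory
 * on the NUMA node of the channel's target CPU and falling back to any node.
 */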
318int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
319{
320 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
321 int node = cpu_to_node(nvchan->channel->target_cpu);
322 size_t size;
323
324 size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
325 nvchan->mrc.slots = vzalloc_node(size, node);
326 if (!nvchan->mrc.slots)
327 nvchan->mrc.slots = vzalloc(size);
328
329 return nvchan->mrc.slots ? 0 : -ENOMEM;
330}
331
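/* Allocate the receive and send buffers, establish their GPADL handles
 * with the host, and exchange the NVSP messages that describe them.
 */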
332static int netvsc_init_buf(struct hv_device *device,
333 struct netvsc_device *net_device,
334 const struct netvsc_device_info *device_info)
335{
336 struct nvsp_1_message_send_receive_buffer_complete *resp;
337 struct net_device *ndev = hv_get_drvdata(device);
338 struct nvsp_message *init_packet;
339 unsigned int buf_size;
340 int i, ret = 0;
341
342 /* Get receive buffer area. */
343 buf_size = device_info->recv_sections * device_info->recv_section_size;
344 buf_size = roundup(buf_size, PAGE_SIZE);
345
346 /* Legacy hosts only allow smaller receive buffer */
347 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
348 buf_size = min_t(unsigned int, buf_size,
349 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
350
351 net_device->recv_buf = vzalloc(buf_size);
352 if (!net_device->recv_buf) {
353 netdev_err(ndev,
354 "unable to allocate receive buffer of size %u\n",
355 buf_size);
356 ret = -ENOMEM;
357 goto cleanup;
358 }
359
360 net_device->recv_buf_size = buf_size;
361
362 /*
363 * Establish the gpadl handle for this buffer on this
364 * channel. Note: This call uses the vmbus connection rather
365 * than the channel to establish the gpadl handle.
366 */
367 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
368 buf_size,
369 &net_device->recv_buf_gpadl_handle);
370 if (ret != 0) {
371 netdev_err(ndev,
372 "unable to establish receive buffer's gpadl\n");
373 goto cleanup;
374 }
375
376 /* Notify the NetVsp of the gpadl handle */
377 init_packet = &net_device->channel_init_pkt;
378 memset(init_packet, 0, sizeof(struct nvsp_message));
379 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
380 init_packet->msg.v1_msg.send_recv_buf.
381 gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
382 init_packet->msg.v1_msg.
383 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
384
385 trace_nvsp_send(ndev, init_packet);
386
387 /* Send the gpadl notification request */
388 ret = vmbus_sendpacket(device->channel, init_packet,
389 sizeof(struct nvsp_message),
390 (unsigned long)init_packet,
391 VM_PKT_DATA_INBAND,
392 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
393 if (ret != 0) {
394 netdev_err(ndev,
395 "unable to send receive buffer's gpadl to netvsp\n");
396 goto cleanup;
397 }
398
399 wait_for_completion(&net_device->channel_init_wait);
400
401 /* Check the response */
402 resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
403 if (resp->status != NVSP_STAT_SUCCESS) {
404 netdev_err(ndev,
405 "Unable to complete receive buffer initialization with NetVsp - status %d\n",
406 resp->status);
407 ret = -EINVAL;
408 goto cleanup;
409 }
410
411 /* Parse the response */
412 netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
413 resp->num_sections, resp->sections[0].sub_alloc_size,
414 resp->sections[0].num_sub_allocs);
415
416 /* There should only be one section for the entire receive buffer */
417 if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
418 ret = -EINVAL;
419 goto cleanup;
420 }
421
422 net_device->recv_section_size = resp->sections[0].sub_alloc_size;
423 net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
424
425 /* Ensure buffer will not overflow */
426 if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
427 (u64)net_device->recv_section_cnt > (u64)buf_size) {
428 netdev_err(ndev, "invalid recv_section_size %u\n",
429 net_device->recv_section_size);
430 ret = -EINVAL;
431 goto cleanup;
432 }
433
434 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
435 struct netvsc_channel *nvchan = &net_device->chan_table[i];
436
437 nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
438 if (nvchan->recv_buf == NULL) {
439 ret = -ENOMEM;
440 goto cleanup;
441 }
442 }
443
444 /* Setup receive completion ring.
445 * Add 1 to the recv_section_cnt because at least one entry in a
446 * ring buffer has to be empty.
447 */
448 net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
449 ret = netvsc_alloc_recv_comp_ring(net_device, 0);
450 if (ret)
451 goto cleanup;
452
453 /* Now setup the send buffer. */
454 buf_size = device_info->send_sections * device_info->send_section_size;
455 buf_size = round_up(buf_size, PAGE_SIZE);
456
457 net_device->send_buf = vzalloc(buf_size);
458 if (!net_device->send_buf) {
459 netdev_err(ndev, "unable to allocate send buffer of size %u\n",
460 buf_size);
461 ret = -ENOMEM;
462 goto cleanup;
463 }
464 net_device->send_buf_size = buf_size;
465
466 /* Establish the gpadl handle for this buffer on this
467 * channel. Note: This call uses the vmbus connection rather
468 * than the channel to establish the gpadl handle.
469 */
470 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
471 buf_size,
472 &net_device->send_buf_gpadl_handle);
473 if (ret != 0) {
474 netdev_err(ndev,
475 "unable to establish send buffer's gpadl\n");
476 goto cleanup;
477 }
478
479 /* Notify the NetVsp of the gpadl handle */
480 init_packet = &net_device->channel_init_pkt;
481 memset(init_packet, 0, sizeof(struct nvsp_message));
482 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
483 init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
484 net_device->send_buf_gpadl_handle.gpadl_handle;
485 init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
486
487 trace_nvsp_send(ndev, init_packet);
488
489 /* Send the gpadl notification request */
490 ret = vmbus_sendpacket(device->channel, init_packet,
491 sizeof(struct nvsp_message),
492 (unsigned long)init_packet,
493 VM_PKT_DATA_INBAND,
494 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
495 if (ret != 0) {
496 netdev_err(ndev,
497 "unable to send send buffer's gpadl to netvsp\n");
498 goto cleanup;
499 }
500
501 wait_for_completion(&net_device->channel_init_wait);
502
503 /* Check the response */
504 if (init_packet->msg.v1_msg.
505 send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
506 netdev_err(ndev, "Unable to complete send buffer "
507 "initialization with NetVsp - status %d\n",
508 init_packet->msg.v1_msg.
509 send_send_buf_complete.status);
510 ret = -EINVAL;
511 goto cleanup;
512 }
513
514 /* Parse the response */
515 net_device->send_section_size = init_packet->msg.
516 v1_msg.send_send_buf_complete.section_size;
517 if (net_device->send_section_size < NETVSC_MTU_MIN) {
518 netdev_err(ndev, "invalid send_section_size %u\n",
519 net_device->send_section_size);
520 ret = -EINVAL;
521 goto cleanup;
522 }
523
524 /* Section count is simply the size divided by the section size. */
525 net_device->send_section_cnt = buf_size / net_device->send_section_size;
526
527 netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
528 net_device->send_section_size, net_device->send_section_cnt);
529
530 /* Setup state for managing the send buffer. */
531 net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
532 GFP_KERNEL);
533 if (!net_device->send_section_map) {
534 ret = -ENOMEM;
535 goto cleanup;
536 }
537
538 goto exit;
539
540cleanup:
541 netvsc_revoke_recv_buf(device, net_device, ndev);
542 netvsc_revoke_send_buf(device, net_device, ndev);
543 netvsc_teardown_recv_gpadl(device, net_device, ndev);
544 netvsc_teardown_send_gpadl(device, net_device, ndev);
545
546exit:
547 return ret;
548}
549
550/* Negotiate NVSP protocol version */
551static int negotiate_nvsp_ver(struct hv_device *device,
552 struct netvsc_device *net_device,
553 struct nvsp_message *init_packet,
554 u32 nvsp_ver)
555{
556 struct net_device *ndev = hv_get_drvdata(device);
557 int ret;
558
559 memset(init_packet, 0, sizeof(struct nvsp_message));
560 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
561 init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
562 init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
563 trace_nvsp_send(ndev, init_packet);
564
565 /* Send the init request */
566 ret = vmbus_sendpacket(device->channel, init_packet,
567 sizeof(struct nvsp_message),
568 (unsigned long)init_packet,
569 VM_PKT_DATA_INBAND,
570 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
571
572 if (ret != 0)
573 return ret;
574
575 wait_for_completion(&net_device->channel_init_wait);
576
577 if (init_packet->msg.init_msg.init_complete.status !=
578 NVSP_STAT_SUCCESS)
579 return -EINVAL;
580
581 if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
582 return 0;
583
584 /* NVSPv2 or later: Send NDIS config */
585 memset(init_packet, 0, sizeof(struct nvsp_message));
586 init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
587 init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
588 init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
589
590 if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
591 if (hv_is_isolation_supported())
592 netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
593 else
594 init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
595
596 /* Teaming bit is needed to receive link speed updates */
597 init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
598 }
599
600 if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
601 init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
602
603 trace_nvsp_send(ndev, init_packet);
604
605 ret = vmbus_sendpacket(device->channel, init_packet,
606 sizeof(struct nvsp_message),
607 VMBUS_RQST_ID_NO_RESPONSE,
608 VM_PKT_DATA_INBAND, 0);
609
610 return ret;
611}
612
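/* Connect to the NetVSP on the host: negotiate the highest supported NVSP
 * version, report the NDIS version, and set up the send/receive buffers.
 */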
613static int netvsc_connect_vsp(struct hv_device *device,
614 struct netvsc_device *net_device,
615 const struct netvsc_device_info *device_info)
616{
617 struct net_device *ndev = hv_get_drvdata(device);
618 static const u32 ver_list[] = {
619 NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
620 NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
621 NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
622 };
623 struct nvsp_message *init_packet;
624 int ndis_version, i, ret;
625
626 init_packet = &net_device->channel_init_pkt;
627
628 /* Negotiate the latest NVSP protocol supported */
629 for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
630 if (negotiate_nvsp_ver(device, net_device, init_packet,
631 ver_list[i]) == 0) {
632 net_device->nvsp_version = ver_list[i];
633 break;
634 }
635
636 if (i < 0) {
637 ret = -EPROTO;
638 goto cleanup;
639 }
640
641 if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
642 netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
643 net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
644 ret = -EPROTO;
645 goto cleanup;
646 }
647
648 pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
649
650 /* Send the ndis version */
651 memset(init_packet, 0, sizeof(struct nvsp_message));
652
653 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
654 ndis_version = 0x00060001;
655 else
656 ndis_version = 0x0006001e;
657
658 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
659 init_packet->msg.v1_msg.
660 send_ndis_ver.ndis_major_ver =
661 (ndis_version & 0xFFFF0000) >> 16;
662 init_packet->msg.v1_msg.
663 send_ndis_ver.ndis_minor_ver =
664 ndis_version & 0xFFFF;
665
666 trace_nvsp_send(ndev, init_packet);
667
668 /* Send the init request */
669 ret = vmbus_sendpacket(device->channel, init_packet,
670 sizeof(struct nvsp_message),
671 VMBUS_RQST_ID_NO_RESPONSE,
672 VM_PKT_DATA_INBAND, 0);
673 if (ret != 0)
674 goto cleanup;
675
676
677 ret = netvsc_init_buf(device, net_device, device_info);
678
679cleanup:
680 return ret;
681}
682
683/*
684 * netvsc_device_remove - Callback when the root bus device is removed
685 */
686void netvsc_device_remove(struct hv_device *device)
687{
688 struct net_device *ndev = hv_get_drvdata(device);
689 struct net_device_context *net_device_ctx = netdev_priv(ndev);
690 struct netvsc_device *net_device
691 = rtnl_dereference(net_device_ctx->nvdev);
692 int i;
693
694 /*
695 * Revoke receive buffer. If host is pre-Win2016 then tear down
696 * receive buffer GPADL. Do the same for send buffer.
697 */
698 netvsc_revoke_recv_buf(device, net_device, ndev);
699 if (vmbus_proto_version < VERSION_WIN10)
700 netvsc_teardown_recv_gpadl(device, net_device, ndev);
701
702 netvsc_revoke_send_buf(device, net_device, ndev);
703 if (vmbus_proto_version < VERSION_WIN10)
704 netvsc_teardown_send_gpadl(device, net_device, ndev);
705
706 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
707
708 /* Disable NAPI and disassociate its context from the device. */
709 for (i = 0; i < net_device->num_chn; i++) {
710 /* See also vmbus_reset_channel_cb(). */
711 /* only disable enabled NAPI channel */
712 if (i < ndev->real_num_rx_queues)
713 napi_disable(&net_device->chan_table[i].napi);
714
715 netif_napi_del(&net_device->chan_table[i].napi);
716 }
717
718 /*
719 * At this point, no one should be accessing net_device
720 * except in here
721 */
722 netdev_dbg(ndev, "net device safe to remove\n");
723
724 /* Now, we can close the channel safely */
725 vmbus_close(device->channel);
726
727 /*
728 * If host is Win2016 or higher then we do the GPADL tear down
729 * here after VMBus is closed.
730 */
731 if (vmbus_proto_version >= VERSION_WIN10) {
732 netvsc_teardown_recv_gpadl(device, net_device, ndev);
733 netvsc_teardown_send_gpadl(device, net_device, ndev);
734 }
735
736 /* Release all resources */
737 free_netvsc_device_rcu(net_device);
738}
739
740#define RING_AVAIL_PERCENT_HIWATER 20
741#define RING_AVAIL_PERCENT_LOWATER 10
742
743static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
744 u32 index)
745{
746 sync_change_bit(index, net_device->send_section_map);
747}
748
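/* Handle the host's completion of a previously sent RNDIS data packet:
 * release the send buffer section, update TX statistics, free the skb,
 * and wake the TX queue if ring space has recovered.
 */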
749static void netvsc_send_tx_complete(struct net_device *ndev,
750 struct netvsc_device *net_device,
751 struct vmbus_channel *channel,
752 const struct vmpacket_descriptor *desc,
753 int budget)
754{
755 struct net_device_context *ndev_ctx = netdev_priv(ndev);
756 struct sk_buff *skb;
757 u16 q_idx = 0;
758 int queue_sends;
759 u64 cmd_rqst;
760
761 cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
762 if (cmd_rqst == VMBUS_RQST_ERROR) {
763 netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
764 return;
765 }
766
767 skb = (struct sk_buff *)(unsigned long)cmd_rqst;
768
769 /* Notify the layer above us */
770 if (likely(skb)) {
771 struct hv_netvsc_packet *packet
772 = (struct hv_netvsc_packet *)skb->cb;
773 u32 send_index = packet->send_buf_index;
774 struct netvsc_stats_tx *tx_stats;
775
776 if (send_index != NETVSC_INVALID_INDEX)
777 netvsc_free_send_slot(net_device, send_index);
778 q_idx = packet->q_idx;
779
780 tx_stats = &net_device->chan_table[q_idx].tx_stats;
781
782 u64_stats_update_begin(&tx_stats->syncp);
783 tx_stats->packets += packet->total_packets;
784 tx_stats->bytes += packet->total_bytes;
785 u64_stats_update_end(&tx_stats->syncp);
786
787 netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
788 napi_consume_skb(skb, budget);
789 }
790
791 queue_sends =
792 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
793
794 if (unlikely(net_device->destroy)) {
795 if (queue_sends == 0)
796 wake_up(&net_device->wait_drain);
797 } else {
798 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
799
800 if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
801 (hv_get_avail_to_write_percent(&channel->outbound) >
802 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
803 netif_tx_wake_queue(txq);
804 ndev_ctx->eth_stats.wake_queue++;
805 }
806 }
807}
808
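/* Process a VM_PKT_COMP packet from the host. Completions without a data
 * payload complete a pending SWITCH_DATA_PATH request; RNDIS packet
 * completions are handed to netvsc_send_tx_complete(); other NVSP
 * completions are copied back and complete channel_init_wait.
 */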
809static void netvsc_send_completion(struct net_device *ndev,
810 struct netvsc_device *net_device,
811 struct vmbus_channel *incoming_channel,
812 const struct vmpacket_descriptor *desc,
813 int budget)
814{
815 const struct nvsp_message *nvsp_packet;
816 u32 msglen = hv_pkt_datalen(desc);
817 struct nvsp_message *pkt_rqst;
818 u64 cmd_rqst;
819 u32 status;
820
821 /* First check if this is a VMBUS completion without data payload */
822 if (!msglen) {
823 cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
824 desc->trans_id);
825 if (cmd_rqst == VMBUS_RQST_ERROR) {
826 netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
827 return;
828 }
829
830 pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
831 switch (pkt_rqst->hdr.msg_type) {
832 case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
833 complete(&net_device->channel_init_wait);
834 break;
835
836 default:
837 netdev_err(ndev, "Unexpected VMBUS completion!!\n");
838 }
839 return;
840 }
841
842 /* Ensure packet is big enough to read header fields */
843 if (msglen < sizeof(struct nvsp_message_header)) {
844 netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
845 return;
846 }
847
848 nvsp_packet = hv_pkt_data(desc);
849 switch (nvsp_packet->hdr.msg_type) {
850 case NVSP_MSG_TYPE_INIT_COMPLETE:
851 if (msglen < sizeof(struct nvsp_message_header) +
852 sizeof(struct nvsp_message_init_complete)) {
853 netdev_err(ndev, "nvsp_msg length too small: %u\n",
854 msglen);
855 return;
856 }
857 break;
858
859 case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
860 if (msglen < sizeof(struct nvsp_message_header) +
861 sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
862 netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
863 msglen);
864 return;
865 }
866 break;
867
868 case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
869 if (msglen < sizeof(struct nvsp_message_header) +
870 sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
871 netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
872 msglen);
873 return;
874 }
875 break;
876
877 case NVSP_MSG5_TYPE_SUBCHANNEL:
878 if (msglen < sizeof(struct nvsp_message_header) +
879 sizeof(struct nvsp_5_subchannel_complete)) {
880 netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
881 msglen);
882 return;
883 }
884 break;
885
886 case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
887 if (msglen < sizeof(struct nvsp_message_header) +
888 sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
889 if (net_ratelimit())
890 netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
891 msglen);
892 return;
893 }
894
895 /* If status indicates an error, output a message so we know
896 * there's a problem. But process the completion anyway so the
897 * resources are released.
898 */
899 status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
900 if (status != NVSP_STAT_SUCCESS && net_ratelimit())
901 netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
902 status);
903
904 netvsc_send_tx_complete(ndev, net_device, incoming_channel,
905 desc, budget);
906 return;
907
908 default:
909 netdev_err(ndev,
910 "Unknown send completion type %d received!!\n",
911 nvsp_packet->hdr.msg_type);
912 return;
913 }
914
915 /* Copy the response back */
916 memcpy(&net_device->channel_init_pkt, nvsp_packet,
917 sizeof(struct nvsp_message));
918 complete(&net_device->channel_init_wait);
919}
920
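/* Find and atomically claim a free slot in the send section bitmap.
 * Returns NETVSC_INVALID_INDEX when all sections are in use.
 */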
921static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
922{
923 unsigned long *map_addr = net_device->send_section_map;
924 unsigned int i;
925
926 for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
927 if (sync_test_and_set_bit(i, map_addr) == 0)
928 return i;
929 }
930
931 return NETVSC_INVALID_INDEX;
932}
933
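/* Copy the packet described by the page buffer list into the given send
 * buffer section, appending alignment padding when more packets will be
 * batched after it (xmit_more).
 */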
934static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
935 unsigned int section_index,
936 u32 pend_size,
937 struct hv_netvsc_packet *packet,
938 struct rndis_message *rndis_msg,
939 struct hv_page_buffer *pb,
940 bool xmit_more)
941{
942 char *start = net_device->send_buf;
943 char *dest = start + (section_index * net_device->send_section_size)
944 + pend_size;
945 int i;
946 u32 padding = 0;
947 u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
948 packet->page_buf_cnt;
949 u32 remain;
950
951 /* Add padding */
952 remain = packet->total_data_buflen & (net_device->pkt_align - 1);
953 if (xmit_more && remain) {
954 padding = net_device->pkt_align - remain;
955 rndis_msg->msg_len += padding;
956 packet->total_data_buflen += padding;
957 }
958
959 for (i = 0; i < page_count; i++) {
960 char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
961 u32 offset = pb[i].offset;
962 u32 len = pb[i].len;
963
964 memcpy(dest, (src + offset), len);
965 dest += len;
966 }
967
968 if (padding)
969 memset(dest, 0, padding);
970}
971
972void netvsc_dma_unmap(struct hv_device *hv_dev,
973 struct hv_netvsc_packet *packet)
974{
975 int i;
976
977 if (!hv_is_isolation_supported())
978 return;
979
980 if (!packet->dma_range)
981 return;
982
983 for (i = 0; i < packet->page_buf_cnt; i++)
984 dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
985 packet->dma_range[i].mapping_size,
986 DMA_TO_DEVICE);
987
988 kfree(packet->dma_range);
989}
990
991/* netvsc_dma_map - Map swiotlb bounce buffer with data page of
992 * packet sent by vmbus_sendpacket_pagebuffer() in the Isolation
993 * VM.
994 *
995 * In an isolation VM, the netvsc send buffer has been marked visible to
996 * the host, so data copied into the send buffer does not need to go
997 * through the bounce buffer. The data pages handled by
998 * vmbus_sendpacket_pagebuffer() may not be copied to the send buffer,
999 * so those pages need to be mapped with the swiotlb bounce buffer;
1000 * netvsc_dma_map() does that. The pfns in the struct hv_page_buffer
1001 * need to be converted to the bounce buffer's pfns. The loop here is
1002 * necessary because the entries in the page buffer array are not
1003 * necessarily full pages of data. Each entry in the array has a
1004 * separate offset and len that may be non-zero, even for entries in
1005 * the middle of the array. And the entries are not physically
1006 * contiguous. So each entry must be individually mapped rather than
1007 * as a contiguous unit, which is why dma_map_sg() is not used here.
1008 */
1009static int netvsc_dma_map(struct hv_device *hv_dev,
1010 struct hv_netvsc_packet *packet,
1011 struct hv_page_buffer *pb)
1012{
1013 u32 page_count = packet->page_buf_cnt;
1014 dma_addr_t dma;
1015 int i;
1016
1017 if (!hv_is_isolation_supported())
1018 return 0;
1019
1020 packet->dma_range = kcalloc(page_count,
1021 sizeof(*packet->dma_range),
1022 GFP_ATOMIC);
1023 if (!packet->dma_range)
1024 return -ENOMEM;
1025
1026 for (i = 0; i < page_count; i++) {
1027 char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
1028 + pb[i].offset);
1029 u32 len = pb[i].len;
1030
1031 dma = dma_map_single(&hv_dev->device, src, len,
1032 DMA_TO_DEVICE);
1033 if (dma_mapping_error(&hv_dev->device, dma)) {
1034 kfree(packet->dma_range);
1035 return -ENOMEM;
1036 }
1037
1038 /* pb[].offset and pb[].len are not changed during dma mapping
1039 * and so are not reassigned.
1040 */
1041 packet->dma_range[i].dma = dma;
1042 packet->dma_range[i].mapping_size = len;
1043 pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
1044 }
1045
1046 return 0;
1047}
1048
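/* Send a single NVSP_MSG1_TYPE_SEND_RNDIS_PKT message to the host, either
 * referencing a send buffer section or attaching the data as page buffers,
 * and apply TX queue flow control based on the remaining ring space.
 */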
1049static inline int netvsc_send_pkt(
1050 struct hv_device *device,
1051 struct hv_netvsc_packet *packet,
1052 struct netvsc_device *net_device,
1053 struct hv_page_buffer *pb,
1054 struct sk_buff *skb)
1055{
1056 struct nvsp_message nvmsg;
1057 struct nvsp_1_message_send_rndis_packet *rpkt =
1058 &nvmsg.msg.v1_msg.send_rndis_pkt;
1059 struct netvsc_channel * const nvchan =
1060 &net_device->chan_table[packet->q_idx];
1061 struct vmbus_channel *out_channel = nvchan->channel;
1062 struct net_device *ndev = hv_get_drvdata(device);
1063 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1064 struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
1065 u64 req_id;
1066 int ret;
1067 u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
1068
1069 memset(&nvmsg, 0, sizeof(struct nvsp_message));
1070 nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
1071 if (skb)
1072 rpkt->channel_type = 0; /* 0 is RMC_DATA */
1073 else
1074 rpkt->channel_type = 1; /* 1 is RMC_CONTROL */
1075
1076 rpkt->send_buf_section_index = packet->send_buf_index;
1077 if (packet->send_buf_index == NETVSC_INVALID_INDEX)
1078 rpkt->send_buf_section_size = 0;
1079 else
1080 rpkt->send_buf_section_size = packet->total_data_buflen;
1081
1082 req_id = (ulong)skb;
1083
1084 if (out_channel->rescind)
1085 return -ENODEV;
1086
1087 trace_nvsp_send_pkt(ndev, out_channel, rpkt);
1088
1089 packet->dma_range = NULL;
1090 if (packet->page_buf_cnt) {
1091 if (packet->cp_partial)
1092 pb += packet->rmsg_pgcnt;
1093
1094 ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
1095 if (ret) {
1096 ret = -EAGAIN;
1097 goto exit;
1098 }
1099
1100 ret = vmbus_sendpacket_pagebuffer(out_channel,
1101 pb, packet->page_buf_cnt,
1102 &nvmsg, sizeof(nvmsg),
1103 req_id);
1104
1105 if (ret)
1106 netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
1107 } else {
1108 ret = vmbus_sendpacket(out_channel,
1109 &nvmsg, sizeof(nvmsg),
1110 req_id, VM_PKT_DATA_INBAND,
1111 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1112 }
1113
1114exit:
1115 if (ret == 0) {
1116 atomic_inc_return(&nvchan->queue_sends);
1117
1118 if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
1119 netif_tx_stop_queue(txq);
1120 ndev_ctx->eth_stats.stop_queue++;
1121 }
1122 } else if (ret == -EAGAIN) {
1123 netif_tx_stop_queue(txq);
1124 ndev_ctx->eth_stats.stop_queue++;
1125 } else {
1126 netdev_err(ndev,
1127 "Unable to send packet pages %u len %u, ret %d\n",
1128 packet->page_buf_cnt, packet->total_data_buflen,
1129 ret);
1130 }
1131
1132 if (netif_tx_queue_stopped(txq) &&
1133 atomic_read(&nvchan->queue_sends) < 1 &&
1134 !net_device->tx_disable) {
1135 netif_tx_wake_queue(txq);
1136 ndev_ctx->eth_stats.wake_queue++;
1137 if (ret == -EAGAIN)
1138 ret = -ENOSPC;
1139 }
1140
1141 return ret;
1142}
1143
1144/* Move packet out of multi send data (msd), and clear msd */
1145static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
1146 struct sk_buff **msd_skb,
1147 struct multi_send_data *msdp)
1148{
1149 *msd_skb = msdp->skb;
1150 *msd_send = msdp->pkt;
1151 msdp->skb = NULL;
1152 msdp->pkt = NULL;
1153 msdp->count = 0;
1154}
1155
1156/* RCU already held by caller */
1157/* Batching/bouncing logic is designed to attempt to optimize
1158 * performance.
1159 *
1160 * For small, non-LSO packets we copy the packet to a send buffer
1161 * which is pre-registered with the Hyper-V side. This enables the
1162 * hypervisor to avoid remapping the aperture to access the packet
1163 * descriptor and data.
1164 *
1165 * If we already started using a buffer and the netdev is transmitting
1166 * a burst of packets, keep on copying into the buffer until it is
1167 * full or we are done collecting a burst. If there is an existing
1168 * buffer with space for the RNDIS descriptor but not the packet, copy
1169 * the RNDIS descriptor to the buffer, keeping the packet in place.
1170 *
1171 * If we do batching and send more than one packet using a single
1172 * NetVSC message, free the SKBs of the packets copied, except for the
1173 * last packet. This is done to streamline the handling of the case
1174 * where the last packet only had the RNDIS descriptor copied to the
1175 * send buffer, with the data pointers included in the NetVSC message.
1176 */
1177int netvsc_send(struct net_device *ndev,
1178 struct hv_netvsc_packet *packet,
1179 struct rndis_message *rndis_msg,
1180 struct hv_page_buffer *pb,
1181 struct sk_buff *skb,
1182 bool xdp_tx)
1183{
1184 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1185 struct netvsc_device *net_device
1186 = rcu_dereference_bh(ndev_ctx->nvdev);
1187 struct hv_device *device = ndev_ctx->device_ctx;
1188 int ret = 0;
1189 struct netvsc_channel *nvchan;
1190 u32 pktlen = packet->total_data_buflen, msd_len = 0;
1191 unsigned int section_index = NETVSC_INVALID_INDEX;
1192 struct multi_send_data *msdp;
1193 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
1194 struct sk_buff *msd_skb = NULL;
1195 bool try_batch, xmit_more;
1196
1197 /* If device is rescinded, return error and packet will get dropped. */
1198 if (unlikely(!net_device || net_device->destroy))
1199 return -ENODEV;
1200
1201 nvchan = &net_device->chan_table[packet->q_idx];
1202 packet->send_buf_index = NETVSC_INVALID_INDEX;
1203 packet->cp_partial = false;
1204
1205 /* Send a control message or XDP packet directly without accessing
1206 * msd (Multi-Send Data) field which may be changed during data packet
1207 * processing.
1208 */
1209 if (!skb || xdp_tx)
1210 return netvsc_send_pkt(device, packet, net_device, pb, skb);
1211
1212 /* batch packets in send buffer if possible */
1213 msdp = &nvchan->msd;
1214 if (msdp->pkt)
1215 msd_len = msdp->pkt->total_data_buflen;
1216
1217 try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
1218 if (try_batch && msd_len + pktlen + net_device->pkt_align <
1219 net_device->send_section_size) {
1220 section_index = msdp->pkt->send_buf_index;
1221
1222 } else if (try_batch && msd_len + packet->rmsg_size <
1223 net_device->send_section_size) {
1224 section_index = msdp->pkt->send_buf_index;
1225 packet->cp_partial = true;
1226
1227 } else if (pktlen + net_device->pkt_align <
1228 net_device->send_section_size) {
1229 section_index = netvsc_get_next_send_section(net_device);
1230 if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
1231 ++ndev_ctx->eth_stats.tx_send_full;
1232 } else {
1233 move_pkt_msd(&msd_send, &msd_skb, msdp);
1234 msd_len = 0;
1235 }
1236 }
1237
1238 /* Keep aggregating only if the stack says more data is coming,
1239 * we are not doing a mixed-mode send, and the queue is not flow blocked.
1240 */
1241 xmit_more = netdev_xmit_more() &&
1242 !packet->cp_partial &&
1243 !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
1244
1245 if (section_index != NETVSC_INVALID_INDEX) {
1246 netvsc_copy_to_send_buf(net_device,
1247 section_index, msd_len,
1248 packet, rndis_msg, pb, xmit_more);
1249
1250 packet->send_buf_index = section_index;
1251
1252 if (packet->cp_partial) {
1253 packet->page_buf_cnt -= packet->rmsg_pgcnt;
1254 packet->total_data_buflen = msd_len + packet->rmsg_size;
1255 } else {
1256 packet->page_buf_cnt = 0;
1257 packet->total_data_buflen += msd_len;
1258 }
1259
1260 if (msdp->pkt) {
1261 packet->total_packets += msdp->pkt->total_packets;
1262 packet->total_bytes += msdp->pkt->total_bytes;
1263 }
1264
1265 if (msdp->skb)
1266 dev_consume_skb_any(msdp->skb);
1267
1268 if (xmit_more) {
1269 msdp->skb = skb;
1270 msdp->pkt = packet;
1271 msdp->count++;
1272 } else {
1273 cur_send = packet;
1274 msdp->skb = NULL;
1275 msdp->pkt = NULL;
1276 msdp->count = 0;
1277 }
1278 } else {
1279 move_pkt_msd(&msd_send, &msd_skb, msdp);
1280 cur_send = packet;
1281 }
1282
1283 if (msd_send) {
1284 int m_ret = netvsc_send_pkt(device, msd_send, net_device,
1285 NULL, msd_skb);
1286
1287 if (m_ret != 0) {
1288 netvsc_free_send_slot(net_device,
1289 msd_send->send_buf_index);
1290 dev_kfree_skb_any(msd_skb);
1291 }
1292 }
1293
1294 if (cur_send)
1295 ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
1296
1297 if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
1298 netvsc_free_send_slot(net_device, section_index);
1299
1300 return ret;
1301}
1302
1303/* Send pending recv completions */
1304static int send_recv_completions(struct net_device *ndev,
1305 struct netvsc_device *nvdev,
1306 struct netvsc_channel *nvchan)
1307{
1308 struct multi_recv_comp *mrc = &nvchan->mrc;
1309 struct recv_comp_msg {
1310 struct nvsp_message_header hdr;
1311 u32 status;
1312 } __packed;
1313 struct recv_comp_msg msg = {
1314 .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
1315 };
1316 int ret;
1317
1318 while (mrc->first != mrc->next) {
1319 const struct recv_comp_data *rcd
1320 = mrc->slots + mrc->first;
1321
1322 msg.status = rcd->status;
1323 ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
1324 rcd->tid, VM_PKT_COMP, 0);
1325 if (unlikely(ret)) {
1326 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1327
1328 ++ndev_ctx->eth_stats.rx_comp_busy;
1329 return ret;
1330 }
1331
1332 if (++mrc->first == nvdev->recv_completion_cnt)
1333 mrc->first = 0;
1334 }
1335
1336 /* receive completion ring has been emptied */
1337 if (unlikely(nvdev->destroy))
1338 wake_up(&nvdev->wait_drain);
1339
1340 return 0;
1341}
1342
1343/* Count how many receive completions are outstanding */
1344static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
1345 const struct multi_recv_comp *mrc,
1346 u32 *filled, u32 *avail)
1347{
1348 u32 count = nvdev->recv_completion_cnt;
1349
1350 if (mrc->next >= mrc->first)
1351 *filled = mrc->next - mrc->first;
1352 else
1353 *filled = (count - mrc->first) + mrc->next;
1354
1355 *avail = count - *filled - 1;
1356}
1357
1358/* Add receive complete to ring to send to host. */
1359static void enq_receive_complete(struct net_device *ndev,
1360 struct netvsc_device *nvdev, u16 q_idx,
1361 u64 tid, u32 status)
1362{
1363 struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
1364 struct multi_recv_comp *mrc = &nvchan->mrc;
1365 struct recv_comp_data *rcd;
1366 u32 filled, avail;
1367
1368 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1369
1370 if (unlikely(filled > NAPI_POLL_WEIGHT)) {
1371 send_recv_completions(ndev, nvdev, nvchan);
1372 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1373 }
1374
1375 if (unlikely(!avail)) {
1376 netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1377 q_idx, tid);
1378 return;
1379 }
1380
1381 rcd = mrc->slots + mrc->next;
1382 rcd->tid = tid;
1383 rcd->status = status;
1384
1385 if (++mrc->next == nvdev->recv_completion_cnt)
1386 mrc->next = 0;
1387}
1388
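/* Process a VM_PKT_DATA_USING_XFER_PAGES packet: validate the transfer
 * page header, pass each range (one RNDIS packet per range) from the
 * receive buffer to rndis_filter_receive(), and queue a receive completion.
 */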
1389static int netvsc_receive(struct net_device *ndev,
1390 struct netvsc_device *net_device,
1391 struct netvsc_channel *nvchan,
1392 const struct vmpacket_descriptor *desc)
1393{
1394 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1395 struct vmbus_channel *channel = nvchan->channel;
1396 const struct vmtransfer_page_packet_header *vmxferpage_packet
1397 = container_of(desc, const struct vmtransfer_page_packet_header, d);
1398 const struct nvsp_message *nvsp = hv_pkt_data(desc);
1399 u32 msglen = hv_pkt_datalen(desc);
1400 u16 q_idx = channel->offermsg.offer.sub_channel_index;
1401 char *recv_buf = net_device->recv_buf;
1402 u32 status = NVSP_STAT_SUCCESS;
1403 int i;
1404 int count = 0;
1405
1406 /* Ensure packet is big enough to read header fields */
1407 if (msglen < sizeof(struct nvsp_message_header)) {
1408 netif_err(net_device_ctx, rx_err, ndev,
1409 "invalid nvsp header, length too small: %u\n",
1410 msglen);
1411 return 0;
1412 }
1413
1414 /* Make sure this is a valid nvsp packet */
1415 if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1416 netif_err(net_device_ctx, rx_err, ndev,
1417 "Unknown nvsp packet type received %u\n",
1418 nvsp->hdr.msg_type);
1419 return 0;
1420 }
1421
1422 /* Validate xfer page pkt header */
1423 if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
1424 netif_err(net_device_ctx, rx_err, ndev,
1425 "Invalid xfer page pkt, offset too small: %u\n",
1426 desc->offset8 << 3);
1427 return 0;
1428 }
1429
1430 if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1431 netif_err(net_device_ctx, rx_err, ndev,
1432 "Invalid xfer page set id - expecting %x got %x\n",
1433 NETVSC_RECEIVE_BUFFER_ID,
1434 vmxferpage_packet->xfer_pageset_id);
1435 return 0;
1436 }
1437
1438 count = vmxferpage_packet->range_cnt;
1439
1440 /* Check count for a valid value */
1441 if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
1442 netif_err(net_device_ctx, rx_err, ndev,
1443 "Range count is not valid: %d\n",
1444 count);
1445 return 0;
1446 }
1447
1448 /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1449 for (i = 0; i < count; i++) {
1450 u32 offset = vmxferpage_packet->ranges[i].byte_offset;
1451 u32 buflen = vmxferpage_packet->ranges[i].byte_count;
1452 void *data;
1453 int ret;
1454
1455 if (unlikely(offset > net_device->recv_buf_size ||
1456 buflen > net_device->recv_buf_size - offset)) {
1457 nvchan->rsc.cnt = 0;
1458 status = NVSP_STAT_FAIL;
1459 netif_err(net_device_ctx, rx_err, ndev,
1460 "Packet offset:%u + len:%u too big\n",
1461 offset, buflen);
1462
1463 continue;
1464 }
1465
1466 /* We're going to copy (sections of) the packet into nvchan->recv_buf;
1467 * make sure that nvchan->recv_buf is large enough to hold the packet.
1468 */
1469 if (unlikely(buflen > net_device->recv_section_size)) {
1470 nvchan->rsc.cnt = 0;
1471 status = NVSP_STAT_FAIL;
1472 netif_err(net_device_ctx, rx_err, ndev,
1473 "Packet too big: buflen=%u recv_section_size=%u\n",
1474 buflen, net_device->recv_section_size);
1475
1476 continue;
1477 }
1478
1479 data = recv_buf + offset;
1480
1481 nvchan->rsc.is_last = (i == count - 1);
1482
1483 trace_rndis_recv(ndev, q_idx, data);
1484
1485 /* Pass it to the upper layer */
1486 ret = rndis_filter_receive(ndev, net_device,
1487 nvchan, data, buflen);
1488
1489 if (unlikely(ret != NVSP_STAT_SUCCESS)) {
1490 /* Drop incomplete packet */
1491 nvchan->rsc.cnt = 0;
1492 status = NVSP_STAT_FAIL;
1493 }
1494 }
1495
1496 enq_receive_complete(ndev, net_device, q_idx,
1497 vmxferpage_packet->d.trans_id, status);
1498
1499 return count;
1500}
1501
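/* Handle NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: validate the message and
 * copy the host-supplied send indirection table into tx_table.
 */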
1502static void netvsc_send_table(struct net_device *ndev,
1503 struct netvsc_device *nvscdev,
1504 const struct nvsp_message *nvmsg,
1505 u32 msglen)
1506{
1507 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1508 u32 count, offset, *tab;
1509 int i;
1510
1511 /* Ensure packet is big enough to read send_table fields */
1512 if (msglen < sizeof(struct nvsp_message_header) +
1513 sizeof(struct nvsp_5_send_indirect_table)) {
1514 netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
1515 return;
1516 }
1517
1518 count = nvmsg->msg.v5_msg.send_table.count;
1519 offset = nvmsg->msg.v5_msg.send_table.offset;
1520
1521 if (count != VRSS_SEND_TAB_SIZE) {
1522 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1523 return;
1524 }
1525
1526 /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
1527 * wrong due to a host bug. So fix the offset here.
1528 */
1529 if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
1530 msglen >= sizeof(struct nvsp_message_header) +
1531 sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
1532 offset = sizeof(struct nvsp_message_header) +
1533 sizeof(union nvsp_6_message_uber);
1534
1535 /* Boundary check for all versions */
1536 if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
1537 netdev_err(ndev, "Received send-table offset too big:%u\n",
1538 offset);
1539 return;
1540 }
1541
1542 tab = (void *)nvmsg + offset;
1543
1544 for (i = 0; i < count; i++)
1545 net_device_ctx->tx_table[i] = tab[i];
1546}
1547
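/* Handle NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: record the VF serial number
 * and allocation state, and signal waiters when a VF slot is added.
 */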
1548static void netvsc_send_vf(struct net_device *ndev,
1549 const struct nvsp_message *nvmsg,
1550 u32 msglen)
1551{
1552 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1553
1554 /* Ensure packet is big enough to read its fields */
1555 if (msglen < sizeof(struct nvsp_message_header) +
1556 sizeof(struct nvsp_4_send_vf_association)) {
1557 netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
1558 return;
1559 }
1560
1561 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1562 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1563
1564 if (net_device_ctx->vf_alloc)
1565 complete(&net_device_ctx->vf_add);
1566
1567 netdev_info(ndev, "VF slot %u %s\n",
1568 net_device_ctx->vf_serial,
1569 net_device_ctx->vf_alloc ? "added" : "removed");
1570}
1571
1572static void netvsc_receive_inband(struct net_device *ndev,
1573 struct netvsc_device *nvscdev,
1574 const struct vmpacket_descriptor *desc)
1575{
1576 const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1577 u32 msglen = hv_pkt_datalen(desc);
1578
1579 /* Ensure packet is big enough to read header fields */
1580 if (msglen < sizeof(struct nvsp_message_header)) {
1581 netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
1582 return;
1583 }
1584
1585 switch (nvmsg->hdr.msg_type) {
1586 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1587 netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
1588 break;
1589
1590 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1591 if (hv_is_isolation_supported())
1592 netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
1593 else
1594 netvsc_send_vf(ndev, nvmsg, msglen);
1595 break;
1596 }
1597}
1598
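/* Dispatch one VMBus packet from the ring buffer to the handler for its
 * packet type. Returns the number of RNDIS packets consumed, which is
 * non-zero only for transfer page (data) packets.
 */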
1599static int netvsc_process_raw_pkt(struct hv_device *device,
1600 struct netvsc_channel *nvchan,
1601 struct netvsc_device *net_device,
1602 struct net_device *ndev,
1603 const struct vmpacket_descriptor *desc,
1604 int budget)
1605{
1606 struct vmbus_channel *channel = nvchan->channel;
1607 const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1608
1609 trace_nvsp_recv(ndev, channel, nvmsg);
1610
1611 switch (desc->type) {
1612 case VM_PKT_COMP:
1613 netvsc_send_completion(ndev, net_device, channel, desc, budget);
1614 break;
1615
1616 case VM_PKT_DATA_USING_XFER_PAGES:
1617 return netvsc_receive(ndev, net_device, nvchan, desc);
1618
1619 case VM_PKT_DATA_INBAND:
1620 netvsc_receive_inband(ndev, net_device, desc);
1621 break;
1622
1623 default:
1624 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1625 desc->type, desc->trans_id);
1626 break;
1627 }
1628
1629 return 0;
1630}
1631
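/* Return the hv_device that owns a channel: for a subchannel this is the
 * device of its primary channel.
 */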
1632static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1633{
1634 struct vmbus_channel *primary = channel->primary_channel;
1635
1636 return primary ? primary->device_obj : channel->device_obj;
1637}
1638
1639/* Network processing softirq
1640 * Process data in incoming ring buffer from host
1641 * Stops when ring is empty or budget is met or exceeded.
1642 */
1643int netvsc_poll(struct napi_struct *napi, int budget)
1644{
1645 struct netvsc_channel *nvchan
1646 = container_of(napi, struct netvsc_channel, napi);
1647 struct netvsc_device *net_device = nvchan->net_device;
1648 struct vmbus_channel *channel = nvchan->channel;
1649 struct hv_device *device = netvsc_channel_to_device(channel);
1650 struct net_device *ndev = hv_get_drvdata(device);
1651 int work_done = 0;
1652 int ret;
1653
1654 /* If starting a new interval */
1655 if (!nvchan->desc)
1656 nvchan->desc = hv_pkt_iter_first(channel);
1657
1658 nvchan->xdp_flush = false;
1659
1660 while (nvchan->desc && work_done < budget) {
1661 work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
1662 ndev, nvchan->desc, budget);
1663 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1664 }
1665
1666 if (nvchan->xdp_flush)
1667 xdp_do_flush();
1668
1669 /* Send any pending receive completions */
1670 ret = send_recv_completions(ndev, net_device, nvchan);
1671
1672 /* If it did not exhaust NAPI budget this time
1673 * and not doing busy poll
1674 * then re-enable host interrupts
1675 * and reschedule if ring is not empty
1676 * or sending receive completion failed.
1677 */
1678 if (work_done < budget &&
1679 napi_complete_done(napi, work_done) &&
1680 (ret || hv_end_read(&channel->inbound)) &&
1681 napi_schedule_prep(napi)) {
1682 hv_begin_read(&channel->inbound);
1683 __napi_schedule(napi);
1684 }
1685
1686 /* Driver may overshoot since multiple packets per descriptor */
1687 return min(work_done, budget);
1688}
1689
1690/* Call back when data is available in host ring buffer.
1691 * Processing is deferred until network softirq (NAPI)
1692 */
1693void netvsc_channel_cb(void *context)
1694{
1695 struct netvsc_channel *nvchan = context;
1696 struct vmbus_channel *channel = nvchan->channel;
1697 struct hv_ring_buffer_info *rbi = &channel->inbound;
1698
1699 /* preload first vmpacket descriptor */
1700 prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
1701
1702 if (napi_schedule_prep(&nvchan->napi)) {
1703 /* disable interrupts from host */
1704 hv_begin_read(rbi);
1705
1706 __napi_schedule_irqoff(&nvchan->napi);
1707 }
1708}
1709
1710/*
1711 * netvsc_device_add - Callback when the device belonging to this
1712 * driver is added
1713 */
1714struct netvsc_device *netvsc_device_add(struct hv_device *device,
1715 const struct netvsc_device_info *device_info)
1716{
1717 int i, ret = 0;
1718 struct netvsc_device *net_device;
1719 struct net_device *ndev = hv_get_drvdata(device);
1720 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1721
1722 net_device = alloc_net_device();
1723 if (!net_device)
1724 return ERR_PTR(-ENOMEM);
1725
1726 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1727 net_device_ctx->tx_table[i] = 0;
1728
1729 /* Because the device uses NAPI, all the interrupt batching and
1730 * control is done via Net softirq, not the channel handling
1731 */
1732 set_channel_read_mode(device->channel, HV_CALL_ISR);
1733
1734 /* If we're reopening the device we may have multiple queues, fill the
1735 * chn_table with the default channel to use it before subchannels are
1736 * opened.
1737 * Initialize the channel state before we open;
1738 * we can be interrupted as soon as we open the channel.
1739 */
1740
1741 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1742 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1743
1744 nvchan->channel = device->channel;
1745 nvchan->net_device = net_device;
1746 u64_stats_init(&nvchan->tx_stats.syncp);
1747 u64_stats_init(&nvchan->rx_stats.syncp);
1748
1749 ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
1750
1751 if (ret) {
1752 netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
1753 goto cleanup2;
1754 }
1755
1756 ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
1757 MEM_TYPE_PAGE_SHARED, NULL);
1758
1759 if (ret) {
1760 netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
1761 goto cleanup2;
1762 }
1763 }
1764
1765 /* Enable NAPI handler before init callbacks */
1766 netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
1767
1768 /* Open the channel */
1769 device->channel->next_request_id_callback = vmbus_next_request_id;
1770 device->channel->request_addr_callback = vmbus_request_addr;
1771 device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
1772 device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
1773
1774 ret = vmbus_open(device->channel, netvsc_ring_bytes,
1775 netvsc_ring_bytes, NULL, 0,
1776 netvsc_channel_cb, net_device->chan_table);
1777
1778 if (ret != 0) {
1779 netdev_err(ndev, "unable to open channel: %d\n", ret);
1780 goto cleanup;
1781 }
1782
1783 /* Channel is opened */
1784 netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
1785
1786 napi_enable(&net_device->chan_table[0].napi);
1787
1788 /* Connect with the NetVsp */
1789 ret = netvsc_connect_vsp(device, net_device, device_info);
1790 if (ret != 0) {
1791 netdev_err(ndev,
1792 "unable to connect to NetVSP - %d\n", ret);
1793 goto close;
1794 }
1795
1796 /* Writing the nvdev pointer unlocks netvsc_send(); make sure chn_table
1797 * is populated first.
1798 */
1799 rcu_assign_pointer(net_device_ctx->nvdev, net_device);
1800
1801 return net_device;
1802
1803close:
1804 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
1805 napi_disable(&net_device->chan_table[0].napi);
1806
1807 /* Now, we can close the channel safely */
1808 vmbus_close(device->channel);
1809
1810cleanup:
1811 netif_napi_del(&net_device->chan_table[0].napi);
1812
1813cleanup2:
1814 free_netvsc_device(&net_device->rcu);
1815
1816 return ERR_PTR(ret);
1817}
1/*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * Authors:
17 * Haiyang Zhang <haiyangz@microsoft.com>
18 * Hank Janssen <hjanssen@microsoft.com>
19 */
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/kernel.h>
23#include <linux/sched.h>
24#include <linux/wait.h>
25#include <linux/mm.h>
26#include <linux/delay.h>
27#include <linux/io.h>
28#include <linux/slab.h>
29#include <linux/netdevice.h>
30#include <linux/if_ether.h>
31#include <linux/vmalloc.h>
32#include <linux/rtnetlink.h>
33#include <linux/prefetch.h>
34#include <linux/reciprocal_div.h>
35
36#include <asm/sync_bitops.h>
37
38#include "hyperv_net.h"
39#include "netvsc_trace.h"
40
41/*
42 * Switch the data path from the synthetic interface to the VF
43 * interface.
44 */
45void netvsc_switch_datapath(struct net_device *ndev, bool vf)
46{
47 struct net_device_context *net_device_ctx = netdev_priv(ndev);
48 struct hv_device *dev = net_device_ctx->device_ctx;
49 struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
50 struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
51
52 memset(init_pkt, 0, sizeof(struct nvsp_message));
53 init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
54 if (vf)
55 init_pkt->msg.v4_msg.active_dp.active_datapath =
56 NVSP_DATAPATH_VF;
57 else
58 init_pkt->msg.v4_msg.active_dp.active_datapath =
59 NVSP_DATAPATH_SYNTHETIC;
60
61 trace_nvsp_send(ndev, init_pkt);
62
63 vmbus_sendpacket(dev->channel, init_pkt,
64 sizeof(struct nvsp_message),
65 (unsigned long)init_pkt,
66 VM_PKT_DATA_INBAND, 0);
67}
68
69static struct netvsc_device *alloc_net_device(void)
70{
71 struct netvsc_device *net_device;
72
73 net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
74 if (!net_device)
75 return NULL;
76
77 init_waitqueue_head(&net_device->wait_drain);
78 net_device->destroy = false;
79
80 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
81 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
82
83 init_completion(&net_device->channel_init_wait);
84 init_waitqueue_head(&net_device->subchan_open);
85 INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
86
87 return net_device;
88}
89
90static void free_netvsc_device(struct rcu_head *head)
91{
92 struct netvsc_device *nvdev
93 = container_of(head, struct netvsc_device, rcu);
94 int i;
95
96 kfree(nvdev->extension);
97 vfree(nvdev->recv_buf);
98 vfree(nvdev->send_buf);
99 kfree(nvdev->send_section_map);
100
101 for (i = 0; i < VRSS_CHANNEL_MAX; i++)
102 vfree(nvdev->chan_table[i].mrc.slots);
103
104 kfree(nvdev);
105}
106
107static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
108{
109 call_rcu(&nvdev->rcu, free_netvsc_device);
110}
111
112static void netvsc_revoke_recv_buf(struct hv_device *device,
113 struct netvsc_device *net_device,
114 struct net_device *ndev)
115{
116 struct nvsp_message *revoke_packet;
117 int ret;
118
	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (i.e. we sent a
	 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
	 * to send a revoke msg here.
	 */
125 if (net_device->recv_section_cnt) {
126 /* Send the revoke receive buffer */
127 revoke_packet = &net_device->revoke_packet;
128 memset(revoke_packet, 0, sizeof(struct nvsp_message));
129
130 revoke_packet->hdr.msg_type =
131 NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
132 revoke_packet->msg.v1_msg.
133 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
134
135 trace_nvsp_send(ndev, revoke_packet);
136
137 ret = vmbus_sendpacket(device->channel,
138 revoke_packet,
139 sizeof(struct nvsp_message),
140 (unsigned long)revoke_packet,
141 VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel has been rescinded,
		 * ignore it: we cannot send on a rescinded channel, and
		 * clearing the error lets the cleanup continue anyway.
		 */
147 if (device->channel->rescind)
148 ret = 0;
		/*
		 * If we failed here, we might as well return and have a
		 * leak rather than continue and risk a bugcheck.
		 */
153 if (ret != 0) {
154 netdev_err(ndev, "unable to send "
155 "revoke receive buffer to netvsp\n");
156 return;
157 }
158 net_device->recv_section_cnt = 0;
159 }
160}
161
162static void netvsc_revoke_send_buf(struct hv_device *device,
163 struct netvsc_device *net_device,
164 struct net_device *ndev)
165{
166 struct nvsp_message *revoke_packet;
167 int ret;
168
	/* Deal with the send buffer we may have set up.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
	 * to send a revoke msg here.
	 */
175 if (net_device->send_section_cnt) {
		/* Send the revoke send buffer */
177 revoke_packet = &net_device->revoke_packet;
178 memset(revoke_packet, 0, sizeof(struct nvsp_message));
179
180 revoke_packet->hdr.msg_type =
181 NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
182 revoke_packet->msg.v1_msg.revoke_send_buf.id =
183 NETVSC_SEND_BUFFER_ID;
184
185 trace_nvsp_send(ndev, revoke_packet);
186
187 ret = vmbus_sendpacket(device->channel,
188 revoke_packet,
189 sizeof(struct nvsp_message),
190 (unsigned long)revoke_packet,
191 VM_PKT_DATA_INBAND, 0);
192
		/* If the failure is because the channel has been rescinded,
		 * ignore it: we cannot send on a rescinded channel, and
		 * clearing the error lets the cleanup continue anyway.
		 */
198 if (device->channel->rescind)
199 ret = 0;
200
		/* If we failed here, we might as well return and have a
		 * leak rather than continue and risk a bugcheck.
		 */
204 if (ret != 0) {
205 netdev_err(ndev, "unable to send "
206 "revoke send buffer to netvsp\n");
207 return;
208 }
209 net_device->send_section_cnt = 0;
210 }
211}
212
213static void netvsc_teardown_recv_gpadl(struct hv_device *device,
214 struct netvsc_device *net_device,
215 struct net_device *ndev)
216{
217 int ret;
218
219 if (net_device->recv_buf_gpadl_handle) {
220 ret = vmbus_teardown_gpadl(device->channel,
221 net_device->recv_buf_gpadl_handle);
222
		/* If we failed here, we might as well return and have a
		 * leak rather than continue and risk a bugcheck.
		 */
226 if (ret != 0) {
227 netdev_err(ndev,
228 "unable to teardown receive buffer's gpadl\n");
229 return;
230 }
231 net_device->recv_buf_gpadl_handle = 0;
232 }
233}
234
235static void netvsc_teardown_send_gpadl(struct hv_device *device,
236 struct netvsc_device *net_device,
237 struct net_device *ndev)
238{
239 int ret;
240
241 if (net_device->send_buf_gpadl_handle) {
242 ret = vmbus_teardown_gpadl(device->channel,
243 net_device->send_buf_gpadl_handle);
244
		/* If we failed here, we might as well return and have a
		 * leak rather than continue and risk a bugcheck.
		 */
248 if (ret != 0) {
249 netdev_err(ndev,
250 "unable to teardown send buffer's gpadl\n");
251 return;
252 }
253 net_device->send_buf_gpadl_handle = 0;
254 }
255}
256
257int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
258{
259 struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
260 int node = cpu_to_node(nvchan->channel->target_cpu);
261 size_t size;
262
263 size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
264 nvchan->mrc.slots = vzalloc_node(size, node);
265 if (!nvchan->mrc.slots)
266 nvchan->mrc.slots = vzalloc(size);
267
268 return nvchan->mrc.slots ? 0 : -ENOMEM;
269}
270
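/* Allocate the receive and send buffers, establish a GPADL for each, and
 * exchange the NVSP messages that hand them to the host.  Also sizes the
 * receive completion ring and the send-section allocation bitmap.
 */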
271static int netvsc_init_buf(struct hv_device *device,
272 struct netvsc_device *net_device,
273 const struct netvsc_device_info *device_info)
274{
275 struct nvsp_1_message_send_receive_buffer_complete *resp;
276 struct net_device *ndev = hv_get_drvdata(device);
277 struct nvsp_message *init_packet;
278 unsigned int buf_size;
279 size_t map_words;
280 int ret = 0;
281
282 /* Get receive buffer area. */
283 buf_size = device_info->recv_sections * device_info->recv_section_size;
284 buf_size = roundup(buf_size, PAGE_SIZE);
285
	/* Legacy hosts only allow a smaller receive buffer */
287 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
288 buf_size = min_t(unsigned int, buf_size,
289 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
290
291 net_device->recv_buf = vzalloc(buf_size);
292 if (!net_device->recv_buf) {
293 netdev_err(ndev,
294 "unable to allocate receive buffer of size %u\n",
295 buf_size);
296 ret = -ENOMEM;
297 goto cleanup;
298 }
299
300 net_device->recv_buf_size = buf_size;
301
302 /*
303 * Establish the gpadl handle for this buffer on this
304 * channel. Note: This call uses the vmbus connection rather
305 * than the channel to establish the gpadl handle.
306 */
307 ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
308 buf_size,
309 &net_device->recv_buf_gpadl_handle);
310 if (ret != 0) {
311 netdev_err(ndev,
312 "unable to establish receive buffer's gpadl\n");
313 goto cleanup;
314 }
315
316 /* Notify the NetVsp of the gpadl handle */
317 init_packet = &net_device->channel_init_pkt;
318 memset(init_packet, 0, sizeof(struct nvsp_message));
319 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
320 init_packet->msg.v1_msg.send_recv_buf.
321 gpadl_handle = net_device->recv_buf_gpadl_handle;
322 init_packet->msg.v1_msg.
323 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
324
325 trace_nvsp_send(ndev, init_packet);
326
327 /* Send the gpadl notification request */
328 ret = vmbus_sendpacket(device->channel, init_packet,
329 sizeof(struct nvsp_message),
330 (unsigned long)init_packet,
331 VM_PKT_DATA_INBAND,
332 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
333 if (ret != 0) {
334 netdev_err(ndev,
335 "unable to send receive buffer's gpadl to netvsp\n");
336 goto cleanup;
337 }
338
339 wait_for_completion(&net_device->channel_init_wait);
340
341 /* Check the response */
342 resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
343 if (resp->status != NVSP_STAT_SUCCESS) {
344 netdev_err(ndev,
345 "Unable to complete receive buffer initialization with NetVsp - status %d\n",
346 resp->status);
347 ret = -EINVAL;
348 goto cleanup;
349 }
350
351 /* Parse the response */
352 netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
353 resp->num_sections, resp->sections[0].sub_alloc_size,
354 resp->sections[0].num_sub_allocs);
355
356 /* There should only be one section for the entire receive buffer */
357 if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
358 ret = -EINVAL;
359 goto cleanup;
360 }
361
362 net_device->recv_section_size = resp->sections[0].sub_alloc_size;
363 net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
364
365 /* Setup receive completion ring */
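	/* Size for one entry per receive section plus a spare slot so that
	 * first == next unambiguously means the ring is empty (see
	 * recv_comp_slot_avail()), rounded up for allocation granularity.
	 */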
366 net_device->recv_completion_cnt
367 = round_up(net_device->recv_section_cnt + 1,
368 PAGE_SIZE / sizeof(u64));
369 ret = netvsc_alloc_recv_comp_ring(net_device, 0);
370 if (ret)
371 goto cleanup;
372
373 /* Now setup the send buffer. */
374 buf_size = device_info->send_sections * device_info->send_section_size;
375 buf_size = round_up(buf_size, PAGE_SIZE);
376
377 net_device->send_buf = vzalloc(buf_size);
378 if (!net_device->send_buf) {
379 netdev_err(ndev, "unable to allocate send buffer of size %u\n",
380 buf_size);
381 ret = -ENOMEM;
382 goto cleanup;
383 }
384
385 /* Establish the gpadl handle for this buffer on this
386 * channel. Note: This call uses the vmbus connection rather
387 * than the channel to establish the gpadl handle.
388 */
389 ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
390 buf_size,
391 &net_device->send_buf_gpadl_handle);
392 if (ret != 0) {
393 netdev_err(ndev,
394 "unable to establish send buffer's gpadl\n");
395 goto cleanup;
396 }
397
398 /* Notify the NetVsp of the gpadl handle */
399 init_packet = &net_device->channel_init_pkt;
400 memset(init_packet, 0, sizeof(struct nvsp_message));
401 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
402 init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
403 net_device->send_buf_gpadl_handle;
404 init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
405
406 trace_nvsp_send(ndev, init_packet);
407
408 /* Send the gpadl notification request */
409 ret = vmbus_sendpacket(device->channel, init_packet,
410 sizeof(struct nvsp_message),
411 (unsigned long)init_packet,
412 VM_PKT_DATA_INBAND,
413 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
414 if (ret != 0) {
415 netdev_err(ndev,
416 "unable to send send buffer's gpadl to netvsp\n");
417 goto cleanup;
418 }
419
420 wait_for_completion(&net_device->channel_init_wait);
421
422 /* Check the response */
423 if (init_packet->msg.v1_msg.
424 send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
425 netdev_err(ndev, "Unable to complete send buffer "
426 "initialization with NetVsp - status %d\n",
427 init_packet->msg.v1_msg.
428 send_send_buf_complete.status);
429 ret = -EINVAL;
430 goto cleanup;
431 }
432
433 /* Parse the response */
434 net_device->send_section_size = init_packet->msg.
435 v1_msg.send_send_buf_complete.section_size;
436
437 /* Section count is simply the size divided by the section size. */
438 net_device->send_section_cnt = buf_size / net_device->send_section_size;
439
440 netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
441 net_device->send_section_size, net_device->send_section_cnt);
442
443 /* Setup state for managing the send buffer. */
444 map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);
445
446 net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
447 if (net_device->send_section_map == NULL) {
448 ret = -ENOMEM;
449 goto cleanup;
450 }
451
452 goto exit;
453
454cleanup:
455 netvsc_revoke_recv_buf(device, net_device, ndev);
456 netvsc_revoke_send_buf(device, net_device, ndev);
457 netvsc_teardown_recv_gpadl(device, net_device, ndev);
458 netvsc_teardown_send_gpadl(device, net_device, ndev);
459
460exit:
461 return ret;
462}
463
464/* Negotiate NVSP protocol version */
465static int negotiate_nvsp_ver(struct hv_device *device,
466 struct netvsc_device *net_device,
467 struct nvsp_message *init_packet,
468 u32 nvsp_ver)
469{
470 struct net_device *ndev = hv_get_drvdata(device);
471 int ret;
472
473 memset(init_packet, 0, sizeof(struct nvsp_message));
474 init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
475 init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
476 init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
477 trace_nvsp_send(ndev, init_packet);
478
479 /* Send the init request */
480 ret = vmbus_sendpacket(device->channel, init_packet,
481 sizeof(struct nvsp_message),
482 (unsigned long)init_packet,
483 VM_PKT_DATA_INBAND,
484 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
485
486 if (ret != 0)
487 return ret;
488
489 wait_for_completion(&net_device->channel_init_wait);
490
491 if (init_packet->msg.init_msg.init_complete.status !=
492 NVSP_STAT_SUCCESS)
493 return -EINVAL;
494
495 if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
496 return 0;
497
498 /* NVSPv2 or later: Send NDIS config */
499 memset(init_packet, 0, sizeof(struct nvsp_message));
500 init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
501 init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
502 init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
503
504 if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
505 init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
506
507 /* Teaming bit is needed to receive link speed updates */
508 init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
509 }
510
511 trace_nvsp_send(ndev, init_packet);
512
513 ret = vmbus_sendpacket(device->channel, init_packet,
514 sizeof(struct nvsp_message),
515 (unsigned long)init_packet,
516 VM_PKT_DATA_INBAND, 0);
517
518 return ret;
519}
520
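/* Negotiate the highest NVSP protocol version supported by both ends
 * (newest tried first), send the NDIS version, then set up the receive
 * and send buffers.
 */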
521static int netvsc_connect_vsp(struct hv_device *device,
522 struct netvsc_device *net_device,
523 const struct netvsc_device_info *device_info)
524{
525 struct net_device *ndev = hv_get_drvdata(device);
526 static const u32 ver_list[] = {
527 NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
528 NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5
529 };
530 struct nvsp_message *init_packet;
531 int ndis_version, i, ret;
532
533 init_packet = &net_device->channel_init_pkt;
534
535 /* Negotiate the latest NVSP protocol supported */
536 for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
537 if (negotiate_nvsp_ver(device, net_device, init_packet,
538 ver_list[i]) == 0) {
539 net_device->nvsp_version = ver_list[i];
540 break;
541 }
542
543 if (i < 0) {
544 ret = -EPROTO;
545 goto cleanup;
546 }
547
548 pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
549
550 /* Send the ndis version */
551 memset(init_packet, 0, sizeof(struct nvsp_message));
552
553 if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
554 ndis_version = 0x00060001;
555 else
556 ndis_version = 0x0006001e;
557
558 init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
559 init_packet->msg.v1_msg.
560 send_ndis_ver.ndis_major_ver =
561 (ndis_version & 0xFFFF0000) >> 16;
562 init_packet->msg.v1_msg.
563 send_ndis_ver.ndis_minor_ver =
564 ndis_version & 0xFFFF;
565
566 trace_nvsp_send(ndev, init_packet);
567
568 /* Send the init request */
569 ret = vmbus_sendpacket(device->channel, init_packet,
570 sizeof(struct nvsp_message),
571 (unsigned long)init_packet,
572 VM_PKT_DATA_INBAND, 0);
573 if (ret != 0)
574 goto cleanup;
575
576
577 ret = netvsc_init_buf(device, net_device, device_info);
578
579cleanup:
580 return ret;
581}
582
583/*
584 * netvsc_device_remove - Callback when the root bus device is removed
585 */
586void netvsc_device_remove(struct hv_device *device)
587{
588 struct net_device *ndev = hv_get_drvdata(device);
589 struct net_device_context *net_device_ctx = netdev_priv(ndev);
590 struct netvsc_device *net_device
591 = rtnl_dereference(net_device_ctx->nvdev);
592 int i;
593
594 /*
595 * Revoke receive buffer. If host is pre-Win2016 then tear down
596 * receive buffer GPADL. Do the same for send buffer.
597 */
598 netvsc_revoke_recv_buf(device, net_device, ndev);
599 if (vmbus_proto_version < VERSION_WIN10)
600 netvsc_teardown_recv_gpadl(device, net_device, ndev);
601
602 netvsc_revoke_send_buf(device, net_device, ndev);
603 if (vmbus_proto_version < VERSION_WIN10)
604 netvsc_teardown_send_gpadl(device, net_device, ndev);
605
606 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
607
608 /* And disassociate NAPI context from device */
609 for (i = 0; i < net_device->num_chn; i++)
610 netif_napi_del(&net_device->chan_table[i].napi);
611
612 /*
613 * At this point, no one should be accessing net_device
614 * except in here
615 */
616 netdev_dbg(ndev, "net device safe to remove\n");
617
618 /* Now, we can close the channel safely */
619 vmbus_close(device->channel);
620
621 /*
622 * If host is Win2016 or higher then we do the GPADL tear down
623 * here after VMBus is closed.
624 */
625 if (vmbus_proto_version >= VERSION_WIN10) {
626 netvsc_teardown_recv_gpadl(device, net_device, ndev);
627 netvsc_teardown_send_gpadl(device, net_device, ndev);
628 }
629
630 /* Release all resources */
631 free_netvsc_device_rcu(net_device);
632}
633
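/* Transmit flow control: a queue is stopped when the available space in
 * the outbound ring drops below LOWATER percent, and woken again once it
 * rises above HIWATER percent (or all in-flight sends have completed).
 */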
634#define RING_AVAIL_PERCENT_HIWATER 20
635#define RING_AVAIL_PERCENT_LOWATER 10
636
637/*
638 * Get the percentage of available bytes to write in the ring.
639 * The return value is in range from 0 to 100.
640 */
641static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
642{
643 u32 avail_write = hv_get_bytes_to_write(ring_info);
644
645 return reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal);
646}
647
648static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
649 u32 index)
650{
651 sync_change_bit(index, net_device->send_section_map);
652}
653
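/* Completion handler for a previously transmitted RNDIS data packet:
 * release the send-buffer section (if one was used), update per-queue
 * transmit statistics, free the skb, and wake the transmit queue if it
 * was stopped and the ring has drained (or all in-flight sends are done).
 */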
654static void netvsc_send_tx_complete(struct netvsc_device *net_device,
655 struct vmbus_channel *incoming_channel,
656 struct hv_device *device,
657 const struct vmpacket_descriptor *desc,
658 int budget)
659{
660 struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
661 struct net_device *ndev = hv_get_drvdata(device);
662 struct net_device_context *ndev_ctx = netdev_priv(ndev);
663 struct vmbus_channel *channel = device->channel;
664 u16 q_idx = 0;
665 int queue_sends;
666
667 /* Notify the layer above us */
668 if (likely(skb)) {
669 const struct hv_netvsc_packet *packet
670 = (struct hv_netvsc_packet *)skb->cb;
671 u32 send_index = packet->send_buf_index;
672 struct netvsc_stats *tx_stats;
673
674 if (send_index != NETVSC_INVALID_INDEX)
675 netvsc_free_send_slot(net_device, send_index);
676 q_idx = packet->q_idx;
677 channel = incoming_channel;
678
679 tx_stats = &net_device->chan_table[q_idx].tx_stats;
680
681 u64_stats_update_begin(&tx_stats->syncp);
682 tx_stats->packets += packet->total_packets;
683 tx_stats->bytes += packet->total_bytes;
684 u64_stats_update_end(&tx_stats->syncp);
685
686 napi_consume_skb(skb, budget);
687 }
688
689 queue_sends =
690 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
691
692 if (unlikely(net_device->destroy)) {
693 if (queue_sends == 0)
694 wake_up(&net_device->wait_drain);
695 } else {
696 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
697
698 if (netif_tx_queue_stopped(txq) &&
699 (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
700 queue_sends < 1)) {
701 netif_tx_wake_queue(txq);
702 ndev_ctx->eth_stats.wake_queue++;
703 }
704 }
705}
706
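/* Dispatch a completion packet from the host: responses to init, buffer
 * and subchannel requests are copied into channel_init_pkt and the
 * waiter is woken; RNDIS packet completions are handled by
 * netvsc_send_tx_complete().
 */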
707static void netvsc_send_completion(struct netvsc_device *net_device,
708 struct vmbus_channel *incoming_channel,
709 struct hv_device *device,
710 const struct vmpacket_descriptor *desc,
711 int budget)
712{
713 struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
714 struct net_device *ndev = hv_get_drvdata(device);
715
716 switch (nvsp_packet->hdr.msg_type) {
717 case NVSP_MSG_TYPE_INIT_COMPLETE:
718 case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
719 case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
720 case NVSP_MSG5_TYPE_SUBCHANNEL:
721 /* Copy the response back */
722 memcpy(&net_device->channel_init_pkt, nvsp_packet,
723 sizeof(struct nvsp_message));
724 complete(&net_device->channel_init_wait);
725 break;
726
727 case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
728 netvsc_send_tx_complete(net_device, incoming_channel,
729 device, desc, budget);
730 break;
731
732 default:
733 netdev_err(ndev,
734 "Unknown send completion type %d received!!\n",
735 nvsp_packet->hdr.msg_type);
736 }
737}
738
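/* Find and claim a free section in the send buffer.  The atomic
 * test-and-set prevents two CPUs from claiming the same section;
 * returns NETVSC_INVALID_INDEX if every section is in use.
 */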
739static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
740{
741 unsigned long *map_addr = net_device->send_section_map;
742 unsigned int i;
743
744 for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
745 if (sync_test_and_set_bit(i, map_addr) == 0)
746 return i;
747 }
748
749 return NETVSC_INVALID_INDEX;
750}
751
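/* Copy an RNDIS packet into the chosen send-buffer section at offset
 * pend_size (non-zero when batching onto an earlier packet).  When more
 * data is expected, pad the packet to the RNDIS alignment so the next
 * packet starts on an aligned boundary.
 */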
752static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
753 unsigned int section_index,
754 u32 pend_size,
755 struct hv_netvsc_packet *packet,
756 struct rndis_message *rndis_msg,
757 struct hv_page_buffer *pb,
758 bool xmit_more)
759{
760 char *start = net_device->send_buf;
761 char *dest = start + (section_index * net_device->send_section_size)
762 + pend_size;
763 int i;
764 u32 padding = 0;
765 u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
766 packet->page_buf_cnt;
767 u32 remain;
768
769 /* Add padding */
770 remain = packet->total_data_buflen & (net_device->pkt_align - 1);
771 if (xmit_more && remain) {
772 padding = net_device->pkt_align - remain;
773 rndis_msg->msg_len += padding;
774 packet->total_data_buflen += padding;
775 }
776
777 for (i = 0; i < page_count; i++) {
778 char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
779 u32 offset = pb[i].offset;
780 u32 len = pb[i].len;
781
782 memcpy(dest, (src + offset), len);
783 dest += len;
784 }
785
786 if (padding)
787 memset(dest, 0, padding);
788}
789
790static inline int netvsc_send_pkt(
791 struct hv_device *device,
792 struct hv_netvsc_packet *packet,
793 struct netvsc_device *net_device,
794 struct hv_page_buffer *pb,
795 struct sk_buff *skb)
796{
797 struct nvsp_message nvmsg;
798 struct nvsp_1_message_send_rndis_packet *rpkt =
799 &nvmsg.msg.v1_msg.send_rndis_pkt;
800 struct netvsc_channel * const nvchan =
801 &net_device->chan_table[packet->q_idx];
802 struct vmbus_channel *out_channel = nvchan->channel;
803 struct net_device *ndev = hv_get_drvdata(device);
804 struct net_device_context *ndev_ctx = netdev_priv(ndev);
805 struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
806 u64 req_id;
807 int ret;
808 u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
809
810 nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
811 if (skb)
812 rpkt->channel_type = 0; /* 0 is RMC_DATA */
813 else
814 rpkt->channel_type = 1; /* 1 is RMC_CONTROL */
815
816 rpkt->send_buf_section_index = packet->send_buf_index;
817 if (packet->send_buf_index == NETVSC_INVALID_INDEX)
818 rpkt->send_buf_section_size = 0;
819 else
820 rpkt->send_buf_section_size = packet->total_data_buflen;
821
822 req_id = (ulong)skb;
823
824 if (out_channel->rescind)
825 return -ENODEV;
826
827 trace_nvsp_send_pkt(ndev, out_channel, rpkt);
828
829 if (packet->page_buf_cnt) {
830 if (packet->cp_partial)
831 pb += packet->rmsg_pgcnt;
832
833 ret = vmbus_sendpacket_pagebuffer(out_channel,
834 pb, packet->page_buf_cnt,
835 &nvmsg, sizeof(nvmsg),
836 req_id);
837 } else {
838 ret = vmbus_sendpacket(out_channel,
839 &nvmsg, sizeof(nvmsg),
840 req_id, VM_PKT_DATA_INBAND,
841 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
842 }
843
844 if (ret == 0) {
845 atomic_inc_return(&nvchan->queue_sends);
846
847 if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
848 netif_tx_stop_queue(txq);
849 ndev_ctx->eth_stats.stop_queue++;
850 }
851 } else if (ret == -EAGAIN) {
852 netif_tx_stop_queue(txq);
853 ndev_ctx->eth_stats.stop_queue++;
854 if (atomic_read(&nvchan->queue_sends) < 1) {
855 netif_tx_wake_queue(txq);
856 ndev_ctx->eth_stats.wake_queue++;
857 ret = -ENOSPC;
858 }
859 } else {
860 netdev_err(ndev,
861 "Unable to send packet pages %u len %u, ret %d\n",
862 packet->page_buf_cnt, packet->total_data_buflen,
863 ret);
864 }
865
866 return ret;
867}
868
869/* Move packet out of multi send data (msd), and clear msd */
870static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
871 struct sk_buff **msd_skb,
872 struct multi_send_data *msdp)
873{
874 *msd_skb = msdp->skb;
875 *msd_send = msdp->pkt;
876 msdp->skb = NULL;
877 msdp->pkt = NULL;
878 msdp->count = 0;
879}
880
881/* RCU already held by caller */
882int netvsc_send(struct net_device *ndev,
883 struct hv_netvsc_packet *packet,
884 struct rndis_message *rndis_msg,
885 struct hv_page_buffer *pb,
886 struct sk_buff *skb)
887{
888 struct net_device_context *ndev_ctx = netdev_priv(ndev);
889 struct netvsc_device *net_device
890 = rcu_dereference_bh(ndev_ctx->nvdev);
891 struct hv_device *device = ndev_ctx->device_ctx;
892 int ret = 0;
893 struct netvsc_channel *nvchan;
894 u32 pktlen = packet->total_data_buflen, msd_len = 0;
895 unsigned int section_index = NETVSC_INVALID_INDEX;
896 struct multi_send_data *msdp;
897 struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
898 struct sk_buff *msd_skb = NULL;
899 bool try_batch, xmit_more;
900
901 /* If device is rescinded, return error and packet will get dropped. */
902 if (unlikely(!net_device || net_device->destroy))
903 return -ENODEV;
904
905 nvchan = &net_device->chan_table[packet->q_idx];
906 packet->send_buf_index = NETVSC_INVALID_INDEX;
907 packet->cp_partial = false;
908
909 /* Send control message directly without accessing msd (Multi-Send
910 * Data) field which may be changed during data packet processing.
911 */
912 if (!skb)
913 return netvsc_send_pkt(device, packet, net_device, pb, skb);
914
915 /* batch packets in send buffer if possible */
916 msdp = &nvchan->msd;
917 if (msdp->pkt)
918 msd_len = msdp->pkt->total_data_buflen;
919
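	/* Three possibilities:
	 *  1. the whole packet fits after the pending data in the current
	 *     section - append it there;
	 *  2. only the RNDIS header fits - copy just the header
	 *     (cp_partial) and send the payload as page buffers;
	 *  3. otherwise claim a fresh section (flushing any pending data),
	 *     or fall back to an unbatched send if none is free.
	 */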
920 try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
921 if (try_batch && msd_len + pktlen + net_device->pkt_align <
922 net_device->send_section_size) {
923 section_index = msdp->pkt->send_buf_index;
924
925 } else if (try_batch && msd_len + packet->rmsg_size <
926 net_device->send_section_size) {
927 section_index = msdp->pkt->send_buf_index;
928 packet->cp_partial = true;
929
930 } else if (pktlen + net_device->pkt_align <
931 net_device->send_section_size) {
932 section_index = netvsc_get_next_send_section(net_device);
933 if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
934 ++ndev_ctx->eth_stats.tx_send_full;
935 } else {
936 move_pkt_msd(&msd_send, &msd_skb, msdp);
937 msd_len = 0;
938 }
939 }
940
	/* Keep aggregating only if the stack says more data is coming,
	 * we are not doing a mixed-mode (partial copy) send, and the
	 * transmit queue is not flow blocked.
	 */
944 xmit_more = skb->xmit_more &&
945 !packet->cp_partial &&
946 !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
947
948 if (section_index != NETVSC_INVALID_INDEX) {
949 netvsc_copy_to_send_buf(net_device,
950 section_index, msd_len,
951 packet, rndis_msg, pb, xmit_more);
952
953 packet->send_buf_index = section_index;
954
955 if (packet->cp_partial) {
956 packet->page_buf_cnt -= packet->rmsg_pgcnt;
957 packet->total_data_buflen = msd_len + packet->rmsg_size;
958 } else {
959 packet->page_buf_cnt = 0;
960 packet->total_data_buflen += msd_len;
961 }
962
963 if (msdp->pkt) {
964 packet->total_packets += msdp->pkt->total_packets;
965 packet->total_bytes += msdp->pkt->total_bytes;
966 }
967
968 if (msdp->skb)
969 dev_consume_skb_any(msdp->skb);
970
971 if (xmit_more) {
972 msdp->skb = skb;
973 msdp->pkt = packet;
974 msdp->count++;
975 } else {
976 cur_send = packet;
977 msdp->skb = NULL;
978 msdp->pkt = NULL;
979 msdp->count = 0;
980 }
981 } else {
982 move_pkt_msd(&msd_send, &msd_skb, msdp);
983 cur_send = packet;
984 }
985
986 if (msd_send) {
987 int m_ret = netvsc_send_pkt(device, msd_send, net_device,
988 NULL, msd_skb);
989
990 if (m_ret != 0) {
991 netvsc_free_send_slot(net_device,
992 msd_send->send_buf_index);
993 dev_kfree_skb_any(msd_skb);
994 }
995 }
996
997 if (cur_send)
998 ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
999
1000 if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
1001 netvsc_free_send_slot(net_device, section_index);
1002
1003 return ret;
1004}
1005
1006/* Send pending recv completions */
1007static int send_recv_completions(struct net_device *ndev,
1008 struct netvsc_device *nvdev,
1009 struct netvsc_channel *nvchan)
1010{
1011 struct multi_recv_comp *mrc = &nvchan->mrc;
1012 struct recv_comp_msg {
1013 struct nvsp_message_header hdr;
1014 u32 status;
1015 } __packed;
1016 struct recv_comp_msg msg = {
1017 .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
1018 };
1019 int ret;
1020
1021 while (mrc->first != mrc->next) {
1022 const struct recv_comp_data *rcd
1023 = mrc->slots + mrc->first;
1024
1025 msg.status = rcd->status;
1026 ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
1027 rcd->tid, VM_PKT_COMP, 0);
1028 if (unlikely(ret)) {
1029 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1030
1031 ++ndev_ctx->eth_stats.rx_comp_busy;
1032 return ret;
1033 }
1034
1035 if (++mrc->first == nvdev->recv_completion_cnt)
1036 mrc->first = 0;
1037 }
1038
1039 /* receive completion ring has been emptied */
1040 if (unlikely(nvdev->destroy))
1041 wake_up(&nvdev->wait_drain);
1042
1043 return 0;
1044}
1045
1046/* Count how many receive completions are outstanding */
1047static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
1048 const struct multi_recv_comp *mrc,
1049 u32 *filled, u32 *avail)
1050{
1051 u32 count = nvdev->recv_completion_cnt;
1052
1053 if (mrc->next >= mrc->first)
1054 *filled = mrc->next - mrc->first;
1055 else
1056 *filled = (count - mrc->first) + mrc->next;
1057
1058 *avail = count - *filled - 1;
1059}
1060
1061/* Add receive complete to ring to send to host. */
1062static void enq_receive_complete(struct net_device *ndev,
1063 struct netvsc_device *nvdev, u16 q_idx,
1064 u64 tid, u32 status)
1065{
1066 struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
1067 struct multi_recv_comp *mrc = &nvchan->mrc;
1068 struct recv_comp_data *rcd;
1069 u32 filled, avail;
1070
1071 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1072
1073 if (unlikely(filled > NAPI_POLL_WEIGHT)) {
1074 send_recv_completions(ndev, nvdev, nvchan);
1075 recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1076 }
1077
1078 if (unlikely(!avail)) {
1079 netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1080 q_idx, tid);
1081 return;
1082 }
1083
1084 rcd = mrc->slots + mrc->next;
1085 rcd->tid = tid;
1086 rcd->status = status;
1087
1088 if (++mrc->next == nvdev->recv_completion_cnt)
1089 mrc->next = 0;
1090}
1091
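/* Handle a transfer-page data packet: each range describes one RNDIS
 * packet inside the host-provided receive buffer.  Validate the offsets,
 * pass each packet to the RNDIS filter, then queue a single receive
 * completion carrying the aggregate status.
 */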
1092static int netvsc_receive(struct net_device *ndev,
1093 struct netvsc_device *net_device,
1094 struct net_device_context *net_device_ctx,
1095 struct hv_device *device,
1096 struct vmbus_channel *channel,
1097 const struct vmpacket_descriptor *desc,
1098 struct nvsp_message *nvsp)
1099{
1100 const struct vmtransfer_page_packet_header *vmxferpage_packet
1101 = container_of(desc, const struct vmtransfer_page_packet_header, d);
1102 u16 q_idx = channel->offermsg.offer.sub_channel_index;
1103 char *recv_buf = net_device->recv_buf;
1104 u32 status = NVSP_STAT_SUCCESS;
1105 int i;
1106 int count = 0;
1107
1108 /* Make sure this is a valid nvsp packet */
1109 if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1110 netif_err(net_device_ctx, rx_err, ndev,
1111 "Unknown nvsp packet type received %u\n",
1112 nvsp->hdr.msg_type);
1113 return 0;
1114 }
1115
1116 if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1117 netif_err(net_device_ctx, rx_err, ndev,
1118 "Invalid xfer page set id - expecting %x got %x\n",
1119 NETVSC_RECEIVE_BUFFER_ID,
1120 vmxferpage_packet->xfer_pageset_id);
1121 return 0;
1122 }
1123
1124 count = vmxferpage_packet->range_cnt;
1125
	/* Each range represents one RNDIS pkt that contains one Ethernet frame */
1127 for (i = 0; i < count; i++) {
1128 u32 offset = vmxferpage_packet->ranges[i].byte_offset;
1129 u32 buflen = vmxferpage_packet->ranges[i].byte_count;
1130 void *data;
1131 int ret;
1132
1133 if (unlikely(offset + buflen > net_device->recv_buf_size)) {
1134 status = NVSP_STAT_FAIL;
1135 netif_err(net_device_ctx, rx_err, ndev,
1136 "Packet offset:%u + len:%u too big\n",
1137 offset, buflen);
1138
1139 continue;
1140 }
1141
1142 data = recv_buf + offset;
1143
1144 trace_rndis_recv(ndev, q_idx, data);
1145
1146 /* Pass it to the upper layer */
1147 ret = rndis_filter_receive(ndev, net_device,
1148 channel, data, buflen);
1149
1150 if (unlikely(ret != NVSP_STAT_SUCCESS))
1151 status = NVSP_STAT_FAIL;
1152 }
1153
1154 enq_receive_complete(ndev, net_device, q_idx,
1155 vmxferpage_packet->d.trans_id, status);
1156
1157 return count;
1158}
1159
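/* The host sent a new send indirection table; copy it into tx_table so
 * the transmit path picks up the new queue mapping.
 */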
1160static void netvsc_send_table(struct hv_device *hdev,
1161 struct nvsp_message *nvmsg)
1162{
1163 struct net_device *ndev = hv_get_drvdata(hdev);
1164 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1165 int i;
1166 u32 count, *tab;
1167
1168 count = nvmsg->msg.v5_msg.send_table.count;
1169 if (count != VRSS_SEND_TAB_SIZE) {
1170 netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1171 return;
1172 }
1173
1174 tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
1175 nvmsg->msg.v5_msg.send_table.offset);
1176
1177 for (i = 0; i < count; i++)
1178 net_device_ctx->tx_table[i] = tab[i];
1179}
1180
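/* Record the VF association (allocation state and serial number)
 * announced by the host.
 */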
1181static void netvsc_send_vf(struct net_device_context *net_device_ctx,
1182 struct nvsp_message *nvmsg)
1183{
1184 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1185 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1186}
1187
1188static inline void netvsc_receive_inband(struct hv_device *hdev,
1189 struct net_device_context *net_device_ctx,
1190 struct nvsp_message *nvmsg)
1191{
1192 switch (nvmsg->hdr.msg_type) {
1193 case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1194 netvsc_send_table(hdev, nvmsg);
1195 break;
1196
1197 case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1198 netvsc_send_vf(net_device_ctx, nvmsg);
1199 break;
1200 }
1201}
1202
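/* Demultiplex one vmpacket descriptor: completions, transfer-page data
 * (RNDIS receive) and inband control messages.  Returns the number of
 * data packets processed, for NAPI budget accounting.
 */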
1203static int netvsc_process_raw_pkt(struct hv_device *device,
1204 struct vmbus_channel *channel,
1205 struct netvsc_device *net_device,
1206 struct net_device *ndev,
1207 const struct vmpacket_descriptor *desc,
1208 int budget)
1209{
1210 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1211 struct nvsp_message *nvmsg = hv_pkt_data(desc);
1212
1213 trace_nvsp_recv(ndev, channel, nvmsg);
1214
1215 switch (desc->type) {
1216 case VM_PKT_COMP:
1217 netvsc_send_completion(net_device, channel, device,
1218 desc, budget);
1219 break;
1220
	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, net_device_ctx,
				      device, channel, desc, nvmsg);
1225
1226 case VM_PKT_DATA_INBAND:
1227 netvsc_receive_inband(device, net_device_ctx, nvmsg);
1228 break;
1229
1230 default:
1231 netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1232 desc->type, desc->trans_id);
1233 break;
1234 }
1235
1236 return 0;
1237}
1238
1239static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1240{
1241 struct vmbus_channel *primary = channel->primary_channel;
1242
1243 return primary ? primary->device_obj : channel->device_obj;
1244}
1245
/* Network processing softirq handler.
 * Processes data in the incoming ring buffer from the host.
 * Stops when the ring is empty or the budget is met or exceeded.
 */
1250int netvsc_poll(struct napi_struct *napi, int budget)
1251{
1252 struct netvsc_channel *nvchan
1253 = container_of(napi, struct netvsc_channel, napi);
1254 struct netvsc_device *net_device = nvchan->net_device;
1255 struct vmbus_channel *channel = nvchan->channel;
1256 struct hv_device *device = netvsc_channel_to_device(channel);
1257 struct net_device *ndev = hv_get_drvdata(device);
1258 int work_done = 0;
1259
1260 /* If starting a new interval */
1261 if (!nvchan->desc)
1262 nvchan->desc = hv_pkt_iter_first(channel);
1263
1264 while (nvchan->desc && work_done < budget) {
1265 work_done += netvsc_process_raw_pkt(device, channel, net_device,
1266 ndev, nvchan->desc, budget);
1267 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1268 }
1269
	/* If sending the pending receive completions succeeded
	 * and we did not exhaust the NAPI budget this time
	 * and we are not busy polling,
	 * then re-enable host interrupts
	 * and reschedule if the ring is not empty.
	 */
1276 if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
1277 work_done < budget &&
1278 napi_complete_done(napi, work_done) &&
1279 hv_end_read(&channel->inbound) &&
1280 napi_schedule_prep(napi)) {
1281 hv_begin_read(&channel->inbound);
1282 __napi_schedule(napi);
1283 }
1284
	/* Driver may overshoot since there may be multiple packets per descriptor */
1286 return min(work_done, budget);
1287}
1288
/* Callback invoked when data is available in the host ring buffer.
 * Processing is deferred to the network softirq (NAPI).
 */
1292void netvsc_channel_cb(void *context)
1293{
1294 struct netvsc_channel *nvchan = context;
1295 struct vmbus_channel *channel = nvchan->channel;
1296 struct hv_ring_buffer_info *rbi = &channel->inbound;
1297
1298 /* preload first vmpacket descriptor */
1299 prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
1300
1301 if (napi_schedule_prep(&nvchan->napi)) {
		/* disable interrupts from host */
1303 hv_begin_read(rbi);
1304
1305 __napi_schedule_irqoff(&nvchan->napi);
1306 }
1307}
1308
1309/*
1310 * netvsc_device_add - Callback when the device belonging to this
1311 * driver is added
1312 */
1313struct netvsc_device *netvsc_device_add(struct hv_device *device,
1314 const struct netvsc_device_info *device_info)
1315{
1316 int i, ret = 0;
1317 struct netvsc_device *net_device;
1318 struct net_device *ndev = hv_get_drvdata(device);
1319 struct net_device_context *net_device_ctx = netdev_priv(ndev);
1320
1321 net_device = alloc_net_device();
1322 if (!net_device)
1323 return ERR_PTR(-ENOMEM);
1324
1325 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1326 net_device_ctx->tx_table[i] = 0;
1327
	/* Because the device uses NAPI, all interrupt batching and
	 * control is done via the network softirq, not in the channel
	 * callback.
	 */
1331 set_channel_read_mode(device->channel, HV_CALL_ISR);
1332
	/* If we're reopening the device, we may have multiple queues; fill
	 * the chn_table with the default channel so it can be used before
	 * the subchannels are opened.
	 * Initialize the channel state before we open; we can be
	 * interrupted as soon as the channel is opened.
	 */
1339
1340 for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1341 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1342
1343 nvchan->channel = device->channel;
1344 nvchan->net_device = net_device;
1345 u64_stats_init(&nvchan->tx_stats.syncp);
1346 u64_stats_init(&nvchan->rx_stats.syncp);
1347 }
1348
1349 /* Enable NAPI handler before init callbacks */
1350 netif_napi_add(ndev, &net_device->chan_table[0].napi,
1351 netvsc_poll, NAPI_POLL_WEIGHT);
1352
1353 /* Open the channel */
1354 ret = vmbus_open(device->channel, netvsc_ring_bytes,
1355 netvsc_ring_bytes, NULL, 0,
1356 netvsc_channel_cb, net_device->chan_table);
1357
1358 if (ret != 0) {
1359 netdev_err(ndev, "unable to open channel: %d\n", ret);
1360 goto cleanup;
1361 }
1362
1363 /* Channel is opened */
1364 netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
1365
1366 napi_enable(&net_device->chan_table[0].napi);
1367
1368 /* Connect with the NetVsp */
1369 ret = netvsc_connect_vsp(device, net_device, device_info);
1370 if (ret != 0) {
1371 netdev_err(ndev,
1372 "unable to connect to NetVSP - %d\n", ret);
1373 goto close;
1374 }
1375
	/* Writing the nvdev pointer unblocks netvsc_send(); make sure
	 * chn_table is fully populated before this point.
	 */
1379 rcu_assign_pointer(net_device_ctx->nvdev, net_device);
1380
1381 return net_device;
1382
1383close:
1384 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
1385 napi_disable(&net_device->chan_table[0].napi);
1386
1387 /* Now, we can close the channel safely */
1388 vmbus_close(device->channel);
1389
1390cleanup:
1391 netif_napi_del(&net_device->chan_table[0].napi);
1392 free_netvsc_device(&net_device->rcu);
1393
1394 return ERR_PTR(ret);
1395}