// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <linux/filter.h>

#include <asm/sync_bitops.h>
#include <asm/mshyperv.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
int netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
	int ret, retry = 0;

	/* Block sending traffic to the VF if it is about to be removed */
	if (!vf)
		net_device_ctx->data_path_is_vf = vf;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

again:
	trace_nvsp_send(ndev, init_pkt);

	ret = vmbus_sendpacket(dev->channel, init_pkt,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	/* If the switch to/from the VF failed, let data_path_is_vf stay
	 * false, so we keep using the synthetic path to send data.
	 */
	if (ret) {
		if (ret != -EAGAIN) {
			netdev_err(ndev,
				   "Unable to send sw datapath msg, err: %d\n",
				   ret);
			return ret;
		}

		if (retry++ < RETRY_MAX) {
			usleep_range(RETRY_US_LO, RETRY_US_HI);
			goto again;
		} else {
			netdev_err(ndev,
				   "Retry failed to send sw datapath msg, err: %d\n",
				   ret);
			return ret;
		}
	}

	wait_for_completion(&nv_dev->channel_init_wait);
	net_device_ctx->data_path_is_vf = vf;

	return 0;
}

/* Worker to set up subchannels on initial setup.
 * The initial hotplug event occurs in softirq context
 * and can't wait for the channels.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		schedule_work(w);
		return;
	}

	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fall back to the primary channel only */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}

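/* Allocate and initialize a netvsc_device instance. The structure is
 * freed via RCU in free_netvsc_device() once all readers are done.
 */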
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	net_device->tx_disable = true;

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}

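/* RCU callback: runs after the grace period, when no reader can still
 * hold a reference to the device or its buffers.
 */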
static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);
	vfree(nvdev->recv_buf);
	vfree(nvdev->send_buf);
	bitmap_free(nvdev->send_section_map);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
		kfree(nvdev->chan_table[i].recv_buf);
		vfree(nvdev->chan_table[i].mrc.slots);
	}

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (i.e. we sent a
	 * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel has been rescinded,
		 * ignore it, since we cannot send on a rescinded channel.
		 * This allows us to clean up properly even when the channel
		 * is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and accept a
		 * leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to send revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}

static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have set up.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
	 * to send a revoke msg here.
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel has been rescinded,
		 * ignore it, since we cannot send on a rescinded channel.
		 * This allows us to clean up properly even when the channel
		 * is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and accept a
		 * leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to send revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}

static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   &net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and accept a
		 * leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
	}
}

static void netvsc_teardown_send_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->send_buf_gpadl_handle.gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   &net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and accept a
		 * leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
	}
}

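/* Allocate the per-channel receive completion ring, preferring memory on
 * the NUMA node of the channel's target CPU and falling back to any node.
 */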
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}

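/* Allocate the receive and send buffers, establish GPADLs for them with
 * the host, and negotiate the section sizes and counts with the NetVSP.
 */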
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	int i, ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow a smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle.gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Ensure buffer will not overflow */
	if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
		netdev_err(ndev, "invalid recv_section_size %u\n",
			   net_device->recv_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

 433
 434	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
 435		struct netvsc_channel *nvchan = &net_device->chan_table[i];
 436
 437		nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
 438		if (nvchan->recv_buf == NULL) {
 439			ret = -ENOMEM;
 440			goto cleanup;
 441		}
 442	}
 443
 444	/* Setup receive completion ring.
 445	 * Add 1 to the recv_section_cnt because at least one entry in a
 446	 * ring buffer has to be empty.
 447	 */
 448	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
 449	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
 450	if (ret)
 451		goto cleanup;
 452
 453	/* Now setup the send buffer. */
 454	buf_size = device_info->send_sections * device_info->send_section_size;
 455	buf_size = round_up(buf_size, PAGE_SIZE);
 456
 457	net_device->send_buf = vzalloc(buf_size);
 458	if (!net_device->send_buf) {
 459		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
 460			   buf_size);
 461		ret = -ENOMEM;
 462		goto cleanup;
 463	}
 464	net_device->send_buf_size = buf_size;
 465
 466	/* Establish the gpadl handle for this buffer on this
 467	 * channel.  Note: This call uses the vmbus connection rather
 468	 * than the channel to establish the gpadl handle.
 469	 */
 470	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
 471				    buf_size,
 472				    &net_device->send_buf_gpadl_handle);
 473	if (ret != 0) {
 474		netdev_err(ndev,
 475			   "unable to establish send buffer's gpadl\n");
 476		goto cleanup;
 477	}
 478
 
 
 
 
 
 
 
 
 
 
 
	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle.gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.send_send_buf_complete.status !=
	    NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete send buffer initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size =
		init_packet->msg.v1_msg.send_send_buf_complete.section_size;
	if (net_device->send_section_size < NETVSC_MTU_MIN) {
		netdev_err(ndev, "invalid send_section_size %u\n",
			   net_device->send_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Set up state for managing the send buffer. */
	net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
						     GFP_KERNEL);
	if (!net_device->send_section_map) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}

 549
 550/* Negotiate NVSP protocol version */
 551static int negotiate_nvsp_ver(struct hv_device *device,
 552			      struct netvsc_device *net_device,
 553			      struct nvsp_message *init_packet,
 554			      u32 nvsp_ver)
 555{
 556	struct net_device *ndev = hv_get_drvdata(device);
 557	int ret;
 558
 559	memset(init_packet, 0, sizeof(struct nvsp_message));
 560	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
 561	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
 562	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
 563	trace_nvsp_send(ndev, init_packet);
 564
 565	/* Send the init request */
 566	ret = vmbus_sendpacket(device->channel, init_packet,
 567			       sizeof(struct nvsp_message),
 568			       (unsigned long)init_packet,
 569			       VM_PKT_DATA_INBAND,
 570			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 571
 572	if (ret != 0)
 573		return ret;
 574
 575	wait_for_completion(&net_device->channel_init_wait);
 576
 577	if (init_packet->msg.init_msg.init_complete.status !=
 578	    NVSP_STAT_SUCCESS)
 579		return -EINVAL;
 580
 581	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
 582		return 0;
 583
 584	/* NVSPv2 or later: Send NDIS config */
 585	memset(init_packet, 0, sizeof(struct nvsp_message));
 586	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
 587	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
 588	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
 589
 590	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
 591		if (hv_is_isolation_supported())
 592			netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
 593		else
 594			init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
 595
 596		/* Teaming bit is needed to receive link speed updates */
 597		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
 598	}
 599
 600	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
 601		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
 602
 603	trace_nvsp_send(ndev, init_packet);
 604
 605	ret = vmbus_sendpacket(device->channel, init_packet,
 606				sizeof(struct nvsp_message),
 607				VMBUS_RQST_ID_NO_RESPONSE,
 608				VM_PKT_DATA_INBAND, 0);
 609
 610	return ret;
 611}
 612
 613static int netvsc_connect_vsp(struct hv_device *device,
 614			      struct netvsc_device *net_device,
 615			      const struct netvsc_device_info *device_info)
 616{
 617	struct net_device *ndev = hv_get_drvdata(device);
 618	static const u32 ver_list[] = {
 619		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
 620		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
 621		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
 622	};
 623	struct nvsp_message *init_packet;
 624	int ndis_version, i, ret;
 625
 626	init_packet = &net_device->channel_init_pkt;
 627
 628	/* Negotiate the latest NVSP protocol supported */
 629	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
 630		if (negotiate_nvsp_ver(device, net_device, init_packet,
 631				       ver_list[i])  == 0) {
 632			net_device->nvsp_version = ver_list[i];
 633			break;
 634		}
 635
 636	if (i < 0) {
 637		ret = -EPROTO;
 638		goto cleanup;
 639	}
 640
 641	if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
 642		netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
 643			   net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
 644		ret = -EPROTO;
 645		goto cleanup;
 646	}
 647
 648	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
 649
 650	/* Send the ndis version */
 651	memset(init_packet, 0, sizeof(struct nvsp_message));
 652
 653	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
 654		ndis_version = 0x00060001;
 655	else
 656		ndis_version = 0x0006001e;
 657
 658	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
 659	init_packet->msg.v1_msg.
 660		send_ndis_ver.ndis_major_ver =
 661				(ndis_version & 0xFFFF0000) >> 16;
 662	init_packet->msg.v1_msg.
 663		send_ndis_ver.ndis_minor_ver =
 664				ndis_version & 0xFFFF;
 665
 666	trace_nvsp_send(ndev, init_packet);
 667
 668	/* Send the init request */
 669	ret = vmbus_sendpacket(device->channel, init_packet,
 670				sizeof(struct nvsp_message),
 671				VMBUS_RQST_ID_NO_RESPONSE,
 672				VM_PKT_DATA_INBAND, 0);
 673	if (ret != 0)
 674		goto cleanup;
 675
 676
 677	ret = netvsc_init_buf(device, net_device, device_info);
 678
 679cleanup:
 680	return ret;
 681}
 682
 683/*
 684 * netvsc_device_remove - Callback when the root bus device is removed
 685 */
 686void netvsc_device_remove(struct hv_device *device)
 687{
 688	struct net_device *ndev = hv_get_drvdata(device);
 689	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 690	struct netvsc_device *net_device
 691		= rtnl_dereference(net_device_ctx->nvdev);
 692	int i;
 693
 694	/*
 695	 * Revoke receive buffer. If host is pre-Win2016 then tear down
 696	 * receive buffer GPADL. Do the same for send buffer.
 697	 */
 698	netvsc_revoke_recv_buf(device, net_device, ndev);
 699	if (vmbus_proto_version < VERSION_WIN10)
 700		netvsc_teardown_recv_gpadl(device, net_device, ndev);
 701
 702	netvsc_revoke_send_buf(device, net_device, ndev);
 703	if (vmbus_proto_version < VERSION_WIN10)
 704		netvsc_teardown_send_gpadl(device, net_device, ndev);
 705
 706	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
 707
 708	/* Disable NAPI and disassociate its context from the device. */
 709	for (i = 0; i < net_device->num_chn; i++) {
 710		/* See also vmbus_reset_channel_cb(). */
 711		/* only disable enabled NAPI channel */
 712		if (i < ndev->real_num_rx_queues)
 713			napi_disable(&net_device->chan_table[i].napi);
 714
 715		netif_napi_del(&net_device->chan_table[i].napi);
 716	}
 717
 718	/*
 719	 * At this point, no one should be accessing net_device
 720	 * except in here
 721	 */
 722	netdev_dbg(ndev, "net device safe to remove\n");
 723
 724	/* Now, we can close the channel safely */
 725	vmbus_close(device->channel);
 726
 727	/*
 728	 * If host is Win2016 or higher then we do the GPADL tear down
 729	 * here after VMBus is closed.
 730	*/
 731	if (vmbus_proto_version >= VERSION_WIN10) {
 732		netvsc_teardown_recv_gpadl(device, net_device, ndev);
 733		netvsc_teardown_send_gpadl(device, net_device, ndev);
 734	}
 735
 
 
 
 
 
 
 736	/* Release all resources */
 737	free_netvsc_device_rcu(net_device);
 738}
 739
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct sk_buff *skb;
	u16 q_idx = 0;
	int queue_sends;
	u64 cmd_rqst;

	cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
	if (cmd_rqst == VMBUS_RQST_ERROR) {
		netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
		return;
	}

	skb = (struct sk_buff *)(unsigned long)cmd_rqst;

	/* Notify the layer above us */
	if (likely(skb)) {
		struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats_tx *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}

static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet;
	u32 msglen = hv_pkt_datalen(desc);
	struct nvsp_message *pkt_rqst;
	u64 cmd_rqst;
	u32 status;

	/* First check if this is a VMBUS completion without data payload */
	if (!msglen) {
		cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
								   desc->trans_id);
		if (cmd_rqst == VMBUS_RQST_ERROR) {
			netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
			return;
		}

		pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
		switch (pkt_rqst->hdr.msg_type) {
		case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
			complete(&net_device->channel_init_wait);
			break;

		default:
			netdev_err(ndev, "Unexpected VMBUS completion!!\n");
		}
		return;
	}

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
		return;
	}

	nvsp_packet = hv_pkt_data(desc);
	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_message_init_complete)) {
			netdev_err(ndev, "nvsp_msg length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG5_TYPE_SUBCHANNEL:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_5_subchannel_complete)) {
			netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
		    sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
			if (net_ratelimit())
				netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
					   msglen);
			return;
		}

		/* If status indicates an error, output a message so we know
		 * there's a problem. But process the completion anyway so the
		 * resources are released.
		 */
		status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
		if (status != NVSP_STAT_SUCCESS && net_ratelimit())
			netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
				   status);

		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		return;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
		return;
	}

	/* Copy the response back */
	memcpy(&net_device->channel_init_pkt, nvsp_packet,
	       sizeof(struct nvsp_message));
	complete(&net_device->channel_init_wait);
}

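/* Find and claim a free send buffer section. Returns NETVSC_INVALID_INDEX
 * when every section is in use.
 */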
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long *map_addr = net_device->send_section_map;
	unsigned int i;

	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
		if (sync_test_and_set_bit(i, map_addr) == 0)
			return i;
	}

	return NETVSC_INVALID_INDEX;
}

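/* Copy the packet's page buffers into the claimed send buffer section,
 * padding the tail to pkt_align when more packets are expected (xmit_more).
 */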
static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				    unsigned int section_index,
				    u32 pend_size,
				    struct hv_netvsc_packet *packet,
				    struct rndis_message *rndis_msg,
				    struct hv_page_buffer *pb,
				    bool xmit_more)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	u32 padding = 0;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;
	u32 remain;

	/* Add padding */
	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
	if (xmit_more && remain) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
		u32 offset = pb[i].offset;
		u32 len = pb[i].len;

		memcpy(dest, (src + offset), len);
		dest += len;
	}

	if (padding)
		memset(dest, 0, padding);
}

void netvsc_dma_unmap(struct hv_device *hv_dev,
		      struct hv_netvsc_packet *packet)
{
	int i;

	if (!hv_is_isolation_supported())
		return;

	if (!packet->dma_range)
		return;

	for (i = 0; i < packet->page_buf_cnt; i++)
		dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
				 packet->dma_range[i].mapping_size,
				 DMA_TO_DEVICE);

	kfree(packet->dma_range);
}

/* netvsc_dma_map - Map swiotlb bounce buffers for the data pages of a
 * packet sent by vmbus_sendpacket_pagebuffer() in an isolation VM.
 *
 * In an isolation VM, the netvsc send buffer has been marked visible to
 * the host, so data copied into the send buffer doesn't need a bounce
 * buffer. The data pages handled by vmbus_sendpacket_pagebuffer() may
 * not be copied to the send buffer, so those pages need to be mapped
 * through the swiotlb bounce buffer; netvsc_dma_map() does that,
 * converting the pfns in struct hv_page_buffer to the bounce buffer's
 * pfns. The loop here is necessary because the entries in the page
 * buffer array are not necessarily full pages of data. Each entry in
 * the array has a separate offset and len that may be non-zero, even
 * for entries in the middle of the array, and the entries are not
 * physically contiguous. So each entry must be individually mapped
 * rather than as a contiguous unit, which is why dma_map_sg() cannot
 * be used here.
 */
static int netvsc_dma_map(struct hv_device *hv_dev,
			  struct hv_netvsc_packet *packet,
			  struct hv_page_buffer *pb)
{
	u32 page_count = packet->page_buf_cnt;
	dma_addr_t dma;
	int i;

	if (!hv_is_isolation_supported())
		return 0;

	packet->dma_range = kcalloc(page_count,
				    sizeof(*packet->dma_range),
				    GFP_ATOMIC);
	if (!packet->dma_range)
		return -ENOMEM;

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
					 + pb[i].offset);
		u32 len = pb[i].len;

		dma = dma_map_single(&hv_dev->device, src, len,
				     DMA_TO_DEVICE);
		if (dma_mapping_error(&hv_dev->device, dma)) {
			kfree(packet->dma_range);
			return -ENOMEM;
		}

		/* pb[].offset and pb[].len are not changed during dma mapping
		 * and so are not reassigned.
		 */
		packet->dma_range[i].dma = dma;
		packet->dma_range[i].mapping_size = len;
		pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
	}

	return 0;
}

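/* Build the NVSP_MSG1_TYPE_SEND_RNDIS_PKT message for one packet and hand
 * it to the VMBus channel, stopping the transmit queue when the outbound
 * ring runs low on space.
 */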
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet *rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

	memset(&nvmsg, 0, sizeof(struct nvsp_message));
	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	trace_nvsp_send_pkt(ndev, out_channel, rpkt);

	packet->dma_range = NULL;
	if (packet->page_buf_cnt) {
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
		if (ret) {
			ret = -EAGAIN;
			goto exit;
		}

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);

		if (ret)
			netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

exit:
	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	if (netif_tx_queue_stopped(txq) &&
	    atomic_read(&nvchan->queue_sends) < 1 &&
	    !net_device->tx_disable) {
		netif_tx_wake_queue(txq);
		ndev_ctx->eth_stats.wake_queue++;
		if (ret == -EAGAIN)
			ret = -ENOSPC;
	}

	return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}

/* RCU already held by caller */
/* The batching/bouncing logic attempts to optimize performance.
 *
 * For small, non-LSO packets we copy the packet to a send buffer
 * which is pre-registered with the Hyper-V side. This enables the
 * hypervisor to avoid remapping the aperture to access the packet
 * descriptor and data.
 *
 * If we already started using a buffer and the netdev is transmitting
 * a burst of packets, keep on copying into the buffer until it is
 * full or we are done collecting a burst. If there is an existing
 * buffer with space for the RNDIS descriptor but not the packet, copy
 * the RNDIS descriptor to the buffer, keeping the packet in place.
 *
 * If we do batching and send more than one packet using a single
 * NetVSC message, free the SKBs of the packets copied, except for the
 * last packet. This is done to streamline the handling of the case
 * where the last packet only had the RNDIS descriptor copied to the
 * send buffer, with the data pointers included in the NetVSC message.
 */
int netvsc_send(struct net_device *ndev,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb,
		bool xdp_tx)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch, xmit_more;

	/* If the device is rescinded, return an error and the packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send a control message or XDP packet directly without accessing
	 * the msd (Multi-Send Data) field, which may be changed during data
	 * packet processing.
	 */
	if (!skb || xdp_tx)
		return netvsc_send_pkt(device, packet, net_device, pb, skb);

	/* batch packets in the send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	/* Keep aggregating only if the stack says more data is coming,
	 * we are not doing a mixed-mode send, and we are not flow-blocked.
	 */
	xmit_more = netdev_xmit_more() &&
		!packet->cp_partial &&
		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, xmit_more);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}

/* Send pending recv completions */
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	}  __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}

/* Count how many receive completions are outstanding */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
				 const struct multi_recv_comp *mrc,
				 u32 *filled, u32 *avail)
{
	u32 count = nvdev->recv_completion_cnt;

	if (mrc->next >= mrc->first)
		*filled = mrc->next - mrc->first;
	else
		*filled = (count - mrc->first) + mrc->next;

	*avail = count - *filled - 1;
}

/* Add a receive completion to the ring to send to the host. */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}

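/* Handle a receive buffer (transfer page) packet: validate the ranges,
 * pass each RNDIS sub-packet to the RNDIS filter, and queue one receive
 * completion for the whole transfer.
 */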
static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct netvsc_channel *nvchan,
			  const struct vmpacket_descriptor *desc)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = nvchan->channel;
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	const struct nvsp_message *nvsp = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "invalid nvsp header, length too small: %u\n",
			  msglen);
		return 0;
	}

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	/* Validate the xfer page pkt header */
	if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page pkt, offset too small: %u\n",
			  desc->offset8 << 3);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Check count for a valid value */
	if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Range count is not valid: %d\n",
			  count);
		return 0;
	}

	/* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
	for (i = 0; i < count; i++) {
		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
		void *data;
		int ret;

		if (unlikely(offset > net_device->recv_buf_size ||
			     buflen > net_device->recv_buf_size - offset)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet offset:%u + len:%u too big\n",
				  offset, buflen);

			continue;
		}

		/* We're going to copy (sections of) the packet into nvchan->recv_buf;
		 * make sure that nvchan->recv_buf is large enough to hold the packet.
		 */
		if (unlikely(buflen > net_device->recv_section_size)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet too big: buflen=%u recv_section_size=%u\n",
				  buflen, net_device->recv_section_size);

			continue;
		}

		data = recv_buf + offset;

		nvchan->rsc.is_last = (i == count - 1);

		trace_rndis_recv(ndev, q_idx, data);

		/* Pass it to the upper layer */
		ret = rndis_filter_receive(ndev, net_device,
					   nvchan, data, buflen);

		if (unlikely(ret != NVSP_STAT_SUCCESS)) {
			/* Drop the incomplete packet */
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
		}
	}

	enq_receive_complete(ndev, net_device, q_idx,
			     vmxferpage_packet->d.trans_id, status);

	return count;
}

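/* Update the transmit indirection table from an
 * NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE message sent by the host.
 */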
static void netvsc_send_table(struct net_device *ndev,
			      struct netvsc_device *nvscdev,
			      const struct nvsp_message *nvmsg,
			      u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 count, offset, *tab;
	int i;

	/* Ensure packet is big enough to read send_table fields */
	if (msglen < sizeof(struct nvsp_message_header) +
		     sizeof(struct nvsp_5_send_indirect_table)) {
		netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
		return;
	}

	count = nvmsg->msg.v5_msg.send_table.count;
	offset = nvmsg->msg.v5_msg.send_table.offset;

	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
	 * wrong due to a host bug. So fix the offset here.
	 */
	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
	    msglen >= sizeof(struct nvsp_message_header) +
	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
		offset = sizeof(struct nvsp_message_header) +
			 sizeof(union nvsp_6_message_uber);

	/* Boundary check for all versions */
	if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
		netdev_err(ndev, "Received send-table offset too big:%u\n",
			   offset);
		return;
	}

	tab = (void *)nvmsg + offset;

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}

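/* Record the VF association state sent by the host and wake up anyone
 * waiting on vf_add for the VF to appear.
 */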
static void netvsc_send_vf(struct net_device *ndev,
			   const struct nvsp_message *nvmsg,
			   u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	/* Ensure packet is big enough to read its fields */
	if (msglen < sizeof(struct nvsp_message_header) +
		     sizeof(struct nvsp_4_send_vf_association)) {
		netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
		return;
	}

	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;

	if (net_device_ctx->vf_alloc)
		complete(&net_device_ctx->vf_add);

	netdev_info(ndev, "VF slot %u %s\n",
		    net_device_ctx->vf_serial,
		    net_device_ctx->vf_alloc ? "added" : "removed");
}

static void netvsc_receive_inband(struct net_device *ndev,
				  struct netvsc_device *nvscdev,
				  const struct vmpacket_descriptor *desc)
{
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
		return;
	}

	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		if (hv_is_isolation_supported())
			netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
		else
			netvsc_send_vf(ndev, nvmsg, msglen);
		break;
	}
}

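/* Dispatch one VMBus descriptor to the completion, transfer page, or
 * inband handler. Returns the number of data packets processed.
 */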
static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct netvsc_channel *nvchan,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
				  const struct vmpacket_descriptor *desc,
				  int budget)
{
	struct vmbus_channel *channel = nvchan->channel;
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);

	trace_nvsp_recv(ndev, channel, nvmsg);

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(ndev, net_device, channel, desc, budget);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, nvchan, desc);

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(ndev, net_device, desc);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, desc->trans_id);
		break;
	}

	return 0;
}

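/* For a sub-channel, resolve the hv_device through the primary channel. */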
static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
	struct vmbus_channel *primary = channel->primary_channel;

	return primary ? primary->device_obj : channel->device_obj;
}

/* Network processing softirq.
 * Processes data in the incoming ring buffer from the host.
 * Stops when the ring is empty or the budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;
	int ret;

	/* If starting a new interval */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	nvchan->xdp_flush = false;

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	if (nvchan->xdp_flush)
		xdp_do_flush();

	/* Send any pending receive completions */
	ret = send_recv_completions(ndev, net_device, nvchan);

	/* If we did not exhaust the NAPI budget this time and are not busy
	 * polling, re-enable host interrupts and reschedule if the ring is
	 * not empty or sending a receive completion failed.
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    (ret || hv_end_read(&channel->inbound)) &&
	    napi_schedule_prep(napi)) {
		hv_begin_read(&channel->inbound);
		__napi_schedule(napi);
	}

	/* The driver may overshoot since there can be multiple packets per descriptor */
	return min(work_done, budget);
}

/* Callback when data is available in the host ring buffer.
 * Processing is deferred until the network softirq (NAPI).
 */
void netvsc_channel_cb(void *context)
{
	struct netvsc_channel *nvchan = context;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/* preload the first vmpacket descriptor */
	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

	if (napi_schedule_prep(&nvchan->napi)) {
		/* disable interrupts from the host */
		hv_begin_read(rbi);

		__napi_schedule_irqoff(&nvchan->napi);
	}
}

1710/*
1711 * netvsc_device_add - Callback when the device belonging to this
1712 * driver is added
1713 */
1714struct netvsc_device *netvsc_device_add(struct hv_device *device,
1715				const struct netvsc_device_info *device_info)
1716{
1717	int i, ret = 0;
1718	struct netvsc_device *net_device;
1719	struct net_device *ndev = hv_get_drvdata(device);
1720	struct net_device_context *net_device_ctx = netdev_priv(ndev);
1721
1722	net_device = alloc_net_device();
1723	if (!net_device)
1724		return ERR_PTR(-ENOMEM);
1725
1726	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1727		net_device_ctx->tx_table[i] = 0;
1728
1729	/* Because the device uses NAPI, all the interrupt batching and
1730	 * control is done via Net softirq, not the channel handling
1731	 */
1732	set_channel_read_mode(device->channel, HV_CALL_ISR);
1733
1734	/* If we're reopening the device we may have multiple queues; fill the
1735	 * chn_table with the default channel so it can be used before the
1736	 * subchannels are opened.
1737	 * Initialize the channel state before opening, because we can be
1738	 * interrupted as soon as the channel is open.
1739	 */
1740
1741	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1742		struct netvsc_channel *nvchan = &net_device->chan_table[i];
1743
1744		nvchan->channel = device->channel;
1745		nvchan->net_device = net_device;
1746		u64_stats_init(&nvchan->tx_stats.syncp);
1747		u64_stats_init(&nvchan->rx_stats.syncp);
1748
1749		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
1750
1751		if (ret) {
1752			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
1753			goto cleanup2;
1754		}
1755
1756		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
1757						 MEM_TYPE_PAGE_SHARED, NULL);
1758
1759		if (ret) {
1760			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
1761			goto cleanup2;
1762		}
1763	}
1764
1765	/* Enable NAPI handler before init callbacks */
1766	netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
1767
1768	/* Open the channel */
1769	device->channel->next_request_id_callback = vmbus_next_request_id;
1770	device->channel->request_addr_callback = vmbus_request_addr;
1771	device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
1772	device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
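	/* All of the above must be configured before vmbus_open(): the
	 * requestor sized by rqstor_size is allocated during open and is
	 * later used to map completions back to their request ids.
	 */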
1773
1774	ret = vmbus_open(device->channel, netvsc_ring_bytes,
1775			 netvsc_ring_bytes,  NULL, 0,
1776			 netvsc_channel_cb, net_device->chan_table);
1777
1778	if (ret != 0) {
1779		netdev_err(ndev, "unable to open channel: %d\n", ret);
1780		goto cleanup;
1781	}
1782
1783	/* Channel is opened */
1784	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
1785
1786	napi_enable(&net_device->chan_table[0].napi);
1787
1788	/* Connect with the NetVsp */
1789	ret = netvsc_connect_vsp(device, net_device, device_info);
1790	if (ret != 0) {
1791		netdev_err(ndev,
1792			"unable to connect to NetVSP - %d\n", ret);
1793		goto close;
1794	}
1795
1796	/* Writing the nvdev pointer unlocks netvsc_send(), so make sure the
1797	 * chn_table is fully populated first.
1798	 */
1799	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
1800
1801	return net_device;
1802
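/* Error unwind: "close" undoes the NetVSP connection and channel open,
 * "cleanup" removes the primary-channel NAPI instance, and "cleanup2"
 * frees the device, which also unregisters the per-channel XDP rxq info.
 */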
1803close:
1804	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
1805	napi_disable(&net_device->chan_table[0].napi);
1806
1807	/* Now, we can close the channel safely */
1808	vmbus_close(device->channel);
1809
1810cleanup:
1811	netif_napi_del(&net_device->chan_table[0].napi);
1812
1813cleanup2:
1814	free_netvsc_device(&net_device->rcu);
1815
1816	return ERR_PTR(ret);
1817}
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2009, Microsoft Corporation.
   4 *
   5 * Authors:
   6 *   Haiyang Zhang <haiyangz@microsoft.com>
   7 *   Hank Janssen  <hjanssen@microsoft.com>
   8 */
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/kernel.h>
  12#include <linux/sched.h>
  13#include <linux/wait.h>
  14#include <linux/mm.h>
  15#include <linux/delay.h>
  16#include <linux/io.h>
  17#include <linux/slab.h>
  18#include <linux/netdevice.h>
  19#include <linux/if_ether.h>
  20#include <linux/vmalloc.h>
  21#include <linux/rtnetlink.h>
  22#include <linux/prefetch.h>
  23#include <linux/filter.h>
  24
  25#include <asm/sync_bitops.h>
  26#include <asm/mshyperv.h>
  27
  28#include "hyperv_net.h"
  29#include "netvsc_trace.h"
  30
  31/*
  32 * Switch the data path from the synthetic interface to the VF
  33 * interface.
  34 */
  35int netvsc_switch_datapath(struct net_device *ndev, bool vf)
  36{
  37	struct net_device_context *net_device_ctx = netdev_priv(ndev);
  38	struct hv_device *dev = net_device_ctx->device_ctx;
  39	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
  40	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
  41	int ret, retry = 0;
  42
  43	/* Block sending traffic to VF if it's about to be gone */
  44	if (!vf)
  45		net_device_ctx->data_path_is_vf = vf;
  46
  47	memset(init_pkt, 0, sizeof(struct nvsp_message));
  48	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
  49	if (vf)
  50		init_pkt->msg.v4_msg.active_dp.active_datapath =
  51			NVSP_DATAPATH_VF;
  52	else
  53		init_pkt->msg.v4_msg.active_dp.active_datapath =
  54			NVSP_DATAPATH_SYNTHETIC;
  55
  56again:
  57	trace_nvsp_send(ndev, init_pkt);
  58
  59	ret = vmbus_sendpacket(dev->channel, init_pkt,
  60			       sizeof(struct nvsp_message),
  61			       (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
  62			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
  63
  64	/* If failed to switch to/from VF, let data_path_is_vf stay false,
  65	 * so we use synthetic path to send data.
  66	 */
  67	if (ret) {
  68		if (ret != -EAGAIN) {
  69			netdev_err(ndev,
  70				   "Unable to send sw datapath msg, err: %d\n",
  71				   ret);
  72			return ret;
  73		}
  74
  75		if (retry++ < RETRY_MAX) {
  76			usleep_range(RETRY_US_LO, RETRY_US_HI);
  77			goto again;
  78		} else {
  79			netdev_err(
  80				ndev,
  81				"Retry failed to send sw datapath msg, err: %d\n",
  82				ret);
  83			return ret;
  84		}
  85	}
  86
  87	wait_for_completion(&nv_dev->channel_init_wait);
  88	net_device_ctx->data_path_is_vf = vf;
  89
  90	return 0;
  91}
  92
  93/* Worker to setup sub channels on initial setup
  94 * Initial hotplug event occurs in softirq context
  95 * and can't wait for channels.
  96 */
  97static void netvsc_subchan_work(struct work_struct *w)
  98{
  99	struct netvsc_device *nvdev =
 100		container_of(w, struct netvsc_device, subchan_work);
 101	struct rndis_device *rdev;
 102	int i, ret;
 103
 104	/* Avoid deadlock with device removal already under RTNL */
 105	if (!rtnl_trylock()) {
 106		schedule_work(w);
 107		return;
 108	}
 109
 110	rdev = nvdev->extension;
 111	if (rdev) {
 112		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
 113		if (ret == 0) {
 114			netif_device_attach(rdev->ndev);
 115		} else {
 116			/* fallback to only primary channel */
 117			for (i = 1; i < nvdev->num_chn; i++)
 118				netif_napi_del(&nvdev->chan_table[i].napi);
 119
 120			nvdev->max_chn = 1;
 121			nvdev->num_chn = 1;
 122		}
 123	}
 124
 125	rtnl_unlock();
 126}
 127
 128static struct netvsc_device *alloc_net_device(void)
 129{
 130	struct netvsc_device *net_device;
 131
 132	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
 133	if (!net_device)
 134		return NULL;
 135
 136	init_waitqueue_head(&net_device->wait_drain);
 137	net_device->destroy = false;
 138	net_device->tx_disable = true;
 139
 140	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 141	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 142
 143	init_completion(&net_device->channel_init_wait);
 144	init_waitqueue_head(&net_device->subchan_open);
 145	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
 146
 147	return net_device;
 148}
 149
 150static void free_netvsc_device(struct rcu_head *head)
 151{
 152	struct netvsc_device *nvdev
 153		= container_of(head, struct netvsc_device, rcu);
 154	int i;
 155
 156	kfree(nvdev->extension);
 157
 158	if (nvdev->recv_original_buf)
 159		vfree(nvdev->recv_original_buf);
 160	else
 161		vfree(nvdev->recv_buf);
 162
 163	if (nvdev->send_original_buf)
 164		vfree(nvdev->send_original_buf);
 165	else
 166		vfree(nvdev->send_buf);
 167
 168	bitmap_free(nvdev->send_section_map);
 169
 170	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
 171		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
 172		kfree(nvdev->chan_table[i].recv_buf);
 173		vfree(nvdev->chan_table[i].mrc.slots);
 174	}
 175
 176	kfree(nvdev);
 177}
 178
 179static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
 180{
 181	call_rcu(&nvdev->rcu, free_netvsc_device);
 182}
 183
 184static void netvsc_revoke_recv_buf(struct hv_device *device,
 185				   struct netvsc_device *net_device,
 186				   struct net_device *ndev)
 187{
 188	struct nvsp_message *revoke_packet;
 189	int ret;
 190
 191	/*
 192	 * If we got a section count, it means we received a
 193	 * SendReceiveBufferComplete msg (ie sent
 194	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
 195	 * to send a revoke msg here
 196	 */
 197	if (net_device->recv_section_cnt) {
 198		/* Send the revoke receive buffer */
 199		revoke_packet = &net_device->revoke_packet;
 200		memset(revoke_packet, 0, sizeof(struct nvsp_message));
 201
 202		revoke_packet->hdr.msg_type =
 203			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
 204		revoke_packet->msg.v1_msg.
 205		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
 206
 207		trace_nvsp_send(ndev, revoke_packet);
 208
 209		ret = vmbus_sendpacket(device->channel,
 210				       revoke_packet,
 211				       sizeof(struct nvsp_message),
 212				       VMBUS_RQST_ID_NO_RESPONSE,
 213				       VM_PKT_DATA_INBAND, 0);
 214		/* If the failure is because the channel is rescinded;
 215		 * ignore the failure since we cannot send on a rescinded
 216		 * channel. This would allow us to properly cleanup
 217		 * even when the channel is rescinded.
 218		 */
 219		if (device->channel->rescind)
 220			ret = 0;
 221		/*
 222		 * If we failed here, we might as well return and
 223		 * have a leak rather than continue and a bugchk
 224		 */
 225		if (ret != 0) {
 226			netdev_err(ndev, "unable to send "
 227				"revoke receive buffer to netvsp\n");
 228			return;
 229		}
 230		net_device->recv_section_cnt = 0;
 231	}
 232}
 233
 234static void netvsc_revoke_send_buf(struct hv_device *device,
 235				   struct netvsc_device *net_device,
 236				   struct net_device *ndev)
 237{
 238	struct nvsp_message *revoke_packet;
 239	int ret;
 240
 241	/* Deal with the send buffer we may have setup.
 242	 * If we got a  send section size, it means we received a
 243	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
 244	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
 245	 * to send a revoke msg here
 246	 */
 247	if (net_device->send_section_cnt) {
 248		/* Send the revoke receive buffer */
 249		revoke_packet = &net_device->revoke_packet;
 250		memset(revoke_packet, 0, sizeof(struct nvsp_message));
 251
 252		revoke_packet->hdr.msg_type =
 253			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
 254		revoke_packet->msg.v1_msg.revoke_send_buf.id =
 255			NETVSC_SEND_BUFFER_ID;
 256
 257		trace_nvsp_send(ndev, revoke_packet);
 258
 259		ret = vmbus_sendpacket(device->channel,
 260				       revoke_packet,
 261				       sizeof(struct nvsp_message),
 262				       VMBUS_RQST_ID_NO_RESPONSE,
 263				       VM_PKT_DATA_INBAND, 0);
 264
 265		/* If the failure is because the channel is rescinded;
 266		 * ignore the failure since we cannot send on a rescinded
 267		 * channel. This would allow us to properly cleanup
 268		 * even when the channel is rescinded.
 269		 */
 270		if (device->channel->rescind)
 271			ret = 0;
 272
 273		/* If we failed here, we might as well return and
 274		 * have a leak rather than continue and a bugchk
 275		 */
 276		if (ret != 0) {
 277			netdev_err(ndev, "unable to send "
 278				   "revoke send buffer to netvsp\n");
 279			return;
 280		}
 281		net_device->send_section_cnt = 0;
 282	}
 283}
 284
 285static void netvsc_teardown_recv_gpadl(struct hv_device *device,
 286				       struct netvsc_device *net_device,
 287				       struct net_device *ndev)
 288{
 289	int ret;
 290
 291	if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
 292		ret = vmbus_teardown_gpadl(device->channel,
 293					   &net_device->recv_buf_gpadl_handle);
 294
 295		/* If we failed here, we might as well return and have a leak
 296		 * rather than continue and a bugchk
 297		 */
 298		if (ret != 0) {
 299			netdev_err(ndev,
 300				   "unable to teardown receive buffer's gpadl\n");
 301			return;
 302		}
 303	}
 304}
 305
 306static void netvsc_teardown_send_gpadl(struct hv_device *device,
 307				       struct netvsc_device *net_device,
 308				       struct net_device *ndev)
 309{
 310	int ret;
 311
 312	if (net_device->send_buf_gpadl_handle.gpadl_handle) {
 313		ret = vmbus_teardown_gpadl(device->channel,
 314					   &net_device->send_buf_gpadl_handle);
 315
 316		/* If we failed here, we might as well return and have a leak
 317		 * rather than continue and a bugchk
 318		 */
 319		if (ret != 0) {
 320			netdev_err(ndev,
 321				   "unable to teardown send buffer's gpadl\n");
 322			return;
 323		}
 324	}
 325}
 326
 327int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
 328{
 329	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
 330	int node = cpu_to_node(nvchan->channel->target_cpu);
 331	size_t size;
 332
 333	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
 334	nvchan->mrc.slots = vzalloc_node(size, node);
 335	if (!nvchan->mrc.slots)
 336		nvchan->mrc.slots = vzalloc(size);
 337
 338	return nvchan->mrc.slots ? 0 : -ENOMEM;
 339}
 340
 341static int netvsc_init_buf(struct hv_device *device,
 342			   struct netvsc_device *net_device,
 343			   const struct netvsc_device_info *device_info)
 344{
 345	struct nvsp_1_message_send_receive_buffer_complete *resp;
 346	struct net_device *ndev = hv_get_drvdata(device);
 347	struct nvsp_message *init_packet;
 348	unsigned int buf_size;
 349	int i, ret = 0;
 350	void *vaddr;
 351
 352	/* Get receive buffer area. */
 353	buf_size = device_info->recv_sections * device_info->recv_section_size;
 354	buf_size = roundup(buf_size, PAGE_SIZE);
 355
 356	/* Legacy hosts only allow smaller receive buffer */
 357	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
 358		buf_size = min_t(unsigned int, buf_size,
 359				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
 360
 361	net_device->recv_buf = vzalloc(buf_size);
 362	if (!net_device->recv_buf) {
 363		netdev_err(ndev,
 364			   "unable to allocate receive buffer of size %u\n",
 365			   buf_size);
 366		ret = -ENOMEM;
 367		goto cleanup;
 368	}
 369
 370	net_device->recv_buf_size = buf_size;
 371
 372	/*
 373	 * Establish the gpadl handle for this buffer on this
 374	 * channel.  Note: This call uses the vmbus connection rather
 375	 * than the channel to establish the gpadl handle.
 376	 */
 377	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
 378				    buf_size,
 379				    &net_device->recv_buf_gpadl_handle);
 380	if (ret != 0) {
 381		netdev_err(ndev,
 382			"unable to establish receive buffer's gpadl\n");
 383		goto cleanup;
 384	}
 385
 386	if (hv_isolation_type_snp()) {
 387		vaddr = hv_map_memory(net_device->recv_buf, buf_size);
 388		if (!vaddr) {
 389			ret = -ENOMEM;
 390			goto cleanup;
 391		}
 392
 393		net_device->recv_original_buf = net_device->recv_buf;
 394		net_device->recv_buf = vaddr;
 395	}
 396
 397	/* Notify the NetVsp of the gpadl handle */
 398	init_packet = &net_device->channel_init_pkt;
 399	memset(init_packet, 0, sizeof(struct nvsp_message));
 400	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
 401	init_packet->msg.v1_msg.send_recv_buf.
 402		gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
 403	init_packet->msg.v1_msg.
 404		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
 405
 406	trace_nvsp_send(ndev, init_packet);
 407
 408	/* Send the gpadl notification request */
 409	ret = vmbus_sendpacket(device->channel, init_packet,
 410			       sizeof(struct nvsp_message),
 411			       (unsigned long)init_packet,
 412			       VM_PKT_DATA_INBAND,
 413			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 414	if (ret != 0) {
 415		netdev_err(ndev,
 416			"unable to send receive buffer's gpadl to netvsp\n");
 417		goto cleanup;
 418	}
 419
 420	wait_for_completion(&net_device->channel_init_wait);
 421
 422	/* Check the response */
 423	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
 424	if (resp->status != NVSP_STAT_SUCCESS) {
 425		netdev_err(ndev,
 426			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
 427			   resp->status);
 428		ret = -EINVAL;
 429		goto cleanup;
 430	}
 431
 432	/* Parse the response */
 433	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
 434		   resp->num_sections, resp->sections[0].sub_alloc_size,
 435		   resp->sections[0].num_sub_allocs);
 436
 437	/* There should only be one section for the entire receive buffer */
 438	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
 439		ret = -EINVAL;
 440		goto cleanup;
 441	}
 442
 443	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
 444	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
 445
 446	/* Ensure buffer will not overflow */
 447	if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
 448	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
 449		netdev_err(ndev, "invalid recv_section_size %u\n",
 450			   net_device->recv_section_size);
 451		ret = -EINVAL;
 452		goto cleanup;
 453	}
 454
 455	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
 456		struct netvsc_channel *nvchan = &net_device->chan_table[i];
 457
 458		nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
 459		if (nvchan->recv_buf == NULL) {
 460			ret = -ENOMEM;
 461			goto cleanup;
 462		}
 463	}
 464
 465	/* Setup receive completion ring.
 466	 * Add 1 to the recv_section_cnt because at least one entry in a
 467	 * ring buffer has to be empty.
 468	 */
 469	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
 470	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
 471	if (ret)
 472		goto cleanup;
 473
 474	/* Now setup the send buffer. */
 475	buf_size = device_info->send_sections * device_info->send_section_size;
 476	buf_size = round_up(buf_size, PAGE_SIZE);
 477
 478	net_device->send_buf = vzalloc(buf_size);
 479	if (!net_device->send_buf) {
 480		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
 481			   buf_size);
 482		ret = -ENOMEM;
 483		goto cleanup;
 484	}
 485	net_device->send_buf_size = buf_size;
 486
 487	/* Establish the gpadl handle for this buffer on this
 488	 * channel.  Note: This call uses the vmbus connection rather
 489	 * than the channel to establish the gpadl handle.
 490	 */
 491	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
 492				    buf_size,
 493				    &net_device->send_buf_gpadl_handle);
 494	if (ret != 0) {
 495		netdev_err(ndev,
 496			   "unable to establish send buffer's gpadl\n");
 497		goto cleanup;
 498	}
 499
 500	if (hv_isolation_type_snp()) {
 501		vaddr = hv_map_memory(net_device->send_buf, buf_size);
 502		if (!vaddr) {
 503			ret = -ENOMEM;
 504			goto cleanup;
 505		}
 506
 507		net_device->send_original_buf = net_device->send_buf;
 508		net_device->send_buf = vaddr;
 509	}
 510
 511	/* Notify the NetVsp of the gpadl handle */
 512	init_packet = &net_device->channel_init_pkt;
 513	memset(init_packet, 0, sizeof(struct nvsp_message));
 514	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
 515	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
 516		net_device->send_buf_gpadl_handle.gpadl_handle;
 517	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
 518
 519	trace_nvsp_send(ndev, init_packet);
 520
 521	/* Send the gpadl notification request */
 522	ret = vmbus_sendpacket(device->channel, init_packet,
 523			       sizeof(struct nvsp_message),
 524			       (unsigned long)init_packet,
 525			       VM_PKT_DATA_INBAND,
 526			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 527	if (ret != 0) {
 528		netdev_err(ndev,
 529			   "unable to send send buffer's gpadl to netvsp\n");
 530		goto cleanup;
 531	}
 532
 533	wait_for_completion(&net_device->channel_init_wait);
 534
 535	/* Check the response */
 536	if (init_packet->msg.v1_msg.
 537	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
 538		netdev_err(ndev, "Unable to complete send buffer "
 539			   "initialization with NetVsp - status %d\n",
 540			   init_packet->msg.v1_msg.
 541			   send_send_buf_complete.status);
 542		ret = -EINVAL;
 543		goto cleanup;
 544	}
 545
 546	/* Parse the response */
 547	net_device->send_section_size = init_packet->msg.
 548				v1_msg.send_send_buf_complete.section_size;
 549	if (net_device->send_section_size < NETVSC_MTU_MIN) {
 550		netdev_err(ndev, "invalid send_section_size %u\n",
 551			   net_device->send_section_size);
 552		ret = -EINVAL;
 553		goto cleanup;
 554	}
 555
 556	/* Section count is simply the size divided by the section size. */
 557	net_device->send_section_cnt = buf_size / net_device->send_section_size;
 558
 559	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
 560		   net_device->send_section_size, net_device->send_section_cnt);
 561
 562	/* Setup state for managing the send buffer. */
 563	net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
 564						     GFP_KERNEL);
 565	if (!net_device->send_section_map) {
 566		ret = -ENOMEM;
 567		goto cleanup;
 568	}
 569
 570	goto exit;
 571
 572cleanup:
 573	netvsc_revoke_recv_buf(device, net_device, ndev);
 574	netvsc_revoke_send_buf(device, net_device, ndev);
 575	netvsc_teardown_recv_gpadl(device, net_device, ndev);
 576	netvsc_teardown_send_gpadl(device, net_device, ndev);
 577
 578exit:
 579	return ret;
 580}
 581
 582/* Negotiate NVSP protocol version */
 583static int negotiate_nvsp_ver(struct hv_device *device,
 584			      struct netvsc_device *net_device,
 585			      struct nvsp_message *init_packet,
 586			      u32 nvsp_ver)
 587{
 588	struct net_device *ndev = hv_get_drvdata(device);
 589	int ret;
 590
 591	memset(init_packet, 0, sizeof(struct nvsp_message));
 592	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
 593	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
 594	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
 595	trace_nvsp_send(ndev, init_packet);
 596
 597	/* Send the init request */
 598	ret = vmbus_sendpacket(device->channel, init_packet,
 599			       sizeof(struct nvsp_message),
 600			       (unsigned long)init_packet,
 601			       VM_PKT_DATA_INBAND,
 602			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 603
 604	if (ret != 0)
 605		return ret;
 606
 607	wait_for_completion(&net_device->channel_init_wait);
 608
 609	if (init_packet->msg.init_msg.init_complete.status !=
 610	    NVSP_STAT_SUCCESS)
 611		return -EINVAL;
 612
 613	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
 614		return 0;
 615
 616	/* NVSPv2 or later: Send NDIS config */
 617	memset(init_packet, 0, sizeof(struct nvsp_message));
 618	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
 619	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
 620	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
 621
 622	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
 623		if (hv_is_isolation_supported())
 624			netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
 625		else
 626			init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
 627
 628		/* Teaming bit is needed to receive link speed updates */
 629		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
 630	}
 631
 632	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
 633		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;
 634
 635	trace_nvsp_send(ndev, init_packet);
 636
 637	ret = vmbus_sendpacket(device->channel, init_packet,
 638				sizeof(struct nvsp_message),
 639				VMBUS_RQST_ID_NO_RESPONSE,
 640				VM_PKT_DATA_INBAND, 0);
 641
 642	return ret;
 643}
 644
 645static int netvsc_connect_vsp(struct hv_device *device,
 646			      struct netvsc_device *net_device,
 647			      const struct netvsc_device_info *device_info)
 648{
 649	struct net_device *ndev = hv_get_drvdata(device);
 650	static const u32 ver_list[] = {
 651		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
 652		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
 653		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
 654	};
 655	struct nvsp_message *init_packet;
 656	int ndis_version, i, ret;
 657
 658	init_packet = &net_device->channel_init_pkt;
 659
 660	/* Negotiate the latest NVSP protocol supported */
 661	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
 662		if (negotiate_nvsp_ver(device, net_device, init_packet,
 663				       ver_list[i])  == 0) {
 664			net_device->nvsp_version = ver_list[i];
 665			break;
 666		}
 667
 668	if (i < 0) {
 669		ret = -EPROTO;
 670		goto cleanup;
 671	}
 672
 673	if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
 674		netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
 675			   net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
 676		ret = -EPROTO;
 677		goto cleanup;
 678	}
 679
 680	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
 681
 682	/* Send the ndis version */
 683	memset(init_packet, 0, sizeof(struct nvsp_message));
 684
 685	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
 686		ndis_version = 0x00060001;
 687	else
 688		ndis_version = 0x0006001e;
 689
 690	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
 691	init_packet->msg.v1_msg.
 692		send_ndis_ver.ndis_major_ver =
 693				(ndis_version & 0xFFFF0000) >> 16;
 694	init_packet->msg.v1_msg.
 695		send_ndis_ver.ndis_minor_ver =
 696				ndis_version & 0xFFFF;
 697
 698	trace_nvsp_send(ndev, init_packet);
 699
 700	/* Send the init request */
 701	ret = vmbus_sendpacket(device->channel, init_packet,
 702				sizeof(struct nvsp_message),
 703				VMBUS_RQST_ID_NO_RESPONSE,
 704				VM_PKT_DATA_INBAND, 0);
 705	if (ret != 0)
 706		goto cleanup;
 707
 708
 709	ret = netvsc_init_buf(device, net_device, device_info);
 710
 711cleanup:
 712	return ret;
 713}
 714
 715/*
 716 * netvsc_device_remove - Callback when the root bus device is removed
 717 */
 718void netvsc_device_remove(struct hv_device *device)
 719{
 720	struct net_device *ndev = hv_get_drvdata(device);
 721	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 722	struct netvsc_device *net_device
 723		= rtnl_dereference(net_device_ctx->nvdev);
 724	int i;
 725
 726	/*
 727	 * Revoke receive buffer. If host is pre-Win2016 then tear down
 728	 * receive buffer GPADL. Do the same for send buffer.
 729	 */
 730	netvsc_revoke_recv_buf(device, net_device, ndev);
 731	if (vmbus_proto_version < VERSION_WIN10)
 732		netvsc_teardown_recv_gpadl(device, net_device, ndev);
 733
 734	netvsc_revoke_send_buf(device, net_device, ndev);
 735	if (vmbus_proto_version < VERSION_WIN10)
 736		netvsc_teardown_send_gpadl(device, net_device, ndev);
 737
 738	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
 739
 740	/* Disable NAPI and disassociate its context from the device. */
 741	for (i = 0; i < net_device->num_chn; i++) {
 742		/* See also vmbus_reset_channel_cb(). */
 743		napi_disable(&net_device->chan_table[i].napi);
 
 
 
 744		netif_napi_del(&net_device->chan_table[i].napi);
 745	}
 746
 747	/*
 748	 * At this point, no one should be accessing net_device
 749	 * except in here
 750	 */
 751	netdev_dbg(ndev, "net device safe to remove\n");
 752
 753	/* Now, we can close the channel safely */
 754	vmbus_close(device->channel);
 755
 756	/*
 757	 * If host is Win2016 or higher then we do the GPADL tear down
 758	 * here after VMBus is closed.
 759	*/
 760	if (vmbus_proto_version >= VERSION_WIN10) {
 761		netvsc_teardown_recv_gpadl(device, net_device, ndev);
 762		netvsc_teardown_send_gpadl(device, net_device, ndev);
 763	}
 764
 765	if (net_device->recv_original_buf)
 766		hv_unmap_memory(net_device->recv_buf);
 767
 768	if (net_device->send_original_buf)
 769		hv_unmap_memory(net_device->send_buf);
 770
 771	/* Release all resources */
 772	free_netvsc_device_rcu(net_device);
 773}
 774
 775#define RING_AVAIL_PERCENT_HIWATER 20
 776#define RING_AVAIL_PERCENT_LOWATER 10
 777
 778static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
 779					 u32 index)
 780{
 781	sync_change_bit(index, net_device->send_section_map);
 782}
 783
 784static void netvsc_send_tx_complete(struct net_device *ndev,
 785				    struct netvsc_device *net_device,
 786				    struct vmbus_channel *channel,
 787				    const struct vmpacket_descriptor *desc,
 788				    int budget)
 789{
 790	struct net_device_context *ndev_ctx = netdev_priv(ndev);
 791	struct sk_buff *skb;
 792	u16 q_idx = 0;
 793	int queue_sends;
 794	u64 cmd_rqst;
 795
 796	cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
 797	if (cmd_rqst == VMBUS_RQST_ERROR) {
 798		netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
 799		return;
 800	}
 801
 802	skb = (struct sk_buff *)(unsigned long)cmd_rqst;
 803
 804	/* Notify the layer above us */
 805	if (likely(skb)) {
 806		struct hv_netvsc_packet *packet
 807			= (struct hv_netvsc_packet *)skb->cb;
 808		u32 send_index = packet->send_buf_index;
 809		struct netvsc_stats_tx *tx_stats;
 810
 811		if (send_index != NETVSC_INVALID_INDEX)
 812			netvsc_free_send_slot(net_device, send_index);
 813		q_idx = packet->q_idx;
 814
 815		tx_stats = &net_device->chan_table[q_idx].tx_stats;
 816
 817		u64_stats_update_begin(&tx_stats->syncp);
 818		tx_stats->packets += packet->total_packets;
 819		tx_stats->bytes += packet->total_bytes;
 820		u64_stats_update_end(&tx_stats->syncp);
 821
 822		netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
 823		napi_consume_skb(skb, budget);
 824	}
 825
 826	queue_sends =
 827		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
 828
 829	if (unlikely(net_device->destroy)) {
 830		if (queue_sends == 0)
 831			wake_up(&net_device->wait_drain);
 832	} else {
 833		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
 834
 835		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
 836		    (hv_get_avail_to_write_percent(&channel->outbound) >
 837		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
 838			netif_tx_wake_queue(txq);
 839			ndev_ctx->eth_stats.wake_queue++;
 840		}
 841	}
 842}
 843
 844static void netvsc_send_completion(struct net_device *ndev,
 845				   struct netvsc_device *net_device,
 846				   struct vmbus_channel *incoming_channel,
 847				   const struct vmpacket_descriptor *desc,
 848				   int budget)
 849{
 850	const struct nvsp_message *nvsp_packet;
 851	u32 msglen = hv_pkt_datalen(desc);
 852	struct nvsp_message *pkt_rqst;
 853	u64 cmd_rqst;
 
 854
 855	/* First check if this is a VMBUS completion without data payload */
 856	if (!msglen) {
 857		cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
 858								   desc->trans_id);
 859		if (cmd_rqst == VMBUS_RQST_ERROR) {
 860			netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
 861			return;
 862		}
 863
 864		pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
 865		switch (pkt_rqst->hdr.msg_type) {
 866		case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
 867			complete(&net_device->channel_init_wait);
 868			break;
 869
 870		default:
 871			netdev_err(ndev, "Unexpected VMBUS completion!!\n");
 872		}
 873		return;
 874	}
 875
 876	/* Ensure packet is big enough to read header fields */
 877	if (msglen < sizeof(struct nvsp_message_header)) {
 878		netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
 879		return;
 880	}
 881
 882	nvsp_packet = hv_pkt_data(desc);
 883	switch (nvsp_packet->hdr.msg_type) {
 884	case NVSP_MSG_TYPE_INIT_COMPLETE:
 885		if (msglen < sizeof(struct nvsp_message_header) +
 886				sizeof(struct nvsp_message_init_complete)) {
 887			netdev_err(ndev, "nvsp_msg length too small: %u\n",
 888				   msglen);
 889			return;
 890		}
 891		fallthrough;
 892
 893	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
 894		if (msglen < sizeof(struct nvsp_message_header) +
 895				sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
 896			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
 897				   msglen);
 898			return;
 899		}
 900		fallthrough;
 901
 902	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
 903		if (msglen < sizeof(struct nvsp_message_header) +
 904				sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
 905			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
 906				   msglen);
 907			return;
 908		}
 909		fallthrough;
 910
 911	case NVSP_MSG5_TYPE_SUBCHANNEL:
 912		if (msglen < sizeof(struct nvsp_message_header) +
 913				sizeof(struct nvsp_5_subchannel_complete)) {
 914			netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
 915				   msglen);
 916			return;
 917		}
 918		/* Copy the response back */
 919		memcpy(&net_device->channel_init_pkt, nvsp_packet,
 920		       sizeof(struct nvsp_message));
 921		complete(&net_device->channel_init_wait);
 922		break;
 923
 924	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 925		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
 926					desc, budget);
 927		break;
 928
 929	default:
 930		netdev_err(ndev,
 931			   "Unknown send completion type %d received!!\n",
 932			   nvsp_packet->hdr.msg_type);
 
 933	}
 
 
 
 
 
 934}
 935
 936static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 937{
 938	unsigned long *map_addr = net_device->send_section_map;
 939	unsigned int i;
 940
 941	for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
 942		if (sync_test_and_set_bit(i, map_addr) == 0)
 943			return i;
 944	}
 945
 946	return NETVSC_INVALID_INDEX;
 947}
 948
 949static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 950				    unsigned int section_index,
 951				    u32 pend_size,
 952				    struct hv_netvsc_packet *packet,
 953				    struct rndis_message *rndis_msg,
 954				    struct hv_page_buffer *pb,
 955				    bool xmit_more)
 956{
 957	char *start = net_device->send_buf;
 958	char *dest = start + (section_index * net_device->send_section_size)
 959		     + pend_size;
 960	int i;
 961	u32 padding = 0;
 962	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
 963		packet->page_buf_cnt;
 964	u32 remain;
 965
 966	/* Add padding */
 967	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
 968	if (xmit_more && remain) {
 969		padding = net_device->pkt_align - remain;
 970		rndis_msg->msg_len += padding;
 971		packet->total_data_buflen += padding;
 972	}
 973
 974	for (i = 0; i < page_count; i++) {
 975		char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
 976		u32 offset = pb[i].offset;
 977		u32 len = pb[i].len;
 978
 979		memcpy(dest, (src + offset), len);
 980		dest += len;
 981	}
 982
 983	if (padding)
 984		memset(dest, 0, padding);
 985}
 986
 987void netvsc_dma_unmap(struct hv_device *hv_dev,
 988		      struct hv_netvsc_packet *packet)
 989{
 990	int i;
 991
 992	if (!hv_is_isolation_supported())
 993		return;
 994
 995	if (!packet->dma_range)
 996		return;
 997
 998	for (i = 0; i < packet->page_buf_cnt; i++)
 999		dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
1000				 packet->dma_range[i].mapping_size,
1001				 DMA_TO_DEVICE);
1002
1003	kfree(packet->dma_range);
1004}
1005
1006/* netvsc_dma_map - Map swiotlb bounce buffer with data page of
1007 * packet sent by vmbus_sendpacket_pagebuffer() in the Isolation
1008 * VM.
1009 *
1010 * In isolation VM, netvsc send buffer has been marked visible to
1011 * host and so the data copied to send buffer doesn't need to use
1012 * bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer()
1013 * may not be copied to send buffer and so these pages need to be
1014 * mapped with swiotlb bounce buffer. netvsc_dma_map() is to do
1015 * that. The pfns in the struct hv_page_buffer need to be converted
1016 * to bounce buffer's pfn. The loop here is necessary because the
1017 * entries in the page buffer array are not necessarily full
1018 * pages of data.  Each entry in the array has a separate offset and
1019 * len that may be non-zero, even for entries in the middle of the
1020 * array.  And the entries are not physically contiguous.  So each
1021 * entry must be individually mapped rather than as a contiguous unit.
1022 * So not use dma_map_sg() here.
1023 */
1024static int netvsc_dma_map(struct hv_device *hv_dev,
1025			  struct hv_netvsc_packet *packet,
1026			  struct hv_page_buffer *pb)
1027{
1028	u32 page_count = packet->page_buf_cnt;
1029	dma_addr_t dma;
1030	int i;
1031
1032	if (!hv_is_isolation_supported())
1033		return 0;
1034
1035	packet->dma_range = kcalloc(page_count,
1036				    sizeof(*packet->dma_range),
1037				    GFP_ATOMIC);
1038	if (!packet->dma_range)
1039		return -ENOMEM;
1040
1041	for (i = 0; i < page_count; i++) {
1042		char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
1043					 + pb[i].offset);
1044		u32 len = pb[i].len;
1045
1046		dma = dma_map_single(&hv_dev->device, src, len,
1047				     DMA_TO_DEVICE);
1048		if (dma_mapping_error(&hv_dev->device, dma)) {
1049			kfree(packet->dma_range);
1050			return -ENOMEM;
1051		}
1052
1053		/* pb[].offset and pb[].len are not changed during dma mapping
1054		 * and so not reassign.
1055		 */
1056		packet->dma_range[i].dma = dma;
1057		packet->dma_range[i].mapping_size = len;
1058		pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
1059	}
1060
1061	return 0;
1062}
1063
1064static inline int netvsc_send_pkt(
1065	struct hv_device *device,
1066	struct hv_netvsc_packet *packet,
1067	struct netvsc_device *net_device,
1068	struct hv_page_buffer *pb,
1069	struct sk_buff *skb)
1070{
1071	struct nvsp_message nvmsg;
1072	struct nvsp_1_message_send_rndis_packet *rpkt =
1073		&nvmsg.msg.v1_msg.send_rndis_pkt;
1074	struct netvsc_channel * const nvchan =
1075		&net_device->chan_table[packet->q_idx];
1076	struct vmbus_channel *out_channel = nvchan->channel;
1077	struct net_device *ndev = hv_get_drvdata(device);
1078	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1079	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
1080	u64 req_id;
1081	int ret;
1082	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
1083
1084	memset(&nvmsg, 0, sizeof(struct nvsp_message));
1085	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
1086	if (skb)
1087		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
1088	else
1089		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */
1090
1091	rpkt->send_buf_section_index = packet->send_buf_index;
1092	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
1093		rpkt->send_buf_section_size = 0;
1094	else
1095		rpkt->send_buf_section_size = packet->total_data_buflen;
1096
1097	req_id = (ulong)skb;
1098
1099	if (out_channel->rescind)
1100		return -ENODEV;
1101
1102	trace_nvsp_send_pkt(ndev, out_channel, rpkt);
1103
1104	packet->dma_range = NULL;
1105	if (packet->page_buf_cnt) {
1106		if (packet->cp_partial)
1107			pb += packet->rmsg_pgcnt;
1108
1109		ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
1110		if (ret) {
1111			ret = -EAGAIN;
1112			goto exit;
1113		}
1114
1115		ret = vmbus_sendpacket_pagebuffer(out_channel,
1116						  pb, packet->page_buf_cnt,
1117						  &nvmsg, sizeof(nvmsg),
1118						  req_id);
1119
1120		if (ret)
1121			netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
1122	} else {
1123		ret = vmbus_sendpacket(out_channel,
1124				       &nvmsg, sizeof(nvmsg),
1125				       req_id, VM_PKT_DATA_INBAND,
1126				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1127	}
1128
1129exit:
1130	if (ret == 0) {
1131		atomic_inc_return(&nvchan->queue_sends);
1132
1133		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
1134			netif_tx_stop_queue(txq);
1135			ndev_ctx->eth_stats.stop_queue++;
1136		}
1137	} else if (ret == -EAGAIN) {
1138		netif_tx_stop_queue(txq);
1139		ndev_ctx->eth_stats.stop_queue++;
1140	} else {
1141		netdev_err(ndev,
1142			   "Unable to send packet pages %u len %u, ret %d\n",
1143			   packet->page_buf_cnt, packet->total_data_buflen,
1144			   ret);
1145	}
1146
1147	if (netif_tx_queue_stopped(txq) &&
1148	    atomic_read(&nvchan->queue_sends) < 1 &&
1149	    !net_device->tx_disable) {
1150		netif_tx_wake_queue(txq);
1151		ndev_ctx->eth_stats.wake_queue++;
1152		if (ret == -EAGAIN)
1153			ret = -ENOSPC;
1154	}
1155
1156	return ret;
1157}
1158
1159/* Move packet out of multi send data (msd), and clear msd */
1160static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
1161				struct sk_buff **msd_skb,
1162				struct multi_send_data *msdp)
1163{
1164	*msd_skb = msdp->skb;
1165	*msd_send = msdp->pkt;
1166	msdp->skb = NULL;
1167	msdp->pkt = NULL;
1168	msdp->count = 0;
1169}
1170
1171/* RCU already held by caller */
1172/* Batching/bouncing logic is designed to attempt to optimize
1173 * performance.
1174 *
1175 * For small, non-LSO packets we copy the packet to a send buffer
1176 * which is pre-registered with the Hyper-V side. This enables the
1177 * hypervisor to avoid remapping the aperture to access the packet
1178 * descriptor and data.
1179 *
1180 * If we already started using a buffer and the netdev is transmitting
1181 * a burst of packets, keep on copying into the buffer until it is
1182 * full or we are done collecting a burst. If there is an existing
1183 * buffer with space for the RNDIS descriptor but not the packet, copy
1184 * the RNDIS descriptor to the buffer, keeping the packet in place.
1185 *
1186 * If we do batching and send more than one packet using a single
1187 * NetVSC message, free the SKBs of the packets copied, except for the
1188 * last packet. This is done to streamline the handling of the case
1189 * where the last packet only had the RNDIS descriptor copied to the
1190 * send buffer, with the data pointers included in the NetVSC message.
1191 */
1192int netvsc_send(struct net_device *ndev,
1193		struct hv_netvsc_packet *packet,
1194		struct rndis_message *rndis_msg,
1195		struct hv_page_buffer *pb,
1196		struct sk_buff *skb,
1197		bool xdp_tx)
1198{
1199	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1200	struct netvsc_device *net_device
1201		= rcu_dereference_bh(ndev_ctx->nvdev);
1202	struct hv_device *device = ndev_ctx->device_ctx;
1203	int ret = 0;
1204	struct netvsc_channel *nvchan;
1205	u32 pktlen = packet->total_data_buflen, msd_len = 0;
1206	unsigned int section_index = NETVSC_INVALID_INDEX;
1207	struct multi_send_data *msdp;
1208	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
1209	struct sk_buff *msd_skb = NULL;
1210	bool try_batch, xmit_more;
1211
1212	/* If device is rescinded, return error and packet will get dropped. */
1213	if (unlikely(!net_device || net_device->destroy))
1214		return -ENODEV;
1215
1216	nvchan = &net_device->chan_table[packet->q_idx];
1217	packet->send_buf_index = NETVSC_INVALID_INDEX;
1218	packet->cp_partial = false;
1219
1220	/* Send a control message or XDP packet directly without accessing
1221	 * msd (Multi-Send Data) field which may be changed during data packet
1222	 * processing.
1223	 */
1224	if (!skb || xdp_tx)
1225		return netvsc_send_pkt(device, packet, net_device, pb, skb);
1226
1227	/* batch packets in send buffer if possible */
1228	msdp = &nvchan->msd;
1229	if (msdp->pkt)
1230		msd_len = msdp->pkt->total_data_buflen;
1231
1232	try_batch =  msd_len > 0 && msdp->count < net_device->max_pkt;
1233	if (try_batch && msd_len + pktlen + net_device->pkt_align <
1234	    net_device->send_section_size) {
1235		section_index = msdp->pkt->send_buf_index;
1236
1237	} else if (try_batch && msd_len + packet->rmsg_size <
1238		   net_device->send_section_size) {
1239		section_index = msdp->pkt->send_buf_index;
1240		packet->cp_partial = true;
1241
1242	} else if (pktlen + net_device->pkt_align <
1243		   net_device->send_section_size) {
1244		section_index = netvsc_get_next_send_section(net_device);
1245		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
1246			++ndev_ctx->eth_stats.tx_send_full;
1247		} else {
1248			move_pkt_msd(&msd_send, &msd_skb, msdp);
1249			msd_len = 0;
1250		}
1251	}
1252
1253	/* Keep aggregating only if stack says more data is coming
1254	 * and not doing mixed modes send and not flow blocked
1255	 */
1256	xmit_more = netdev_xmit_more() &&
1257		!packet->cp_partial &&
1258		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
1259
1260	if (section_index != NETVSC_INVALID_INDEX) {
1261		netvsc_copy_to_send_buf(net_device,
1262					section_index, msd_len,
1263					packet, rndis_msg, pb, xmit_more);
1264
1265		packet->send_buf_index = section_index;
1266
1267		if (packet->cp_partial) {
1268			packet->page_buf_cnt -= packet->rmsg_pgcnt;
1269			packet->total_data_buflen = msd_len + packet->rmsg_size;
1270		} else {
1271			packet->page_buf_cnt = 0;
1272			packet->total_data_buflen += msd_len;
1273		}
1274
1275		if (msdp->pkt) {
1276			packet->total_packets += msdp->pkt->total_packets;
1277			packet->total_bytes += msdp->pkt->total_bytes;
1278		}
1279
1280		if (msdp->skb)
1281			dev_consume_skb_any(msdp->skb);
1282
1283		if (xmit_more) {
1284			msdp->skb = skb;
1285			msdp->pkt = packet;
1286			msdp->count++;
1287		} else {
1288			cur_send = packet;
1289			msdp->skb = NULL;
1290			msdp->pkt = NULL;
1291			msdp->count = 0;
1292		}
1293	} else {
1294		move_pkt_msd(&msd_send, &msd_skb, msdp);
1295		cur_send = packet;
1296	}
1297
1298	if (msd_send) {
1299		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
1300					    NULL, msd_skb);
1301
1302		if (m_ret != 0) {
1303			netvsc_free_send_slot(net_device,
1304					      msd_send->send_buf_index);
1305			dev_kfree_skb_any(msd_skb);
1306		}
1307	}
1308
1309	if (cur_send)
1310		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
1311
1312	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
1313		netvsc_free_send_slot(net_device, section_index);
1314
1315	return ret;
1316}
1317
1318/* Send pending recv completions */
1319static int send_recv_completions(struct net_device *ndev,
1320				 struct netvsc_device *nvdev,
1321				 struct netvsc_channel *nvchan)
1322{
1323	struct multi_recv_comp *mrc = &nvchan->mrc;
1324	struct recv_comp_msg {
1325		struct nvsp_message_header hdr;
1326		u32 status;
1327	}  __packed;
1328	struct recv_comp_msg msg = {
1329		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
1330	};
1331	int ret;
1332
1333	while (mrc->first != mrc->next) {
1334		const struct recv_comp_data *rcd
1335			= mrc->slots + mrc->first;
1336
1337		msg.status = rcd->status;
1338		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
1339				       rcd->tid, VM_PKT_COMP, 0);
1340		if (unlikely(ret)) {
1341			struct net_device_context *ndev_ctx = netdev_priv(ndev);
1342
1343			++ndev_ctx->eth_stats.rx_comp_busy;
1344			return ret;
1345		}
1346
1347		if (++mrc->first == nvdev->recv_completion_cnt)
1348			mrc->first = 0;
1349	}
1350
1351	/* receive completion ring has been emptied */
1352	if (unlikely(nvdev->destroy))
1353		wake_up(&nvdev->wait_drain);
1354
1355	return 0;
1356}
1357
1358/* Count how many receive completions are outstanding */
1359static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
1360				 const struct multi_recv_comp *mrc,
1361				 u32 *filled, u32 *avail)
1362{
1363	u32 count = nvdev->recv_completion_cnt;
1364
1365	if (mrc->next >= mrc->first)
1366		*filled = mrc->next - mrc->first;
1367	else
1368		*filled = (count - mrc->first) + mrc->next;
1369
1370	*avail = count - *filled - 1;
1371}
1372
1373/* Add receive complete to ring to send to host. */
1374static void enq_receive_complete(struct net_device *ndev,
1375				 struct netvsc_device *nvdev, u16 q_idx,
1376				 u64 tid, u32 status)
1377{
1378	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
1379	struct multi_recv_comp *mrc = &nvchan->mrc;
1380	struct recv_comp_data *rcd;
1381	u32 filled, avail;
1382
1383	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1384
1385	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
1386		send_recv_completions(ndev, nvdev, nvchan);
1387		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
1388	}
1389
1390	if (unlikely(!avail)) {
1391		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
1392			   q_idx, tid);
1393		return;
1394	}
1395
1396	rcd = mrc->slots + mrc->next;
1397	rcd->tid = tid;
1398	rcd->status = status;
1399
1400	if (++mrc->next == nvdev->recv_completion_cnt)
1401		mrc->next = 0;
1402}
1403
1404static int netvsc_receive(struct net_device *ndev,
1405			  struct netvsc_device *net_device,
1406			  struct netvsc_channel *nvchan,
1407			  const struct vmpacket_descriptor *desc)
1408{
1409	struct net_device_context *net_device_ctx = netdev_priv(ndev);
1410	struct vmbus_channel *channel = nvchan->channel;
1411	const struct vmtransfer_page_packet_header *vmxferpage_packet
1412		= container_of(desc, const struct vmtransfer_page_packet_header, d);
1413	const struct nvsp_message *nvsp = hv_pkt_data(desc);
1414	u32 msglen = hv_pkt_datalen(desc);
1415	u16 q_idx = channel->offermsg.offer.sub_channel_index;
1416	char *recv_buf = net_device->recv_buf;
1417	u32 status = NVSP_STAT_SUCCESS;
1418	int i;
1419	int count = 0;
1420
1421	/* Ensure packet is big enough to read header fields */
1422	if (msglen < sizeof(struct nvsp_message_header)) {
1423		netif_err(net_device_ctx, rx_err, ndev,
1424			  "invalid nvsp header, length too small: %u\n",
1425			  msglen);
1426		return 0;
1427	}
1428
1429	/* Make sure this is a valid nvsp packet */
1430	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
1431		netif_err(net_device_ctx, rx_err, ndev,
1432			  "Unknown nvsp packet type received %u\n",
1433			  nvsp->hdr.msg_type);
1434		return 0;
1435	}
1436
1437	/* Validate xfer page pkt header */
1438	if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
1439		netif_err(net_device_ctx, rx_err, ndev,
1440			  "Invalid xfer page pkt, offset too small: %u\n",
1441			  desc->offset8 << 3);
1442		return 0;
1443	}
1444
1445	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
1446		netif_err(net_device_ctx, rx_err, ndev,
1447			  "Invalid xfer page set id - expecting %x got %x\n",
1448			  NETVSC_RECEIVE_BUFFER_ID,
1449			  vmxferpage_packet->xfer_pageset_id);
1450		return 0;
1451	}
1452
1453	count = vmxferpage_packet->range_cnt;
1454
1455	/* Check count for a valid value */
1456	if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
1457		netif_err(net_device_ctx, rx_err, ndev,
1458			  "Range count is not valid: %d\n",
1459			  count);
1460		return 0;
1461	}
1462
1463	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
1464	for (i = 0; i < count; i++) {
1465		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
1466		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
1467		void *data;
1468		int ret;
1469
1470		if (unlikely(offset > net_device->recv_buf_size ||
1471			     buflen > net_device->recv_buf_size - offset)) {
1472			nvchan->rsc.cnt = 0;
1473			status = NVSP_STAT_FAIL;
1474			netif_err(net_device_ctx, rx_err, ndev,
1475				  "Packet offset:%u + len:%u too big\n",
1476				  offset, buflen);
1477
1478			continue;
1479		}
1480
1481		/* We're going to copy (sections of) the packet into nvchan->recv_buf;
1482		 * make sure that nvchan->recv_buf is large enough to hold the packet.
1483		 */
1484		if (unlikely(buflen > net_device->recv_section_size)) {
1485			nvchan->rsc.cnt = 0;
1486			status = NVSP_STAT_FAIL;
1487			netif_err(net_device_ctx, rx_err, ndev,
1488				  "Packet too big: buflen=%u recv_section_size=%u\n",
1489				  buflen, net_device->recv_section_size);
1490
1491			continue;
1492		}
1493
1494		data = recv_buf + offset;
1495
1496		nvchan->rsc.is_last = (i == count - 1);
1497
1498		trace_rndis_recv(ndev, q_idx, data);
1499
1500		/* Pass it to the upper layer */
1501		ret = rndis_filter_receive(ndev, net_device,
1502					   nvchan, data, buflen);
1503
1504		if (unlikely(ret != NVSP_STAT_SUCCESS)) {
1505			/* Drop incomplete packet */
1506			nvchan->rsc.cnt = 0;
1507			status = NVSP_STAT_FAIL;
1508		}
1509	}
1510
1511	enq_receive_complete(ndev, net_device, q_idx,
1512			     vmxferpage_packet->d.trans_id, status);
1513
1514	return count;
1515}
1516
1517static void netvsc_send_table(struct net_device *ndev,
1518			      struct netvsc_device *nvscdev,
1519			      const struct nvsp_message *nvmsg,
1520			      u32 msglen)
1521{
1522	struct net_device_context *net_device_ctx = netdev_priv(ndev);
1523	u32 count, offset, *tab;
1524	int i;
1525
1526	/* Ensure packet is big enough to read send_table fields */
1527	if (msglen < sizeof(struct nvsp_message_header) +
1528		     sizeof(struct nvsp_5_send_indirect_table)) {
1529		netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
1530		return;
1531	}
1532
1533	count = nvmsg->msg.v5_msg.send_table.count;
1534	offset = nvmsg->msg.v5_msg.send_table.offset;
1535
1536	if (count != VRSS_SEND_TAB_SIZE) {
1537		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
1538		return;
1539	}
1540
1541	/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
1542	 * wrong due to a host bug. So fix the offset here.
1543	 */
1544	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
1545	    msglen >= sizeof(struct nvsp_message_header) +
1546	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
1547		offset = sizeof(struct nvsp_message_header) +
1548			 sizeof(union nvsp_6_message_uber);
1549
1550	/* Boundary check for all versions */
1551	if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
1552		netdev_err(ndev, "Received send-table offset too big:%u\n",
1553			   offset);
1554		return;
1555	}
1556
1557	tab = (void *)nvmsg + offset;
1558
1559	for (i = 0; i < count; i++)
1560		net_device_ctx->tx_table[i] = tab[i];
1561}
1562
1563static void netvsc_send_vf(struct net_device *ndev,
1564			   const struct nvsp_message *nvmsg,
1565			   u32 msglen)
1566{
1567	struct net_device_context *net_device_ctx = netdev_priv(ndev);
1568
1569	/* Ensure packet is big enough to read its fields */
1570	if (msglen < sizeof(struct nvsp_message_header) +
1571		     sizeof(struct nvsp_4_send_vf_association)) {
1572		netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
1573		return;
1574	}
1575
1576	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1577	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1578
1579	if (net_device_ctx->vf_alloc)
1580		complete(&net_device_ctx->vf_add);
1581
1582	netdev_info(ndev, "VF slot %u %s\n",
1583		    net_device_ctx->vf_serial,
1584		    net_device_ctx->vf_alloc ? "added" : "removed");
1585}
1586
1587static void netvsc_receive_inband(struct net_device *ndev,
1588				  struct netvsc_device *nvscdev,
1589				  const struct vmpacket_descriptor *desc)
1590{
1591	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1592	u32 msglen = hv_pkt_datalen(desc);
1593
1594	/* Ensure packet is big enough to read header fields */
1595	if (msglen < sizeof(struct nvsp_message_header)) {
1596		netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
1597		return;
1598	}
1599
1600	switch (nvmsg->hdr.msg_type) {
1601	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
1602		netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
1603		break;
1604
1605	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
1606		if (hv_is_isolation_supported())
1607			netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n");
1608		else
1609			netvsc_send_vf(ndev, nvmsg, msglen);
1610		break;
1611	}
1612}
1613
1614static int netvsc_process_raw_pkt(struct hv_device *device,
1615				  struct netvsc_channel *nvchan,
1616				  struct netvsc_device *net_device,
1617				  struct net_device *ndev,
1618				  const struct vmpacket_descriptor *desc,
1619				  int budget)
1620{
1621	struct vmbus_channel *channel = nvchan->channel;
1622	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
1623
1624	trace_nvsp_recv(ndev, channel, nvmsg);
1625
1626	switch (desc->type) {
1627	case VM_PKT_COMP:
1628		netvsc_send_completion(ndev, net_device, channel, desc, budget);
1629		break;
1630
1631	case VM_PKT_DATA_USING_XFER_PAGES:
1632		return netvsc_receive(ndev, net_device, nvchan, desc);
1633
1634	case VM_PKT_DATA_INBAND:
1635		netvsc_receive_inband(ndev, net_device, desc);
1636		break;
1637
1638	default:
1639		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
1640			   desc->type, desc->trans_id);
1641		break;
1642	}
1643
1644	return 0;
1645}
1646
1647static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
1648{
1649	struct vmbus_channel *primary = channel->primary_channel;
1650
1651	return primary ? primary->device_obj : channel->device_obj;
1652}
1653
1654/* Network processing softirq
1655 * Process data in incoming ring buffer from host
1656 * Stops when ring is empty or budget is met or exceeded.
1657 */
1658int netvsc_poll(struct napi_struct *napi, int budget)
1659{
1660	struct netvsc_channel *nvchan
1661		= container_of(napi, struct netvsc_channel, napi);
1662	struct netvsc_device *net_device = nvchan->net_device;
1663	struct vmbus_channel *channel = nvchan->channel;
1664	struct hv_device *device = netvsc_channel_to_device(channel);
1665	struct net_device *ndev = hv_get_drvdata(device);
1666	int work_done = 0;
1667	int ret;
1668
1669	/* If starting a new interval */
1670	if (!nvchan->desc)
1671		nvchan->desc = hv_pkt_iter_first(channel);
1672
1673	nvchan->xdp_flush = false;
1674
1675	while (nvchan->desc && work_done < budget) {
1676		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
1677						    ndev, nvchan->desc, budget);
1678		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1679	}
1680
1681	if (nvchan->xdp_flush)
1682		xdp_do_flush();
1683
1684	/* Send any pending receive completions */
1685	ret = send_recv_completions(ndev, net_device, nvchan);
1686
1687	/* If the NAPI budget was not exhausted this time
1688	 * and we are not busy polling,
1689	 * then re-enable host interrupts
1690	 * and reschedule if the ring is not empty
1691	 * or sending a receive completion failed.
1692	 */
1693	if (work_done < budget &&
1694	    napi_complete_done(napi, work_done) &&
1695	    (ret || hv_end_read(&channel->inbound)) &&
1696	    napi_schedule_prep(napi)) {
1697		hv_begin_read(&channel->inbound);
1698		__napi_schedule(napi);
1699	}
1700
1701	/* The driver may overshoot the budget since one descriptor can carry multiple packets */
1702	return min(work_done, budget);
1703}
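
/* Editor's sketch (not part of the driver): the overall shape of a
 * NAPI-style poll loop, mirroring netvsc_poll() above. Polling stops
 * only when the budget was not exhausted, and resumes if new data
 * raced in while host interrupts were being re-enabled. The mock
 * counter stands in for the VMBus ring buffer; all demo_* names are
 * hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static int ring_pending;	/* packets waiting in the mock ring */

static bool demo_reenable_irq_and_check(void)
{
	/* Returns true if data slipped in: caller must poll again. */
	return ring_pending > 0;
}

static int demo_poll(int budget)
{
	int work = 0;

	while (ring_pending > 0 && work < budget) {
		ring_pending--;	/* "process" one packet */
		work++;
	}

	if (work < budget && demo_reenable_irq_and_check())
		puts("rescheduling: data arrived during re-arm");

	/* Mirrors min(work_done, budget); the mock never overshoots. */
	return work < budget ? work : budget;
}

int main(void)
{
	ring_pending = 5;
	printf("first pass: %d\n", demo_poll(4));	/* 4: budget exhausted */
	printf("second pass: %d\n", demo_poll(4));	/* 1: ring drained */
	return 0;
}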
1704
1705/* Callback invoked when data is available in the host ring buffer.
1706 * Processing is deferred to the network softirq (NAPI).
1707 */
1708void netvsc_channel_cb(void *context)
1709{
1710	struct netvsc_channel *nvchan = context;
1711	struct vmbus_channel *channel = nvchan->channel;
1712	struct hv_ring_buffer_info *rbi = &channel->inbound;
1713
1714	/* preload first vmpacket descriptor */
1715	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
1716
1717	if (napi_schedule_prep(&nvchan->napi)) {
1718		/* disable interrupts from host */
1719		hv_begin_read(rbi);
1720
1721		__napi_schedule_irqoff(&nvchan->napi);
1722	}
1723}
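
/* Editor's sketch (not part of the driver): the interrupt-side pattern
 * above, which is do almost nothing in the callback, mask further host
 * signaling, and defer the real work to the poller. The prep/schedule
 * pair stands in for napi_schedule_prep()/__napi_schedule_irqoff();
 * all demo_* names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static bool polling;	/* mock of NAPI's "already scheduled" state */

static bool demo_schedule_prep(void)
{
	if (polling)
		return false;	/* already scheduled: nothing to do */
	polling = true;
	return true;
}

static void demo_channel_cb(void)
{
	if (demo_schedule_prep()) {
		puts("masking host interrupts");	/* like hv_begin_read() */
		puts("scheduling poller");	/* like __napi_schedule_irqoff() */
	}
}

int main(void)
{
	demo_channel_cb();	/* schedules the poller */
	demo_channel_cb();	/* no-op: already scheduled */
	return 0;
}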
1724
1725/*
1726 * netvsc_device_add - Callback when the device belonging to this
1727 * driver is added
1728 */
1729struct netvsc_device *netvsc_device_add(struct hv_device *device,
1730				const struct netvsc_device_info *device_info)
1731{
1732	int i, ret = 0;
1733	struct netvsc_device *net_device;
1734	struct net_device *ndev = hv_get_drvdata(device);
1735	struct net_device_context *net_device_ctx = netdev_priv(ndev);
1736
1737	net_device = alloc_net_device();
1738	if (!net_device)
1739		return ERR_PTR(-ENOMEM);
1740
1741	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1742		net_device_ctx->tx_table[i] = 0;
1743
1744	/* Because the device uses NAPI, all interrupt batching and
1745	 * control is done via the network softirq, not the channel callback.
1746	 */
1747	set_channel_read_mode(device->channel, HV_CALL_ISR);
1748
1749	/* If we're reopening the device we may have multiple queues; fill the
1750	 * chn_table with the default channel so it can be used before the
1751	 * subchannels are opened.
1752	 * Initialize the channel state before we open;
1753	 * we can be interrupted as soon as we open the channel.
1754	 */
1755
1756	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
1757		struct netvsc_channel *nvchan = &net_device->chan_table[i];
1758
1759		nvchan->channel = device->channel;
1760		nvchan->net_device = net_device;
1761		u64_stats_init(&nvchan->tx_stats.syncp);
1762		u64_stats_init(&nvchan->rx_stats.syncp);
1763
1764		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);
1765
1766		if (ret) {
1767			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
1768			goto cleanup2;
1769		}
1770
1771		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
1772						 MEM_TYPE_PAGE_SHARED, NULL);
1773
1774		if (ret) {
1775			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
1776			goto cleanup2;
1777		}
1778	}
1779
1780	/* Enable NAPI handler before init callbacks */
1781	netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
1782
1783	/* Open the channel */
1784	device->channel->next_request_id_callback = vmbus_next_request_id;
1785	device->channel->request_addr_callback = vmbus_request_addr;
1786	device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
1787	device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;
1788
1789	ret = vmbus_open(device->channel, netvsc_ring_bytes,
1790			 netvsc_ring_bytes,  NULL, 0,
1791			 netvsc_channel_cb, net_device->chan_table);
1792
1793	if (ret != 0) {
1794		netdev_err(ndev, "unable to open channel: %d\n", ret);
1795		goto cleanup;
1796	}
1797
1798	/* Channel is open */
1799	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
1800
1801	napi_enable(&net_device->chan_table[0].napi);
1802
1803	/* Connect with the NetVsp */
1804	ret = netvsc_connect_vsp(device, net_device, device_info);
1805	if (ret != 0) {
1806		netdev_err(ndev,
1807			"unable to connect to NetVSP - %d\n", ret);
1808		goto close;
1809	}
1810
1811	/* Writing the nvdev pointer unblocks netvsc_send(); make sure
1812	 * chn_table is populated first.
1813	 */
1814	rcu_assign_pointer(net_device_ctx->nvdev, net_device);
1815
1816	return net_device;
1817
1818close:
1819	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
1820	napi_disable(&net_device->chan_table[0].napi);
1821
1822	/* Now, we can close the channel safely */
1823	vmbus_close(device->channel);
1824
1825cleanup:
1826	netif_napi_del(&net_device->chan_table[0].napi);
1827
1828cleanup2:
1829	if (net_device->recv_original_buf)
1830		hv_unmap_memory(net_device->recv_buf);
1831
1832	if (net_device->send_original_buf)
1833		hv_unmap_memory(net_device->send_buf);
1834
1835	free_netvsc_device(&net_device->rcu);
1836
1837	return ERR_PTR(ret);
1838}
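
/* Editor's sketch (not part of the driver): the goto-unwind shape of
 * netvsc_device_add() above, where each later failure label falls
 * through to undo every earlier step in reverse order. All demo_*
 * names are hypothetical.
 */
#include <stdio.h>

static int demo_setup(int fail_at)
{
	puts("step 1: allocate");
	if (fail_at == 1)
		return -1;	/* nothing to undo yet */

	puts("step 2: open channel");
	if (fail_at == 2)
		goto undo_alloc;

	puts("step 3: connect protocol");
	if (fail_at == 3)
		goto undo_open;

	return 0;		/* success: publish the device */

undo_open:
	puts("undo: close channel");
undo_alloc:
	puts("undo: free allocation");
	return -1;
}

int main(void)
{
	printf("rc=%d\n\n", demo_setup(3));	/* unwinds steps 2 and 1 */
	printf("rc=%d\n", demo_setup(0));	/* succeeds */
	return 0;
}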