Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2009, Microsoft Corporation.
   4 *
   5 * Authors:
   6 *   Haiyang Zhang <haiyangz@microsoft.com>
   7 *   Hank Janssen  <hjanssen@microsoft.com>
   8 */
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/init.h>
  12#include <linux/atomic.h>
  13#include <linux/ethtool.h>
  14#include <linux/module.h>
  15#include <linux/highmem.h>
  16#include <linux/device.h>
  17#include <linux/io.h>
  18#include <linux/delay.h>
  19#include <linux/netdevice.h>
  20#include <linux/inetdevice.h>
  21#include <linux/etherdevice.h>
  22#include <linux/pci.h>
  23#include <linux/skbuff.h>
  24#include <linux/if_vlan.h>
  25#include <linux/in.h>
  26#include <linux/slab.h>
  27#include <linux/rtnetlink.h>
  28#include <linux/netpoll.h>
  29#include <linux/bpf.h>
  30
  31#include <net/arp.h>
  32#include <net/route.h>
  33#include <net/sock.h>
  34#include <net/pkt_sched.h>
  35#include <net/checksum.h>
  36#include <net/ip6_checksum.h>
  37
  38#include "hyperv_net.h"
  39
  40#define RING_SIZE_MIN	64
 
 
 
  41
  42#define LINKCHANGE_INT (2 * HZ)
  43#define VF_TAKEOVER_INT (HZ / 10)
  44
  45/* Macros to define the context of vf registration */
  46#define VF_REG_IN_PROBE		1
  47#define VF_REG_IN_NOTIFIER	2
  48
  49static unsigned int ring_size __ro_after_init = 128;
  50module_param(ring_size, uint, 0444);
  51MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
  52unsigned int netvsc_ring_bytes __ro_after_init;
  53
  54static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
  55				NETIF_MSG_LINK | NETIF_MSG_IFUP |
  56				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
  57				NETIF_MSG_TX_ERR;
  58
  59static int debug = -1;
  60module_param(debug, int, 0444);
  61MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  62
  63static LIST_HEAD(netvsc_dev_list);
  64
  65static void netvsc_change_rx_flags(struct net_device *net, int change)
  66{
  67	struct net_device_context *ndev_ctx = netdev_priv(net);
  68	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
  69	int inc;
  70
  71	if (!vf_netdev)
  72		return;
  73
  74	if (change & IFF_PROMISC) {
  75		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
  76		dev_set_promiscuity(vf_netdev, inc);
  77	}
  78
  79	if (change & IFF_ALLMULTI) {
  80		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
  81		dev_set_allmulti(vf_netdev, inc);
  82	}
  83}
  84
  85static void netvsc_set_rx_mode(struct net_device *net)
  86{
  87	struct net_device_context *ndev_ctx = netdev_priv(net);
  88	struct net_device *vf_netdev;
  89	struct netvsc_device *nvdev;
  90
  91	rcu_read_lock();
  92	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
  93	if (vf_netdev) {
  94		dev_uc_sync(vf_netdev, net);
  95		dev_mc_sync(vf_netdev, net);
  96	}
  97
  98	nvdev = rcu_dereference(ndev_ctx->nvdev);
  99	if (nvdev)
 100		rndis_filter_update(nvdev);
 101	rcu_read_unlock();
 102}
 103
 104static void netvsc_tx_enable(struct netvsc_device *nvscdev,
 105			     struct net_device *ndev)
 106{
 107	nvscdev->tx_disable = false;
 108	virt_wmb(); /* ensure queue wake up mechanism is on */
 109
 110	netif_tx_wake_all_queues(ndev);
 111}
 112
 113static int netvsc_open(struct net_device *net)
 114{
 115	struct net_device_context *ndev_ctx = netdev_priv(net);
 116	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
 117	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
 118	struct rndis_device *rdev;
 119	int ret = 0;
 120
 121	netif_carrier_off(net);
 122
 123	/* Open up the device */
 124	ret = rndis_filter_open(nvdev);
 125	if (ret != 0) {
 126		netdev_err(net, "unable to open device (ret %d).\n", ret);
 127		return ret;
 128	}
 129
 130	rdev = nvdev->extension;
 131	if (!rdev->link_state) {
 132		netif_carrier_on(net);
 133		netvsc_tx_enable(nvdev, net);
 134	}
 135
 136	if (vf_netdev) {
 137		/* Setting synthetic device up transparently sets
 138		 * slave as up. If open fails, then slave will be
 139		 * still be offline (and not used).
 140		 */
 141		ret = dev_open(vf_netdev, NULL);
 142		if (ret)
 143			netdev_warn(net,
 144				    "unable to open slave: %s: %d\n",
 145				    vf_netdev->name, ret);
 146	}
 147	return 0;
 148}
 149
 150static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
 151{
 152	unsigned int retry = 0;
 153	int i;
 154
 155	/* Ensure pending bytes in ring are read */
 156	for (;;) {
 157		u32 aread = 0;
 158
 159		for (i = 0; i < nvdev->num_chn; i++) {
 160			struct vmbus_channel *chn
 161				= nvdev->chan_table[i].channel;
 162
 163			if (!chn)
 164				continue;
 165
 166			/* make sure receive not running now */
 167			napi_synchronize(&nvdev->chan_table[i].napi);
 168
 169			aread = hv_get_bytes_to_read(&chn->inbound);
 170			if (aread)
 171				break;
 172
 173			aread = hv_get_bytes_to_read(&chn->outbound);
 174			if (aread)
 175				break;
 176		}
 177
 178		if (aread == 0)
 179			return 0;
 180
 181		if (++retry > RETRY_MAX)
 182			return -ETIMEDOUT;
 183
 184		usleep_range(RETRY_US_LO, RETRY_US_HI);
 185	}
 186}
 187
 188static void netvsc_tx_disable(struct netvsc_device *nvscdev,
 189			      struct net_device *ndev)
 190{
 191	if (nvscdev) {
 192		nvscdev->tx_disable = true;
 193		virt_wmb(); /* ensure txq will not wake up after stop */
 194	}
 195
 196	netif_tx_disable(ndev);
 197}
 198
 199static int netvsc_close(struct net_device *net)
 200{
 201	struct net_device_context *net_device_ctx = netdev_priv(net);
 202	struct net_device *vf_netdev
 203		= rtnl_dereference(net_device_ctx->vf_netdev);
 204	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 205	int ret;
 206
 207	netvsc_tx_disable(nvdev, net);
 208
 209	/* No need to close rndis filter if it is removed already */
 210	if (!nvdev)
 211		return 0;
 212
 213	ret = rndis_filter_close(nvdev);
 214	if (ret != 0) {
 215		netdev_err(net, "unable to close device (ret %d).\n", ret);
 216		return ret;
 217	}
 218
 219	ret = netvsc_wait_until_empty(nvdev);
 220	if (ret)
 221		netdev_err(net, "Ring buffer not empty after closing rndis\n");
 222
 223	if (vf_netdev)
 224		dev_close(vf_netdev);
 225
 226	return ret;
 227}
 228
 229static inline void *init_ppi_data(struct rndis_message *msg,
 230				  u32 ppi_size, u32 pkt_type)
 231{
 232	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
 233	struct rndis_per_packet_info *ppi;
 234
 235	rndis_pkt->data_offset += ppi_size;
 236	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
 237		+ rndis_pkt->per_pkt_info_len;
 238
 239	ppi->size = ppi_size;
 240	ppi->type = pkt_type;
 241	ppi->internal = 0;
 242	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
 243
 244	rndis_pkt->per_pkt_info_len += ppi_size;
 245
 246	return ppi + 1;
 247}
 248
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 249static inline int netvsc_get_tx_queue(struct net_device *ndev,
 250				      struct sk_buff *skb, int old_idx)
 251{
 252	const struct net_device_context *ndc = netdev_priv(ndev);
 253	struct sock *sk = skb->sk;
 254	int q_idx;
 255
 256	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
 257			      (VRSS_SEND_TAB_SIZE - 1)];
 258
 259	/* If queue index changed record the new value */
 260	if (q_idx != old_idx &&
 261	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
 262		sk_tx_queue_set(sk, q_idx);
 263
 264	return q_idx;
 265}
 266
 267/*
 268 * Select queue for transmit.
 269 *
 270 * If a valid queue has already been assigned, then use that.
 271 * Otherwise compute tx queue based on hash and the send table.
 272 *
 273 * This is basically similar to default (netdev_pick_tx) with the added step
 274 * of using the host send_table when no other queue has been assigned.
 275 *
 276 * TODO support XPS - but get_xps_queue not exported
 277 */
 278static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
 279{
 280	int q_idx = sk_tx_queue_get(skb->sk);
 281
 282	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
 283		/* If forwarding a packet, we use the recorded queue when
 284		 * available for better cache locality.
 285		 */
 286		if (skb_rx_queue_recorded(skb))
 287			q_idx = skb_get_rx_queue(skb);
 288		else
 289			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
 290	}
 291
 292	return q_idx;
 293}
 294
 295static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 296			       struct net_device *sb_dev)
 297{
 298	struct net_device_context *ndc = netdev_priv(ndev);
 299	struct net_device *vf_netdev;
 300	u16 txq;
 301
 302	rcu_read_lock();
 303	vf_netdev = rcu_dereference(ndc->vf_netdev);
 304	if (vf_netdev) {
 305		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
 306
 307		if (vf_ops->ndo_select_queue)
 308			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
 309		else
 310			txq = netdev_pick_tx(vf_netdev, skb, NULL);
 311
 312		/* Record the queue selected by VF so that it can be
 313		 * used for common case where VF has more queues than
 314		 * the synthetic device.
 315		 */
 316		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
 317	} else {
 318		txq = netvsc_pick_tx(ndev, skb);
 319	}
 320	rcu_read_unlock();
 321
 322	while (txq >= ndev->real_num_tx_queues)
 323		txq -= ndev->real_num_tx_queues;
 324
 325	return txq;
 326}
 327
 328static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
 329		       struct hv_page_buffer *pb)
 330{
 331	int j = 0;
 332
 333	hvpfn += offset >> HV_HYP_PAGE_SHIFT;
 334	offset = offset & ~HV_HYP_PAGE_MASK;
 
 
 
 335
 336	while (len > 0) {
 337		unsigned long bytes;
 338
 339		bytes = HV_HYP_PAGE_SIZE - offset;
 340		if (bytes > len)
 341			bytes = len;
 342		pb[j].pfn = hvpfn;
 343		pb[j].offset = offset;
 344		pb[j].len = bytes;
 345
 346		offset += bytes;
 347		len -= bytes;
 348
 349		if (offset == HV_HYP_PAGE_SIZE && len) {
 350			hvpfn++;
 351			offset = 0;
 352			j++;
 353		}
 354	}
 355
 356	return j + 1;
 357}
 358
 359static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 360			   struct hv_netvsc_packet *packet,
 361			   struct hv_page_buffer *pb)
 362{
 363	u32 slots_used = 0;
 364	char *data = skb->data;
 365	int frags = skb_shinfo(skb)->nr_frags;
 366	int i;
 367
 368	/* The packet is laid out thus:
 369	 * 1. hdr: RNDIS header and PPI
 370	 * 2. skb linear data
 371	 * 3. skb fragment data
 372	 */
 373	slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
 374				  offset_in_hvpage(hdr),
 375				  len,
 376				  &pb[slots_used]);
 377
 378	packet->rmsg_size = len;
 379	packet->rmsg_pgcnt = slots_used;
 380
 381	slots_used += fill_pg_buf(virt_to_hvpfn(data),
 382				  offset_in_hvpage(data),
 383				  skb_headlen(skb),
 384				  &pb[slots_used]);
 385
 386	for (i = 0; i < frags; i++) {
 387		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 388
 389		slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
 390					  skb_frag_off(frag),
 391					  skb_frag_size(frag),
 392					  &pb[slots_used]);
 393	}
 394	return slots_used;
 395}
 396
 397static int count_skb_frag_slots(struct sk_buff *skb)
 398{
 399	int i, frags = skb_shinfo(skb)->nr_frags;
 400	int pages = 0;
 401
 402	for (i = 0; i < frags; i++) {
 403		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 404		unsigned long size = skb_frag_size(frag);
 405		unsigned long offset = skb_frag_off(frag);
 406
 407		/* Skip unused frames from start of page */
 408		offset &= ~HV_HYP_PAGE_MASK;
 409		pages += HVPFN_UP(offset + size);
 410	}
 411	return pages;
 412}
 413
 414static int netvsc_get_slots(struct sk_buff *skb)
 415{
 416	char *data = skb->data;
 417	unsigned int offset = offset_in_hvpage(data);
 418	unsigned int len = skb_headlen(skb);
 419	int slots;
 420	int frag_slots;
 421
 422	slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
 423	frag_slots = count_skb_frag_slots(skb);
 424	return slots + frag_slots;
 425}
 426
 427static u32 net_checksum_info(struct sk_buff *skb)
 428{
 429	if (skb->protocol == htons(ETH_P_IP)) {
 430		struct iphdr *ip = ip_hdr(skb);
 431
 432		if (ip->protocol == IPPROTO_TCP)
 433			return TRANSPORT_INFO_IPV4_TCP;
 434		else if (ip->protocol == IPPROTO_UDP)
 435			return TRANSPORT_INFO_IPV4_UDP;
 436	} else {
 437		struct ipv6hdr *ip6 = ipv6_hdr(skb);
 438
 439		if (ip6->nexthdr == IPPROTO_TCP)
 440			return TRANSPORT_INFO_IPV6_TCP;
 441		else if (ip6->nexthdr == IPPROTO_UDP)
 442			return TRANSPORT_INFO_IPV6_UDP;
 443	}
 444
 445	return TRANSPORT_INFO_NOT_IP;
 446}
 447
 448/* Send skb on the slave VF device. */
 449static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
 450			  struct sk_buff *skb)
 451{
 452	struct net_device_context *ndev_ctx = netdev_priv(net);
 453	unsigned int len = skb->len;
 454	int rc;
 455
 456	skb->dev = vf_netdev;
 457	skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
 458
 459	rc = dev_queue_xmit(skb);
 460	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
 461		struct netvsc_vf_pcpu_stats *pcpu_stats
 462			= this_cpu_ptr(ndev_ctx->vf_stats);
 463
 464		u64_stats_update_begin(&pcpu_stats->syncp);
 465		pcpu_stats->tx_packets++;
 466		pcpu_stats->tx_bytes += len;
 467		u64_stats_update_end(&pcpu_stats->syncp);
 468	} else {
 469		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
 470	}
 471
 472	return rc;
 473}
 474
 475static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
 476{
 477	struct net_device_context *net_device_ctx = netdev_priv(net);
 478	struct hv_netvsc_packet *packet = NULL;
 479	int ret;
 480	unsigned int num_data_pgs;
 481	struct rndis_message *rndis_msg;
 482	struct net_device *vf_netdev;
 483	u32 rndis_msg_size;
 484	u32 hash;
 485	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
 486
 487	/* If VF is present and up then redirect packets to it.
 488	 * Skip the VF if it is marked down or has no carrier.
 489	 * If netpoll is in uses, then VF can not be used either.
 490	 */
 491	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
 492	if (vf_netdev && netif_running(vf_netdev) &&
 493	    netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net) &&
 494	    net_device_ctx->data_path_is_vf)
 495		return netvsc_vf_xmit(net, vf_netdev, skb);
 496
 497	/* We will atmost need two pages to describe the rndis
 498	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
 499	 * of pages in a single packet. If skb is scattered around
 500	 * more pages we try linearizing it.
 501	 */
 502
 503	num_data_pgs = netvsc_get_slots(skb) + 2;
 504
 505	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
 506		++net_device_ctx->eth_stats.tx_scattered;
 507
 508		if (skb_linearize(skb))
 509			goto no_memory;
 510
 511		num_data_pgs = netvsc_get_slots(skb) + 2;
 512		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
 513			++net_device_ctx->eth_stats.tx_too_big;
 514			goto drop;
 515		}
 516	}
 517
 518	/*
 519	 * Place the rndis header in the skb head room and
 520	 * the skb->cb will be used for hv_netvsc_packet
 521	 * structure.
 522	 */
 523	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
 524	if (ret)
 525		goto no_memory;
 526
 527	/* Use the skb control buffer for building up the packet */
 528	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
 529			sizeof_field(struct sk_buff, cb));
 530	packet = (struct hv_netvsc_packet *)skb->cb;
 531
 532	packet->q_idx = skb_get_queue_mapping(skb);
 533
 534	packet->total_data_buflen = skb->len;
 535	packet->total_bytes = skb->len;
 536	packet->total_packets = 1;
 537
 538	rndis_msg = (struct rndis_message *)skb->head;
 539
 540	/* Add the rndis header */
 541	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
 542	rndis_msg->msg_len = packet->total_data_buflen;
 543
 544	rndis_msg->msg.pkt = (struct rndis_packet) {
 545		.data_offset = sizeof(struct rndis_packet),
 546		.data_len = packet->total_data_buflen,
 547		.per_pkt_info_offset = sizeof(struct rndis_packet),
 548	};
 549
 550	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
 551
 552	hash = skb_get_hash_raw(skb);
 553	if (hash != 0 && net->real_num_tx_queues > 1) {
 554		u32 *hash_info;
 555
 556		rndis_msg_size += NDIS_HASH_PPI_SIZE;
 557		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
 558					  NBL_HASH_VALUE);
 559		*hash_info = hash;
 560	}
 561
 562	/* When using AF_PACKET we need to drop VLAN header from
 563	 * the frame and update the SKB to allow the HOST OS
 564	 * to transmit the 802.1Q packet
 565	 */
 566	if (skb->protocol == htons(ETH_P_8021Q)) {
 567		u16 vlan_tci;
 568
 569		skb_reset_mac_header(skb);
 570		if (eth_type_vlan(eth_hdr(skb)->h_proto)) {
 571			if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) {
 572				++net_device_ctx->eth_stats.vlan_error;
 573				goto drop;
 574			}
 575
 576			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
 577			/* Update the NDIS header pkt lengths */
 578			packet->total_data_buflen -= VLAN_HLEN;
 579			packet->total_bytes -= VLAN_HLEN;
 580			rndis_msg->msg_len = packet->total_data_buflen;
 581			rndis_msg->msg.pkt.data_len = packet->total_data_buflen;
 582		}
 583	}
 584
 585	if (skb_vlan_tag_present(skb)) {
 586		struct ndis_pkt_8021q_info *vlan;
 587
 588		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
 589		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
 590				     IEEE_8021Q_INFO);
 591
 592		vlan->value = 0;
 593		vlan->vlanid = skb_vlan_tag_get_id(skb);
 594		vlan->cfi = skb_vlan_tag_get_cfi(skb);
 595		vlan->pri = skb_vlan_tag_get_prio(skb);
 596	}
 597
 598	if (skb_is_gso(skb)) {
 599		struct ndis_tcp_lso_info *lso_info;
 600
 601		rndis_msg_size += NDIS_LSO_PPI_SIZE;
 602		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
 603					 TCP_LARGESEND_PKTINFO);
 604
 605		lso_info->value = 0;
 606		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
 607		if (skb->protocol == htons(ETH_P_IP)) {
 608			lso_info->lso_v2_transmit.ip_version =
 609				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
 610			ip_hdr(skb)->tot_len = 0;
 611			ip_hdr(skb)->check = 0;
 612			tcp_hdr(skb)->check =
 613				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 614						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
 615		} else {
 616			lso_info->lso_v2_transmit.ip_version =
 617				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
 618			tcp_v6_gso_csum_prep(skb);
 619		}
 620		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
 621		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
 622	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 623		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
 624			struct ndis_tcp_ip_checksum_info *csum_info;
 625
 626			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
 627			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
 628						  TCPIP_CHKSUM_PKTINFO);
 629
 630			csum_info->value = 0;
 631			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
 632
 633			if (skb->protocol == htons(ETH_P_IP)) {
 634				csum_info->transmit.is_ipv4 = 1;
 635
 636				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 637					csum_info->transmit.tcp_checksum = 1;
 638				else
 639					csum_info->transmit.udp_checksum = 1;
 640			} else {
 641				csum_info->transmit.is_ipv6 = 1;
 642
 643				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 644					csum_info->transmit.tcp_checksum = 1;
 645				else
 646					csum_info->transmit.udp_checksum = 1;
 647			}
 648		} else {
 649			/* Can't do offload of this type of checksum */
 650			if (skb_checksum_help(skb))
 651				goto drop;
 652		}
 653	}
 654
 655	/* Start filling in the page buffers with the rndis hdr */
 656	rndis_msg->msg_len += rndis_msg_size;
 657	packet->total_data_buflen = rndis_msg->msg_len;
 658	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
 659					       skb, packet, pb);
 660
 661	/* timestamp packet in software */
 662	skb_tx_timestamp(skb);
 663
 664	ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
 665	if (likely(ret == 0))
 666		return NETDEV_TX_OK;
 667
 668	if (ret == -EAGAIN) {
 669		++net_device_ctx->eth_stats.tx_busy;
 670		return NETDEV_TX_BUSY;
 671	}
 672
 673	if (ret == -ENOSPC)
 674		++net_device_ctx->eth_stats.tx_no_space;
 675
 676drop:
 677	dev_kfree_skb_any(skb);
 678	net->stats.tx_dropped++;
 679
 680	return NETDEV_TX_OK;
 681
 682no_memory:
 683	++net_device_ctx->eth_stats.tx_no_memory;
 684	goto drop;
 685}
 686
 687static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
 688				     struct net_device *ndev)
 689{
 690	return netvsc_xmit(skb, ndev, false);
 691}
 692
 693/*
 694 * netvsc_linkstatus_callback - Link up/down notification
 695 */
 696void netvsc_linkstatus_callback(struct net_device *net,
 697				struct rndis_message *resp,
 698				void *data, u32 data_buflen)
 699{
 700	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
 701	struct net_device_context *ndev_ctx = netdev_priv(net);
 702	struct netvsc_reconfig *event;
 703	unsigned long flags;
 704
 705	/* Ensure the packet is big enough to access its fields */
 706	if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) {
 707		netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n",
 708			   resp->msg_len);
 709		return;
 710	}
 711
 712	/* Copy the RNDIS indicate status into nvchan->recv_buf */
 713	memcpy(indicate, data + RNDIS_HEADER_SIZE, sizeof(*indicate));
 714
 715	/* Update the physical link speed when changing to another vSwitch */
 716	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
 717		u32 speed;
 718
 719		/* Validate status_buf_offset and status_buflen.
 720		 *
 721		 * Certain (pre-Fe) implementations of Hyper-V's vSwitch didn't account
 722		 * for the status buffer field in resp->msg_len; perform the validation
 723		 * using data_buflen (>= resp->msg_len).
 724		 */
 725		if (indicate->status_buflen < sizeof(speed) ||
 726		    indicate->status_buf_offset < sizeof(*indicate) ||
 727		    data_buflen - RNDIS_HEADER_SIZE < indicate->status_buf_offset ||
 728		    data_buflen - RNDIS_HEADER_SIZE - indicate->status_buf_offset
 729				< indicate->status_buflen) {
 730			netdev_err(net, "invalid rndis_indicate_status packet\n");
 731			return;
 732		}
 733
 734		speed = *(u32 *)(data + RNDIS_HEADER_SIZE + indicate->status_buf_offset) / 10000;
 735		ndev_ctx->speed = speed;
 736		return;
 737	}
 738
 739	/* Handle these link change statuses below */
 740	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
 741	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
 742	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
 743		return;
 744
 745	if (net->reg_state != NETREG_REGISTERED)
 746		return;
 747
 748	event = kzalloc(sizeof(*event), GFP_ATOMIC);
 749	if (!event)
 750		return;
 751	event->event = indicate->status;
 752
 753	spin_lock_irqsave(&ndev_ctx->lock, flags);
 754	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
 755	spin_unlock_irqrestore(&ndev_ctx->lock, flags);
 756
 757	schedule_delayed_work(&ndev_ctx->dwork, 0);
 758}
 759
 760/* This function should only be called after skb_record_rx_queue() */
 761void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
 762{
 763	int rc;
 764
 765	skb->queue_mapping = skb_get_rx_queue(skb);
 766	__skb_push(skb, ETH_HLEN);
 767
 768	rc = netvsc_xmit(skb, ndev, true);
 769
 770	if (dev_xmit_complete(rc))
 771		return;
 772
 773	dev_kfree_skb_any(skb);
 774	ndev->stats.tx_dropped++;
 775}
 776
 777static void netvsc_comp_ipcsum(struct sk_buff *skb)
 778{
 779	struct iphdr *iph = (struct iphdr *)skb->data;
 780
 781	iph->check = 0;
 782	iph->check = ip_fast_csum(iph, iph->ihl);
 783}
 784
 785static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
 786					     struct netvsc_channel *nvchan,
 787					     struct xdp_buff *xdp)
 788{
 789	struct napi_struct *napi = &nvchan->napi;
 790	const struct ndis_pkt_8021q_info *vlan = &nvchan->rsc.vlan;
 791	const struct ndis_tcp_ip_checksum_info *csum_info =
 792						&nvchan->rsc.csum_info;
 793	const u32 *hash_info = &nvchan->rsc.hash_info;
 794	u8 ppi_flags = nvchan->rsc.ppi_flags;
 795	struct sk_buff *skb;
 796	void *xbuf = xdp->data_hard_start;
 797	int i;
 798
 799	if (xbuf) {
 800		unsigned int hdroom = xdp->data - xdp->data_hard_start;
 801		unsigned int xlen = xdp->data_end - xdp->data;
 802		unsigned int frag_size = xdp->frame_sz;
 803
 804		skb = build_skb(xbuf, frag_size);
 805
 806		if (!skb) {
 807			__free_page(virt_to_page(xbuf));
 808			return NULL;
 809		}
 810
 811		skb_reserve(skb, hdroom);
 812		skb_put(skb, xlen);
 813		skb->dev = napi->dev;
 814	} else {
 815		skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
 816
 817		if (!skb)
 818			return NULL;
 819
 820		/* Copy to skb. This copy is needed here since the memory
 821		 * pointed by hv_netvsc_packet cannot be deallocated.
 822		 */
 823		for (i = 0; i < nvchan->rsc.cnt; i++)
 824			skb_put_data(skb, nvchan->rsc.data[i],
 825				     nvchan->rsc.len[i]);
 826	}
 827
 828	skb->protocol = eth_type_trans(skb, net);
 829
 830	/* skb is already created with CHECKSUM_NONE */
 831	skb_checksum_none_assert(skb);
 832
 833	/* Incoming packets may have IP header checksum verified by the host.
 834	 * They may not have IP header checksum computed after coalescing.
 835	 * We compute it here if the flags are set, because on Linux, the IP
 836	 * checksum is always checked.
 837	 */
 838	if ((ppi_flags & NVSC_RSC_CSUM_INFO) && csum_info->receive.ip_checksum_value_invalid &&
 839	    csum_info->receive.ip_checksum_succeeded &&
 840	    skb->protocol == htons(ETH_P_IP)) {
 841		/* Check that there is enough space to hold the IP header. */
 842		if (skb_headlen(skb) < sizeof(struct iphdr)) {
 843			kfree_skb(skb);
 844			return NULL;
 845		}
 846		netvsc_comp_ipcsum(skb);
 847	}
 848
 849	/* Do L4 checksum offload if enabled and present. */
 850	if ((ppi_flags & NVSC_RSC_CSUM_INFO) && (net->features & NETIF_F_RXCSUM)) {
 851		if (csum_info->receive.tcp_checksum_succeeded ||
 852		    csum_info->receive.udp_checksum_succeeded)
 853			skb->ip_summed = CHECKSUM_UNNECESSARY;
 854	}
 855
 856	if ((ppi_flags & NVSC_RSC_HASH_INFO) && (net->features & NETIF_F_RXHASH))
 857		skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
 858
 859	if (ppi_flags & NVSC_RSC_VLAN) {
 860		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
 861			(vlan->cfi ? VLAN_CFI_MASK : 0);
 862
 863		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 864				       vlan_tci);
 865	}
 866
 867	return skb;
 868}
 869
 870/*
 871 * netvsc_recv_callback -  Callback when we receive a packet from the
 872 * "wire" on the specified device.
 873 */
 874int netvsc_recv_callback(struct net_device *net,
 875			 struct netvsc_device *net_device,
 876			 struct netvsc_channel *nvchan)
 877{
 878	struct net_device_context *net_device_ctx = netdev_priv(net);
 879	struct vmbus_channel *channel = nvchan->channel;
 880	u16 q_idx = channel->offermsg.offer.sub_channel_index;
 881	struct sk_buff *skb;
 882	struct netvsc_stats_rx *rx_stats = &nvchan->rx_stats;
 883	struct xdp_buff xdp;
 884	u32 act;
 885
 886	if (net->reg_state != NETREG_REGISTERED)
 887		return NVSP_STAT_FAIL;
 888
 889	act = netvsc_run_xdp(net, nvchan, &xdp);
 890
 891	if (act == XDP_REDIRECT)
 892		return NVSP_STAT_SUCCESS;
 893
 894	if (act != XDP_PASS && act != XDP_TX) {
 895		u64_stats_update_begin(&rx_stats->syncp);
 896		rx_stats->xdp_drop++;
 897		u64_stats_update_end(&rx_stats->syncp);
 898
 899		return NVSP_STAT_SUCCESS; /* consumed by XDP */
 900	}
 901
 902	/* Allocate a skb - TODO direct I/O to pages? */
 903	skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);
 904
 905	if (unlikely(!skb)) {
 906		++net_device_ctx->eth_stats.rx_no_memory;
 907		return NVSP_STAT_FAIL;
 908	}
 909
 910	skb_record_rx_queue(skb, q_idx);
 911
 912	/*
 913	 * Even if injecting the packet, record the statistics
 914	 * on the synthetic device because modifying the VF device
 915	 * statistics will not work correctly.
 916	 */
 917	u64_stats_update_begin(&rx_stats->syncp);
 918	if (act == XDP_TX)
 919		rx_stats->xdp_tx++;
 920
 921	rx_stats->packets++;
 922	rx_stats->bytes += nvchan->rsc.pktlen;
 923
 924	if (skb->pkt_type == PACKET_BROADCAST)
 925		++rx_stats->broadcast;
 926	else if (skb->pkt_type == PACKET_MULTICAST)
 927		++rx_stats->multicast;
 928	u64_stats_update_end(&rx_stats->syncp);
 929
 930	if (act == XDP_TX) {
 931		netvsc_xdp_xmit(skb, net);
 932		return NVSP_STAT_SUCCESS;
 933	}
 934
 935	napi_gro_receive(&nvchan->napi, skb);
 936	return NVSP_STAT_SUCCESS;
 937}
 938
 939static void netvsc_get_drvinfo(struct net_device *net,
 940			       struct ethtool_drvinfo *info)
 941{
 942	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
 943	strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
 944}
 945
 946static void netvsc_get_channels(struct net_device *net,
 947				struct ethtool_channels *channel)
 948{
 949	struct net_device_context *net_device_ctx = netdev_priv(net);
 950	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 951
 952	if (nvdev) {
 953		channel->max_combined	= nvdev->max_chn;
 954		channel->combined_count = nvdev->num_chn;
 955	}
 956}
 957
 958/* Alloc struct netvsc_device_info, and initialize it from either existing
 959 * struct netvsc_device, or from default values.
 960 */
 961static
 962struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
 963{
 964	struct netvsc_device_info *dev_info;
 965	struct bpf_prog *prog;
 966
 967	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
 968
 969	if (!dev_info)
 970		return NULL;
 971
 972	if (nvdev) {
 973		ASSERT_RTNL();
 974
 975		dev_info->num_chn = nvdev->num_chn;
 976		dev_info->send_sections = nvdev->send_section_cnt;
 977		dev_info->send_section_size = nvdev->send_section_size;
 978		dev_info->recv_sections = nvdev->recv_section_cnt;
 979		dev_info->recv_section_size = nvdev->recv_section_size;
 980
 981		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
 982		       NETVSC_HASH_KEYLEN);
 983
 984		prog = netvsc_xdp_get(nvdev);
 985		if (prog) {
 986			bpf_prog_inc(prog);
 987			dev_info->bprog = prog;
 988		}
 989	} else {
 990		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
 991		dev_info->send_sections = NETVSC_DEFAULT_TX;
 992		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
 993		dev_info->recv_sections = NETVSC_DEFAULT_RX;
 994		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
 995	}
 996
 997	return dev_info;
 998}
 999
1000/* Free struct netvsc_device_info */
1001static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
1002{
1003	if (dev_info->bprog) {
1004		ASSERT_RTNL();
1005		bpf_prog_put(dev_info->bprog);
1006	}
1007
1008	kfree(dev_info);
1009}
1010
1011static int netvsc_detach(struct net_device *ndev,
1012			 struct netvsc_device *nvdev)
1013{
1014	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1015	struct hv_device *hdev = ndev_ctx->device_ctx;
1016	int ret;
1017
1018	/* Don't try continuing to try and setup sub channels */
1019	if (cancel_work_sync(&nvdev->subchan_work))
1020		nvdev->num_chn = 1;
1021
1022	netvsc_xdp_set(ndev, NULL, NULL, nvdev);
1023
1024	/* If device was up (receiving) then shutdown */
1025	if (netif_running(ndev)) {
1026		netvsc_tx_disable(nvdev, ndev);
1027
1028		ret = rndis_filter_close(nvdev);
1029		if (ret) {
1030			netdev_err(ndev,
1031				   "unable to close device (ret %d).\n", ret);
1032			return ret;
1033		}
1034
1035		ret = netvsc_wait_until_empty(nvdev);
1036		if (ret) {
1037			netdev_err(ndev,
1038				   "Ring buffer not empty after closing rndis\n");
1039			return ret;
1040		}
1041	}
1042
1043	netif_device_detach(ndev);
1044
1045	rndis_filter_device_remove(hdev, nvdev);
1046
1047	return 0;
1048}
1049
1050static int netvsc_attach(struct net_device *ndev,
1051			 struct netvsc_device_info *dev_info)
1052{
1053	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1054	struct hv_device *hdev = ndev_ctx->device_ctx;
1055	struct netvsc_device *nvdev;
1056	struct rndis_device *rdev;
1057	struct bpf_prog *prog;
1058	int ret = 0;
1059
1060	nvdev = rndis_filter_device_add(hdev, dev_info);
1061	if (IS_ERR(nvdev))
1062		return PTR_ERR(nvdev);
1063
1064	if (nvdev->num_chn > 1) {
1065		ret = rndis_set_subchannel(ndev, nvdev, dev_info);
1066
1067		/* if unavailable, just proceed with one queue */
1068		if (ret) {
1069			nvdev->max_chn = 1;
1070			nvdev->num_chn = 1;
1071		}
1072	}
1073
1074	prog = dev_info->bprog;
1075	if (prog) {
1076		bpf_prog_inc(prog);
1077		ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
1078		if (ret) {
1079			bpf_prog_put(prog);
1080			goto err1;
1081		}
1082	}
1083
1084	/* In any case device is now ready */
1085	nvdev->tx_disable = false;
1086	netif_device_attach(ndev);
1087
1088	/* Note: enable and attach happen when sub-channels setup */
1089	netif_carrier_off(ndev);
1090
1091	if (netif_running(ndev)) {
1092		ret = rndis_filter_open(nvdev);
1093		if (ret)
1094			goto err2;
1095
1096		rdev = nvdev->extension;
1097		if (!rdev->link_state)
1098			netif_carrier_on(ndev);
1099	}
1100
1101	return 0;
1102
1103err2:
1104	netif_device_detach(ndev);
1105
1106err1:
1107	rndis_filter_device_remove(hdev, nvdev);
1108
1109	return ret;
1110}
1111
1112static int netvsc_set_channels(struct net_device *net,
1113			       struct ethtool_channels *channels)
1114{
1115	struct net_device_context *net_device_ctx = netdev_priv(net);
1116	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
1117	unsigned int orig, count = channels->combined_count;
1118	struct netvsc_device_info *device_info;
1119	int ret;
1120
1121	/* We do not support separate count for rx, tx, or other */
1122	if (count == 0 ||
1123	    channels->rx_count || channels->tx_count || channels->other_count)
1124		return -EINVAL;
1125
1126	if (!nvdev || nvdev->destroy)
1127		return -ENODEV;
1128
1129	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1130		return -EINVAL;
1131
1132	if (count > nvdev->max_chn)
1133		return -EINVAL;
1134
1135	orig = nvdev->num_chn;
1136
1137	device_info = netvsc_devinfo_get(nvdev);
1138
1139	if (!device_info)
1140		return -ENOMEM;
1141
1142	device_info->num_chn = count;
1143
1144	ret = netvsc_detach(net, nvdev);
1145	if (ret)
1146		goto out;
1147
1148	ret = netvsc_attach(net, device_info);
1149	if (ret) {
1150		device_info->num_chn = orig;
1151		if (netvsc_attach(net, device_info))
1152			netdev_err(net, "restoring channel setting failed\n");
1153	}
1154
1155out:
1156	netvsc_devinfo_put(device_info);
1157	return ret;
1158}
1159
1160static void netvsc_init_settings(struct net_device *dev)
1161{
1162	struct net_device_context *ndc = netdev_priv(dev);
1163
1164	ndc->l4_hash = HV_DEFAULT_L4HASH;
1165
1166	ndc->speed = SPEED_UNKNOWN;
1167	ndc->duplex = DUPLEX_FULL;
1168
1169	dev->features = NETIF_F_LRO;
1170}
1171
1172static int netvsc_get_link_ksettings(struct net_device *dev,
1173				     struct ethtool_link_ksettings *cmd)
1174{
1175	struct net_device_context *ndc = netdev_priv(dev);
1176	struct net_device *vf_netdev;
1177
1178	vf_netdev = rtnl_dereference(ndc->vf_netdev);
1179
1180	if (vf_netdev)
1181		return __ethtool_get_link_ksettings(vf_netdev, cmd);
1182
1183	cmd->base.speed = ndc->speed;
1184	cmd->base.duplex = ndc->duplex;
1185	cmd->base.port = PORT_OTHER;
1186
1187	return 0;
1188}
1189
1190static int netvsc_set_link_ksettings(struct net_device *dev,
1191				     const struct ethtool_link_ksettings *cmd)
1192{
1193	struct net_device_context *ndc = netdev_priv(dev);
1194	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
1195
1196	if (vf_netdev) {
1197		if (!vf_netdev->ethtool_ops->set_link_ksettings)
1198			return -EOPNOTSUPP;
1199
1200		return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
1201								  cmd);
1202	}
1203
1204	return ethtool_virtdev_set_link_ksettings(dev, cmd,
1205						  &ndc->speed, &ndc->duplex);
1206}
1207
1208static int netvsc_change_mtu(struct net_device *ndev, int mtu)
1209{
1210	struct net_device_context *ndevctx = netdev_priv(ndev);
1211	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
1212	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1213	int orig_mtu = ndev->mtu;
1214	struct netvsc_device_info *device_info;
1215	int ret = 0;
1216
1217	if (!nvdev || nvdev->destroy)
1218		return -ENODEV;
1219
1220	device_info = netvsc_devinfo_get(nvdev);
1221
1222	if (!device_info)
1223		return -ENOMEM;
1224
1225	/* Change MTU of underlying VF netdev first. */
1226	if (vf_netdev) {
1227		ret = dev_set_mtu(vf_netdev, mtu);
1228		if (ret)
1229			goto out;
1230	}
1231
1232	ret = netvsc_detach(ndev, nvdev);
1233	if (ret)
1234		goto rollback_vf;
1235
1236	ndev->mtu = mtu;
1237
1238	ret = netvsc_attach(ndev, device_info);
1239	if (!ret)
1240		goto out;
1241
1242	/* Attempt rollback to original MTU */
1243	ndev->mtu = orig_mtu;
1244
1245	if (netvsc_attach(ndev, device_info))
1246		netdev_err(ndev, "restoring mtu failed\n");
1247rollback_vf:
1248	if (vf_netdev)
1249		dev_set_mtu(vf_netdev, orig_mtu);
1250
1251out:
1252	netvsc_devinfo_put(device_info);
1253	return ret;
1254}
1255
1256static void netvsc_get_vf_stats(struct net_device *net,
1257				struct netvsc_vf_pcpu_stats *tot)
1258{
1259	struct net_device_context *ndev_ctx = netdev_priv(net);
1260	int i;
1261
1262	memset(tot, 0, sizeof(*tot));
1263
1264	for_each_possible_cpu(i) {
1265		const struct netvsc_vf_pcpu_stats *stats
1266			= per_cpu_ptr(ndev_ctx->vf_stats, i);
1267		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1268		unsigned int start;
1269
1270		do {
1271			start = u64_stats_fetch_begin(&stats->syncp);
1272			rx_packets = stats->rx_packets;
1273			tx_packets = stats->tx_packets;
1274			rx_bytes = stats->rx_bytes;
1275			tx_bytes = stats->tx_bytes;
1276		} while (u64_stats_fetch_retry(&stats->syncp, start));
1277
1278		tot->rx_packets += rx_packets;
1279		tot->tx_packets += tx_packets;
1280		tot->rx_bytes   += rx_bytes;
1281		tot->tx_bytes   += tx_bytes;
1282		tot->tx_dropped += stats->tx_dropped;
1283	}
1284}
1285
1286static void netvsc_get_pcpu_stats(struct net_device *net,
1287				  struct netvsc_ethtool_pcpu_stats *pcpu_tot)
1288{
1289	struct net_device_context *ndev_ctx = netdev_priv(net);
1290	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
1291	int i;
1292
1293	/* fetch percpu stats of vf */
1294	for_each_possible_cpu(i) {
1295		const struct netvsc_vf_pcpu_stats *stats =
1296			per_cpu_ptr(ndev_ctx->vf_stats, i);
1297		struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
1298		unsigned int start;
1299
1300		do {
1301			start = u64_stats_fetch_begin(&stats->syncp);
1302			this_tot->vf_rx_packets = stats->rx_packets;
1303			this_tot->vf_tx_packets = stats->tx_packets;
1304			this_tot->vf_rx_bytes = stats->rx_bytes;
1305			this_tot->vf_tx_bytes = stats->tx_bytes;
1306		} while (u64_stats_fetch_retry(&stats->syncp, start));
1307		this_tot->rx_packets = this_tot->vf_rx_packets;
1308		this_tot->tx_packets = this_tot->vf_tx_packets;
1309		this_tot->rx_bytes   = this_tot->vf_rx_bytes;
1310		this_tot->tx_bytes   = this_tot->vf_tx_bytes;
1311	}
1312
1313	/* fetch percpu stats of netvsc */
1314	for (i = 0; i < nvdev->num_chn; i++) {
1315		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1316		const struct netvsc_stats_tx *tx_stats;
1317		const struct netvsc_stats_rx *rx_stats;
1318		struct netvsc_ethtool_pcpu_stats *this_tot =
1319			&pcpu_tot[nvchan->channel->target_cpu];
1320		u64 packets, bytes;
1321		unsigned int start;
1322
1323		tx_stats = &nvchan->tx_stats;
1324		do {
1325			start = u64_stats_fetch_begin(&tx_stats->syncp);
1326			packets = tx_stats->packets;
1327			bytes = tx_stats->bytes;
1328		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
1329
1330		this_tot->tx_bytes	+= bytes;
1331		this_tot->tx_packets	+= packets;
1332
1333		rx_stats = &nvchan->rx_stats;
1334		do {
1335			start = u64_stats_fetch_begin(&rx_stats->syncp);
1336			packets = rx_stats->packets;
1337			bytes = rx_stats->bytes;
1338		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
1339
1340		this_tot->rx_bytes	+= bytes;
1341		this_tot->rx_packets	+= packets;
1342	}
1343}
1344
1345static void netvsc_get_stats64(struct net_device *net,
1346			       struct rtnl_link_stats64 *t)
1347{
1348	struct net_device_context *ndev_ctx = netdev_priv(net);
1349	struct netvsc_device *nvdev;
1350	struct netvsc_vf_pcpu_stats vf_tot;
1351	int i;
1352
1353	rcu_read_lock();
1354
1355	nvdev = rcu_dereference(ndev_ctx->nvdev);
1356	if (!nvdev)
1357		goto out;
1358
1359	netdev_stats_to_stats64(t, &net->stats);
1360
1361	netvsc_get_vf_stats(net, &vf_tot);
1362	t->rx_packets += vf_tot.rx_packets;
1363	t->tx_packets += vf_tot.tx_packets;
1364	t->rx_bytes   += vf_tot.rx_bytes;
1365	t->tx_bytes   += vf_tot.tx_bytes;
1366	t->tx_dropped += vf_tot.tx_dropped;
1367
1368	for (i = 0; i < nvdev->num_chn; i++) {
1369		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1370		const struct netvsc_stats_tx *tx_stats;
1371		const struct netvsc_stats_rx *rx_stats;
1372		u64 packets, bytes, multicast;
1373		unsigned int start;
1374
1375		tx_stats = &nvchan->tx_stats;
1376		do {
1377			start = u64_stats_fetch_begin(&tx_stats->syncp);
1378			packets = tx_stats->packets;
1379			bytes = tx_stats->bytes;
1380		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
1381
1382		t->tx_bytes	+= bytes;
1383		t->tx_packets	+= packets;
1384
1385		rx_stats = &nvchan->rx_stats;
1386		do {
1387			start = u64_stats_fetch_begin(&rx_stats->syncp);
1388			packets = rx_stats->packets;
1389			bytes = rx_stats->bytes;
1390			multicast = rx_stats->multicast + rx_stats->broadcast;
1391		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
1392
1393		t->rx_bytes	+= bytes;
1394		t->rx_packets	+= packets;
1395		t->multicast	+= multicast;
1396	}
1397out:
1398	rcu_read_unlock();
1399}
1400
1401static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
1402{
1403	struct net_device_context *ndc = netdev_priv(ndev);
1404	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
1405	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1406	struct sockaddr *addr = p;
1407	int err;
1408
1409	err = eth_prepare_mac_addr_change(ndev, p);
1410	if (err)
1411		return err;
1412
1413	if (!nvdev)
1414		return -ENODEV;
1415
1416	if (vf_netdev) {
1417		err = dev_set_mac_address(vf_netdev, addr, NULL);
1418		if (err)
1419			return err;
1420	}
1421
1422	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
1423	if (!err) {
1424		eth_commit_mac_addr_change(ndev, p);
1425	} else if (vf_netdev) {
1426		/* rollback change on VF */
1427		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
1428		dev_set_mac_address(vf_netdev, addr, NULL);
1429	}
1430
1431	return err;
1432}
1433
1434static const struct {
1435	char name[ETH_GSTRING_LEN];
1436	u16 offset;
1437} netvsc_stats[] = {
1438	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
1439	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
1440	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
1441	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
1442	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
1443	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
1444	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
1445	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
1446	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
1447	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
1448	{ "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) },
1449}, pcpu_stats[] = {
1450	{ "cpu%u_rx_packets",
1451		offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
1452	{ "cpu%u_rx_bytes",
1453		offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
1454	{ "cpu%u_tx_packets",
1455		offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
1456	{ "cpu%u_tx_bytes",
1457		offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
1458	{ "cpu%u_vf_rx_packets",
1459		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
1460	{ "cpu%u_vf_rx_bytes",
1461		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
1462	{ "cpu%u_vf_tx_packets",
1463		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
1464	{ "cpu%u_vf_tx_bytes",
1465		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
1466}, vf_stats[] = {
1467	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
1468	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
1469	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
1470	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
1471	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
1472};
1473
1474#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
1475#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)
1476
1477/* statistics per queue (rx/tx packets/bytes) */
1478#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
1479
1480/* 8 statistics per queue (rx/tx packets/bytes, XDP actions) */
1481#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 8)
1482
1483static int netvsc_get_sset_count(struct net_device *dev, int string_set)
1484{
1485	struct net_device_context *ndc = netdev_priv(dev);
1486	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1487
1488	if (!nvdev)
1489		return -ENODEV;
1490
1491	switch (string_set) {
1492	case ETH_SS_STATS:
1493		return NETVSC_GLOBAL_STATS_LEN
1494			+ NETVSC_VF_STATS_LEN
1495			+ NETVSC_QUEUE_STATS_LEN(nvdev)
1496			+ NETVSC_PCPU_STATS_LEN;
1497	default:
1498		return -EINVAL;
1499	}
1500}
1501
1502static void netvsc_get_ethtool_stats(struct net_device *dev,
1503				     struct ethtool_stats *stats, u64 *data)
1504{
1505	struct net_device_context *ndc = netdev_priv(dev);
1506	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1507	const void *nds = &ndc->eth_stats;
1508	const struct netvsc_stats_tx *tx_stats;
1509	const struct netvsc_stats_rx *rx_stats;
1510	struct netvsc_vf_pcpu_stats sum;
1511	struct netvsc_ethtool_pcpu_stats *pcpu_sum;
1512	unsigned int start;
1513	u64 packets, bytes;
1514	u64 xdp_drop;
1515	u64 xdp_redirect;
1516	u64 xdp_tx;
1517	u64 xdp_xmit;
1518	int i, j, cpu;
1519
1520	if (!nvdev)
1521		return;
1522
1523	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
1524		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
1525
1526	netvsc_get_vf_stats(dev, &sum);
1527	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
1528		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
1529
1530	for (j = 0; j < nvdev->num_chn; j++) {
1531		tx_stats = &nvdev->chan_table[j].tx_stats;
1532
1533		do {
1534			start = u64_stats_fetch_begin(&tx_stats->syncp);
1535			packets = tx_stats->packets;
1536			bytes = tx_stats->bytes;
1537			xdp_xmit = tx_stats->xdp_xmit;
1538		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
1539		data[i++] = packets;
1540		data[i++] = bytes;
1541		data[i++] = xdp_xmit;
1542
1543		rx_stats = &nvdev->chan_table[j].rx_stats;
1544		do {
1545			start = u64_stats_fetch_begin(&rx_stats->syncp);
1546			packets = rx_stats->packets;
1547			bytes = rx_stats->bytes;
1548			xdp_drop = rx_stats->xdp_drop;
1549			xdp_redirect = rx_stats->xdp_redirect;
1550			xdp_tx = rx_stats->xdp_tx;
1551		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
1552		data[i++] = packets;
1553		data[i++] = bytes;
1554		data[i++] = xdp_drop;
1555		data[i++] = xdp_redirect;
1556		data[i++] = xdp_tx;
1557	}
1558
1559	pcpu_sum = kvmalloc_array(num_possible_cpus(),
1560				  sizeof(struct netvsc_ethtool_pcpu_stats),
1561				  GFP_KERNEL);
1562	if (!pcpu_sum)
1563		return;
1564
1565	netvsc_get_pcpu_stats(dev, pcpu_sum);
1566	for_each_present_cpu(cpu) {
1567		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
1568
1569		for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
1570			data[i++] = *(u64 *)((void *)this_sum
1571					     + pcpu_stats[j].offset);
1572	}
1573	kvfree(pcpu_sum);
1574}
1575
1576static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1577{
1578	struct net_device_context *ndc = netdev_priv(dev);
1579	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1580	u8 *p = data;
1581	int i, cpu;
1582
1583	if (!nvdev)
1584		return;
1585
1586	switch (stringset) {
1587	case ETH_SS_STATS:
1588		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
1589			ethtool_puts(&p, netvsc_stats[i].name);
 
 
1590
1591		for (i = 0; i < ARRAY_SIZE(vf_stats); i++)
1592			ethtool_puts(&p, vf_stats[i].name);
 
 
1593
1594		for (i = 0; i < nvdev->num_chn; i++) {
1595			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
1596			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
1597			ethtool_sprintf(&p, "tx_queue_%u_xdp_xmit", i);
1598			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
1599			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
1600			ethtool_sprintf(&p, "rx_queue_%u_xdp_drop", i);
1601			ethtool_sprintf(&p, "rx_queue_%u_xdp_redirect", i);
1602			ethtool_sprintf(&p, "rx_queue_%u_xdp_tx", i);
 
 
1603		}
1604
1605		for_each_present_cpu(cpu) {
1606			for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++)
1607				ethtool_sprintf(&p, pcpu_stats[i].name, cpu);
 
 
1608		}
1609
1610		break;
1611	}
1612}
1613
1614static int
1615netvsc_get_rss_hash_opts(struct net_device_context *ndc,
1616			 struct ethtool_rxnfc *info)
1617{
1618	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
1619
1620	info->data = RXH_IP_SRC | RXH_IP_DST;
1621
1622	switch (info->flow_type) {
1623	case TCP_V4_FLOW:
1624		if (ndc->l4_hash & HV_TCP4_L4HASH)
1625			info->data |= l4_flag;
1626
1627		break;
1628
1629	case TCP_V6_FLOW:
1630		if (ndc->l4_hash & HV_TCP6_L4HASH)
1631			info->data |= l4_flag;
1632
1633		break;
1634
1635	case UDP_V4_FLOW:
1636		if (ndc->l4_hash & HV_UDP4_L4HASH)
1637			info->data |= l4_flag;
1638
1639		break;
1640
1641	case UDP_V6_FLOW:
1642		if (ndc->l4_hash & HV_UDP6_L4HASH)
1643			info->data |= l4_flag;
1644
1645		break;
1646
1647	case IPV4_FLOW:
1648	case IPV6_FLOW:
1649		break;
1650	default:
1651		info->data = 0;
1652		break;
1653	}
1654
1655	return 0;
1656}
1657
1658static int
1659netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1660		 u32 *rules)
1661{
1662	struct net_device_context *ndc = netdev_priv(dev);
1663	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1664
1665	if (!nvdev)
1666		return -ENODEV;
1667
1668	switch (info->cmd) {
1669	case ETHTOOL_GRXRINGS:
1670		info->data = nvdev->num_chn;
1671		return 0;
1672
1673	case ETHTOOL_GRXFH:
1674		return netvsc_get_rss_hash_opts(ndc, info);
1675	}
1676	return -EOPNOTSUPP;
1677}
1678
1679static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
1680				    struct ethtool_rxnfc *info)
1681{
1682	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
1683			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1684		switch (info->flow_type) {
1685		case TCP_V4_FLOW:
1686			ndc->l4_hash |= HV_TCP4_L4HASH;
1687			break;
1688
1689		case TCP_V6_FLOW:
1690			ndc->l4_hash |= HV_TCP6_L4HASH;
1691			break;
1692
1693		case UDP_V4_FLOW:
1694			ndc->l4_hash |= HV_UDP4_L4HASH;
1695			break;
1696
1697		case UDP_V6_FLOW:
1698			ndc->l4_hash |= HV_UDP6_L4HASH;
1699			break;
1700
1701		default:
1702			return -EOPNOTSUPP;
1703		}
1704
1705		return 0;
1706	}
1707
1708	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
1709		switch (info->flow_type) {
1710		case TCP_V4_FLOW:
1711			ndc->l4_hash &= ~HV_TCP4_L4HASH;
1712			break;
1713
1714		case TCP_V6_FLOW:
1715			ndc->l4_hash &= ~HV_TCP6_L4HASH;
1716			break;
1717
1718		case UDP_V4_FLOW:
1719			ndc->l4_hash &= ~HV_UDP4_L4HASH;
1720			break;
1721
1722		case UDP_V6_FLOW:
1723			ndc->l4_hash &= ~HV_UDP6_L4HASH;
1724			break;
1725
1726		default:
1727			return -EOPNOTSUPP;
1728		}
1729
1730		return 0;
1731	}
1732
1733	return -EOPNOTSUPP;
1734}
1735
1736static int
1737netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
1738{
1739	struct net_device_context *ndc = netdev_priv(ndev);
1740
1741	if (info->cmd == ETHTOOL_SRXFH)
1742		return netvsc_set_rss_hash_opts(ndc, info);
1743
1744	return -EOPNOTSUPP;
1745}
1746
1747static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
1748{
1749	return NETVSC_HASH_KEYLEN;
1750}
1751
1752static u32 netvsc_rss_indir_size(struct net_device *dev)
1753{
1754	struct net_device_context *ndc = netdev_priv(dev);
1755
1756	return ndc->rx_table_sz;
1757}
1758
1759static int netvsc_get_rxfh(struct net_device *dev,
1760			   struct ethtool_rxfh_param *rxfh)
1761{
1762	struct net_device_context *ndc = netdev_priv(dev);
1763	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
1764	struct rndis_device *rndis_dev;
1765	int i;
1766
1767	if (!ndev)
1768		return -ENODEV;
1769
1770	rxfh->hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */
 
1771
1772	rndis_dev = ndev->extension;
1773	if (rxfh->indir) {
1774		for (i = 0; i < ndc->rx_table_sz; i++)
1775			rxfh->indir[i] = ndc->rx_table[i];
1776	}
1777
1778	if (rxfh->key)
1779		memcpy(rxfh->key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
1780
1781	return 0;
1782}
1783
1784static int netvsc_set_rxfh(struct net_device *dev,
1785			   struct ethtool_rxfh_param *rxfh,
1786			   struct netlink_ext_ack *extack)
1787{
1788	struct net_device_context *ndc = netdev_priv(dev);
1789	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
1790	struct rndis_device *rndis_dev;
1791	u8 *key = rxfh->key;
1792	int i;
1793
1794	if (!ndev)
1795		return -ENODEV;
1796
1797	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
1798	    rxfh->hfunc != ETH_RSS_HASH_TOP)
1799		return -EOPNOTSUPP;
1800
1801	rndis_dev = ndev->extension;
1802	if (rxfh->indir) {
1803		for (i = 0; i < ndc->rx_table_sz; i++)
1804			if (rxfh->indir[i] >= ndev->num_chn)
1805				return -EINVAL;
1806
1807		for (i = 0; i < ndc->rx_table_sz; i++)
1808			ndc->rx_table[i] = rxfh->indir[i];
1809	}
1810
1811	if (!key) {
1812		if (!rxfh->indir)
1813			return 0;
1814
1815		key = rndis_dev->rss_key;
1816	}
1817
1818	return rndis_filter_set_rss_param(rndis_dev, key);
1819}
1820
1821/* Hyper-V RNDIS protocol does not have ring in the HW sense.
1822 * It does have pre-allocated receive area which is divided into sections.
1823 */
1824static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
1825				   struct ethtool_ringparam *ring)
1826{
1827	u32 max_buf_size;
1828
1829	ring->rx_pending = nvdev->recv_section_cnt;
1830	ring->tx_pending = nvdev->send_section_cnt;
1831
1832	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
1833		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
1834	else
1835		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
1836
1837	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
1838	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
1839		/ nvdev->send_section_size;
1840}
1841
1842static void netvsc_get_ringparam(struct net_device *ndev,
1843				 struct ethtool_ringparam *ring,
1844				 struct kernel_ethtool_ringparam *kernel_ring,
1845				 struct netlink_ext_ack *extack)
1846{
1847	struct net_device_context *ndevctx = netdev_priv(ndev);
1848	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1849
1850	if (!nvdev)
1851		return;
1852
1853	__netvsc_get_ringparam(nvdev, ring);
1854}
1855
1856static int netvsc_set_ringparam(struct net_device *ndev,
1857				struct ethtool_ringparam *ring,
1858				struct kernel_ethtool_ringparam *kernel_ring,
1859				struct netlink_ext_ack *extack)
1860{
1861	struct net_device_context *ndevctx = netdev_priv(ndev);
1862	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1863	struct netvsc_device_info *device_info;
1864	struct ethtool_ringparam orig;
1865	u32 new_tx, new_rx;
1866	int ret = 0;
1867
1868	if (!nvdev || nvdev->destroy)
1869		return -ENODEV;
1870
1871	memset(&orig, 0, sizeof(orig));
1872	__netvsc_get_ringparam(nvdev, &orig);
1873
1874	new_tx = clamp_t(u32, ring->tx_pending,
1875			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
1876	new_rx = clamp_t(u32, ring->rx_pending,
1877			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);
1878
1879	if (new_tx == orig.tx_pending &&
1880	    new_rx == orig.rx_pending)
1881		return 0;	 /* no change */
1882
1883	device_info = netvsc_devinfo_get(nvdev);
1884
1885	if (!device_info)
1886		return -ENOMEM;
1887
1888	device_info->send_sections = new_tx;
1889	device_info->recv_sections = new_rx;
1890
1891	ret = netvsc_detach(ndev, nvdev);
1892	if (ret)
1893		goto out;
1894
1895	ret = netvsc_attach(ndev, device_info);
1896	if (ret) {
1897		device_info->send_sections = orig.tx_pending;
1898		device_info->recv_sections = orig.rx_pending;
1899
1900		if (netvsc_attach(ndev, device_info))
1901			netdev_err(ndev, "restoring ringparam failed");
1902	}
1903
1904out:
1905	netvsc_devinfo_put(device_info);
1906	return ret;
1907}
1908
1909static netdev_features_t netvsc_fix_features(struct net_device *ndev,
1910					     netdev_features_t features)
1911{
1912	struct net_device_context *ndevctx = netdev_priv(ndev);
1913	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1914
1915	if (!nvdev || nvdev->destroy)
1916		return features;
1917
1918	if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
1919		features ^= NETIF_F_LRO;
1920		netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
1921	}
1922
1923	return features;
1924}
1925
1926static int netvsc_set_features(struct net_device *ndev,
1927			       netdev_features_t features)
1928{
1929	netdev_features_t change = features ^ ndev->features;
1930	struct net_device_context *ndevctx = netdev_priv(ndev);
1931	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1932	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
1933	struct ndis_offload_params offloads;
1934	int ret = 0;
1935
1936	if (!nvdev || nvdev->destroy)
1937		return -ENODEV;
1938
1939	if (!(change & NETIF_F_LRO))
1940		goto syncvf;
1941
1942	memset(&offloads, 0, sizeof(struct ndis_offload_params));
1943
1944	if (features & NETIF_F_LRO) {
1945		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1946		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1947	} else {
1948		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1949		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1950	}
1951
1952	ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
1953
1954	if (ret) {
1955		features ^= NETIF_F_LRO;
1956		ndev->features = features;
1957	}
1958
1959syncvf:
1960	if (!vf_netdev)
1961		return ret;
1962
1963	vf_netdev->wanted_features = features;
1964	netdev_update_features(vf_netdev);
1965
1966	return ret;
1967}
1968
1969static int netvsc_get_regs_len(struct net_device *netdev)
1970{
1971	return VRSS_SEND_TAB_SIZE * sizeof(u32);
1972}
1973
1974static void netvsc_get_regs(struct net_device *netdev,
1975			    struct ethtool_regs *regs, void *p)
1976{
1977	struct net_device_context *ndc = netdev_priv(netdev);
1978	u32 *regs_buff = p;
1979
1980	/* increase the version, if buffer format is changed. */
1981	regs->version = 1;
1982
1983	memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32));
1984}
1985
1986static u32 netvsc_get_msglevel(struct net_device *ndev)
1987{
1988	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1989
1990	return ndev_ctx->msg_enable;
1991}
1992
1993static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
1994{
1995	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1996
1997	ndev_ctx->msg_enable = val;
1998}
1999
2000static const struct ethtool_ops ethtool_ops = {
2001	.get_drvinfo	= netvsc_get_drvinfo,
2002	.get_regs_len	= netvsc_get_regs_len,
2003	.get_regs	= netvsc_get_regs,
2004	.get_msglevel	= netvsc_get_msglevel,
2005	.set_msglevel	= netvsc_set_msglevel,
2006	.get_link	= ethtool_op_get_link,
2007	.get_ethtool_stats = netvsc_get_ethtool_stats,
2008	.get_sset_count = netvsc_get_sset_count,
2009	.get_strings	= netvsc_get_strings,
2010	.get_channels   = netvsc_get_channels,
2011	.set_channels   = netvsc_set_channels,
2012	.get_ts_info	= ethtool_op_get_ts_info,
2013	.get_rxnfc	= netvsc_get_rxnfc,
2014	.set_rxnfc	= netvsc_set_rxnfc,
2015	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
2016	.get_rxfh_indir_size = netvsc_rss_indir_size,
2017	.get_rxfh	= netvsc_get_rxfh,
2018	.set_rxfh	= netvsc_set_rxfh,
2019	.get_link_ksettings = netvsc_get_link_ksettings,
2020	.set_link_ksettings = netvsc_set_link_ksettings,
2021	.get_ringparam	= netvsc_get_ringparam,
2022	.set_ringparam	= netvsc_set_ringparam,
2023};
2024
2025static const struct net_device_ops device_ops = {
2026	.ndo_open =			netvsc_open,
2027	.ndo_stop =			netvsc_close,
2028	.ndo_start_xmit =		netvsc_start_xmit,
2029	.ndo_change_rx_flags =		netvsc_change_rx_flags,
2030	.ndo_set_rx_mode =		netvsc_set_rx_mode,
2031	.ndo_fix_features =		netvsc_fix_features,
2032	.ndo_set_features =		netvsc_set_features,
2033	.ndo_change_mtu =		netvsc_change_mtu,
2034	.ndo_validate_addr =		eth_validate_addr,
2035	.ndo_set_mac_address =		netvsc_set_mac_addr,
2036	.ndo_select_queue =		netvsc_select_queue,
2037	.ndo_get_stats64 =		netvsc_get_stats64,
2038	.ndo_bpf =			netvsc_bpf,
2039	.ndo_xdp_xmit =			netvsc_ndoxdp_xmit,
2040};
2041
2042/*
2043 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
2044 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
2045 * present send GARP packet to network peers with netif_notify_peers().
2046 */
2047static void netvsc_link_change(struct work_struct *w)
2048{
2049	struct net_device_context *ndev_ctx =
2050		container_of(w, struct net_device_context, dwork.work);
2051	struct hv_device *device_obj = ndev_ctx->device_ctx;
2052	struct net_device *net = hv_get_drvdata(device_obj);
2053	unsigned long flags, next_reconfig, delay;
2054	struct netvsc_reconfig *event = NULL;
2055	struct netvsc_device *net_device;
2056	struct rndis_device *rdev;
2057	bool reschedule = false;
 
 
2058
2059	/* if changes are happening, comeback later */
2060	if (!rtnl_trylock()) {
2061		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
2062		return;
2063	}
2064
2065	net_device = rtnl_dereference(ndev_ctx->nvdev);
2066	if (!net_device)
2067		goto out_unlock;
2068
2069	rdev = net_device->extension;
2070
2071	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
2072	if (time_is_after_jiffies(next_reconfig)) {
2073		/* link_watch only sends one notification with current state
2074		 * per second, avoid doing reconfig more frequently. Handle
2075		 * wrap around.
2076		 */
2077		delay = next_reconfig - jiffies;
2078		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
2079		schedule_delayed_work(&ndev_ctx->dwork, delay);
2080		goto out_unlock;
2081	}
2082	ndev_ctx->last_reconfig = jiffies;
2083
2084	spin_lock_irqsave(&ndev_ctx->lock, flags);
2085	if (!list_empty(&ndev_ctx->reconfig_events)) {
2086		event = list_first_entry(&ndev_ctx->reconfig_events,
2087					 struct netvsc_reconfig, list);
2088		list_del(&event->list);
2089		reschedule = !list_empty(&ndev_ctx->reconfig_events);
2090	}
2091	spin_unlock_irqrestore(&ndev_ctx->lock, flags);
2092
2093	if (!event)
2094		goto out_unlock;
2095
2096	switch (event->event) {
2097		/* Only the following events are possible due to the check in
2098		 * netvsc_linkstatus_callback()
2099		 */
2100	case RNDIS_STATUS_MEDIA_CONNECT:
2101		if (rdev->link_state) {
2102			rdev->link_state = false;
2103			netif_carrier_on(net);
2104			netvsc_tx_enable(net_device, net);
2105		} else {
2106			__netdev_notify_peers(net);
2107		}
2108		kfree(event);
2109		break;
2110	case RNDIS_STATUS_MEDIA_DISCONNECT:
2111		if (!rdev->link_state) {
2112			rdev->link_state = true;
2113			netif_carrier_off(net);
2114			netvsc_tx_disable(net_device, net);
2115		}
2116		kfree(event);
2117		break;
2118	case RNDIS_STATUS_NETWORK_CHANGE:
2119		/* Only makes sense if carrier is present */
2120		if (!rdev->link_state) {
2121			rdev->link_state = true;
2122			netif_carrier_off(net);
2123			netvsc_tx_disable(net_device, net);
2124			event->event = RNDIS_STATUS_MEDIA_CONNECT;
2125			spin_lock_irqsave(&ndev_ctx->lock, flags);
2126			list_add(&event->list, &ndev_ctx->reconfig_events);
2127			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
2128			reschedule = true;
2129		}
2130		break;
2131	}
2132
2133	rtnl_unlock();
2134
 
 
 
2135	/* link_watch only sends one notification with current state per
2136	 * second, handle next reconfig event in 2 seconds.
2137	 */
2138	if (reschedule)
2139		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
2140
2141	return;
2142
2143out_unlock:
2144	rtnl_unlock();
2145}
2146
2147static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
2148{
2149	struct net_device_context *net_device_ctx;
2150	struct net_device *dev;
2151
2152	dev = netdev_master_upper_dev_get(vf_netdev);
2153	if (!dev || dev->netdev_ops != &device_ops)
2154		return NULL;	/* not a netvsc device */
2155
2156	net_device_ctx = netdev_priv(dev);
2157	if (!rtnl_dereference(net_device_ctx->nvdev))
2158		return NULL;	/* device is removed */
2159
2160	return dev;
2161}
2162
2163/* Called when VF is injecting data into network stack.
2164 * Change the associated network device from VF to netvsc.
2165 * note: already called with rcu_read_lock
2166 */
2167static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
2168{
2169	struct sk_buff *skb = *pskb;
2170	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
2171	struct net_device_context *ndev_ctx = netdev_priv(ndev);
2172	struct netvsc_vf_pcpu_stats *pcpu_stats
2173		 = this_cpu_ptr(ndev_ctx->vf_stats);
2174
2175	skb = skb_share_check(skb, GFP_ATOMIC);
2176	if (unlikely(!skb))
2177		return RX_HANDLER_CONSUMED;
2178
2179	*pskb = skb;
2180
2181	skb->dev = ndev;
2182
2183	u64_stats_update_begin(&pcpu_stats->syncp);
2184	pcpu_stats->rx_packets++;
2185	pcpu_stats->rx_bytes += skb->len;
2186	u64_stats_update_end(&pcpu_stats->syncp);
2187
2188	return RX_HANDLER_ANOTHER;
2189}
2190
2191static int netvsc_vf_join(struct net_device *vf_netdev,
2192			  struct net_device *ndev, int context)
2193{
2194	struct net_device_context *ndev_ctx = netdev_priv(ndev);
2195	int ret;
2196
2197	ret = netdev_rx_handler_register(vf_netdev,
2198					 netvsc_vf_handle_frame, ndev);
2199	if (ret != 0) {
2200		netdev_err(vf_netdev,
2201			   "can not register netvsc VF receive handler (err = %d)\n",
2202			   ret);
2203		goto rx_handler_failed;
2204	}
2205
2206	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
2207					   NULL, NULL, NULL);
2208	if (ret != 0) {
2209		netdev_err(vf_netdev,
2210			   "can not set master device %s (err = %d)\n",
2211			   ndev->name, ret);
2212		goto upper_link_failed;
2213	}
2214
2215	/* If this registration is called from probe context vf_takeover
2216	 * is taken care of later in probe itself.
2217	 */
2218	if (context == VF_REG_IN_NOTIFIER)
2219		schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
2220
2221	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
2222
2223	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
2224	return 0;
2225
2226upper_link_failed:
2227	netdev_rx_handler_unregister(vf_netdev);
2228rx_handler_failed:
2229	return ret;
2230}
2231
2232static void __netvsc_vf_setup(struct net_device *ndev,
2233			      struct net_device *vf_netdev)
2234{
2235	int ret;
2236
2237	/* Align MTU of VF with master */
2238	ret = dev_set_mtu(vf_netdev, ndev->mtu);
2239	if (ret)
2240		netdev_warn(vf_netdev,
2241			    "unable to change mtu to %u\n", ndev->mtu);
2242
2243	/* set multicast etc flags on VF */
2244	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);
2245
2246	/* sync address list from ndev to VF */
2247	netif_addr_lock_bh(ndev);
2248	dev_uc_sync(vf_netdev, ndev);
2249	dev_mc_sync(vf_netdev, ndev);
2250	netif_addr_unlock_bh(ndev);
2251
2252	if (netif_running(ndev)) {
2253		ret = dev_open(vf_netdev, NULL);
2254		if (ret)
2255			netdev_warn(vf_netdev,
2256				    "unable to open: %d\n", ret);
2257	}
2258}
2259
2260/* Setup VF as slave of the synthetic device.
2261 * Runs in workqueue to avoid recursion in netlink callbacks.
2262 */
2263static void netvsc_vf_setup(struct work_struct *w)
2264{
2265	struct net_device_context *ndev_ctx
2266		= container_of(w, struct net_device_context, vf_takeover.work);
2267	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2268	struct net_device *vf_netdev;
2269
2270	if (!rtnl_trylock()) {
2271		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
2272		return;
2273	}
2274
2275	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2276	if (vf_netdev)
2277		__netvsc_vf_setup(ndev, vf_netdev);
2278
2279	rtnl_unlock();
2280}
2281
2282/* Find netvsc by VF serial number.
2283 * The PCI hyperv controller records the serial number as the slot kobj name.
2284 */
2285static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
2286{
2287	struct device *parent = vf_netdev->dev.parent;
2288	struct net_device_context *ndev_ctx;
2289	struct net_device *ndev;
2290	struct pci_dev *pdev;
2291	u32 serial;
2292
2293	if (!parent || !dev_is_pci(parent))
2294		return NULL; /* not a PCI device */
2295
2296	pdev = to_pci_dev(parent);
2297	if (!pdev->slot) {
2298		netdev_notice(vf_netdev, "no PCI slot information\n");
2299		return NULL;
2300	}
2301
2302	if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
2303		netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
2304			      pci_slot_name(pdev->slot));
2305		return NULL;
2306	}
2307
2308	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2309		if (!ndev_ctx->vf_alloc)
2310			continue;
2311
2312		if (ndev_ctx->vf_serial != serial)
2313			continue;
2314
2315		ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2316		if (ndev->addr_len != vf_netdev->addr_len ||
2317		    memcmp(ndev->perm_addr, vf_netdev->perm_addr,
2318			   ndev->addr_len) != 0)
2319			continue;
2320
2321		return ndev;
2322
2323	}
2324
2325	/* Fallback path to check synthetic vf with help of mac addr.
2326	 * Because this function can be called before vf_netdev is
2327	 * initialized (NETDEV_POST_INIT) when its perm_addr has not been copied
2328	 * from dev_addr, also try to match to its dev_addr.
2329	 * Note: On Hyper-V and Azure, it's not possible to set a MAC address
2330	 * on a VF that matches to the MAC of a unrelated NETVSC device.
2331	 */
2332	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2333		ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2334		if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr) ||
2335		    ether_addr_equal(vf_netdev->dev_addr, ndev->perm_addr))
2336			return ndev;
2337	}
2338
2339	netdev_notice(vf_netdev,
2340		      "no netdev found for vf serial:%u\n", serial);
2341	return NULL;
2342}
2343
2344static int netvsc_prepare_bonding(struct net_device *vf_netdev)
2345{
2346	struct net_device *ndev;
2347
2348	ndev = get_netvsc_byslot(vf_netdev);
2349	if (!ndev)
2350		return NOTIFY_DONE;
2351
2352	/* set slave flag before open to prevent IPv6 addrconf */
2353	vf_netdev->flags |= IFF_SLAVE;
2354	return NOTIFY_DONE;
2355}
2356
2357static int netvsc_register_vf(struct net_device *vf_netdev, int context)
2358{
2359	struct net_device_context *net_device_ctx;
2360	struct netvsc_device *netvsc_dev;
2361	struct bpf_prog *prog;
2362	struct net_device *ndev;
2363	int ret;
2364
2365	if (vf_netdev->addr_len != ETH_ALEN)
2366		return NOTIFY_DONE;
2367
2368	ndev = get_netvsc_byslot(vf_netdev);
2369	if (!ndev)
2370		return NOTIFY_DONE;
2371
2372	net_device_ctx = netdev_priv(ndev);
2373	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2374	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
2375		return NOTIFY_DONE;
2376
2377	/* if synthetic interface is a different namespace,
2378	 * then move the VF to that namespace; join will be
2379	 * done again in that context.
2380	 */
2381	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
2382		ret = dev_change_net_namespace(vf_netdev,
2383					       dev_net(ndev), "eth%d");
2384		if (ret)
2385			netdev_err(vf_netdev,
2386				   "could not move to same namespace as %s: %d\n",
2387				   ndev->name, ret);
2388		else
2389			netdev_info(vf_netdev,
2390				    "VF moved to namespace with: %s\n",
2391				    ndev->name);
2392		return NOTIFY_DONE;
2393	}
2394
2395	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
2396
2397	if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
2398		return NOTIFY_DONE;
2399
2400	dev_hold(vf_netdev);
2401	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
2402
2403	if (ndev->needed_headroom < vf_netdev->needed_headroom)
2404		ndev->needed_headroom = vf_netdev->needed_headroom;
2405
2406	vf_netdev->wanted_features = ndev->features;
2407	netdev_update_features(vf_netdev);
2408
2409	prog = netvsc_xdp_get(netvsc_dev);
2410	netvsc_vf_setxdp(vf_netdev, prog);
2411
2412	return NOTIFY_OK;
2413}
2414
2415/* Change the data path when VF UP/DOWN/CHANGE are detected.
2416 *
2417 * Typically a UP or DOWN event is followed by a CHANGE event, so
2418 * net_device_ctx->data_path_is_vf is used to cache the current data path
2419 * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate
2420 * message.
2421 *
2422 * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
2423 * interface, there is only the CHANGE event and no UP or DOWN event.
2424 */
2425static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
2426{
2427	struct net_device_context *net_device_ctx;
2428	struct netvsc_device *netvsc_dev;
2429	struct net_device *ndev;
2430	bool vf_is_up = false;
2431	int ret;
2432
2433	if (event != NETDEV_GOING_DOWN)
2434		vf_is_up = netif_running(vf_netdev);
2435
2436	ndev = get_netvsc_byref(vf_netdev);
2437	if (!ndev)
2438		return NOTIFY_DONE;
2439
2440	net_device_ctx = netdev_priv(ndev);
2441	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2442	if (!netvsc_dev)
2443		return NOTIFY_DONE;
2444
2445	if (net_device_ctx->data_path_is_vf == vf_is_up)
2446		return NOTIFY_OK;
 
2447
2448	if (vf_is_up && !net_device_ctx->vf_alloc) {
2449		netdev_info(ndev, "Waiting for the VF association from host\n");
2450		wait_for_completion(&net_device_ctx->vf_add);
2451	}
2452
2453	ret = netvsc_switch_datapath(ndev, vf_is_up);
2454
2455	if (ret) {
2456		netdev_err(ndev,
2457			   "Data path failed to switch %s VF: %s, err: %d\n",
2458			   vf_is_up ? "to" : "from", vf_netdev->name, ret);
2459		return NOTIFY_DONE;
2460	} else {
2461		netdev_info(ndev, "Data path switched %s VF: %s\n",
2462			    vf_is_up ? "to" : "from", vf_netdev->name);
2463	}
2464
2465	return NOTIFY_OK;
2466}
2467
2468static int netvsc_unregister_vf(struct net_device *vf_netdev)
2469{
2470	struct net_device *ndev;
2471	struct net_device_context *net_device_ctx;
2472
2473	ndev = get_netvsc_byref(vf_netdev);
2474	if (!ndev)
2475		return NOTIFY_DONE;
2476
2477	net_device_ctx = netdev_priv(ndev);
2478	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
2479
2480	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
2481
2482	netvsc_vf_setxdp(vf_netdev, NULL);
2483
2484	reinit_completion(&net_device_ctx->vf_add);
2485	netdev_rx_handler_unregister(vf_netdev);
2486	netdev_upper_dev_unlink(vf_netdev, ndev);
2487	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
2488	dev_put(vf_netdev);
2489
2490	ndev->needed_headroom = RNDIS_AND_PPI_SIZE;
2491
2492	return NOTIFY_OK;
2493}
2494
2495static int check_dev_is_matching_vf(struct net_device *event_ndev)
2496{
2497	/* Skip NetVSC interfaces */
2498	if (event_ndev->netdev_ops == &device_ops)
2499		return -ENODEV;
2500
2501	/* Avoid non-Ethernet type devices */
2502	if (event_ndev->type != ARPHRD_ETHER)
2503		return -ENODEV;
2504
2505	/* Avoid Vlan dev with same MAC registering as VF */
2506	if (is_vlan_dev(event_ndev))
2507		return -ENODEV;
2508
2509	/* Avoid Bonding master dev with same MAC registering as VF */
2510	if (netif_is_bond_master(event_ndev))
2511		return -ENODEV;
2512
2513	return 0;
2514}
2515
2516static int netvsc_probe(struct hv_device *dev,
2517			const struct hv_vmbus_device_id *dev_id)
2518{
2519	struct net_device *net = NULL, *vf_netdev;
2520	struct net_device_context *net_device_ctx;
2521	struct netvsc_device_info *device_info = NULL;
2522	struct netvsc_device *nvdev;
2523	int ret = -ENOMEM;
2524
2525	net = alloc_etherdev_mq(sizeof(struct net_device_context),
2526				VRSS_CHANNEL_MAX);
2527	if (!net)
2528		goto no_net;
2529
2530	netif_carrier_off(net);
2531
2532	netvsc_init_settings(net);
2533
2534	net_device_ctx = netdev_priv(net);
2535	net_device_ctx->device_ctx = dev;
2536	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
2537	if (netif_msg_probe(net_device_ctx))
2538		netdev_dbg(net, "netvsc msg_enable: %d\n",
2539			   net_device_ctx->msg_enable);
2540
2541	hv_set_drvdata(dev, net);
2542
2543	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
2544
2545	init_completion(&net_device_ctx->vf_add);
2546	spin_lock_init(&net_device_ctx->lock);
2547	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
2548	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
2549
2550	net_device_ctx->vf_stats
2551		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
2552	if (!net_device_ctx->vf_stats)
2553		goto no_stats;
2554
2555	net->netdev_ops = &device_ops;
2556	net->ethtool_ops = &ethtool_ops;
2557	SET_NETDEV_DEV(net, &dev->device);
2558	dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1);
2559
2560	/* We always need headroom for rndis header */
2561	net->needed_headroom = RNDIS_AND_PPI_SIZE;
2562
2563	/* Initialize the number of queues to be 1, we may change it if more
2564	 * channels are offered later.
2565	 */
2566	netif_set_real_num_tx_queues(net, 1);
2567	netif_set_real_num_rx_queues(net, 1);
2568
2569	/* Notify the netvsc driver of the new device */
2570	device_info = netvsc_devinfo_get(NULL);
2571
2572	if (!device_info) {
2573		ret = -ENOMEM;
2574		goto devinfo_failed;
2575	}
2576
 
 
 
 
 
 
 
 
 
2577	/* We must get rtnl lock before scheduling nvdev->subchan_work,
2578	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
2579	 * all subchannels to show up, but that may not happen because
2580	 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
2581	 * -> ... -> device_add() -> ... -> __device_attach() can't get
2582	 * the device lock, so all the subchannels can't be processed --
2583	 * finally netvsc_subchan_work() hangs forever.
2584	 *
2585	 * The rtnl lock also needs to be held before rndis_filter_device_add()
2586	 * which advertises nvsp_2_vsc_capability / sriov bit, and triggers
2587	 * VF NIC offering and registering. If VF NIC finished register_netdev()
2588	 * earlier it may cause name based config failure.
2589	 */
2590	rtnl_lock();
2591
2592	nvdev = rndis_filter_device_add(dev, device_info);
2593	if (IS_ERR(nvdev)) {
2594		ret = PTR_ERR(nvdev);
2595		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
2596		goto rndis_failed;
2597	}
2598
2599	eth_hw_addr_set(net, device_info->mac_adr);
2600
2601	if (nvdev->num_chn > 1)
2602		schedule_work(&nvdev->subchan_work);
2603
2604	/* hw_features computed in rndis_netdev_set_hwcaps() */
2605	net->features = net->hw_features |
2606		NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX |
2607		NETIF_F_HW_VLAN_CTAG_RX;
2608	net->vlan_features = net->features;
2609
2610	netdev_lockdep_set_classes(net);
2611
2612	net->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
2613			    NETDEV_XDP_ACT_NDO_XMIT;
2614
2615	/* MTU range: 68 - 1500 or 65521 */
2616	net->min_mtu = NETVSC_MTU_MIN;
2617	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
2618		net->max_mtu = NETVSC_MTU - ETH_HLEN;
2619	else
2620		net->max_mtu = ETH_DATA_LEN;
2621
2622	nvdev->tx_disable = false;
2623
2624	ret = register_netdevice(net);
2625	if (ret != 0) {
2626		pr_err("Unable to register netdev.\n");
2627		goto register_failed;
2628	}
2629
2630	list_add(&net_device_ctx->list, &netvsc_dev_list);
2631
2632	/* When the hv_netvsc driver is unloaded and reloaded, the
2633	 * NET_DEVICE_REGISTER for the vf device is replayed before probe
2634	 * is complete. This is because register_netdevice_notifier() gets
2635	 * registered before vmbus_driver_register() so that callback func
2636	 * is set before probe and we don't miss events like NETDEV_POST_INIT
2637	 * So, in this section we try to register the matching vf device that
2638	 * is present as a netdevice, knowing that its register call is not
2639	 * processed in the netvsc_netdev_notifier(as probing is progress and
2640	 * get_netvsc_byslot fails).
2641	 */
2642	for_each_netdev(dev_net(net), vf_netdev) {
2643		ret = check_dev_is_matching_vf(vf_netdev);
2644		if (ret != 0)
2645			continue;
2646
2647		if (net != get_netvsc_byslot(vf_netdev))
2648			continue;
2649
2650		netvsc_prepare_bonding(vf_netdev);
2651		netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
2652		__netvsc_vf_setup(net, vf_netdev);
2653		break;
2654	}
2655	rtnl_unlock();
2656
2657	netvsc_devinfo_put(device_info);
2658	return 0;
2659
2660register_failed:
 
2661	rndis_filter_device_remove(dev, nvdev);
2662rndis_failed:
2663	rtnl_unlock();
2664	netvsc_devinfo_put(device_info);
2665devinfo_failed:
2666	free_percpu(net_device_ctx->vf_stats);
2667no_stats:
2668	hv_set_drvdata(dev, NULL);
2669	free_netdev(net);
2670no_net:
2671	return ret;
2672}
2673
2674static void netvsc_remove(struct hv_device *dev)
2675{
2676	struct net_device_context *ndev_ctx;
2677	struct net_device *vf_netdev, *net;
2678	struct netvsc_device *nvdev;
2679
2680	net = hv_get_drvdata(dev);
2681	if (net == NULL) {
2682		dev_err(&dev->device, "No net device to remove\n");
2683		return;
2684	}
2685
2686	ndev_ctx = netdev_priv(net);
2687
2688	cancel_delayed_work_sync(&ndev_ctx->dwork);
2689
2690	rtnl_lock();
2691	nvdev = rtnl_dereference(ndev_ctx->nvdev);
2692	if (nvdev) {
2693		cancel_work_sync(&nvdev->subchan_work);
2694		netvsc_xdp_set(net, NULL, NULL, nvdev);
2695	}
2696
2697	/*
2698	 * Call to the vsc driver to let it know that the device is being
2699	 * removed. Also blocks mtu and channel changes.
2700	 */
2701	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2702	if (vf_netdev)
2703		netvsc_unregister_vf(vf_netdev);
2704
2705	if (nvdev)
2706		rndis_filter_device_remove(dev, nvdev);
2707
2708	unregister_netdevice(net);
2709	list_del(&ndev_ctx->list);
2710
2711	rtnl_unlock();
2712
2713	hv_set_drvdata(dev, NULL);
2714
2715	free_percpu(ndev_ctx->vf_stats);
2716	free_netdev(net);
 
2717}
2718
2719static int netvsc_suspend(struct hv_device *dev)
2720{
2721	struct net_device_context *ndev_ctx;
2722	struct netvsc_device *nvdev;
2723	struct net_device *net;
2724	int ret;
2725
2726	net = hv_get_drvdata(dev);
2727
2728	ndev_ctx = netdev_priv(net);
2729	cancel_delayed_work_sync(&ndev_ctx->dwork);
2730
2731	rtnl_lock();
2732
2733	nvdev = rtnl_dereference(ndev_ctx->nvdev);
2734	if (nvdev == NULL) {
2735		ret = -ENODEV;
2736		goto out;
2737	}
2738
2739	/* Save the current config info */
2740	ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
2741	if (!ndev_ctx->saved_netvsc_dev_info) {
2742		ret = -ENOMEM;
2743		goto out;
2744	}
2745	ret = netvsc_detach(net, nvdev);
2746out:
2747	rtnl_unlock();
2748
2749	return ret;
2750}
2751
2752static int netvsc_resume(struct hv_device *dev)
2753{
2754	struct net_device *net = hv_get_drvdata(dev);
2755	struct net_device_context *net_device_ctx;
2756	struct netvsc_device_info *device_info;
2757	int ret;
2758
2759	rtnl_lock();
2760
2761	net_device_ctx = netdev_priv(net);
2762
2763	/* Reset the data path to the netvsc NIC before re-opening the vmbus
2764	 * channel. Later netvsc_netdev_event() will switch the data path to
2765	 * the VF upon the UP or CHANGE event.
2766	 */
2767	net_device_ctx->data_path_is_vf = false;
2768	device_info = net_device_ctx->saved_netvsc_dev_info;
2769
2770	ret = netvsc_attach(net, device_info);
2771
2772	netvsc_devinfo_put(device_info);
2773	net_device_ctx->saved_netvsc_dev_info = NULL;
2774
2775	rtnl_unlock();
2776
2777	return ret;
2778}
2779static const struct hv_vmbus_device_id id_table[] = {
2780	/* Network guid */
2781	{ HV_NIC_GUID, },
2782	{ },
2783};
2784
2785MODULE_DEVICE_TABLE(vmbus, id_table);
2786
2787/* The one and only one */
2788static struct  hv_driver netvsc_drv = {
2789	.name = KBUILD_MODNAME,
2790	.id_table = id_table,
2791	.probe = netvsc_probe,
2792	.remove = netvsc_remove,
2793	.suspend = netvsc_suspend,
2794	.resume = netvsc_resume,
2795	.driver = {
2796		.probe_type = PROBE_FORCE_SYNCHRONOUS,
2797	},
2798};
2799
2800/*
2801 * On Hyper-V, every VF interface is matched with a corresponding
2802 * synthetic interface. The synthetic interface is presented first
2803 * to the guest. When the corresponding VF instance is registered,
2804 * we will take care of switching the data path.
2805 */
2806static int netvsc_netdev_event(struct notifier_block *this,
2807			       unsigned long event, void *ptr)
2808{
2809	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2810	int ret = 0;
2811
2812	ret = check_dev_is_matching_vf(event_dev);
2813	if (ret != 0)
 
 
 
 
 
 
 
 
 
 
 
 
 
2814		return NOTIFY_DONE;
2815
2816	switch (event) {
2817	case NETDEV_POST_INIT:
2818		return netvsc_prepare_bonding(event_dev);
2819	case NETDEV_REGISTER:
2820		return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
2821	case NETDEV_UNREGISTER:
2822		return netvsc_unregister_vf(event_dev);
2823	case NETDEV_UP:
2824	case NETDEV_DOWN:
2825	case NETDEV_CHANGE:
2826	case NETDEV_GOING_DOWN:
2827		return netvsc_vf_changed(event_dev, event);
2828	default:
2829		return NOTIFY_DONE;
2830	}
2831}
2832
2833static struct notifier_block netvsc_netdev_notifier = {
2834	.notifier_call = netvsc_netdev_event,
2835};
2836
2837static void __exit netvsc_drv_exit(void)
2838{
2839	unregister_netdevice_notifier(&netvsc_netdev_notifier);
2840	vmbus_driver_unregister(&netvsc_drv);
2841}
2842
2843static int __init netvsc_drv_init(void)
2844{
2845	int ret;
2846
2847	if (ring_size < RING_SIZE_MIN) {
2848		ring_size = RING_SIZE_MIN;
2849		pr_info("Increased ring_size to %u (min allowed)\n",
2850			ring_size);
2851	}
2852	netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
2853
2854	register_netdevice_notifier(&netvsc_netdev_notifier);
2855
2856	ret = vmbus_driver_register(&netvsc_drv);
2857	if (ret)
2858		goto err_vmbus_reg;
2859
 
2860	return 0;
2861
2862err_vmbus_reg:
2863	unregister_netdevice_notifier(&netvsc_netdev_notifier);
2864	return ret;
2865}
2866
2867MODULE_LICENSE("GPL");
2868MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
2869
2870module_init(netvsc_drv_init);
2871module_exit(netvsc_drv_exit);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2009, Microsoft Corporation.
   4 *
   5 * Authors:
   6 *   Haiyang Zhang <haiyangz@microsoft.com>
   7 *   Hank Janssen  <hjanssen@microsoft.com>
   8 */
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/init.h>
  12#include <linux/atomic.h>
 
  13#include <linux/module.h>
  14#include <linux/highmem.h>
  15#include <linux/device.h>
  16#include <linux/io.h>
  17#include <linux/delay.h>
  18#include <linux/netdevice.h>
  19#include <linux/inetdevice.h>
  20#include <linux/etherdevice.h>
  21#include <linux/pci.h>
  22#include <linux/skbuff.h>
  23#include <linux/if_vlan.h>
  24#include <linux/in.h>
  25#include <linux/slab.h>
  26#include <linux/rtnetlink.h>
  27#include <linux/netpoll.h>
  28#include <linux/bpf.h>
  29
  30#include <net/arp.h>
  31#include <net/route.h>
  32#include <net/sock.h>
  33#include <net/pkt_sched.h>
  34#include <net/checksum.h>
  35#include <net/ip6_checksum.h>
  36
  37#include "hyperv_net.h"
  38
  39#define RING_SIZE_MIN	64
  40#define RETRY_US_LO	5000
  41#define RETRY_US_HI	10000
  42#define RETRY_MAX	2000	/* >10 sec */
  43
  44#define LINKCHANGE_INT (2 * HZ)
  45#define VF_TAKEOVER_INT (HZ / 10)
  46
 
 
 
 
  47static unsigned int ring_size __ro_after_init = 128;
  48module_param(ring_size, uint, 0444);
  49MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
  50unsigned int netvsc_ring_bytes __ro_after_init;
  51
  52static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
  53				NETIF_MSG_LINK | NETIF_MSG_IFUP |
  54				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
  55				NETIF_MSG_TX_ERR;
  56
  57static int debug = -1;
  58module_param(debug, int, 0444);
  59MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  60
  61static LIST_HEAD(netvsc_dev_list);
  62
  63static void netvsc_change_rx_flags(struct net_device *net, int change)
  64{
  65	struct net_device_context *ndev_ctx = netdev_priv(net);
  66	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
  67	int inc;
  68
  69	if (!vf_netdev)
  70		return;
  71
  72	if (change & IFF_PROMISC) {
  73		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
  74		dev_set_promiscuity(vf_netdev, inc);
  75	}
  76
  77	if (change & IFF_ALLMULTI) {
  78		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
  79		dev_set_allmulti(vf_netdev, inc);
  80	}
  81}
  82
  83static void netvsc_set_rx_mode(struct net_device *net)
  84{
  85	struct net_device_context *ndev_ctx = netdev_priv(net);
  86	struct net_device *vf_netdev;
  87	struct netvsc_device *nvdev;
  88
  89	rcu_read_lock();
  90	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
  91	if (vf_netdev) {
  92		dev_uc_sync(vf_netdev, net);
  93		dev_mc_sync(vf_netdev, net);
  94	}
  95
  96	nvdev = rcu_dereference(ndev_ctx->nvdev);
  97	if (nvdev)
  98		rndis_filter_update(nvdev);
  99	rcu_read_unlock();
 100}
 101
 102static void netvsc_tx_enable(struct netvsc_device *nvscdev,
 103			     struct net_device *ndev)
 104{
 105	nvscdev->tx_disable = false;
 106	virt_wmb(); /* ensure queue wake up mechanism is on */
 107
 108	netif_tx_wake_all_queues(ndev);
 109}
 110
 111static int netvsc_open(struct net_device *net)
 112{
 113	struct net_device_context *ndev_ctx = netdev_priv(net);
 114	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
 115	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
 116	struct rndis_device *rdev;
 117	int ret = 0;
 118
 119	netif_carrier_off(net);
 120
 121	/* Open up the device */
 122	ret = rndis_filter_open(nvdev);
 123	if (ret != 0) {
 124		netdev_err(net, "unable to open device (ret %d).\n", ret);
 125		return ret;
 126	}
 127
 128	rdev = nvdev->extension;
 129	if (!rdev->link_state) {
 130		netif_carrier_on(net);
 131		netvsc_tx_enable(nvdev, net);
 132	}
 133
 134	if (vf_netdev) {
 135		/* Setting synthetic device up transparently sets
 136		 * slave as up. If open fails, then slave will be
 137		 * still be offline (and not used).
 138		 */
 139		ret = dev_open(vf_netdev, NULL);
 140		if (ret)
 141			netdev_warn(net,
 142				    "unable to open slave: %s: %d\n",
 143				    vf_netdev->name, ret);
 144	}
 145	return 0;
 146}
 147
 148static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
 149{
 150	unsigned int retry = 0;
 151	int i;
 152
 153	/* Ensure pending bytes in ring are read */
 154	for (;;) {
 155		u32 aread = 0;
 156
 157		for (i = 0; i < nvdev->num_chn; i++) {
 158			struct vmbus_channel *chn
 159				= nvdev->chan_table[i].channel;
 160
 161			if (!chn)
 162				continue;
 163
 164			/* make sure receive not running now */
 165			napi_synchronize(&nvdev->chan_table[i].napi);
 166
 167			aread = hv_get_bytes_to_read(&chn->inbound);
 168			if (aread)
 169				break;
 170
 171			aread = hv_get_bytes_to_read(&chn->outbound);
 172			if (aread)
 173				break;
 174		}
 175
 176		if (aread == 0)
 177			return 0;
 178
 179		if (++retry > RETRY_MAX)
 180			return -ETIMEDOUT;
 181
 182		usleep_range(RETRY_US_LO, RETRY_US_HI);
 183	}
 184}
 185
 186static void netvsc_tx_disable(struct netvsc_device *nvscdev,
 187			      struct net_device *ndev)
 188{
 189	if (nvscdev) {
 190		nvscdev->tx_disable = true;
 191		virt_wmb(); /* ensure txq will not wake up after stop */
 192	}
 193
 194	netif_tx_disable(ndev);
 195}
 196
 197static int netvsc_close(struct net_device *net)
 198{
 199	struct net_device_context *net_device_ctx = netdev_priv(net);
 200	struct net_device *vf_netdev
 201		= rtnl_dereference(net_device_ctx->vf_netdev);
 202	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 203	int ret;
 204
 205	netvsc_tx_disable(nvdev, net);
 206
 207	/* No need to close rndis filter if it is removed already */
 208	if (!nvdev)
 209		return 0;
 210
 211	ret = rndis_filter_close(nvdev);
 212	if (ret != 0) {
 213		netdev_err(net, "unable to close device (ret %d).\n", ret);
 214		return ret;
 215	}
 216
 217	ret = netvsc_wait_until_empty(nvdev);
 218	if (ret)
 219		netdev_err(net, "Ring buffer not empty after closing rndis\n");
 220
 221	if (vf_netdev)
 222		dev_close(vf_netdev);
 223
 224	return ret;
 225}
 226
 227static inline void *init_ppi_data(struct rndis_message *msg,
 228				  u32 ppi_size, u32 pkt_type)
 229{
 230	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
 231	struct rndis_per_packet_info *ppi;
 232
 233	rndis_pkt->data_offset += ppi_size;
 234	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
 235		+ rndis_pkt->per_pkt_info_len;
 236
 237	ppi->size = ppi_size;
 238	ppi->type = pkt_type;
 239	ppi->internal = 0;
 240	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
 241
 242	rndis_pkt->per_pkt_info_len += ppi_size;
 243
 244	return ppi + 1;
 245}
 246
 247/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 248 * packets. We can use ethtool to change UDP hash level when necessary.
 249 */
 250static inline u32 netvsc_get_hash(
 251	struct sk_buff *skb,
 252	const struct net_device_context *ndc)
 253{
 254	struct flow_keys flow;
 255	u32 hash, pkt_proto = 0;
 256	static u32 hashrnd __read_mostly;
 257
 258	net_get_random_once(&hashrnd, sizeof(hashrnd));
 259
 260	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
 261		return 0;
 262
 263	switch (flow.basic.ip_proto) {
 264	case IPPROTO_TCP:
 265		if (flow.basic.n_proto == htons(ETH_P_IP))
 266			pkt_proto = HV_TCP4_L4HASH;
 267		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
 268			pkt_proto = HV_TCP6_L4HASH;
 269
 270		break;
 271
 272	case IPPROTO_UDP:
 273		if (flow.basic.n_proto == htons(ETH_P_IP))
 274			pkt_proto = HV_UDP4_L4HASH;
 275		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
 276			pkt_proto = HV_UDP6_L4HASH;
 277
 278		break;
 279	}
 280
 281	if (pkt_proto & ndc->l4_hash) {
 282		return skb_get_hash(skb);
 283	} else {
 284		if (flow.basic.n_proto == htons(ETH_P_IP))
 285			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
 286		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
 287			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
 288		else
 289			return 0;
 290
 291		__skb_set_sw_hash(skb, hash, false);
 292	}
 293
 294	return hash;
 295}
 296
 297static inline int netvsc_get_tx_queue(struct net_device *ndev,
 298				      struct sk_buff *skb, int old_idx)
 299{
 300	const struct net_device_context *ndc = netdev_priv(ndev);
 301	struct sock *sk = skb->sk;
 302	int q_idx;
 303
 304	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
 305			      (VRSS_SEND_TAB_SIZE - 1)];
 306
 307	/* If queue index changed record the new value */
 308	if (q_idx != old_idx &&
 309	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
 310		sk_tx_queue_set(sk, q_idx);
 311
 312	return q_idx;
 313}
 314
 315/*
 316 * Select queue for transmit.
 317 *
 318 * If a valid queue has already been assigned, then use that.
 319 * Otherwise compute tx queue based on hash and the send table.
 320 *
 321 * This is basically similar to default (netdev_pick_tx) with the added step
 322 * of using the host send_table when no other queue has been assigned.
 323 *
 324 * TODO support XPS - but get_xps_queue not exported
 325 */
 326static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
 327{
 328	int q_idx = sk_tx_queue_get(skb->sk);
 329
 330	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
 331		/* If forwarding a packet, we use the recorded queue when
 332		 * available for better cache locality.
 333		 */
 334		if (skb_rx_queue_recorded(skb))
 335			q_idx = skb_get_rx_queue(skb);
 336		else
 337			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
 338	}
 339
 340	return q_idx;
 341}
 342
 343static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 344			       struct net_device *sb_dev)
 345{
 346	struct net_device_context *ndc = netdev_priv(ndev);
 347	struct net_device *vf_netdev;
 348	u16 txq;
 349
 350	rcu_read_lock();
 351	vf_netdev = rcu_dereference(ndc->vf_netdev);
 352	if (vf_netdev) {
 353		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
 354
 355		if (vf_ops->ndo_select_queue)
 356			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
 357		else
 358			txq = netdev_pick_tx(vf_netdev, skb, NULL);
 359
 360		/* Record the queue selected by VF so that it can be
 361		 * used for common case where VF has more queues than
 362		 * the synthetic device.
 363		 */
 364		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
 365	} else {
 366		txq = netvsc_pick_tx(ndev, skb);
 367	}
 368	rcu_read_unlock();
 369
 370	while (txq >= ndev->real_num_tx_queues)
 371		txq -= ndev->real_num_tx_queues;
 372
 373	return txq;
 374}
 375
 376static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
 377		       struct hv_page_buffer *pb)
 378{
 379	int j = 0;
 380
 381	/* Deal with compound pages by ignoring unused part
 382	 * of the page.
 383	 */
 384	page += (offset >> PAGE_SHIFT);
 385	offset &= ~PAGE_MASK;
 386
 387	while (len > 0) {
 388		unsigned long bytes;
 389
 390		bytes = PAGE_SIZE - offset;
 391		if (bytes > len)
 392			bytes = len;
 393		pb[j].pfn = page_to_pfn(page);
 394		pb[j].offset = offset;
 395		pb[j].len = bytes;
 396
 397		offset += bytes;
 398		len -= bytes;
 399
 400		if (offset == PAGE_SIZE && len) {
 401			page++;
 402			offset = 0;
 403			j++;
 404		}
 405	}
 406
 407	return j + 1;
 408}
 409
 410static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 411			   struct hv_netvsc_packet *packet,
 412			   struct hv_page_buffer *pb)
 413{
 414	u32 slots_used = 0;
 415	char *data = skb->data;
 416	int frags = skb_shinfo(skb)->nr_frags;
 417	int i;
 418
 419	/* The packet is laid out thus:
 420	 * 1. hdr: RNDIS header and PPI
 421	 * 2. skb linear data
 422	 * 3. skb fragment data
 423	 */
 424	slots_used += fill_pg_buf(virt_to_page(hdr),
 425				  offset_in_page(hdr),
 426				  len, &pb[slots_used]);
 
 427
 428	packet->rmsg_size = len;
 429	packet->rmsg_pgcnt = slots_used;
 430
 431	slots_used += fill_pg_buf(virt_to_page(data),
 432				offset_in_page(data),
 433				skb_headlen(skb), &pb[slots_used]);
 
 434
 435	for (i = 0; i < frags; i++) {
 436		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 437
 438		slots_used += fill_pg_buf(skb_frag_page(frag),
 439					skb_frag_off(frag),
 440					skb_frag_size(frag), &pb[slots_used]);
 
 441	}
 442	return slots_used;
 443}
 444
 445static int count_skb_frag_slots(struct sk_buff *skb)
 446{
 447	int i, frags = skb_shinfo(skb)->nr_frags;
 448	int pages = 0;
 449
 450	for (i = 0; i < frags; i++) {
 451		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 452		unsigned long size = skb_frag_size(frag);
 453		unsigned long offset = skb_frag_off(frag);
 454
 455		/* Skip unused frames from start of page */
 456		offset &= ~PAGE_MASK;
 457		pages += PFN_UP(offset + size);
 458	}
 459	return pages;
 460}
 461
 462static int netvsc_get_slots(struct sk_buff *skb)
 463{
 464	char *data = skb->data;
 465	unsigned int offset = offset_in_page(data);
 466	unsigned int len = skb_headlen(skb);
 467	int slots;
 468	int frag_slots;
 469
 470	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
 471	frag_slots = count_skb_frag_slots(skb);
 472	return slots + frag_slots;
 473}
 474
 475static u32 net_checksum_info(struct sk_buff *skb)
 476{
 477	if (skb->protocol == htons(ETH_P_IP)) {
 478		struct iphdr *ip = ip_hdr(skb);
 479
 480		if (ip->protocol == IPPROTO_TCP)
 481			return TRANSPORT_INFO_IPV4_TCP;
 482		else if (ip->protocol == IPPROTO_UDP)
 483			return TRANSPORT_INFO_IPV4_UDP;
 484	} else {
 485		struct ipv6hdr *ip6 = ipv6_hdr(skb);
 486
 487		if (ip6->nexthdr == IPPROTO_TCP)
 488			return TRANSPORT_INFO_IPV6_TCP;
 489		else if (ip6->nexthdr == IPPROTO_UDP)
 490			return TRANSPORT_INFO_IPV6_UDP;
 491	}
 492
 493	return TRANSPORT_INFO_NOT_IP;
 494}
 495
 496/* Send skb on the slave VF device. */
 497static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
 498			  struct sk_buff *skb)
 499{
 500	struct net_device_context *ndev_ctx = netdev_priv(net);
 501	unsigned int len = skb->len;
 502	int rc;
 503
 504	skb->dev = vf_netdev;
 505	skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
 506
 507	rc = dev_queue_xmit(skb);
 508	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
 509		struct netvsc_vf_pcpu_stats *pcpu_stats
 510			= this_cpu_ptr(ndev_ctx->vf_stats);
 511
 512		u64_stats_update_begin(&pcpu_stats->syncp);
 513		pcpu_stats->tx_packets++;
 514		pcpu_stats->tx_bytes += len;
 515		u64_stats_update_end(&pcpu_stats->syncp);
 516	} else {
 517		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
 518	}
 519
 520	return rc;
 521}
 522
 523static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
 524{
 525	struct net_device_context *net_device_ctx = netdev_priv(net);
 526	struct hv_netvsc_packet *packet = NULL;
 527	int ret;
 528	unsigned int num_data_pgs;
 529	struct rndis_message *rndis_msg;
 530	struct net_device *vf_netdev;
 531	u32 rndis_msg_size;
 532	u32 hash;
 533	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
 534
 535	/* If VF is present and up then redirect packets to it.
 536	 * Skip the VF if it is marked down or has no carrier.
 537	 * If netpoll is in uses, then VF can not be used either.
 538	 */
 539	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
 540	if (vf_netdev && netif_running(vf_netdev) &&
 541	    netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
 
 542		return netvsc_vf_xmit(net, vf_netdev, skb);
 543
 544	/* We will atmost need two pages to describe the rndis
 545	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
 546	 * of pages in a single packet. If skb is scattered around
 547	 * more pages we try linearizing it.
 548	 */
 549
 550	num_data_pgs = netvsc_get_slots(skb) + 2;
 551
 552	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
 553		++net_device_ctx->eth_stats.tx_scattered;
 554
 555		if (skb_linearize(skb))
 556			goto no_memory;
 557
 558		num_data_pgs = netvsc_get_slots(skb) + 2;
 559		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
 560			++net_device_ctx->eth_stats.tx_too_big;
 561			goto drop;
 562		}
 563	}
 564
 565	/*
 566	 * Place the rndis header in the skb head room and
 567	 * the skb->cb will be used for hv_netvsc_packet
 568	 * structure.
 569	 */
 570	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
 571	if (ret)
 572		goto no_memory;
 573
 574	/* Use the skb control buffer for building up the packet */
 575	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
 576			sizeof_field(struct sk_buff, cb));
 577	packet = (struct hv_netvsc_packet *)skb->cb;
 578
 579	packet->q_idx = skb_get_queue_mapping(skb);
 580
 581	packet->total_data_buflen = skb->len;
 582	packet->total_bytes = skb->len;
 583	packet->total_packets = 1;
 584
 585	rndis_msg = (struct rndis_message *)skb->head;
 586
 587	/* Add the rndis header */
 588	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
 589	rndis_msg->msg_len = packet->total_data_buflen;
 590
 591	rndis_msg->msg.pkt = (struct rndis_packet) {
 592		.data_offset = sizeof(struct rndis_packet),
 593		.data_len = packet->total_data_buflen,
 594		.per_pkt_info_offset = sizeof(struct rndis_packet),
 595	};
 596
 597	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
 598
 599	hash = skb_get_hash_raw(skb);
 600	if (hash != 0 && net->real_num_tx_queues > 1) {
 601		u32 *hash_info;
 602
 603		rndis_msg_size += NDIS_HASH_PPI_SIZE;
 604		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
 605					  NBL_HASH_VALUE);
 606		*hash_info = hash;
 607	}
 608
 609	/* When using AF_PACKET we need to drop VLAN header from
 610	 * the frame and update the SKB to allow the HOST OS
 611	 * to transmit the 802.1Q packet
 612	 */
 613	if (skb->protocol == htons(ETH_P_8021Q)) {
 614		u16 vlan_tci;
 615
 616		skb_reset_mac_header(skb);
 617		if (eth_type_vlan(eth_hdr(skb)->h_proto)) {
 618			if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) {
 619				++net_device_ctx->eth_stats.vlan_error;
 620				goto drop;
 621			}
 622
 623			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
 624			/* Update the NDIS header pkt lengths */
 625			packet->total_data_buflen -= VLAN_HLEN;
 626			packet->total_bytes -= VLAN_HLEN;
 627			rndis_msg->msg_len = packet->total_data_buflen;
 628			rndis_msg->msg.pkt.data_len = packet->total_data_buflen;
 629		}
 630	}
 631
 632	if (skb_vlan_tag_present(skb)) {
 633		struct ndis_pkt_8021q_info *vlan;
 634
 635		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
 636		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
 637				     IEEE_8021Q_INFO);
 638
 639		vlan->value = 0;
 640		vlan->vlanid = skb_vlan_tag_get_id(skb);
 641		vlan->cfi = skb_vlan_tag_get_cfi(skb);
 642		vlan->pri = skb_vlan_tag_get_prio(skb);
 643	}
 644
 645	if (skb_is_gso(skb)) {
 646		struct ndis_tcp_lso_info *lso_info;
 647
 648		rndis_msg_size += NDIS_LSO_PPI_SIZE;
 649		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
 650					 TCP_LARGESEND_PKTINFO);
 651
 652		lso_info->value = 0;
 653		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
 654		if (skb->protocol == htons(ETH_P_IP)) {
 655			lso_info->lso_v2_transmit.ip_version =
 656				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
 657			ip_hdr(skb)->tot_len = 0;
 658			ip_hdr(skb)->check = 0;
 659			tcp_hdr(skb)->check =
 660				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 661						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
 662		} else {
 663			lso_info->lso_v2_transmit.ip_version =
 664				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
 665			tcp_v6_gso_csum_prep(skb);
 666		}
 667		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
 668		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
 669	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 670		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
 671			struct ndis_tcp_ip_checksum_info *csum_info;
 672
 673			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
 674			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
 675						  TCPIP_CHKSUM_PKTINFO);
 676
 677			csum_info->value = 0;
 678			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
 679
 680			if (skb->protocol == htons(ETH_P_IP)) {
 681				csum_info->transmit.is_ipv4 = 1;
 682
 683				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 684					csum_info->transmit.tcp_checksum = 1;
 685				else
 686					csum_info->transmit.udp_checksum = 1;
 687			} else {
 688				csum_info->transmit.is_ipv6 = 1;
 689
 690				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 691					csum_info->transmit.tcp_checksum = 1;
 692				else
 693					csum_info->transmit.udp_checksum = 1;
 694			}
 695		} else {
 696			/* Can't do offload of this type of checksum */
 697			if (skb_checksum_help(skb))
 698				goto drop;
 699		}
 700	}
 701
 702	/* Start filling in the page buffers with the rndis hdr */
 703	rndis_msg->msg_len += rndis_msg_size;
 704	packet->total_data_buflen = rndis_msg->msg_len;
 705	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
 706					       skb, packet, pb);
 707
 708	/* timestamp packet in software */
 709	skb_tx_timestamp(skb);
 710
 711	ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
 712	if (likely(ret == 0))
 713		return NETDEV_TX_OK;
 714
 715	if (ret == -EAGAIN) {
 716		++net_device_ctx->eth_stats.tx_busy;
 717		return NETDEV_TX_BUSY;
 718	}
 719
 720	if (ret == -ENOSPC)
 721		++net_device_ctx->eth_stats.tx_no_space;
 722
 723drop:
 724	dev_kfree_skb_any(skb);
 725	net->stats.tx_dropped++;
 726
 727	return NETDEV_TX_OK;
 728
 729no_memory:
 730	++net_device_ctx->eth_stats.tx_no_memory;
 731	goto drop;
 732}
 733
 734static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
 735				     struct net_device *ndev)
 736{
 737	return netvsc_xmit(skb, ndev, false);
 738}
 739
 740/*
 741 * netvsc_linkstatus_callback - Link up/down notification
 742 */
 743void netvsc_linkstatus_callback(struct net_device *net,
 744				struct rndis_message *resp)
 
 745{
 746	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
 747	struct net_device_context *ndev_ctx = netdev_priv(net);
 748	struct netvsc_reconfig *event;
 749	unsigned long flags;
 750
 751	/* Ensure the packet is big enough to access its fields */
 752	if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) {
 753		netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n",
 754			   resp->msg_len);
 755		return;
 756	}
 757
 
 
 
 758	/* Update the physical link speed when changing to another vSwitch */
 759	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
 760		u32 speed;
 761
 762		speed = *(u32 *)((void *)indicate
 763				 + indicate->status_buf_offset) / 10000;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 764		ndev_ctx->speed = speed;
 765		return;
 766	}
 767
 768	/* Handle these link change statuses below */
 769	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
 770	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
 771	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
 772		return;
 773
 774	if (net->reg_state != NETREG_REGISTERED)
 775		return;
 776
 777	event = kzalloc(sizeof(*event), GFP_ATOMIC);
 778	if (!event)
 779		return;
 780	event->event = indicate->status;
 781
 782	spin_lock_irqsave(&ndev_ctx->lock, flags);
 783	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
 784	spin_unlock_irqrestore(&ndev_ctx->lock, flags);
 785
 786	schedule_delayed_work(&ndev_ctx->dwork, 0);
 787}
 788
 789static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 790{
 791	int rc;
 792
 793	skb->queue_mapping = skb_get_rx_queue(skb);
 794	__skb_push(skb, ETH_HLEN);
 795
 796	rc = netvsc_xmit(skb, ndev, true);
 797
 798	if (dev_xmit_complete(rc))
 799		return;
 800
 801	dev_kfree_skb_any(skb);
 802	ndev->stats.tx_dropped++;
 803}
 804
 805static void netvsc_comp_ipcsum(struct sk_buff *skb)
 806{
 807	struct iphdr *iph = (struct iphdr *)skb->data;
 808
 809	iph->check = 0;
 810	iph->check = ip_fast_csum(iph, iph->ihl);
 811}
 812
 813static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
 814					     struct netvsc_channel *nvchan,
 815					     struct xdp_buff *xdp)
 816{
 817	struct napi_struct *napi = &nvchan->napi;
 818	const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
 819	const struct ndis_tcp_ip_checksum_info *csum_info =
 820						nvchan->rsc.csum_info;
 821	const u32 *hash_info = nvchan->rsc.hash_info;
 
 822	struct sk_buff *skb;
 823	void *xbuf = xdp->data_hard_start;
 824	int i;
 825
 826	if (xbuf) {
 827		unsigned int hdroom = xdp->data - xdp->data_hard_start;
 828		unsigned int xlen = xdp->data_end - xdp->data;
 829		unsigned int frag_size = xdp->frame_sz;
 830
 831		skb = build_skb(xbuf, frag_size);
 832
 833		if (!skb) {
 834			__free_page(virt_to_page(xbuf));
 835			return NULL;
 836		}
 837
 838		skb_reserve(skb, hdroom);
 839		skb_put(skb, xlen);
 840		skb->dev = napi->dev;
 841	} else {
 842		skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
 843
 844		if (!skb)
 845			return NULL;
 846
 847		/* Copy to skb. This copy is needed here since the memory
 848		 * pointed by hv_netvsc_packet cannot be deallocated.
 849		 */
 850		for (i = 0; i < nvchan->rsc.cnt; i++)
 851			skb_put_data(skb, nvchan->rsc.data[i],
 852				     nvchan->rsc.len[i]);
 853	}
 854
 855	skb->protocol = eth_type_trans(skb, net);
 856
 857	/* skb is already created with CHECKSUM_NONE */
 858	skb_checksum_none_assert(skb);
 859
 860	/* Incoming packets may have IP header checksum verified by the host.
 861	 * They may not have IP header checksum computed after coalescing.
 862	 * We compute it here if the flags are set, because on Linux, the IP
 863	 * checksum is always checked.
 864	 */
 865	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
 866	    csum_info->receive.ip_checksum_succeeded &&
 867	    skb->protocol == htons(ETH_P_IP))
 
 
 
 
 
 868		netvsc_comp_ipcsum(skb);
 
 869
 870	/* Do L4 checksum offload if enabled and present. */
 871	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
 872		if (csum_info->receive.tcp_checksum_succeeded ||
 873		    csum_info->receive.udp_checksum_succeeded)
 874			skb->ip_summed = CHECKSUM_UNNECESSARY;
 875	}
 876
 877	if (hash_info && (net->features & NETIF_F_RXHASH))
 878		skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
 879
 880	if (vlan) {
 881		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
 882			(vlan->cfi ? VLAN_CFI_MASK : 0);
 883
 884		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 885				       vlan_tci);
 886	}
 887
 888	return skb;
 889}
 890
 891/*
 892 * netvsc_recv_callback -  Callback when we receive a packet from the
 893 * "wire" on the specified device.
 894 */
 895int netvsc_recv_callback(struct net_device *net,
 896			 struct netvsc_device *net_device,
 897			 struct netvsc_channel *nvchan)
 898{
 899	struct net_device_context *net_device_ctx = netdev_priv(net);
 900	struct vmbus_channel *channel = nvchan->channel;
 901	u16 q_idx = channel->offermsg.offer.sub_channel_index;
 902	struct sk_buff *skb;
 903	struct netvsc_stats *rx_stats = &nvchan->rx_stats;
 904	struct xdp_buff xdp;
 905	u32 act;
 906
 907	if (net->reg_state != NETREG_REGISTERED)
 908		return NVSP_STAT_FAIL;
 909
 910	act = netvsc_run_xdp(net, nvchan, &xdp);
 911
 
 
 
 912	if (act != XDP_PASS && act != XDP_TX) {
 913		u64_stats_update_begin(&rx_stats->syncp);
 914		rx_stats->xdp_drop++;
 915		u64_stats_update_end(&rx_stats->syncp);
 916
 917		return NVSP_STAT_SUCCESS; /* consumed by XDP */
 918	}
 919
 920	/* Allocate a skb - TODO direct I/O to pages? */
 921	skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);
 922
 923	if (unlikely(!skb)) {
 924		++net_device_ctx->eth_stats.rx_no_memory;
 925		return NVSP_STAT_FAIL;
 926	}
 927
 928	skb_record_rx_queue(skb, q_idx);
 929
 930	/*
 931	 * Even if injecting the packet, record the statistics
 932	 * on the synthetic device because modifying the VF device
 933	 * statistics will not work correctly.
 934	 */
 935	u64_stats_update_begin(&rx_stats->syncp);
 
 
 
 936	rx_stats->packets++;
 937	rx_stats->bytes += nvchan->rsc.pktlen;
 938
 939	if (skb->pkt_type == PACKET_BROADCAST)
 940		++rx_stats->broadcast;
 941	else if (skb->pkt_type == PACKET_MULTICAST)
 942		++rx_stats->multicast;
 943	u64_stats_update_end(&rx_stats->syncp);
 944
 945	if (act == XDP_TX) {
 946		netvsc_xdp_xmit(skb, net);
 947		return NVSP_STAT_SUCCESS;
 948	}
 949
 950	napi_gro_receive(&nvchan->napi, skb);
 951	return NVSP_STAT_SUCCESS;
 952}
 953
 954static void netvsc_get_drvinfo(struct net_device *net,
 955			       struct ethtool_drvinfo *info)
 956{
 957	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
 958	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
 959}
 960
 961static void netvsc_get_channels(struct net_device *net,
 962				struct ethtool_channels *channel)
 963{
 964	struct net_device_context *net_device_ctx = netdev_priv(net);
 965	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 966
 967	if (nvdev) {
 968		channel->max_combined	= nvdev->max_chn;
 969		channel->combined_count = nvdev->num_chn;
 970	}
 971}
 972
 973/* Alloc struct netvsc_device_info, and initialize it from either existing
 974 * struct netvsc_device, or from default values.
 975 */
 976static
 977struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
 978{
 979	struct netvsc_device_info *dev_info;
 980	struct bpf_prog *prog;
 981
 982	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
 983
 984	if (!dev_info)
 985		return NULL;
 986
 987	if (nvdev) {
 988		ASSERT_RTNL();
 989
 990		dev_info->num_chn = nvdev->num_chn;
 991		dev_info->send_sections = nvdev->send_section_cnt;
 992		dev_info->send_section_size = nvdev->send_section_size;
 993		dev_info->recv_sections = nvdev->recv_section_cnt;
 994		dev_info->recv_section_size = nvdev->recv_section_size;
 995
 996		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
 997		       NETVSC_HASH_KEYLEN);
 998
 999		prog = netvsc_xdp_get(nvdev);
1000		if (prog) {
1001			bpf_prog_inc(prog);
1002			dev_info->bprog = prog;
1003		}
1004	} else {
1005		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
1006		dev_info->send_sections = NETVSC_DEFAULT_TX;
1007		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
1008		dev_info->recv_sections = NETVSC_DEFAULT_RX;
1009		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
1010	}
1011
1012	return dev_info;
1013}
1014
1015/* Free struct netvsc_device_info */
1016static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
1017{
1018	if (dev_info->bprog) {
1019		ASSERT_RTNL();
1020		bpf_prog_put(dev_info->bprog);
1021	}
1022
1023	kfree(dev_info);
1024}
1025
1026static int netvsc_detach(struct net_device *ndev,
1027			 struct netvsc_device *nvdev)
1028{
1029	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1030	struct hv_device *hdev = ndev_ctx->device_ctx;
1031	int ret;
1032
1033	/* Don't try continuing to try and setup sub channels */
1034	if (cancel_work_sync(&nvdev->subchan_work))
1035		nvdev->num_chn = 1;
1036
1037	netvsc_xdp_set(ndev, NULL, NULL, nvdev);
1038
1039	/* If device was up (receiving) then shutdown */
1040	if (netif_running(ndev)) {
1041		netvsc_tx_disable(nvdev, ndev);
1042
1043		ret = rndis_filter_close(nvdev);
1044		if (ret) {
1045			netdev_err(ndev,
1046				   "unable to close device (ret %d).\n", ret);
1047			return ret;
1048		}
1049
1050		ret = netvsc_wait_until_empty(nvdev);
1051		if (ret) {
1052			netdev_err(ndev,
1053				   "Ring buffer not empty after closing rndis\n");
1054			return ret;
1055		}
1056	}
1057
1058	netif_device_detach(ndev);
1059
1060	rndis_filter_device_remove(hdev, nvdev);
1061
1062	return 0;
1063}
1064
1065static int netvsc_attach(struct net_device *ndev,
1066			 struct netvsc_device_info *dev_info)
1067{
1068	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1069	struct hv_device *hdev = ndev_ctx->device_ctx;
1070	struct netvsc_device *nvdev;
1071	struct rndis_device *rdev;
1072	struct bpf_prog *prog;
1073	int ret = 0;
1074
1075	nvdev = rndis_filter_device_add(hdev, dev_info);
1076	if (IS_ERR(nvdev))
1077		return PTR_ERR(nvdev);
1078
1079	if (nvdev->num_chn > 1) {
1080		ret = rndis_set_subchannel(ndev, nvdev, dev_info);
1081
1082		/* if unavailable, just proceed with one queue */
1083		if (ret) {
1084			nvdev->max_chn = 1;
1085			nvdev->num_chn = 1;
1086		}
1087	}
1088
1089	prog = dev_info->bprog;
1090	if (prog) {
1091		bpf_prog_inc(prog);
1092		ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
1093		if (ret) {
1094			bpf_prog_put(prog);
1095			goto err1;
1096		}
1097	}
1098
1099	/* In any case device is now ready */
1100	nvdev->tx_disable = false;
1101	netif_device_attach(ndev);
1102
1103	/* Note: enable and attach happen when sub-channels setup */
1104	netif_carrier_off(ndev);
1105
1106	if (netif_running(ndev)) {
1107		ret = rndis_filter_open(nvdev);
1108		if (ret)
1109			goto err2;
1110
1111		rdev = nvdev->extension;
1112		if (!rdev->link_state)
1113			netif_carrier_on(ndev);
1114	}
1115
1116	return 0;
1117
1118err2:
1119	netif_device_detach(ndev);
1120
1121err1:
1122	rndis_filter_device_remove(hdev, nvdev);
1123
1124	return ret;
1125}
1126
1127static int netvsc_set_channels(struct net_device *net,
1128			       struct ethtool_channels *channels)
1129{
1130	struct net_device_context *net_device_ctx = netdev_priv(net);
1131	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
1132	unsigned int orig, count = channels->combined_count;
1133	struct netvsc_device_info *device_info;
1134	int ret;
1135
1136	/* We do not support separate count for rx, tx, or other */
1137	if (count == 0 ||
1138	    channels->rx_count || channels->tx_count || channels->other_count)
1139		return -EINVAL;
1140
1141	if (!nvdev || nvdev->destroy)
1142		return -ENODEV;
1143
1144	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1145		return -EINVAL;
1146
1147	if (count > nvdev->max_chn)
1148		return -EINVAL;
1149
1150	orig = nvdev->num_chn;
1151
1152	device_info = netvsc_devinfo_get(nvdev);
1153
1154	if (!device_info)
1155		return -ENOMEM;
1156
1157	device_info->num_chn = count;
1158
1159	ret = netvsc_detach(net, nvdev);
1160	if (ret)
1161		goto out;
1162
1163	ret = netvsc_attach(net, device_info);
1164	if (ret) {
1165		device_info->num_chn = orig;
1166		if (netvsc_attach(net, device_info))
1167			netdev_err(net, "restoring channel setting failed\n");
1168	}
1169
1170out:
1171	netvsc_devinfo_put(device_info);
1172	return ret;
1173}
1174
1175static void netvsc_init_settings(struct net_device *dev)
1176{
1177	struct net_device_context *ndc = netdev_priv(dev);
1178
1179	ndc->l4_hash = HV_DEFAULT_L4HASH;
1180
1181	ndc->speed = SPEED_UNKNOWN;
1182	ndc->duplex = DUPLEX_FULL;
1183
1184	dev->features = NETIF_F_LRO;
1185}
1186
1187static int netvsc_get_link_ksettings(struct net_device *dev,
1188				     struct ethtool_link_ksettings *cmd)
1189{
1190	struct net_device_context *ndc = netdev_priv(dev);
1191	struct net_device *vf_netdev;
1192
1193	vf_netdev = rtnl_dereference(ndc->vf_netdev);
1194
1195	if (vf_netdev)
1196		return __ethtool_get_link_ksettings(vf_netdev, cmd);
1197
1198	cmd->base.speed = ndc->speed;
1199	cmd->base.duplex = ndc->duplex;
1200	cmd->base.port = PORT_OTHER;
1201
1202	return 0;
1203}
1204
1205static int netvsc_set_link_ksettings(struct net_device *dev,
1206				     const struct ethtool_link_ksettings *cmd)
1207{
1208	struct net_device_context *ndc = netdev_priv(dev);
1209	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
1210
1211	if (vf_netdev) {
1212		if (!vf_netdev->ethtool_ops->set_link_ksettings)
1213			return -EOPNOTSUPP;
1214
1215		return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
1216								  cmd);
1217	}
1218
1219	return ethtool_virtdev_set_link_ksettings(dev, cmd,
1220						  &ndc->speed, &ndc->duplex);
1221}
1222
1223static int netvsc_change_mtu(struct net_device *ndev, int mtu)
1224{
1225	struct net_device_context *ndevctx = netdev_priv(ndev);
1226	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
1227	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1228	int orig_mtu = ndev->mtu;
1229	struct netvsc_device_info *device_info;
1230	int ret = 0;
1231
1232	if (!nvdev || nvdev->destroy)
1233		return -ENODEV;
1234
1235	device_info = netvsc_devinfo_get(nvdev);
1236
1237	if (!device_info)
1238		return -ENOMEM;
1239
1240	/* Change MTU of underlying VF netdev first. */
1241	if (vf_netdev) {
1242		ret = dev_set_mtu(vf_netdev, mtu);
1243		if (ret)
1244			goto out;
1245	}
1246
1247	ret = netvsc_detach(ndev, nvdev);
1248	if (ret)
1249		goto rollback_vf;
1250
1251	ndev->mtu = mtu;
1252
1253	ret = netvsc_attach(ndev, device_info);
1254	if (!ret)
1255		goto out;
1256
1257	/* Attempt rollback to original MTU */
1258	ndev->mtu = orig_mtu;
1259
1260	if (netvsc_attach(ndev, device_info))
1261		netdev_err(ndev, "restoring mtu failed\n");
1262rollback_vf:
1263	if (vf_netdev)
1264		dev_set_mtu(vf_netdev, orig_mtu);
1265
1266out:
1267	netvsc_devinfo_put(device_info);
1268	return ret;
1269}
1270
1271static void netvsc_get_vf_stats(struct net_device *net,
1272				struct netvsc_vf_pcpu_stats *tot)
1273{
1274	struct net_device_context *ndev_ctx = netdev_priv(net);
1275	int i;
1276
1277	memset(tot, 0, sizeof(*tot));
1278
1279	for_each_possible_cpu(i) {
1280		const struct netvsc_vf_pcpu_stats *stats
1281			= per_cpu_ptr(ndev_ctx->vf_stats, i);
1282		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1283		unsigned int start;
1284
1285		do {
1286			start = u64_stats_fetch_begin_irq(&stats->syncp);
1287			rx_packets = stats->rx_packets;
1288			tx_packets = stats->tx_packets;
1289			rx_bytes = stats->rx_bytes;
1290			tx_bytes = stats->tx_bytes;
1291		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1292
1293		tot->rx_packets += rx_packets;
1294		tot->tx_packets += tx_packets;
1295		tot->rx_bytes   += rx_bytes;
1296		tot->tx_bytes   += tx_bytes;
1297		tot->tx_dropped += stats->tx_dropped;
1298	}
1299}
1300
1301static void netvsc_get_pcpu_stats(struct net_device *net,
1302				  struct netvsc_ethtool_pcpu_stats *pcpu_tot)
1303{
1304	struct net_device_context *ndev_ctx = netdev_priv(net);
1305	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
1306	int i;
1307
1308	/* fetch percpu stats of vf */
1309	for_each_possible_cpu(i) {
1310		const struct netvsc_vf_pcpu_stats *stats =
1311			per_cpu_ptr(ndev_ctx->vf_stats, i);
1312		struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
1313		unsigned int start;
1314
1315		do {
1316			start = u64_stats_fetch_begin_irq(&stats->syncp);
1317			this_tot->vf_rx_packets = stats->rx_packets;
1318			this_tot->vf_tx_packets = stats->tx_packets;
1319			this_tot->vf_rx_bytes = stats->rx_bytes;
1320			this_tot->vf_tx_bytes = stats->tx_bytes;
1321		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1322		this_tot->rx_packets = this_tot->vf_rx_packets;
1323		this_tot->tx_packets = this_tot->vf_tx_packets;
1324		this_tot->rx_bytes   = this_tot->vf_rx_bytes;
1325		this_tot->tx_bytes   = this_tot->vf_tx_bytes;
1326	}
1327
1328	/* fetch percpu stats of netvsc */
1329	for (i = 0; i < nvdev->num_chn; i++) {
1330		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1331		const struct netvsc_stats *stats;
 
1332		struct netvsc_ethtool_pcpu_stats *this_tot =
1333			&pcpu_tot[nvchan->channel->target_cpu];
1334		u64 packets, bytes;
1335		unsigned int start;
1336
1337		stats = &nvchan->tx_stats;
1338		do {
1339			start = u64_stats_fetch_begin_irq(&stats->syncp);
1340			packets = stats->packets;
1341			bytes = stats->bytes;
1342		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1343
1344		this_tot->tx_bytes	+= bytes;
1345		this_tot->tx_packets	+= packets;
1346
1347		stats = &nvchan->rx_stats;
1348		do {
1349			start = u64_stats_fetch_begin_irq(&stats->syncp);
1350			packets = stats->packets;
1351			bytes = stats->bytes;
1352		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1353
1354		this_tot->rx_bytes	+= bytes;
1355		this_tot->rx_packets	+= packets;
1356	}
1357}
1358
1359static void netvsc_get_stats64(struct net_device *net,
1360			       struct rtnl_link_stats64 *t)
1361{
1362	struct net_device_context *ndev_ctx = netdev_priv(net);
1363	struct netvsc_device *nvdev;
1364	struct netvsc_vf_pcpu_stats vf_tot;
1365	int i;
1366
1367	rcu_read_lock();
1368
1369	nvdev = rcu_dereference(ndev_ctx->nvdev);
1370	if (!nvdev)
1371		goto out;
1372
1373	netdev_stats_to_stats64(t, &net->stats);
1374
1375	netvsc_get_vf_stats(net, &vf_tot);
1376	t->rx_packets += vf_tot.rx_packets;
1377	t->tx_packets += vf_tot.tx_packets;
1378	t->rx_bytes   += vf_tot.rx_bytes;
1379	t->tx_bytes   += vf_tot.tx_bytes;
1380	t->tx_dropped += vf_tot.tx_dropped;
1381
1382	for (i = 0; i < nvdev->num_chn; i++) {
1383		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1384		const struct netvsc_stats *stats;
 
1385		u64 packets, bytes, multicast;
1386		unsigned int start;
1387
1388		stats = &nvchan->tx_stats;
1389		do {
1390			start = u64_stats_fetch_begin_irq(&stats->syncp);
1391			packets = stats->packets;
1392			bytes = stats->bytes;
1393		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1394
1395		t->tx_bytes	+= bytes;
1396		t->tx_packets	+= packets;
1397
1398		stats = &nvchan->rx_stats;
1399		do {
1400			start = u64_stats_fetch_begin_irq(&stats->syncp);
1401			packets = stats->packets;
1402			bytes = stats->bytes;
1403			multicast = stats->multicast + stats->broadcast;
1404		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
1405
1406		t->rx_bytes	+= bytes;
1407		t->rx_packets	+= packets;
1408		t->multicast	+= multicast;
1409	}
1410out:
1411	rcu_read_unlock();
1412}
1413
1414static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
1415{
1416	struct net_device_context *ndc = netdev_priv(ndev);
1417	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
1418	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1419	struct sockaddr *addr = p;
1420	int err;
1421
1422	err = eth_prepare_mac_addr_change(ndev, p);
1423	if (err)
1424		return err;
1425
1426	if (!nvdev)
1427		return -ENODEV;
1428
1429	if (vf_netdev) {
1430		err = dev_set_mac_address(vf_netdev, addr, NULL);
1431		if (err)
1432			return err;
1433	}
1434
1435	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
1436	if (!err) {
1437		eth_commit_mac_addr_change(ndev, p);
1438	} else if (vf_netdev) {
1439		/* rollback change on VF */
1440		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
1441		dev_set_mac_address(vf_netdev, addr, NULL);
1442	}
1443
1444	return err;
1445}
1446
1447static const struct {
1448	char name[ETH_GSTRING_LEN];
1449	u16 offset;
1450} netvsc_stats[] = {
1451	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
1452	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
1453	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
1454	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
1455	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
1456	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
1457	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
1458	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
1459	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
1460	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
1461	{ "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) },
1462}, pcpu_stats[] = {
1463	{ "cpu%u_rx_packets",
1464		offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
1465	{ "cpu%u_rx_bytes",
1466		offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
1467	{ "cpu%u_tx_packets",
1468		offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
1469	{ "cpu%u_tx_bytes",
1470		offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
1471	{ "cpu%u_vf_rx_packets",
1472		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
1473	{ "cpu%u_vf_rx_bytes",
1474		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
1475	{ "cpu%u_vf_tx_packets",
1476		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
1477	{ "cpu%u_vf_tx_bytes",
1478		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
1479}, vf_stats[] = {
1480	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
1481	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
1482	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
1483	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
1484	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
1485};
1486
1487#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
1488#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)
1489
1490/* statistics per queue (rx/tx packets/bytes) */
1491#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
1492
1493/* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
1494#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)
1495
1496static int netvsc_get_sset_count(struct net_device *dev, int string_set)
1497{
1498	struct net_device_context *ndc = netdev_priv(dev);
1499	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1500
1501	if (!nvdev)
1502		return -ENODEV;
1503
1504	switch (string_set) {
1505	case ETH_SS_STATS:
1506		return NETVSC_GLOBAL_STATS_LEN
1507			+ NETVSC_VF_STATS_LEN
1508			+ NETVSC_QUEUE_STATS_LEN(nvdev)
1509			+ NETVSC_PCPU_STATS_LEN;
1510	default:
1511		return -EINVAL;
1512	}
1513}
1514
1515static void netvsc_get_ethtool_stats(struct net_device *dev,
1516				     struct ethtool_stats *stats, u64 *data)
1517{
1518	struct net_device_context *ndc = netdev_priv(dev);
1519	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1520	const void *nds = &ndc->eth_stats;
1521	const struct netvsc_stats *qstats;
 
1522	struct netvsc_vf_pcpu_stats sum;
1523	struct netvsc_ethtool_pcpu_stats *pcpu_sum;
1524	unsigned int start;
1525	u64 packets, bytes;
1526	u64 xdp_drop;
 
 
 
1527	int i, j, cpu;
1528
1529	if (!nvdev)
1530		return;
1531
1532	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
1533		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
1534
1535	netvsc_get_vf_stats(dev, &sum);
1536	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
1537		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
1538
1539	for (j = 0; j < nvdev->num_chn; j++) {
1540		qstats = &nvdev->chan_table[j].tx_stats;
1541
1542		do {
1543			start = u64_stats_fetch_begin_irq(&qstats->syncp);
1544			packets = qstats->packets;
1545			bytes = qstats->bytes;
1546		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
 
1547		data[i++] = packets;
1548		data[i++] = bytes;
 
1549
1550		qstats = &nvdev->chan_table[j].rx_stats;
1551		do {
1552			start = u64_stats_fetch_begin_irq(&qstats->syncp);
1553			packets = qstats->packets;
1554			bytes = qstats->bytes;
1555			xdp_drop = qstats->xdp_drop;
1556		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
 
 
1557		data[i++] = packets;
1558		data[i++] = bytes;
1559		data[i++] = xdp_drop;
 
 
1560	}
1561
1562	pcpu_sum = kvmalloc_array(num_possible_cpus(),
1563				  sizeof(struct netvsc_ethtool_pcpu_stats),
1564				  GFP_KERNEL);
 
 
 
1565	netvsc_get_pcpu_stats(dev, pcpu_sum);
1566	for_each_present_cpu(cpu) {
1567		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
1568
1569		for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
1570			data[i++] = *(u64 *)((void *)this_sum
1571					     + pcpu_stats[j].offset);
1572	}
1573	kvfree(pcpu_sum);
1574}
1575
1576static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1577{
1578	struct net_device_context *ndc = netdev_priv(dev);
1579	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1580	u8 *p = data;
1581	int i, cpu;
1582
1583	if (!nvdev)
1584		return;
1585
1586	switch (stringset) {
1587	case ETH_SS_STATS:
1588		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
1589			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
1590			p += ETH_GSTRING_LEN;
1591		}
1592
1593		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
1594			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
1595			p += ETH_GSTRING_LEN;
1596		}
1597
1598		for (i = 0; i < nvdev->num_chn; i++) {
1599			sprintf(p, "tx_queue_%u_packets", i);
1600			p += ETH_GSTRING_LEN;
1601			sprintf(p, "tx_queue_%u_bytes", i);
1602			p += ETH_GSTRING_LEN;
1603			sprintf(p, "rx_queue_%u_packets", i);
1604			p += ETH_GSTRING_LEN;
1605			sprintf(p, "rx_queue_%u_bytes", i);
1606			p += ETH_GSTRING_LEN;
1607			sprintf(p, "rx_queue_%u_xdp_drop", i);
1608			p += ETH_GSTRING_LEN;
1609		}
1610
1611		for_each_present_cpu(cpu) {
1612			for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
1613				sprintf(p, pcpu_stats[i].name, cpu);
1614				p += ETH_GSTRING_LEN;
1615			}
1616		}
1617
1618		break;
1619	}
1620}
1621
1622static int
1623netvsc_get_rss_hash_opts(struct net_device_context *ndc,
1624			 struct ethtool_rxnfc *info)
1625{
1626	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
1627
1628	info->data = RXH_IP_SRC | RXH_IP_DST;
1629
1630	switch (info->flow_type) {
1631	case TCP_V4_FLOW:
1632		if (ndc->l4_hash & HV_TCP4_L4HASH)
1633			info->data |= l4_flag;
1634
1635		break;
1636
1637	case TCP_V6_FLOW:
1638		if (ndc->l4_hash & HV_TCP6_L4HASH)
1639			info->data |= l4_flag;
1640
1641		break;
1642
1643	case UDP_V4_FLOW:
1644		if (ndc->l4_hash & HV_UDP4_L4HASH)
1645			info->data |= l4_flag;
1646
1647		break;
1648
1649	case UDP_V6_FLOW:
1650		if (ndc->l4_hash & HV_UDP6_L4HASH)
1651			info->data |= l4_flag;
1652
1653		break;
1654
1655	case IPV4_FLOW:
1656	case IPV6_FLOW:
1657		break;
1658	default:
1659		info->data = 0;
1660		break;
1661	}
1662
1663	return 0;
1664}
1665
1666static int
1667netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1668		 u32 *rules)
1669{
1670	struct net_device_context *ndc = netdev_priv(dev);
1671	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1672
1673	if (!nvdev)
1674		return -ENODEV;
1675
1676	switch (info->cmd) {
1677	case ETHTOOL_GRXRINGS:
1678		info->data = nvdev->num_chn;
1679		return 0;
1680
1681	case ETHTOOL_GRXFH:
1682		return netvsc_get_rss_hash_opts(ndc, info);
1683	}
1684	return -EOPNOTSUPP;
1685}
1686
1687static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
1688				    struct ethtool_rxnfc *info)
1689{
1690	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
1691			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1692		switch (info->flow_type) {
1693		case TCP_V4_FLOW:
1694			ndc->l4_hash |= HV_TCP4_L4HASH;
1695			break;
1696
1697		case TCP_V6_FLOW:
1698			ndc->l4_hash |= HV_TCP6_L4HASH;
1699			break;
1700
1701		case UDP_V4_FLOW:
1702			ndc->l4_hash |= HV_UDP4_L4HASH;
1703			break;
1704
1705		case UDP_V6_FLOW:
1706			ndc->l4_hash |= HV_UDP6_L4HASH;
1707			break;
1708
1709		default:
1710			return -EOPNOTSUPP;
1711		}
1712
1713		return 0;
1714	}
1715
1716	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
1717		switch (info->flow_type) {
1718		case TCP_V4_FLOW:
1719			ndc->l4_hash &= ~HV_TCP4_L4HASH;
1720			break;
1721
1722		case TCP_V6_FLOW:
1723			ndc->l4_hash &= ~HV_TCP6_L4HASH;
1724			break;
1725
1726		case UDP_V4_FLOW:
1727			ndc->l4_hash &= ~HV_UDP4_L4HASH;
1728			break;
1729
1730		case UDP_V6_FLOW:
1731			ndc->l4_hash &= ~HV_UDP6_L4HASH;
1732			break;
1733
1734		default:
1735			return -EOPNOTSUPP;
1736		}
1737
1738		return 0;
1739	}
1740
1741	return -EOPNOTSUPP;
1742}
1743
1744static int
1745netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
1746{
1747	struct net_device_context *ndc = netdev_priv(ndev);
1748
1749	if (info->cmd == ETHTOOL_SRXFH)
1750		return netvsc_set_rss_hash_opts(ndc, info);
1751
1752	return -EOPNOTSUPP;
1753}
1754
1755static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
1756{
1757	return NETVSC_HASH_KEYLEN;
1758}
1759
1760static u32 netvsc_rss_indir_size(struct net_device *dev)
1761{
1762	return ITAB_NUM;
 
 
1763}
1764
1765static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1766			   u8 *hfunc)
1767{
1768	struct net_device_context *ndc = netdev_priv(dev);
1769	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
1770	struct rndis_device *rndis_dev;
1771	int i;
1772
1773	if (!ndev)
1774		return -ENODEV;
1775
1776	if (hfunc)
1777		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */
1778
1779	rndis_dev = ndev->extension;
1780	if (indir) {
1781		for (i = 0; i < ITAB_NUM; i++)
1782			indir[i] = ndc->rx_table[i];
1783	}
1784
1785	if (key)
1786		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
1787
1788	return 0;
1789}
1790
1791static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
1792			   const u8 *key, const u8 hfunc)
 
1793{
1794	struct net_device_context *ndc = netdev_priv(dev);
1795	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
1796	struct rndis_device *rndis_dev;
 
1797	int i;
1798
1799	if (!ndev)
1800		return -ENODEV;
1801
1802	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
 
1803		return -EOPNOTSUPP;
1804
1805	rndis_dev = ndev->extension;
1806	if (indir) {
1807		for (i = 0; i < ITAB_NUM; i++)
1808			if (indir[i] >= ndev->num_chn)
1809				return -EINVAL;
1810
1811		for (i = 0; i < ITAB_NUM; i++)
1812			ndc->rx_table[i] = indir[i];
1813	}
1814
1815	if (!key) {
1816		if (!indir)
1817			return 0;
1818
1819		key = rndis_dev->rss_key;
1820	}
1821
1822	return rndis_filter_set_rss_param(rndis_dev, key);
1823}
1824
1825/* Hyper-V RNDIS protocol does not have ring in the HW sense.
1826 * It does have pre-allocated receive area which is divided into sections.
1827 */
1828static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
1829				   struct ethtool_ringparam *ring)
1830{
1831	u32 max_buf_size;
1832
1833	ring->rx_pending = nvdev->recv_section_cnt;
1834	ring->tx_pending = nvdev->send_section_cnt;
1835
1836	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
1837		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
1838	else
1839		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
1840
1841	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
1842	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
1843		/ nvdev->send_section_size;
1844}
1845
1846static void netvsc_get_ringparam(struct net_device *ndev,
1847				 struct ethtool_ringparam *ring)
 
 
1848{
1849	struct net_device_context *ndevctx = netdev_priv(ndev);
1850	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1851
1852	if (!nvdev)
1853		return;
1854
1855	__netvsc_get_ringparam(nvdev, ring);
1856}
1857
1858static int netvsc_set_ringparam(struct net_device *ndev,
1859				struct ethtool_ringparam *ring)
 
 
1860{
1861	struct net_device_context *ndevctx = netdev_priv(ndev);
1862	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1863	struct netvsc_device_info *device_info;
1864	struct ethtool_ringparam orig;
1865	u32 new_tx, new_rx;
1866	int ret = 0;
1867
1868	if (!nvdev || nvdev->destroy)
1869		return -ENODEV;
1870
1871	memset(&orig, 0, sizeof(orig));
1872	__netvsc_get_ringparam(nvdev, &orig);
1873
1874	new_tx = clamp_t(u32, ring->tx_pending,
1875			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
1876	new_rx = clamp_t(u32, ring->rx_pending,
1877			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);
1878
1879	if (new_tx == orig.tx_pending &&
1880	    new_rx == orig.rx_pending)
1881		return 0;	 /* no change */
1882
1883	device_info = netvsc_devinfo_get(nvdev);
1884
1885	if (!device_info)
1886		return -ENOMEM;
1887
1888	device_info->send_sections = new_tx;
1889	device_info->recv_sections = new_rx;
1890
1891	ret = netvsc_detach(ndev, nvdev);
1892	if (ret)
1893		goto out;
1894
1895	ret = netvsc_attach(ndev, device_info);
1896	if (ret) {
1897		device_info->send_sections = orig.tx_pending;
1898		device_info->recv_sections = orig.rx_pending;
1899
1900		if (netvsc_attach(ndev, device_info))
1901			netdev_err(ndev, "restoring ringparam failed");
1902	}
1903
1904out:
1905	netvsc_devinfo_put(device_info);
1906	return ret;
1907}
1908
1909static netdev_features_t netvsc_fix_features(struct net_device *ndev,
1910					     netdev_features_t features)
1911{
1912	struct net_device_context *ndevctx = netdev_priv(ndev);
1913	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1914
1915	if (!nvdev || nvdev->destroy)
1916		return features;
1917
1918	if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
1919		features ^= NETIF_F_LRO;
1920		netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
1921	}
1922
1923	return features;
1924}
1925
1926static int netvsc_set_features(struct net_device *ndev,
1927			       netdev_features_t features)
1928{
1929	netdev_features_t change = features ^ ndev->features;
1930	struct net_device_context *ndevctx = netdev_priv(ndev);
1931	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1932	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
1933	struct ndis_offload_params offloads;
1934	int ret = 0;
1935
1936	if (!nvdev || nvdev->destroy)
1937		return -ENODEV;
1938
1939	if (!(change & NETIF_F_LRO))
1940		goto syncvf;
1941
1942	memset(&offloads, 0, sizeof(struct ndis_offload_params));
1943
1944	if (features & NETIF_F_LRO) {
1945		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1946		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
1947	} else {
1948		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1949		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
1950	}
1951
1952	ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
1953
1954	if (ret) {
1955		features ^= NETIF_F_LRO;
1956		ndev->features = features;
1957	}
1958
1959syncvf:
1960	if (!vf_netdev)
1961		return ret;
1962
1963	vf_netdev->wanted_features = features;
1964	netdev_update_features(vf_netdev);
1965
1966	return ret;
1967}
1968
1969static int netvsc_get_regs_len(struct net_device *netdev)
1970{
1971	return VRSS_SEND_TAB_SIZE * sizeof(u32);
1972}
1973
1974static void netvsc_get_regs(struct net_device *netdev,
1975			    struct ethtool_regs *regs, void *p)
1976{
1977	struct net_device_context *ndc = netdev_priv(netdev);
1978	u32 *regs_buff = p;
1979
1980	/* increase the version, if buffer format is changed. */
1981	regs->version = 1;
1982
1983	memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32));
1984}
1985
1986static u32 netvsc_get_msglevel(struct net_device *ndev)
1987{
1988	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1989
1990	return ndev_ctx->msg_enable;
1991}
1992
1993static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
1994{
1995	struct net_device_context *ndev_ctx = netdev_priv(ndev);
1996
1997	ndev_ctx->msg_enable = val;
1998}
1999
2000static const struct ethtool_ops ethtool_ops = {
2001	.get_drvinfo	= netvsc_get_drvinfo,
2002	.get_regs_len	= netvsc_get_regs_len,
2003	.get_regs	= netvsc_get_regs,
2004	.get_msglevel	= netvsc_get_msglevel,
2005	.set_msglevel	= netvsc_set_msglevel,
2006	.get_link	= ethtool_op_get_link,
2007	.get_ethtool_stats = netvsc_get_ethtool_stats,
2008	.get_sset_count = netvsc_get_sset_count,
2009	.get_strings	= netvsc_get_strings,
2010	.get_channels   = netvsc_get_channels,
2011	.set_channels   = netvsc_set_channels,
2012	.get_ts_info	= ethtool_op_get_ts_info,
2013	.get_rxnfc	= netvsc_get_rxnfc,
2014	.set_rxnfc	= netvsc_set_rxnfc,
2015	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
2016	.get_rxfh_indir_size = netvsc_rss_indir_size,
2017	.get_rxfh	= netvsc_get_rxfh,
2018	.set_rxfh	= netvsc_set_rxfh,
2019	.get_link_ksettings = netvsc_get_link_ksettings,
2020	.set_link_ksettings = netvsc_set_link_ksettings,
2021	.get_ringparam	= netvsc_get_ringparam,
2022	.set_ringparam	= netvsc_set_ringparam,
2023};
2024
2025static const struct net_device_ops device_ops = {
2026	.ndo_open =			netvsc_open,
2027	.ndo_stop =			netvsc_close,
2028	.ndo_start_xmit =		netvsc_start_xmit,
2029	.ndo_change_rx_flags =		netvsc_change_rx_flags,
2030	.ndo_set_rx_mode =		netvsc_set_rx_mode,
2031	.ndo_fix_features =		netvsc_fix_features,
2032	.ndo_set_features =		netvsc_set_features,
2033	.ndo_change_mtu =		netvsc_change_mtu,
2034	.ndo_validate_addr =		eth_validate_addr,
2035	.ndo_set_mac_address =		netvsc_set_mac_addr,
2036	.ndo_select_queue =		netvsc_select_queue,
2037	.ndo_get_stats64 =		netvsc_get_stats64,
2038	.ndo_bpf =			netvsc_bpf,
 
2039};
2040
2041/*
2042 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
2043 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
2044 * present send GARP packet to network peers with netif_notify_peers().
2045 */
2046static void netvsc_link_change(struct work_struct *w)
2047{
2048	struct net_device_context *ndev_ctx =
2049		container_of(w, struct net_device_context, dwork.work);
2050	struct hv_device *device_obj = ndev_ctx->device_ctx;
2051	struct net_device *net = hv_get_drvdata(device_obj);
 
 
2052	struct netvsc_device *net_device;
2053	struct rndis_device *rdev;
2054	struct netvsc_reconfig *event = NULL;
2055	bool notify = false, reschedule = false;
2056	unsigned long flags, next_reconfig, delay;
2057
2058	/* if changes are happening, comeback later */
2059	if (!rtnl_trylock()) {
2060		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
2061		return;
2062	}
2063
2064	net_device = rtnl_dereference(ndev_ctx->nvdev);
2065	if (!net_device)
2066		goto out_unlock;
2067
2068	rdev = net_device->extension;
2069
2070	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
2071	if (time_is_after_jiffies(next_reconfig)) {
2072		/* link_watch only sends one notification with current state
2073		 * per second, avoid doing reconfig more frequently. Handle
2074		 * wrap around.
2075		 */
2076		delay = next_reconfig - jiffies;
2077		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
2078		schedule_delayed_work(&ndev_ctx->dwork, delay);
2079		goto out_unlock;
2080	}
2081	ndev_ctx->last_reconfig = jiffies;
2082
2083	spin_lock_irqsave(&ndev_ctx->lock, flags);
2084	if (!list_empty(&ndev_ctx->reconfig_events)) {
2085		event = list_first_entry(&ndev_ctx->reconfig_events,
2086					 struct netvsc_reconfig, list);
2087		list_del(&event->list);
2088		reschedule = !list_empty(&ndev_ctx->reconfig_events);
2089	}
2090	spin_unlock_irqrestore(&ndev_ctx->lock, flags);
2091
2092	if (!event)
2093		goto out_unlock;
2094
2095	switch (event->event) {
2096		/* Only the following events are possible due to the check in
2097		 * netvsc_linkstatus_callback()
2098		 */
2099	case RNDIS_STATUS_MEDIA_CONNECT:
2100		if (rdev->link_state) {
2101			rdev->link_state = false;
2102			netif_carrier_on(net);
2103			netvsc_tx_enable(net_device, net);
2104		} else {
2105			notify = true;
2106		}
2107		kfree(event);
2108		break;
2109	case RNDIS_STATUS_MEDIA_DISCONNECT:
2110		if (!rdev->link_state) {
2111			rdev->link_state = true;
2112			netif_carrier_off(net);
2113			netvsc_tx_disable(net_device, net);
2114		}
2115		kfree(event);
2116		break;
2117	case RNDIS_STATUS_NETWORK_CHANGE:
2118		/* Only makes sense if carrier is present */
2119		if (!rdev->link_state) {
2120			rdev->link_state = true;
2121			netif_carrier_off(net);
2122			netvsc_tx_disable(net_device, net);
2123			event->event = RNDIS_STATUS_MEDIA_CONNECT;
2124			spin_lock_irqsave(&ndev_ctx->lock, flags);
2125			list_add(&event->list, &ndev_ctx->reconfig_events);
2126			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
2127			reschedule = true;
2128		}
2129		break;
2130	}
2131
2132	rtnl_unlock();
2133
2134	if (notify)
2135		netdev_notify_peers(net);
2136
2137	/* link_watch only sends one notification with current state per
2138	 * second, handle next reconfig event in 2 seconds.
2139	 */
2140	if (reschedule)
2141		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
2142
2143	return;
2144
2145out_unlock:
2146	rtnl_unlock();
2147}
2148
2149static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
2150{
2151	struct net_device_context *net_device_ctx;
2152	struct net_device *dev;
2153
2154	dev = netdev_master_upper_dev_get(vf_netdev);
2155	if (!dev || dev->netdev_ops != &device_ops)
2156		return NULL;	/* not a netvsc device */
2157
2158	net_device_ctx = netdev_priv(dev);
2159	if (!rtnl_dereference(net_device_ctx->nvdev))
2160		return NULL;	/* device is removed */
2161
2162	return dev;
2163}
2164
2165/* Called when VF is injecting data into network stack.
2166 * Change the associated network device from VF to netvsc.
2167 * note: already called with rcu_read_lock
2168 */
2169static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
2170{
2171	struct sk_buff *skb = *pskb;
2172	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
2173	struct net_device_context *ndev_ctx = netdev_priv(ndev);
2174	struct netvsc_vf_pcpu_stats *pcpu_stats
2175		 = this_cpu_ptr(ndev_ctx->vf_stats);
2176
2177	skb = skb_share_check(skb, GFP_ATOMIC);
2178	if (unlikely(!skb))
2179		return RX_HANDLER_CONSUMED;
2180
2181	*pskb = skb;
2182
2183	skb->dev = ndev;
2184
2185	u64_stats_update_begin(&pcpu_stats->syncp);
2186	pcpu_stats->rx_packets++;
2187	pcpu_stats->rx_bytes += skb->len;
2188	u64_stats_update_end(&pcpu_stats->syncp);
2189
2190	return RX_HANDLER_ANOTHER;
2191}
2192
2193static int netvsc_vf_join(struct net_device *vf_netdev,
2194			  struct net_device *ndev)
2195{
2196	struct net_device_context *ndev_ctx = netdev_priv(ndev);
2197	int ret;
2198
2199	ret = netdev_rx_handler_register(vf_netdev,
2200					 netvsc_vf_handle_frame, ndev);
2201	if (ret != 0) {
2202		netdev_err(vf_netdev,
2203			   "can not register netvsc VF receive handler (err = %d)\n",
2204			   ret);
2205		goto rx_handler_failed;
2206	}
2207
2208	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
2209					   NULL, NULL, NULL);
2210	if (ret != 0) {
2211		netdev_err(vf_netdev,
2212			   "can not set master device %s (err = %d)\n",
2213			   ndev->name, ret);
2214		goto upper_link_failed;
2215	}
2216
2217	/* set slave flag before open to prevent IPv6 addrconf */
2218	vf_netdev->flags |= IFF_SLAVE;
2219
2220	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
 
2221
2222	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
2223
2224	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
2225	return 0;
2226
2227upper_link_failed:
2228	netdev_rx_handler_unregister(vf_netdev);
2229rx_handler_failed:
2230	return ret;
2231}
2232
2233static void __netvsc_vf_setup(struct net_device *ndev,
2234			      struct net_device *vf_netdev)
2235{
2236	int ret;
2237
2238	/* Align MTU of VF with master */
2239	ret = dev_set_mtu(vf_netdev, ndev->mtu);
2240	if (ret)
2241		netdev_warn(vf_netdev,
2242			    "unable to change mtu to %u\n", ndev->mtu);
2243
2244	/* set multicast etc flags on VF */
2245	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);
2246
2247	/* sync address list from ndev to VF */
2248	netif_addr_lock_bh(ndev);
2249	dev_uc_sync(vf_netdev, ndev);
2250	dev_mc_sync(vf_netdev, ndev);
2251	netif_addr_unlock_bh(ndev);
2252
2253	if (netif_running(ndev)) {
2254		ret = dev_open(vf_netdev, NULL);
2255		if (ret)
2256			netdev_warn(vf_netdev,
2257				    "unable to open: %d\n", ret);
2258	}
2259}
2260
2261/* Setup VF as slave of the synthetic device.
2262 * Runs in workqueue to avoid recursion in netlink callbacks.
2263 */
2264static void netvsc_vf_setup(struct work_struct *w)
2265{
2266	struct net_device_context *ndev_ctx
2267		= container_of(w, struct net_device_context, vf_takeover.work);
2268	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2269	struct net_device *vf_netdev;
2270
2271	if (!rtnl_trylock()) {
2272		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
2273		return;
2274	}
2275
2276	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2277	if (vf_netdev)
2278		__netvsc_vf_setup(ndev, vf_netdev);
2279
2280	rtnl_unlock();
2281}
2282
2283/* Find netvsc by VF serial number.
2284 * The PCI hyperv controller records the serial number as the slot kobj name.
2285 */
2286static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
2287{
2288	struct device *parent = vf_netdev->dev.parent;
2289	struct net_device_context *ndev_ctx;
 
2290	struct pci_dev *pdev;
2291	u32 serial;
2292
2293	if (!parent || !dev_is_pci(parent))
2294		return NULL; /* not a PCI device */
2295
2296	pdev = to_pci_dev(parent);
2297	if (!pdev->slot) {
2298		netdev_notice(vf_netdev, "no PCI slot information\n");
2299		return NULL;
2300	}
2301
2302	if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
2303		netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
2304			      pci_slot_name(pdev->slot));
2305		return NULL;
2306	}
2307
2308	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2309		if (!ndev_ctx->vf_alloc)
2310			continue;
2311
2312		if (ndev_ctx->vf_serial == serial)
2313			return hv_get_drvdata(ndev_ctx->device_ctx);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2314	}
2315
2316	netdev_notice(vf_netdev,
2317		      "no netdev found for vf serial:%u\n", serial);
2318	return NULL;
2319}
2320
2321static int netvsc_register_vf(struct net_device *vf_netdev)
 
 
 
 
 
 
 
 
 
 
 
 
 
2322{
2323	struct net_device_context *net_device_ctx;
2324	struct netvsc_device *netvsc_dev;
2325	struct bpf_prog *prog;
2326	struct net_device *ndev;
2327	int ret;
2328
2329	if (vf_netdev->addr_len != ETH_ALEN)
2330		return NOTIFY_DONE;
2331
2332	ndev = get_netvsc_byslot(vf_netdev);
2333	if (!ndev)
2334		return NOTIFY_DONE;
2335
2336	net_device_ctx = netdev_priv(ndev);
2337	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2338	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
2339		return NOTIFY_DONE;
2340
2341	/* if synthetic interface is a different namespace,
2342	 * then move the VF to that namespace; join will be
2343	 * done again in that context.
2344	 */
2345	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
2346		ret = dev_change_net_namespace(vf_netdev,
2347					       dev_net(ndev), "eth%d");
2348		if (ret)
2349			netdev_err(vf_netdev,
2350				   "could not move to same namespace as %s: %d\n",
2351				   ndev->name, ret);
2352		else
2353			netdev_info(vf_netdev,
2354				    "VF moved to namespace with: %s\n",
2355				    ndev->name);
2356		return NOTIFY_DONE;
2357	}
2358
2359	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
2360
2361	if (netvsc_vf_join(vf_netdev, ndev) != 0)
2362		return NOTIFY_DONE;
2363
2364	dev_hold(vf_netdev);
2365	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
2366
 
 
 
2367	vf_netdev->wanted_features = ndev->features;
2368	netdev_update_features(vf_netdev);
2369
2370	prog = netvsc_xdp_get(netvsc_dev);
2371	netvsc_vf_setxdp(vf_netdev, prog);
2372
2373	return NOTIFY_OK;
2374}
2375
2376/* Change the data path when VF UP/DOWN/CHANGE are detected.
2377 *
2378 * Typically a UP or DOWN event is followed by a CHANGE event, so
2379 * net_device_ctx->data_path_is_vf is used to cache the current data path
2380 * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate
2381 * message.
2382 *
2383 * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
2384 * interface, there is only the CHANGE event and no UP or DOWN event.
2385 */
2386static int netvsc_vf_changed(struct net_device *vf_netdev)
2387{
2388	struct net_device_context *net_device_ctx;
2389	struct netvsc_device *netvsc_dev;
2390	struct net_device *ndev;
2391	bool vf_is_up = netif_running(vf_netdev);
 
 
 
 
2392
2393	ndev = get_netvsc_byref(vf_netdev);
2394	if (!ndev)
2395		return NOTIFY_DONE;
2396
2397	net_device_ctx = netdev_priv(ndev);
2398	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2399	if (!netvsc_dev)
2400		return NOTIFY_DONE;
2401
2402	if (net_device_ctx->data_path_is_vf == vf_is_up)
2403		return NOTIFY_OK;
2404	net_device_ctx->data_path_is_vf = vf_is_up;
2405
2406	netvsc_switch_datapath(ndev, vf_is_up);
2407	netdev_info(ndev, "Data path switched %s VF: %s\n",
2408		    vf_is_up ? "to" : "from", vf_netdev->name);
 
 
 
 
 
 
 
 
 
 
 
 
 
2409
2410	return NOTIFY_OK;
2411}
2412
2413static int netvsc_unregister_vf(struct net_device *vf_netdev)
2414{
2415	struct net_device *ndev;
2416	struct net_device_context *net_device_ctx;
2417
2418	ndev = get_netvsc_byref(vf_netdev);
2419	if (!ndev)
2420		return NOTIFY_DONE;
2421
2422	net_device_ctx = netdev_priv(ndev);
2423	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
2424
2425	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
2426
2427	netvsc_vf_setxdp(vf_netdev, NULL);
2428
 
2429	netdev_rx_handler_unregister(vf_netdev);
2430	netdev_upper_dev_unlink(vf_netdev, ndev);
2431	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
2432	dev_put(vf_netdev);
2433
 
 
2434	return NOTIFY_OK;
2435}
2436
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2437static int netvsc_probe(struct hv_device *dev,
2438			const struct hv_vmbus_device_id *dev_id)
2439{
2440	struct net_device *net = NULL;
2441	struct net_device_context *net_device_ctx;
2442	struct netvsc_device_info *device_info = NULL;
2443	struct netvsc_device *nvdev;
2444	int ret = -ENOMEM;
2445
2446	net = alloc_etherdev_mq(sizeof(struct net_device_context),
2447				VRSS_CHANNEL_MAX);
2448	if (!net)
2449		goto no_net;
2450
2451	netif_carrier_off(net);
2452
2453	netvsc_init_settings(net);
2454
2455	net_device_ctx = netdev_priv(net);
2456	net_device_ctx->device_ctx = dev;
2457	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
2458	if (netif_msg_probe(net_device_ctx))
2459		netdev_dbg(net, "netvsc msg_enable: %d\n",
2460			   net_device_ctx->msg_enable);
2461
2462	hv_set_drvdata(dev, net);
2463
2464	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
2465
 
2466	spin_lock_init(&net_device_ctx->lock);
2467	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
2468	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
2469
2470	net_device_ctx->vf_stats
2471		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
2472	if (!net_device_ctx->vf_stats)
2473		goto no_stats;
2474
2475	net->netdev_ops = &device_ops;
2476	net->ethtool_ops = &ethtool_ops;
2477	SET_NETDEV_DEV(net, &dev->device);
 
2478
2479	/* We always need headroom for rndis header */
2480	net->needed_headroom = RNDIS_AND_PPI_SIZE;
2481
2482	/* Initialize the number of queues to be 1, we may change it if more
2483	 * channels are offered later.
2484	 */
2485	netif_set_real_num_tx_queues(net, 1);
2486	netif_set_real_num_rx_queues(net, 1);
2487
2488	/* Notify the netvsc driver of the new device */
2489	device_info = netvsc_devinfo_get(NULL);
2490
2491	if (!device_info) {
2492		ret = -ENOMEM;
2493		goto devinfo_failed;
2494	}
2495
2496	nvdev = rndis_filter_device_add(dev, device_info);
2497	if (IS_ERR(nvdev)) {
2498		ret = PTR_ERR(nvdev);
2499		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
2500		goto rndis_failed;
2501	}
2502
2503	memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
2504
2505	/* We must get rtnl lock before scheduling nvdev->subchan_work,
2506	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
2507	 * all subchannels to show up, but that may not happen because
2508	 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
2509	 * -> ... -> device_add() -> ... -> __device_attach() can't get
2510	 * the device lock, so all the subchannels can't be processed --
2511	 * finally netvsc_subchan_work() hangs forever.
 
 
 
 
 
2512	 */
2513	rtnl_lock();
2514
 
 
 
 
 
 
 
 
 
2515	if (nvdev->num_chn > 1)
2516		schedule_work(&nvdev->subchan_work);
2517
2518	/* hw_features computed in rndis_netdev_set_hwcaps() */
2519	net->features = net->hw_features |
2520		NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX |
2521		NETIF_F_HW_VLAN_CTAG_RX;
2522	net->vlan_features = net->features;
2523
2524	netdev_lockdep_set_classes(net);
2525
 
 
 
2526	/* MTU range: 68 - 1500 or 65521 */
2527	net->min_mtu = NETVSC_MTU_MIN;
2528	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
2529		net->max_mtu = NETVSC_MTU - ETH_HLEN;
2530	else
2531		net->max_mtu = ETH_DATA_LEN;
2532
2533	nvdev->tx_disable = false;
2534
2535	ret = register_netdevice(net);
2536	if (ret != 0) {
2537		pr_err("Unable to register netdev.\n");
2538		goto register_failed;
2539	}
2540
2541	list_add(&net_device_ctx->list, &netvsc_dev_list);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2542	rtnl_unlock();
2543
2544	netvsc_devinfo_put(device_info);
2545	return 0;
2546
2547register_failed:
2548	rtnl_unlock();
2549	rndis_filter_device_remove(dev, nvdev);
2550rndis_failed:
 
2551	netvsc_devinfo_put(device_info);
2552devinfo_failed:
2553	free_percpu(net_device_ctx->vf_stats);
2554no_stats:
2555	hv_set_drvdata(dev, NULL);
2556	free_netdev(net);
2557no_net:
2558	return ret;
2559}
2560
2561static int netvsc_remove(struct hv_device *dev)
2562{
2563	struct net_device_context *ndev_ctx;
2564	struct net_device *vf_netdev, *net;
2565	struct netvsc_device *nvdev;
2566
2567	net = hv_get_drvdata(dev);
2568	if (net == NULL) {
2569		dev_err(&dev->device, "No net device to remove\n");
2570		return 0;
2571	}
2572
2573	ndev_ctx = netdev_priv(net);
2574
2575	cancel_delayed_work_sync(&ndev_ctx->dwork);
2576
2577	rtnl_lock();
2578	nvdev = rtnl_dereference(ndev_ctx->nvdev);
2579	if (nvdev) {
2580		cancel_work_sync(&nvdev->subchan_work);
2581		netvsc_xdp_set(net, NULL, NULL, nvdev);
2582	}
2583
2584	/*
2585	 * Call to the vsc driver to let it know that the device is being
2586	 * removed. Also blocks mtu and channel changes.
2587	 */
2588	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2589	if (vf_netdev)
2590		netvsc_unregister_vf(vf_netdev);
2591
2592	if (nvdev)
2593		rndis_filter_device_remove(dev, nvdev);
2594
2595	unregister_netdevice(net);
2596	list_del(&ndev_ctx->list);
2597
2598	rtnl_unlock();
2599
2600	hv_set_drvdata(dev, NULL);
2601
2602	free_percpu(ndev_ctx->vf_stats);
2603	free_netdev(net);
2604	return 0;
2605}
2606
2607static int netvsc_suspend(struct hv_device *dev)
2608{
2609	struct net_device_context *ndev_ctx;
2610	struct netvsc_device *nvdev;
2611	struct net_device *net;
2612	int ret;
2613
2614	net = hv_get_drvdata(dev);
2615
2616	ndev_ctx = netdev_priv(net);
2617	cancel_delayed_work_sync(&ndev_ctx->dwork);
2618
2619	rtnl_lock();
2620
2621	nvdev = rtnl_dereference(ndev_ctx->nvdev);
2622	if (nvdev == NULL) {
2623		ret = -ENODEV;
2624		goto out;
2625	}
2626
2627	/* Save the current config info */
2628	ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
2629
 
 
 
2630	ret = netvsc_detach(net, nvdev);
2631out:
2632	rtnl_unlock();
2633
2634	return ret;
2635}
2636
2637static int netvsc_resume(struct hv_device *dev)
2638{
2639	struct net_device *net = hv_get_drvdata(dev);
2640	struct net_device_context *net_device_ctx;
2641	struct netvsc_device_info *device_info;
2642	int ret;
2643
2644	rtnl_lock();
2645
2646	net_device_ctx = netdev_priv(net);
2647
2648	/* Reset the data path to the netvsc NIC before re-opening the vmbus
2649	 * channel. Later netvsc_netdev_event() will switch the data path to
2650	 * the VF upon the UP or CHANGE event.
2651	 */
2652	net_device_ctx->data_path_is_vf = false;
2653	device_info = net_device_ctx->saved_netvsc_dev_info;
2654
2655	ret = netvsc_attach(net, device_info);
2656
2657	netvsc_devinfo_put(device_info);
2658	net_device_ctx->saved_netvsc_dev_info = NULL;
2659
2660	rtnl_unlock();
2661
2662	return ret;
2663}
2664static const struct hv_vmbus_device_id id_table[] = {
2665	/* Network guid */
2666	{ HV_NIC_GUID, },
2667	{ },
2668};
2669
2670MODULE_DEVICE_TABLE(vmbus, id_table);
2671
2672/* The one and only one */
2673static struct  hv_driver netvsc_drv = {
2674	.name = KBUILD_MODNAME,
2675	.id_table = id_table,
2676	.probe = netvsc_probe,
2677	.remove = netvsc_remove,
2678	.suspend = netvsc_suspend,
2679	.resume = netvsc_resume,
2680	.driver = {
2681		.probe_type = PROBE_FORCE_SYNCHRONOUS,
2682	},
2683};
2684
2685/*
2686 * On Hyper-V, every VF interface is matched with a corresponding
2687 * synthetic interface. The synthetic interface is presented first
2688 * to the guest. When the corresponding VF instance is registered,
2689 * we will take care of switching the data path.
2690 */
2691static int netvsc_netdev_event(struct notifier_block *this,
2692			       unsigned long event, void *ptr)
2693{
2694	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
 
2695
2696	/* Skip our own events */
2697	if (event_dev->netdev_ops == &device_ops)
2698		return NOTIFY_DONE;
2699
2700	/* Avoid non-Ethernet type devices */
2701	if (event_dev->type != ARPHRD_ETHER)
2702		return NOTIFY_DONE;
2703
2704	/* Avoid Vlan dev with same MAC registering as VF */
2705	if (is_vlan_dev(event_dev))
2706		return NOTIFY_DONE;
2707
2708	/* Avoid Bonding master dev with same MAC registering as VF */
2709	if ((event_dev->priv_flags & IFF_BONDING) &&
2710	    (event_dev->flags & IFF_MASTER))
2711		return NOTIFY_DONE;
2712
2713	switch (event) {
 
 
2714	case NETDEV_REGISTER:
2715		return netvsc_register_vf(event_dev);
2716	case NETDEV_UNREGISTER:
2717		return netvsc_unregister_vf(event_dev);
2718	case NETDEV_UP:
2719	case NETDEV_DOWN:
2720	case NETDEV_CHANGE:
2721		return netvsc_vf_changed(event_dev);
 
2722	default:
2723		return NOTIFY_DONE;
2724	}
2725}
2726
2727static struct notifier_block netvsc_netdev_notifier = {
2728	.notifier_call = netvsc_netdev_event,
2729};
2730
2731static void __exit netvsc_drv_exit(void)
2732{
2733	unregister_netdevice_notifier(&netvsc_netdev_notifier);
2734	vmbus_driver_unregister(&netvsc_drv);
2735}
2736
2737static int __init netvsc_drv_init(void)
2738{
2739	int ret;
2740
2741	if (ring_size < RING_SIZE_MIN) {
2742		ring_size = RING_SIZE_MIN;
2743		pr_info("Increased ring_size to %u (min allowed)\n",
2744			ring_size);
2745	}
2746	netvsc_ring_bytes = ring_size * PAGE_SIZE;
 
 
2747
2748	ret = vmbus_driver_register(&netvsc_drv);
2749	if (ret)
2750		return ret;
2751
2752	register_netdevice_notifier(&netvsc_netdev_notifier);
2753	return 0;
 
 
 
 
2754}
2755
2756MODULE_LICENSE("GPL");
2757MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
2758
2759module_init(netvsc_drv_init);
2760module_exit(netvsc_drv_exit);