v3.15
  1/*
  2 * Copyright (c) 2009, Microsoft Corporation.
  3 *
  4 * This program is free software; you can redistribute it and/or modify it
  5 * under the terms and conditions of the GNU General Public License,
  6 * version 2, as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope it will be useful, but WITHOUT
  9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 11 * more details.
 12 *
 13 * You should have received a copy of the GNU General Public License along with
 14 * this program; if not, see <http://www.gnu.org/licenses/>.
 15 *
 16 * Authors:
 17 *   Haiyang Zhang <haiyangz@microsoft.com>
 18 *   Hank Janssen  <hjanssen@microsoft.com>
 19 */
 20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 21
 22#include <linux/init.h>
 23#include <linux/atomic.h>
 24#include <linux/module.h>
 25#include <linux/highmem.h>
 26#include <linux/device.h>
 27#include <linux/io.h>
 28#include <linux/delay.h>
 29#include <linux/netdevice.h>
 30#include <linux/inetdevice.h>
 31#include <linux/etherdevice.h>
 32#include <linux/skbuff.h>
 33#include <linux/if_vlan.h>
 34#include <linux/in.h>
 35#include <linux/slab.h>
 36#include <net/arp.h>
 37#include <net/route.h>
 38#include <net/sock.h>
 39#include <net/pkt_sched.h>
 40
 41#include "hyperv_net.h"
 42
 43struct net_device_context {
 44	/* point back to our device context */
 45	struct hv_device *device_ctx;
 46	struct delayed_work dwork;
 47	struct work_struct work;
 48};
 49
 50#define RING_SIZE_MIN 64
 51static int ring_size = 128;
 52module_param(ring_size, int, S_IRUGO);
 53MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
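/* For scale: ring_size is given in pages, so with the common 4 KiB PAGE_SIZE
 * the default of 128 corresponds to a 512 KiB ring buffer (128 * 4096 bytes)
 * per vmbus ring, and RING_SIZE_MIN (64) keeps it at no less than 256 KiB.
 */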
 54
 55static void do_set_multicast(struct work_struct *w)
 56{
 57	struct net_device_context *ndevctx =
 58		container_of(w, struct net_device_context, work);
 59	struct netvsc_device *nvdev;
 60	struct rndis_device *rdev;
 61
 62	nvdev = hv_get_drvdata(ndevctx->device_ctx);
 63	if (nvdev == NULL || nvdev->ndev == NULL)
 64		return;
 65
 66	rdev = nvdev->extension;
 67	if (rdev == NULL)
 68		return;
 69
 70	if (nvdev->ndev->flags & IFF_PROMISC)
 71		rndis_filter_set_packet_filter(rdev,
 72			NDIS_PACKET_TYPE_PROMISCUOUS);
 73	else
 74		rndis_filter_set_packet_filter(rdev,
 75			NDIS_PACKET_TYPE_BROADCAST |
 76			NDIS_PACKET_TYPE_ALL_MULTICAST |
 77			NDIS_PACKET_TYPE_DIRECTED);
 78}
 79
 80static void netvsc_set_multicast_list(struct net_device *net)
 81{
 82	struct net_device_context *net_device_ctx = netdev_priv(net);
 83
 84	schedule_work(&net_device_ctx->work);
 85}
 86
 87static int netvsc_open(struct net_device *net)
 88{
 89	struct net_device_context *net_device_ctx = netdev_priv(net);
 90	struct hv_device *device_obj = net_device_ctx->device_ctx;
 91	struct netvsc_device *nvdev;
 92	struct rndis_device *rdev;
 93	int ret = 0;
 94
 95	netif_carrier_off(net);
 96
 97	/* Open up the device */
 98	ret = rndis_filter_open(device_obj);
 99	if (ret != 0) {
100		netdev_err(net, "unable to open device (ret %d).\n", ret);
101		return ret;
102	}
103
104	netif_start_queue(net);
105
106	nvdev = hv_get_drvdata(device_obj);
107	rdev = nvdev->extension;
108	if (!rdev->link_state)
109		netif_carrier_on(net);
110
111	return ret;
112}
113
114static int netvsc_close(struct net_device *net)
115{
116	struct net_device_context *net_device_ctx = netdev_priv(net);
117	struct hv_device *device_obj = net_device_ctx->device_ctx;
118	int ret;
119
120	netif_tx_disable(net);
121
122	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
123	cancel_work_sync(&net_device_ctx->work);
124	ret = rndis_filter_close(device_obj);
125	if (ret != 0)
126		netdev_err(net, "unable to close device (ret %d).\n", ret);
127
128	return ret;
129}
130
131static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
132				int pkt_type)
133{
134	struct rndis_packet *rndis_pkt;
135	struct rndis_per_packet_info *ppi;
136
137	rndis_pkt = &msg->msg.pkt;
138	rndis_pkt->data_offset += ppi_size;
139
140	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
141		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
142
143	ppi->size = ppi_size;
144	ppi->type = pkt_type;
145	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
146
147	rndis_pkt->per_pkt_info_len += ppi_size;
148
149	return ppi;
150}
151
152static void netvsc_xmit_completion(void *context)
 
153{
154	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
155	struct sk_buff *skb = (struct sk_buff *)
156		(unsigned long)packet->completion.send.send_completion_tid;
157
158	kfree(packet);
159
160	if (skb)
161		dev_kfree_skb_any(skb);
162}
163
164static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
165			struct hv_page_buffer *pb)
166{
167	int j = 0;
168
 169	/* Deal with compound pages by ignoring unused part
170	 * of the page.
171	 */
172	page += (offset >> PAGE_SHIFT);
173	offset &= ~PAGE_MASK;
174
175	while (len > 0) {
176		unsigned long bytes;
177
178		bytes = PAGE_SIZE - offset;
179		if (bytes > len)
180			bytes = len;
181		pb[j].pfn = page_to_pfn(page);
182		pb[j].offset = offset;
183		pb[j].len = bytes;
184
185		offset += bytes;
186		len -= bytes;
187
188		if (offset == PAGE_SIZE && len) {
189			page++;
190			offset = 0;
191			j++;
192		}
193	}
194
195	return j + 1;
196}
197
198static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
199			   struct hv_page_buffer *pb)
 
200{
 
201	u32 slots_used = 0;
202	char *data = skb->data;
203	int frags = skb_shinfo(skb)->nr_frags;
204	int i;
205
206	/* The packet is laid out thus:
207	 * 1. hdr
208	 * 2. skb linear data
209	 * 3. skb fragment data
210	 */
211	if (hdr != NULL)
212		slots_used += fill_pg_buf(virt_to_page(hdr),
213					offset_in_page(hdr),
214					len, &pb[slots_used]);
215
216	slots_used += fill_pg_buf(virt_to_page(data),
217				offset_in_page(data),
218				skb_headlen(skb), &pb[slots_used]);
219
220	for (i = 0; i < frags; i++) {
221		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
222
223		slots_used += fill_pg_buf(skb_frag_page(frag),
224					frag->page_offset,
225					skb_frag_size(frag), &pb[slots_used]);
226	}
227	return slots_used;
228}
229
230static int count_skb_frag_slots(struct sk_buff *skb)
231{
232	int i, frags = skb_shinfo(skb)->nr_frags;
233	int pages = 0;
234
235	for (i = 0; i < frags; i++) {
236		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
237		unsigned long size = skb_frag_size(frag);
238		unsigned long offset = frag->page_offset;
239
240		/* Skip unused frames from start of page */
241		offset &= ~PAGE_MASK;
242		pages += PFN_UP(offset + size);
243	}
244	return pages;
245}
246
247static int netvsc_get_slots(struct sk_buff *skb)
248{
249	char *data = skb->data;
250	unsigned int offset = offset_in_page(data);
251	unsigned int len = skb_headlen(skb);
252	int slots;
253	int frag_slots;
254
255	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
256	frag_slots = count_skb_frag_slots(skb);
257	return slots + frag_slots;
258}
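/* Worked example for netvsc_get_slots()/fill_pg_buf() above (illustrative,
 * assuming 4 KiB pages): a linear area of 6000 bytes starting at in-page
 * offset 3000 needs DIV_ROUND_UP(3000 + 6000, 4096) = 3 slots, which
 * fill_pg_buf() fills as (offset 3000, len 1096), (offset 0, len 4096) and
 * (offset 0, len 808).
 */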
259
260static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
261{
262	u32 ret_val = TRANSPORT_INFO_NOT_IP;
263
264	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
265		(eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
266		goto not_ip;
267	}
268
269	*trans_off = skb_transport_offset(skb);
270
271	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
272		struct iphdr *iphdr = ip_hdr(skb);
273
274		if (iphdr->protocol == IPPROTO_TCP)
275			ret_val = TRANSPORT_INFO_IPV4_TCP;
276		else if (iphdr->protocol == IPPROTO_UDP)
277			ret_val = TRANSPORT_INFO_IPV4_UDP;
278	} else {
279		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
280			ret_val = TRANSPORT_INFO_IPV6_TCP;
281		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
282			ret_val = TRANSPORT_INFO_IPV6_UDP;
283	}
284
285not_ip:
286	return ret_val;
287}
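/* The TRANSPORT_INFO_* values are defined in hyperv_net.h; judging from the
 * (INFO_IPV4 << 16) and INFO_TCP/INFO_UDP tests in netvsc_start_xmit() below,
 * they pack the IP version in the upper 16 bits and the L4 protocol in the
 * lower 16 bits of the returned word.
 */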
288
289static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
290{
291	struct net_device_context *net_device_ctx = netdev_priv(net);
292	struct hv_netvsc_packet *packet;
293	int ret;
294	unsigned int num_data_pgs;
295	struct rndis_message *rndis_msg;
296	struct rndis_packet *rndis_pkt;
297	u32 rndis_msg_size;
298	bool isvlan;
299	struct rndis_per_packet_info *ppi;
300	struct ndis_tcp_ip_checksum_info *csum_info;
301	struct ndis_tcp_lso_info *lso_info;
302	int  hdr_offset;
303	u32 net_trans_info;
304
305
 306	/* We will at most need two pages to describe the rndis
307	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
308	 * of pages in a single packet.
 
309	 */
310	num_data_pgs = netvsc_get_slots(skb) + 2;
311	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
312		netdev_err(net, "Packet too big: %u\n", skb->len);
313		dev_kfree_skb(skb);
314		net->stats.tx_dropped++;
315		return NETDEV_TX_OK;
316	}
317
318	/* Allocate a netvsc packet based on # of frags. */
319	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
320			 (num_data_pgs * sizeof(struct hv_page_buffer)) +
321			 sizeof(struct rndis_message) +
322			 NDIS_VLAN_PPI_SIZE +
323			 NDIS_CSUM_PPI_SIZE +
324			 NDIS_LSO_PPI_SIZE, GFP_ATOMIC);
325	if (!packet) {
326		/* out of memory, drop packet */
327		netdev_err(net, "unable to allocate hv_netvsc_packet\n");
328
329		dev_kfree_skb(skb);
330		net->stats.tx_dropped++;
331		return NETDEV_TX_OK;
332	}
333
334	packet->vlan_tci = skb->vlan_tci;
335
336	packet->is_data_pkt = true;
337	packet->total_data_buflen = skb->len;
338
339	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
340				sizeof(struct hv_netvsc_packet) +
341				(num_data_pgs * sizeof(struct hv_page_buffer)));
342
343	/* Set the completion routine */
344	packet->completion.send.send_completion = netvsc_xmit_completion;
345	packet->completion.send.send_completion_ctx = packet;
346	packet->completion.send.send_completion_tid = (unsigned long)skb;
347
348	isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
349
350	/* Add the rndis header */
351	rndis_msg = packet->rndis_msg;
352	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
353	rndis_msg->msg_len = packet->total_data_buflen;
354	rndis_pkt = &rndis_msg->msg.pkt;
355	rndis_pkt->data_offset = sizeof(struct rndis_packet);
356	rndis_pkt->data_len = packet->total_data_buflen;
357	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
358
359	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
360
361	if (isvlan) {
362		struct ndis_pkt_8021q_info *vlan;
363
364		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
365		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
366					IEEE_8021Q_INFO);
367		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
368						ppi->ppi_offset);
369		vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
370		vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
371				VLAN_PRIO_SHIFT;
372	}
373
374	net_trans_info = get_net_transport_info(skb, &hdr_offset);
375	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
376		goto do_send;
377
378	/*
379	 * Setup the sendside checksum offload only if this is not a
380	 * GSO packet.
381	 */
382	if (skb_is_gso(skb))
383		goto do_lso;
384
385	if ((skb->ip_summed == CHECKSUM_NONE) ||
386	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
387		goto do_send;
388
389	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
390	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
391			    TCPIP_CHKSUM_PKTINFO);
392
393	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
394			ppi->ppi_offset);
395
396	if (net_trans_info & (INFO_IPV4 << 16))
397		csum_info->transmit.is_ipv4 = 1;
398	else
399		csum_info->transmit.is_ipv6 = 1;
400
401	if (net_trans_info & INFO_TCP) {
402		csum_info->transmit.tcp_checksum = 1;
403		csum_info->transmit.tcp_header_offset = hdr_offset;
404	} else if (net_trans_info & INFO_UDP) {
405		/* UDP checksum offload is not supported on ws2008r2.
406		 * Furthermore, on ws2012 and ws2012r2, there are some
407		 * issues with udp checksum offload from Linux guests.
408		 * (these are host issues).
409		 * For now compute the checksum here.
410		 */
411		struct udphdr *uh;
412		u16 udp_len;
413
414		ret = skb_cow_head(skb, 0);
415		if (ret)
416			goto drop;
417
418		uh = udp_hdr(skb);
419		udp_len = ntohs(uh->len);
420		uh->check = 0;
421		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
422					      ip_hdr(skb)->daddr,
423					      udp_len, IPPROTO_UDP,
424					      csum_partial(uh, udp_len, 0));
425		if (uh->check == 0)
426			uh->check = CSUM_MANGLED_0;
427
428		csum_info->transmit.udp_checksum = 0;
429	}
430	goto do_send;
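	/* Note on the software UDP checksum above: csum_partial() accumulates a
	 * one's-complement sum over the UDP header and payload, and
	 * csum_tcpudp_magic() folds in the IPv4 pseudo-header (addresses,
	 * length, IPPROTO_UDP) and complements the result. A computed value of
	 * zero is transmitted as CSUM_MANGLED_0 (0xffff), since an all-zero
	 * UDP checksum field means "no checksum".
	 */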
431
432do_lso:
433	rndis_msg_size += NDIS_LSO_PPI_SIZE;
434	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
435			    TCP_LARGESEND_PKTINFO);
436
437	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
438			ppi->ppi_offset);
439
440	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
441	if (net_trans_info & (INFO_IPV4 << 16)) {
442		lso_info->lso_v2_transmit.ip_version =
443			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
444		ip_hdr(skb)->tot_len = 0;
445		ip_hdr(skb)->check = 0;
446		tcp_hdr(skb)->check =
447		~csum_tcpudp_magic(ip_hdr(skb)->saddr,
448				   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
449	} else {
450		lso_info->lso_v2_transmit.ip_version =
451			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
452		ipv6_hdr(skb)->payload_len = 0;
453		tcp_hdr(skb)->check =
454		~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
455				&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
456	}
457	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
458	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
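	/* Note on the LSO setup above: the host performs the segmentation, so
	 * tcp_hdr(skb)->check is seeded with only the pseudo-header sum
	 * (length field zero). csum_tcpudp_magic()/csum_ipv6_magic() return
	 * the complemented sum, hence the '~' to hand over the un-complemented
	 * value that the host extends with each segment's length and payload.
	 */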
459
460do_send:
461	/* Start filling in the page buffers with the rndis hdr */
462	rndis_msg->msg_len += rndis_msg_size;
 
463	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
464					skb, &packet->page_buf[0]);
465
466	ret = netvsc_send(net_device_ctx->device_ctx, packet);
467
468drop:
469	if (ret == 0) {
470		net->stats.tx_bytes += skb->len;
471		net->stats.tx_packets++;
472	} else {
473		kfree(packet);
474		if (ret != -EAGAIN) {
475			dev_kfree_skb_any(skb);
476			net->stats.tx_dropped++;
477		}
478	}
479
480	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
481}
482
483/*
484 * netvsc_linkstatus_callback - Link up/down notification
485 */
486void netvsc_linkstatus_callback(struct hv_device *device_obj,
487				       unsigned int status)
488{
 
489	struct net_device *net;
490	struct net_device_context *ndev_ctx;
491	struct netvsc_device *net_device;
492	struct rndis_device *rdev;
493
494	net_device = hv_get_drvdata(device_obj);
495	rdev = net_device->extension;
496
497	rdev->link_state = status != 1;
 
498
499	net = net_device->ndev;
500
501	if (!net || net->reg_state != NETREG_REGISTERED)
502		return;
 
503
504	ndev_ctx = netdev_priv(net);
505	if (status == 1) {
506		schedule_delayed_work(&ndev_ctx->dwork, 0);
507		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
508	} else {
509		schedule_delayed_work(&ndev_ctx->dwork, 0);
510	}
511}
512
513/*
514 * netvsc_recv_callback -  Callback when we receive a packet from the
515 * "wire" on the specified device.
516 */
517int netvsc_recv_callback(struct hv_device *device_obj,
518				struct hv_netvsc_packet *packet,
519				struct ndis_tcp_ip_checksum_info *csum_info)
520{
521	struct net_device *net;
522	struct sk_buff *skb;
 
523
524	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
525	if (!net || net->reg_state != NETREG_REGISTERED) {
526		packet->status = NVSP_STAT_FAIL;
527		return 0;
528	}
529
530	/* Allocate a skb - TODO direct I/O to pages? */
531	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
532	if (unlikely(!skb)) {
533		++net->stats.rx_dropped;
534		packet->status = NVSP_STAT_FAIL;
535		return 0;
536	}
537
538	/*
 539	 * Copy to skb. This copy is needed here since the memory pointed to by
540	 * hv_netvsc_packet cannot be deallocated
 
541	 */
542	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
543		packet->total_data_buflen);
544
545	skb->protocol = eth_type_trans(skb, net);
546	if (csum_info) {
547		/* We only look at the IP checksum here.
548		 * Should we be dropping the packet if checksum
549		 * failed? How do we deal with other checksums - TCP/UDP?
550		 */
551		if (csum_info->receive.ip_checksum_succeeded)
552			skb->ip_summed = CHECKSUM_UNNECESSARY;
553		else
554			skb->ip_summed = CHECKSUM_NONE;
555	}
556
557	if (packet->vlan_tci & VLAN_TAG_PRESENT)
558		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
559				       packet->vlan_tci);
560
561	net->stats.rx_packets++;
562	net->stats.rx_bytes += packet->total_data_buflen;
563
564	/*
565	 * Pass the skb back up. Network stack will deallocate the skb when it
566	 * is done.
567	 * TODO - use NAPI?
568	 */
569	netif_rx(skb);
 
570
571	return 0;
572}
573
574static void netvsc_get_drvinfo(struct net_device *net,
575			       struct ethtool_drvinfo *info)
576{
577	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
578	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
579}
580
581static int netvsc_change_mtu(struct net_device *ndev, int mtu)
582{
583	struct net_device_context *ndevctx = netdev_priv(ndev);
584	struct hv_device *hdev =  ndevctx->device_ctx;
585	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
586	struct netvsc_device_info device_info;
587	int limit = ETH_DATA_LEN;
 
588
589	if (nvdev == NULL || nvdev->destroy)
590		return -ENODEV;
591
592	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
593		limit = NETVSC_MTU;
 
594
595	if (mtu < 68 || mtu > limit)
596		return -EINVAL;
597
598	nvdev->start_remove = true;
599	cancel_work_sync(&ndevctx->work);
600	netif_tx_disable(ndev);
601	rndis_filter_device_remove(hdev);
602
603	ndev->mtu = mtu;
604
605	ndevctx->device_ctx = hdev;
606	hv_set_drvdata(hdev, ndev);
607	device_info.ring_size = ring_size;
608	rndis_filter_device_add(hdev, &device_info);
609	netif_wake_queue(ndev);
610
611	return 0;
612}
613
614
615static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
616{
617	struct net_device_context *ndevctx = netdev_priv(ndev);
618	struct hv_device *hdev =  ndevctx->device_ctx;
619	struct sockaddr *addr = p;
620	char save_adr[ETH_ALEN];
621	unsigned char save_aatype;
622	int err;
623
624	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
625	save_aatype = ndev->addr_assign_type;
626
627	err = eth_mac_addr(ndev, p);
628	if (err != 0)
629		return err;
630
631	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
632	if (err != 0) {
633		/* roll back to saved MAC */
634		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
635		ndev->addr_assign_type = save_aatype;
636	}
637
638	return err;
639}
640
641
642static const struct ethtool_ops ethtool_ops = {
643	.get_drvinfo	= netvsc_get_drvinfo,
644	.get_link	= ethtool_op_get_link,
645};
646
647static const struct net_device_ops device_ops = {
648	.ndo_open =			netvsc_open,
649	.ndo_stop =			netvsc_close,
650	.ndo_start_xmit =		netvsc_start_xmit,
651	.ndo_set_rx_mode =		netvsc_set_multicast_list,
652	.ndo_change_mtu =		netvsc_change_mtu,
653	.ndo_validate_addr =		eth_validate_addr,
654	.ndo_set_mac_address =		netvsc_set_mac_addr,
655};
656
657/*
658 * Send GARP packet to network peers after migrations.
659 * After Quick Migration, the network is not immediately operational in the
660 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
661 * another netif_notify_peers() into a delayed work, otherwise GARP packet
662 * will not be sent after quick migration, and cause network disconnection.
663 * Also, we update the carrier status here.
664 */
665static void netvsc_link_change(struct work_struct *w)
666{
667	struct net_device_context *ndev_ctx;
668	struct net_device *net;
669	struct netvsc_device *net_device;
670	struct rndis_device *rdev;
671	bool notify;
672
673	rtnl_lock();
674
675	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
676	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
677	rdev = net_device->extension;
678	net = net_device->ndev;
679
680	if (rdev->link_state) {
681		netif_carrier_off(net);
682		notify = false;
683	} else {
684		netif_carrier_on(net);
685		notify = true;
686	}
687
688	rtnl_unlock();
689
690	if (notify)
691		netdev_notify_peers(net);
692}
693
694
695static int netvsc_probe(struct hv_device *dev,
696			const struct hv_vmbus_device_id *dev_id)
697{
698	struct net_device *net = NULL;
699	struct net_device_context *net_device_ctx;
700	struct netvsc_device_info device_info;
 
701	int ret;
702
703	net = alloc_etherdev(sizeof(struct net_device_context));
 
704	if (!net)
705		return -ENOMEM;
706
707	netif_carrier_off(net);
708
709	net_device_ctx = netdev_priv(net);
710	net_device_ctx->device_ctx = dev;
711	hv_set_drvdata(dev, net);
712	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
713	INIT_WORK(&net_device_ctx->work, do_set_multicast);
714
715	net->netdev_ops = &device_ops;
716
717	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
718				NETIF_F_TSO;
719	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
720			NETIF_F_IP_CSUM | NETIF_F_TSO;
721
722	SET_ETHTOOL_OPS(net, &ethtool_ops);
723	SET_NETDEV_DEV(net, &dev->device);
724
725	/* Notify the netvsc driver of the new device */
 
726	device_info.ring_size = ring_size;
 
727	ret = rndis_filter_device_add(dev, &device_info);
728	if (ret != 0) {
729		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
730		free_netdev(net);
731		hv_set_drvdata(dev, NULL);
732		return ret;
733	}
734	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
735
736	ret = register_netdev(net);
737	if (ret != 0) {
738		pr_err("Unable to register netdev.\n");
739		rndis_filter_device_remove(dev);
740		free_netdev(net);
741	} else {
742		schedule_delayed_work(&net_device_ctx->dwork, 0);
743	}
744
745	return ret;
746}
747
748static int netvsc_remove(struct hv_device *dev)
749{
750	struct net_device *net;
751	struct net_device_context *ndev_ctx;
752	struct netvsc_device *net_device;
753
754	net_device = hv_get_drvdata(dev);
755	net = net_device->ndev;
756
757	if (net == NULL) {
758		dev_err(&dev->device, "No net device to remove\n");
759		return 0;
760	}
761
762	net_device->start_remove = true;
763
764	ndev_ctx = netdev_priv(net);
765	cancel_delayed_work_sync(&ndev_ctx->dwork);
766	cancel_work_sync(&ndev_ctx->work);
767
768	/* Stop outbound asap */
769	netif_tx_disable(net);
770
771	unregister_netdev(net);
772
773	/*
774	 * Call to the vsc driver to let it know that the device is being
775	 * removed
776	 */
777	rndis_filter_device_remove(dev);
778
779	free_netdev(net);
780	return 0;
781}
782
783static const struct hv_vmbus_device_id id_table[] = {
784	/* Network guid */
785	{ HV_NIC_GUID, },
786	{ },
787};
788
789MODULE_DEVICE_TABLE(vmbus, id_table);
790
791/* The one and only one */
792static struct  hv_driver netvsc_drv = {
793	.name = KBUILD_MODNAME,
794	.id_table = id_table,
795	.probe = netvsc_probe,
796	.remove = netvsc_remove,
797};
798
799static void __exit netvsc_drv_exit(void)
800{
 
801	vmbus_driver_unregister(&netvsc_drv);
802}
803
804static int __init netvsc_drv_init(void)
805{
806	if (ring_size < RING_SIZE_MIN) {
807		ring_size = RING_SIZE_MIN;
808		pr_info("Increased ring_size to %d (min allowed)\n",
809			ring_size);
810	}
811	return vmbus_driver_register(&netvsc_drv);
812}
813
814MODULE_LICENSE("GPL");
815MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
816
817module_init(netvsc_drv_init);
818module_exit(netvsc_drv_exit);
v4.10.11
   1/*
   2 * Copyright (c) 2009, Microsoft Corporation.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms and conditions of the GNU General Public License,
   6 * version 2, as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope it will be useful, but WITHOUT
   9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  11 * more details.
  12 *
  13 * You should have received a copy of the GNU General Public License along with
  14 * this program; if not, see <http://www.gnu.org/licenses/>.
  15 *
  16 * Authors:
  17 *   Haiyang Zhang <haiyangz@microsoft.com>
  18 *   Hank Janssen  <hjanssen@microsoft.com>
  19 */
  20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21
  22#include <linux/init.h>
  23#include <linux/atomic.h>
  24#include <linux/module.h>
  25#include <linux/highmem.h>
  26#include <linux/device.h>
  27#include <linux/io.h>
  28#include <linux/delay.h>
  29#include <linux/netdevice.h>
  30#include <linux/inetdevice.h>
  31#include <linux/etherdevice.h>
  32#include <linux/skbuff.h>
  33#include <linux/if_vlan.h>
  34#include <linux/in.h>
  35#include <linux/slab.h>
  36#include <net/arp.h>
  37#include <net/route.h>
  38#include <net/sock.h>
  39#include <net/pkt_sched.h>
  40
  41#include "hyperv_net.h"
  42
  43#define RING_SIZE_MIN 64
  44#define LINKCHANGE_INT (2 * HZ)
  45#define NETVSC_HW_FEATURES	(NETIF_F_RXCSUM | \
  46				 NETIF_F_SG | \
  47				 NETIF_F_TSO | \
  48				 NETIF_F_TSO6 | \
  49				 NETIF_F_HW_CSUM)
  50
  51/* Restrict GSO size to account for NVGRE */
  52#define NETVSC_GSO_MAX_SIZE	62768
  53
  54static int ring_size = 128;
  55module_param(ring_size, int, S_IRUGO);
  56MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
  57
  58static int max_num_vrss_chns = 8;
  59
  60static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
  61				NETIF_MSG_LINK | NETIF_MSG_IFUP |
  62				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
  63				NETIF_MSG_TX_ERR;
  64
  65static int debug = -1;
  66module_param(debug, int, S_IRUGO);
  67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  68
  69static void do_set_multicast(struct work_struct *w)
  70{
  71	struct net_device_context *ndevctx =
  72		container_of(w, struct net_device_context, work);
  73	struct hv_device *device_obj = ndevctx->device_ctx;
  74	struct net_device *ndev = hv_get_drvdata(device_obj);
  75	struct netvsc_device *nvdev = ndevctx->nvdev;
  76	struct rndis_device *rdev;
  77
  78	if (!nvdev)
 
  79		return;
  80
  81	rdev = nvdev->extension;
  82	if (rdev == NULL)
  83		return;
  84
  85	if (ndev->flags & IFF_PROMISC)
  86		rndis_filter_set_packet_filter(rdev,
  87			NDIS_PACKET_TYPE_PROMISCUOUS);
  88	else
  89		rndis_filter_set_packet_filter(rdev,
  90			NDIS_PACKET_TYPE_BROADCAST |
  91			NDIS_PACKET_TYPE_ALL_MULTICAST |
  92			NDIS_PACKET_TYPE_DIRECTED);
  93}
  94
  95static void netvsc_set_multicast_list(struct net_device *net)
  96{
  97	struct net_device_context *net_device_ctx = netdev_priv(net);
  98
  99	schedule_work(&net_device_ctx->work);
 100}
 101
 102static int netvsc_open(struct net_device *net)
 103{
 104	struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
 105	struct rndis_device *rdev;
 106	int ret = 0;
 107
 108	netif_carrier_off(net);
 109
 110	/* Open up the device */
 111	ret = rndis_filter_open(nvdev);
 112	if (ret != 0) {
 113		netdev_err(net, "unable to open device (ret %d).\n", ret);
 114		return ret;
 115	}
 116
 117	netif_tx_wake_all_queues(net);
 118
 
 119	rdev = nvdev->extension;
 120	if (!rdev->link_state)
 121		netif_carrier_on(net);
 122
 123	return ret;
 124}
 125
 126static int netvsc_close(struct net_device *net)
 127{
 128	struct net_device_context *net_device_ctx = netdev_priv(net);
 129	struct netvsc_device *nvdev = net_device_ctx->nvdev;
 130	int ret;
 131	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
 132	struct vmbus_channel *chn;
 133
 134	netif_tx_disable(net);
 135
 136	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
 137	cancel_work_sync(&net_device_ctx->work);
 138	ret = rndis_filter_close(nvdev);
 139	if (ret != 0) {
 140		netdev_err(net, "unable to close device (ret %d).\n", ret);
 141		return ret;
 142	}
 143
 144	/* Ensure pending bytes in ring are read */
 145	while (true) {
 146		aread = 0;
 147		for (i = 0; i < nvdev->num_chn; i++) {
 148			chn = nvdev->chn_table[i];
 149			if (!chn)
 150				continue;
 151
 152			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
 153						     &awrite);
 154
 155			if (aread)
 156				break;
 157
 158			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
 159						     &awrite);
 160
 161			if (aread)
 162				break;
 163		}
 164
 165		retry++;
 166		if (retry > retry_max || aread == 0)
 167			break;
 168
 169		msleep(msec);
 170
 171		if (msec < 1000)
 172			msec *= 2;
 173	}
 174
 175	if (aread) {
 176		netdev_err(net, "Ring buffer not empty after closing rndis\n");
 177		ret = -ETIMEDOUT;
 178	}
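	/* The drain loop above backs off roughly exponentially, from 10 ms up
	 * to a little over a second per retry, for at most 20 retries (on the
	 * order of 15-20 seconds in total) before giving up with -ETIMEDOUT.
	 */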
 179
 180	return ret;
 181}
 182
 183static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
 184				int pkt_type)
 185{
 186	struct rndis_packet *rndis_pkt;
 187	struct rndis_per_packet_info *ppi;
 188
 189	rndis_pkt = &msg->msg.pkt;
 190	rndis_pkt->data_offset += ppi_size;
 191
 192	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
 193		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
 194
 195	ppi->size = ppi_size;
 196	ppi->type = pkt_type;
 197	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
 198
 199	rndis_pkt->per_pkt_info_len += ppi_size;
 200
 201	return ppi;
 202}
 203
 204static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 205			void *accel_priv, select_queue_fallback_t fallback)
 206{
 207	struct net_device_context *net_device_ctx = netdev_priv(ndev);
 208	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
 209	u32 hash;
 210	u16 q_idx = 0;
 211
 212	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
 213		return 0;
 214
 215	hash = skb_get_hash(skb);
 216	q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
 217		ndev->real_num_tx_queues;
 218
 219	if (!nvsc_dev->chn_table[q_idx])
 220		q_idx = 0;
 221
 222	return q_idx;
 223}
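/* Worked example for netvsc_select_queue() above (illustrative): with four
 * real TX queues and a send_table[] entry of 7 for this flow's hash, the
 * packet maps to q_idx = 7 % 4 = 3; if channel 3 has not been opened yet
 * (chn_table[3] == NULL), the driver falls back to queue 0.
 */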
 224
 225static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
 226			struct hv_page_buffer *pb)
 227{
 228	int j = 0;
 229
  230	/* Deal with compound pages by ignoring unused part
 231	 * of the page.
 232	 */
 233	page += (offset >> PAGE_SHIFT);
 234	offset &= ~PAGE_MASK;
 235
 236	while (len > 0) {
 237		unsigned long bytes;
 238
 239		bytes = PAGE_SIZE - offset;
 240		if (bytes > len)
 241			bytes = len;
 242		pb[j].pfn = page_to_pfn(page);
 243		pb[j].offset = offset;
 244		pb[j].len = bytes;
 245
 246		offset += bytes;
 247		len -= bytes;
 248
 249		if (offset == PAGE_SIZE && len) {
 250			page++;
 251			offset = 0;
 252			j++;
 253		}
 254	}
 255
 256	return j + 1;
 257}
 258
 259static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
 260			   struct hv_netvsc_packet *packet,
 261			   struct hv_page_buffer **page_buf)
 262{
 263	struct hv_page_buffer *pb = *page_buf;
 264	u32 slots_used = 0;
 265	char *data = skb->data;
 266	int frags = skb_shinfo(skb)->nr_frags;
 267	int i;
 268
 269	/* The packet is laid out thus:
 270	 * 1. hdr: RNDIS header and PPI
 271	 * 2. skb linear data
 272	 * 3. skb fragment data
 273	 */
 274	if (hdr != NULL)
 275		slots_used += fill_pg_buf(virt_to_page(hdr),
 276					offset_in_page(hdr),
 277					len, &pb[slots_used]);
 278
 279	packet->rmsg_size = len;
 280	packet->rmsg_pgcnt = slots_used;
 281
 282	slots_used += fill_pg_buf(virt_to_page(data),
 283				offset_in_page(data),
 284				skb_headlen(skb), &pb[slots_used]);
 285
 286	for (i = 0; i < frags; i++) {
 287		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 288
 289		slots_used += fill_pg_buf(skb_frag_page(frag),
 290					frag->page_offset,
 291					skb_frag_size(frag), &pb[slots_used]);
 292	}
 293	return slots_used;
 294}
 295
 296static int count_skb_frag_slots(struct sk_buff *skb)
 297{
 298	int i, frags = skb_shinfo(skb)->nr_frags;
 299	int pages = 0;
 300
 301	for (i = 0; i < frags; i++) {
 302		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 303		unsigned long size = skb_frag_size(frag);
 304		unsigned long offset = frag->page_offset;
 305
 306		/* Skip unused frames from start of page */
 307		offset &= ~PAGE_MASK;
 308		pages += PFN_UP(offset + size);
 309	}
 310	return pages;
 311}
 312
 313static int netvsc_get_slots(struct sk_buff *skb)
 314{
 315	char *data = skb->data;
 316	unsigned int offset = offset_in_page(data);
 317	unsigned int len = skb_headlen(skb);
 318	int slots;
 319	int frag_slots;
 320
 321	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
 322	frag_slots = count_skb_frag_slots(skb);
 323	return slots + frag_slots;
 324}
 325
 326static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
 327{
 328	u32 ret_val = TRANSPORT_INFO_NOT_IP;
 329
 330	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
 331		(eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
 332		goto not_ip;
 333	}
 334
 335	*trans_off = skb_transport_offset(skb);
 336
 337	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
 338		struct iphdr *iphdr = ip_hdr(skb);
 339
 340		if (iphdr->protocol == IPPROTO_TCP)
 341			ret_val = TRANSPORT_INFO_IPV4_TCP;
 342		else if (iphdr->protocol == IPPROTO_UDP)
 343			ret_val = TRANSPORT_INFO_IPV4_UDP;
 344	} else {
 345		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 346			ret_val = TRANSPORT_INFO_IPV6_TCP;
 347		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
 348			ret_val = TRANSPORT_INFO_IPV6_UDP;
 349	}
 350
 351not_ip:
 352	return ret_val;
 353}
 354
 355static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 356{
 357	struct net_device_context *net_device_ctx = netdev_priv(net);
 358	struct hv_netvsc_packet *packet = NULL;
 359	int ret;
 360	unsigned int num_data_pgs;
 361	struct rndis_message *rndis_msg;
 362	struct rndis_packet *rndis_pkt;
 363	u32 rndis_msg_size;
 
 364	struct rndis_per_packet_info *ppi;
 365	struct ndis_tcp_ip_checksum_info *csum_info;
 
 366	int  hdr_offset;
 367	u32 net_trans_info;
 368	u32 hash;
 369	u32 skb_length;
 370	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
 371	struct hv_page_buffer *pb = page_buf;
 372
  373	/* We will at most need two pages to describe the rndis
 374	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
 375	 * of pages in a single packet. If skb is scattered around
 376	 * more pages we try linearizing it.
 377	 */
 378
 379	skb_length = skb->len;
 380	num_data_pgs = netvsc_get_slots(skb) + 2;
 381
 382	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
 383		++net_device_ctx->eth_stats.tx_scattered;
 384
 385		if (skb_linearize(skb))
 386			goto no_memory;
 387
 388		num_data_pgs = netvsc_get_slots(skb) + 2;
 389		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
 390			++net_device_ctx->eth_stats.tx_too_big;
 391			goto drop;
 392		}
 393	}
 394
 395	/*
 396	 * Place the rndis header in the skb head room and
 397	 * the skb->cb will be used for hv_netvsc_packet
 398	 * structure.
 399	 */
 400	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
 401	if (ret)
 402		goto no_memory;
 403
 404	/* Use the skb control buffer for building up the packet */
 405	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
 406			FIELD_SIZEOF(struct sk_buff, cb));
 407	packet = (struct hv_netvsc_packet *)skb->cb;
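	/* skb->cb is the 48-byte control buffer private to the current owner
	 * of the skb; the BUILD_BUG_ON above refuses to build if
	 * struct hv_netvsc_packet ever outgrows it, making this reuse safe.
	 */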
 408
 409	packet->q_idx = skb_get_queue_mapping(skb);
 410
 
 411	packet->total_data_buflen = skb->len;
 412
 413	rndis_msg = (struct rndis_message *)skb->head;
 414
 415	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);
 416
 417	/* Add the rndis header */
 
 418	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
 419	rndis_msg->msg_len = packet->total_data_buflen;
 420	rndis_pkt = &rndis_msg->msg.pkt;
 421	rndis_pkt->data_offset = sizeof(struct rndis_packet);
 422	rndis_pkt->data_len = packet->total_data_buflen;
 423	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
 424
 425	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
 426
 427	hash = skb_get_hash_raw(skb);
 428	if (hash != 0 && net->real_num_tx_queues > 1) {
 429		rndis_msg_size += NDIS_HASH_PPI_SIZE;
 430		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
 431				    NBL_HASH_VALUE);
 432		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
 433	}
 434
 435	if (skb_vlan_tag_present(skb)) {
 436		struct ndis_pkt_8021q_info *vlan;
 437
 438		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
 439		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
 440					IEEE_8021Q_INFO);
 441		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
 442						ppi->ppi_offset);
 443		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
 444		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
 445				VLAN_PRIO_SHIFT;
 446	}
 447
 448	net_trans_info = get_net_transport_info(skb, &hdr_offset);
 449
 450	/*
 451	 * Setup the sendside checksum offload only if this is not a
 452	 * GSO packet.
 453	 */
 454	if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
 455		struct ndis_tcp_lso_info *lso_info;
 456
 457		rndis_msg_size += NDIS_LSO_PPI_SIZE;
 458		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
 459				    TCP_LARGESEND_PKTINFO);
 460
 461		lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
 462							ppi->ppi_offset);
 463
 464		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
 465		if (net_trans_info & (INFO_IPV4 << 16)) {
 466			lso_info->lso_v2_transmit.ip_version =
 467				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
 468			ip_hdr(skb)->tot_len = 0;
 469			ip_hdr(skb)->check = 0;
 470			tcp_hdr(skb)->check =
 471				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 472						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
 473		} else {
 474			lso_info->lso_v2_transmit.ip_version =
 475				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
 476			ipv6_hdr(skb)->payload_len = 0;
 477			tcp_hdr(skb)->check =
 478				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 479						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
 480		}
 481		lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
 482		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
 483	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 484		if (net_trans_info & INFO_TCP) {
 485			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
 486			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
 487					    TCPIP_CHKSUM_PKTINFO);
 488
 489			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
 490									 ppi->ppi_offset);
 491
 492			if (net_trans_info & (INFO_IPV4 << 16))
 493				csum_info->transmit.is_ipv4 = 1;
 494			else
 495				csum_info->transmit.is_ipv6 = 1;
 496
 497			csum_info->transmit.tcp_checksum = 1;
 498			csum_info->transmit.tcp_header_offset = hdr_offset;
 499		} else {
 500			/* UDP checksum (and other) offload is not supported. */
 501			if (skb_checksum_help(skb))
 502				goto drop;
 503		}
 504	}
 505
 
 506	/* Start filling in the page buffers with the rndis hdr */
 507	rndis_msg->msg_len += rndis_msg_size;
 508	packet->total_data_buflen = rndis_msg->msg_len;
 509	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
 510					       skb, packet, &pb);
 511
 512	/* timestamp packet in software */
 513	skb_tx_timestamp(skb);
 514	ret = netvsc_send(net_device_ctx->device_ctx, packet,
 515			  rndis_msg, &pb, skb);
 516	if (likely(ret == 0)) {
 517		struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
 518
 519		u64_stats_update_begin(&tx_stats->syncp);
 520		tx_stats->packets++;
 521		tx_stats->bytes += skb_length;
 522		u64_stats_update_end(&tx_stats->syncp);
 523		return NETDEV_TX_OK;
 524	}
 525
 526	if (ret == -EAGAIN) {
 527		++net_device_ctx->eth_stats.tx_busy;
 528		return NETDEV_TX_BUSY;
 529	}
 530
 531	if (ret == -ENOSPC)
 532		++net_device_ctx->eth_stats.tx_no_space;
 533
 534drop:
 535	dev_kfree_skb_any(skb);
 536	net->stats.tx_dropped++;
 537
 538	return NETDEV_TX_OK;
 539
 540no_memory:
 541	++net_device_ctx->eth_stats.tx_no_memory;
 542	goto drop;
 543}
 544
 545/*
 546 * netvsc_linkstatus_callback - Link up/down notification
 547 */
 548void netvsc_linkstatus_callback(struct hv_device *device_obj,
 549				struct rndis_message *resp)
 550{
 551	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
 552	struct net_device *net;
 553	struct net_device_context *ndev_ctx;
 554	struct netvsc_reconfig *event;
 555	unsigned long flags;
 556
 557	net = hv_get_drvdata(device_obj);
 
 558
 559	if (!net)
 560		return;
 561
 562	ndev_ctx = netdev_priv(net);
 563
 564	/* Update the physical link speed when changing to another vSwitch */
 565	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
 566		u32 speed;
 567
 568		speed = *(u32 *)((void *)indicate + indicate->
 569				 status_buf_offset) / 10000;
 570		ndev_ctx->speed = speed;
 571		return;
 572	}
 573
 574	/* Handle these link change statuses below */
 575	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
 576	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
 577	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
 578		return;
 579
 580	if (net->reg_state != NETREG_REGISTERED)
 581		return;
 582
 583	event = kzalloc(sizeof(*event), GFP_ATOMIC);
 584	if (!event)
 585		return;
 586	event->event = indicate->status;
 587
 588	spin_lock_irqsave(&ndev_ctx->lock, flags);
 589	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
 590	spin_unlock_irqrestore(&ndev_ctx->lock, flags);
 591
 592	schedule_delayed_work(&ndev_ctx->dwork, 0);
 593}
 594
 595static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
 596				struct hv_netvsc_packet *packet,
 597				struct ndis_tcp_ip_checksum_info *csum_info,
 598				void *data, u16 vlan_tci)
 599{
 600	struct sk_buff *skb;
 601
 602	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
 603	if (!skb)
 604		return skb;
 605
 606	/*
  607	 * Copy to skb. This copy is needed here since the memory pointed to by
 608	 * hv_netvsc_packet cannot be deallocated
 609	 */
 610	memcpy(skb_put(skb, packet->total_data_buflen), data,
 611	       packet->total_data_buflen);
 612
 613	skb->protocol = eth_type_trans(skb, net);
 614
 615	/* skb is already created with CHECKSUM_NONE */
 616	skb_checksum_none_assert(skb);
 617
 618	/*
 619	 * In Linux, the IP checksum is always checked.
 620	 * Do L4 checksum offload if enabled and present.
 621	 */
 622	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
 623		if (csum_info->receive.tcp_checksum_succeeded ||
 624		    csum_info->receive.udp_checksum_succeeded)
 625			skb->ip_summed = CHECKSUM_UNNECESSARY;
 626	}
 627
 628	if (vlan_tci & VLAN_TAG_PRESENT)
 629		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 630				       vlan_tci);
 631
 632	return skb;
 633}
 634
 635/*
 636 * netvsc_recv_callback -  Callback when we receive a packet from the
 637 * "wire" on the specified device.
 638 */
 639int netvsc_recv_callback(struct hv_device *device_obj,
 640				struct hv_netvsc_packet *packet,
 641				void **data,
 642				struct ndis_tcp_ip_checksum_info *csum_info,
 643				struct vmbus_channel *channel,
 644				u16 vlan_tci)
 645{
 646	struct net_device *net = hv_get_drvdata(device_obj);
 647	struct net_device_context *net_device_ctx = netdev_priv(net);
 648	struct net_device *vf_netdev;
 649	struct sk_buff *skb;
 650	struct netvsc_stats *rx_stats;
 651
 652	if (net->reg_state != NETREG_REGISTERED)
 653		return NVSP_STAT_FAIL;
 654
 655	/*
 656	 * If necessary, inject this packet into the VF interface.
  657	 * On Hyper-V, multicast and broadcast packets are only delivered
 658	 * to the synthetic interface (after subjecting these to
 659	 * policy filters on the host). Deliver these via the VF
 660	 * interface in the guest.
 661	 */
 662	rcu_read_lock();
 663	vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
 664	if (vf_netdev && (vf_netdev->flags & IFF_UP))
 665		net = vf_netdev;
 666
 667	/* Allocate a skb - TODO direct I/O to pages? */
 668	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
 669	if (unlikely(!skb)) {
 670		++net->stats.rx_dropped;
 671		rcu_read_unlock();
 672		return NVSP_STAT_FAIL;
 673	}
 674
 675	if (net != vf_netdev)
 676		skb_record_rx_queue(skb,
 677				    channel->offermsg.offer.sub_channel_index);
 678
 679	/*
 680	 * Even if injecting the packet, record the statistics
 681	 * on the synthetic device because modifying the VF device
 682	 * statistics will not work correctly.
 683	 */
 684	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
 685	u64_stats_update_begin(&rx_stats->syncp);
 686	rx_stats->packets++;
 687	rx_stats->bytes += packet->total_data_buflen;
 688
 689	if (skb->pkt_type == PACKET_BROADCAST)
 690		++rx_stats->broadcast;
 691	else if (skb->pkt_type == PACKET_MULTICAST)
 692		++rx_stats->multicast;
 693	u64_stats_update_end(&rx_stats->syncp);
 694
 695	/*
 696	 * Pass the skb back up. Network stack will deallocate the skb when it
 697	 * is done.
 698	 * TODO - use NAPI?
 699	 */
 700	netif_rx(skb);
 701	rcu_read_unlock();
 702
 703	return 0;
 704}
 705
 706static void netvsc_get_drvinfo(struct net_device *net,
 707			       struct ethtool_drvinfo *info)
 708{
 709	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
 710	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
 711}
 712
 713static void netvsc_get_channels(struct net_device *net,
 714				struct ethtool_channels *channel)
 715{
 716	struct net_device_context *net_device_ctx = netdev_priv(net);
 717	struct netvsc_device *nvdev = net_device_ctx->nvdev;
 718
 719	if (nvdev) {
 720		channel->max_combined	= nvdev->max_chn;
 721		channel->combined_count = nvdev->num_chn;
 722	}
 723}
 724
 725static int netvsc_set_channels(struct net_device *net,
 726			       struct ethtool_channels *channels)
 727{
 728	struct net_device_context *net_device_ctx = netdev_priv(net);
 729	struct hv_device *dev = net_device_ctx->device_ctx;
 730	struct netvsc_device *nvdev = net_device_ctx->nvdev;
 731	struct netvsc_device_info device_info;
 732	u32 num_chn;
 733	u32 max_chn;
 734	int ret = 0;
 735	bool recovering = false;
 736
 737	if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
 738		return -ENODEV;
 739
 740	num_chn = nvdev->num_chn;
 741	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
 742
 743	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
 744		pr_info("vRSS unsupported before NVSP Version 5\n");
 745		return -EINVAL;
 746	}
 747
 748	/* We do not support rx, tx, or other */
 749	if (!channels ||
 750	    channels->rx_count ||
 751	    channels->tx_count ||
 752	    channels->other_count ||
 753	    (channels->combined_count < 1))
 754		return -EINVAL;
 755
 756	if (channels->combined_count > max_chn) {
 757		pr_info("combined channels too high, using %d\n", max_chn);
 758		channels->combined_count = max_chn;
 759	}
 760
 761	ret = netvsc_close(net);
 762	if (ret)
 763		goto out;
 764
 765 do_set:
 766	net_device_ctx->start_remove = true;
 767	rndis_filter_device_remove(dev);
 768
 769	nvdev->num_chn = channels->combined_count;
 770
 771	memset(&device_info, 0, sizeof(device_info));
 772	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
 773	device_info.ring_size = ring_size;
 774	device_info.max_num_vrss_chns = max_num_vrss_chns;
 775
 776	ret = rndis_filter_device_add(dev, &device_info);
 777	if (ret) {
 778		if (recovering) {
 779			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
 780			return ret;
 781		}
 782		goto recover;
 783	}
 784
 785	nvdev = net_device_ctx->nvdev;
 786
 787	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
 788	if (ret) {
 789		if (recovering) {
 790			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
 791			return ret;
 792		}
 793		goto recover;
 794	}
 795
 796	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
 797	if (ret) {
 798		if (recovering) {
 799			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
 800			return ret;
 801		}
 802		goto recover;
 803	}
 804
 805 out:
 806	netvsc_open(net);
 807	net_device_ctx->start_remove = false;
 808	/* We may have missed link change notifications */
 809	schedule_delayed_work(&net_device_ctx->dwork, 0);
 810
 811	return ret;
 812
 813 recover:
 814	/* If the above failed, we attempt to recover through the same
 815	 * process but with the original number of channels.
 816	 */
 817	netdev_err(net, "could not set channels, recovering\n");
 818	recovering = true;
 819	channels->combined_count = num_chn;
 820	goto do_set;
 821}
 822
 823static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
 824{
 825	struct ethtool_cmd diff1 = *cmd;
 826	struct ethtool_cmd diff2 = {};
 827
 828	ethtool_cmd_speed_set(&diff1, 0);
 829	diff1.duplex = 0;
 830	/* advertising and cmd are usually set */
 831	diff1.advertising = 0;
 832	diff1.cmd = 0;
 833	/* We set port to PORT_OTHER */
 834	diff2.port = PORT_OTHER;
 835
 836	return !memcmp(&diff1, &diff2, sizeof(diff1));
 837}
 838
 839static void netvsc_init_settings(struct net_device *dev)
 840{
 841	struct net_device_context *ndc = netdev_priv(dev);
 842
 843	ndc->speed = SPEED_UNKNOWN;
 844	ndc->duplex = DUPLEX_UNKNOWN;
 845}
 846
 847static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 848{
 849	struct net_device_context *ndc = netdev_priv(dev);
 850
 851	ethtool_cmd_speed_set(cmd, ndc->speed);
 852	cmd->duplex = ndc->duplex;
 853	cmd->port = PORT_OTHER;
 854
 855	return 0;
 856}
 857
 858static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 859{
 860	struct net_device_context *ndc = netdev_priv(dev);
 861	u32 speed;
 862
 863	speed = ethtool_cmd_speed(cmd);
 864	if (!ethtool_validate_speed(speed) ||
 865	    !ethtool_validate_duplex(cmd->duplex) ||
 866	    !netvsc_validate_ethtool_ss_cmd(cmd))
 867		return -EINVAL;
 868
 869	ndc->speed = speed;
 870	ndc->duplex = cmd->duplex;
 871
 872	return 0;
 873}
 874
 875static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 876{
 877	struct net_device_context *ndevctx = netdev_priv(ndev);
 878	struct netvsc_device *nvdev = ndevctx->nvdev;
 879	struct hv_device *hdev = ndevctx->device_ctx;
 880	struct netvsc_device_info device_info;
 881	u32 num_chn;
 882	int ret = 0;
 883
 884	if (ndevctx->start_remove || !nvdev || nvdev->destroy)
 885		return -ENODEV;
 886
 887	ret = netvsc_close(ndev);
 888	if (ret)
 889		goto out;
 890
 891	num_chn = nvdev->num_chn;
 
 892
 893	ndevctx->start_remove = true;
 894	rndis_filter_device_remove(hdev);
 895
 896	ndev->mtu = mtu;
 897
 898	memset(&device_info, 0, sizeof(device_info));
 
 899	device_info.ring_size = ring_size;
 900	device_info.num_chn = num_chn;
 901	device_info.max_num_vrss_chns = max_num_vrss_chns;
 902	rndis_filter_device_add(hdev, &device_info);
 
 903
 904out:
 905	netvsc_open(ndev);
 906	ndevctx->start_remove = false;
 907
 908	/* We may have missed link change notifications */
 909	schedule_delayed_work(&ndevctx->dwork, 0);
 910
 911	return ret;
 912}
 913
 914static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
 915						    struct rtnl_link_stats64 *t)
 916{
 917	struct net_device_context *ndev_ctx = netdev_priv(net);
 918	int cpu;
 919
 920	for_each_possible_cpu(cpu) {
 921		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
 922							    cpu);
 923		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
 924							    cpu);
 925		u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
 926		unsigned int start;
 927
 928		do {
 929			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
 930			tx_packets = tx_stats->packets;
 931			tx_bytes = tx_stats->bytes;
 932		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
 933
 934		do {
 935			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
 936			rx_packets = rx_stats->packets;
 937			rx_bytes = rx_stats->bytes;
 938			rx_multicast = rx_stats->multicast + rx_stats->broadcast;
 939		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
 940
 941		t->tx_bytes	+= tx_bytes;
 942		t->tx_packets	+= tx_packets;
 943		t->rx_bytes	+= rx_bytes;
 944		t->rx_packets	+= rx_packets;
 945		t->multicast	+= rx_multicast;
 946	}
 947
 948	t->tx_dropped	= net->stats.tx_dropped;
 949	t->tx_errors	= net->stats.tx_dropped;
 950
 951	t->rx_dropped	= net->stats.rx_dropped;
 952	t->rx_errors	= net->stats.rx_errors;
 953
 954	return t;
 955}
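/* Note on netvsc_get_stats64() above: the per-CPU counters are read under the
 * u64_stats_fetch_begin_irq()/retry_irq() seqcount so that 64-bit values stay
 * consistent even on 32-bit hosts, and the totals are summed over all
 * possible CPUs.
 */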
 956
 957static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
 958{
 959	struct sockaddr *addr = p;
 960	char save_adr[ETH_ALEN];
 961	unsigned char save_aatype;
 962	int err;
 963
 964	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
 965	save_aatype = ndev->addr_assign_type;
 966
 967	err = eth_mac_addr(ndev, p);
 968	if (err != 0)
 969		return err;
 970
 971	err = rndis_filter_set_device_mac(ndev, addr->sa_data);
 972	if (err != 0) {
 973		/* roll back to saved MAC */
 974		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
 975		ndev->addr_assign_type = save_aatype;
 976	}
 977
 978	return err;
 979}
 980
 981static const struct {
 982	char name[ETH_GSTRING_LEN];
 983	u16 offset;
 984} netvsc_stats[] = {
 985	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
 986	{ "tx_no_memory",  offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
 987	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
 988	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
 989	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
 990};
 991
 992static int netvsc_get_sset_count(struct net_device *dev, int string_set)
 993{
 994	switch (string_set) {
 995	case ETH_SS_STATS:
 996		return ARRAY_SIZE(netvsc_stats);
 997	default:
 998		return -EINVAL;
 999	}
1000}
1001
1002static void netvsc_get_ethtool_stats(struct net_device *dev,
1003				     struct ethtool_stats *stats, u64 *data)
1004{
1005	struct net_device_context *ndc = netdev_priv(dev);
1006	const void *nds = &ndc->eth_stats;
1007	int i;
1008
1009	for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
1010		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
1011}
1012
1013static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1014{
1015	int i;
1016
1017	switch (stringset) {
1018	case ETH_SS_STATS:
1019		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
1020			memcpy(data + i * ETH_GSTRING_LEN,
1021			       netvsc_stats[i].name, ETH_GSTRING_LEN);
1022		break;
1023	}
1024}
1025
1026#ifdef CONFIG_NET_POLL_CONTROLLER
1027static void netvsc_poll_controller(struct net_device *net)
1028{
1029	/* As netvsc_start_xmit() works synchronous we don't have to
1030	 * trigger anything here.
1031	 */
1032}
1033#endif
1034
1035static const struct ethtool_ops ethtool_ops = {
1036	.get_drvinfo	= netvsc_get_drvinfo,
1037	.get_link	= ethtool_op_get_link,
1038	.get_ethtool_stats = netvsc_get_ethtool_stats,
1039	.get_sset_count = netvsc_get_sset_count,
1040	.get_strings	= netvsc_get_strings,
1041	.get_channels   = netvsc_get_channels,
1042	.set_channels   = netvsc_set_channels,
1043	.get_ts_info	= ethtool_op_get_ts_info,
1044	.get_settings	= netvsc_get_settings,
1045	.set_settings	= netvsc_set_settings,
1046};
1047
1048static const struct net_device_ops device_ops = {
1049	.ndo_open =			netvsc_open,
1050	.ndo_stop =			netvsc_close,
1051	.ndo_start_xmit =		netvsc_start_xmit,
1052	.ndo_set_rx_mode =		netvsc_set_multicast_list,
1053	.ndo_change_mtu =		netvsc_change_mtu,
1054	.ndo_validate_addr =		eth_validate_addr,
1055	.ndo_set_mac_address =		netvsc_set_mac_addr,
1056	.ndo_select_queue =		netvsc_select_queue,
1057	.ndo_get_stats64 =		netvsc_get_stats64,
1058#ifdef CONFIG_NET_POLL_CONTROLLER
1059	.ndo_poll_controller =		netvsc_poll_controller,
1060#endif
1061};
1062
1063/*
1064 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
1065 * down/up sequence. For RNDIS_STATUS_MEDIA_CONNECT, when the carrier is
1066 * already up, send a GARP packet to network peers via netdev_notify_peers().
1067 */
1068static void netvsc_link_change(struct work_struct *w)
1069{
1070	struct net_device_context *ndev_ctx =
1071		container_of(w, struct net_device_context, dwork.work);
1072	struct hv_device *device_obj = ndev_ctx->device_ctx;
1073	struct net_device *net = hv_get_drvdata(device_obj);
1074	struct netvsc_device *net_device;
1075	struct rndis_device *rdev;
1076	struct netvsc_reconfig *event = NULL;
1077	bool notify = false, reschedule = false;
1078	unsigned long flags, next_reconfig, delay;
1079
1080	rtnl_lock();
1081	if (ndev_ctx->start_remove)
1082		goto out_unlock;
1083
1084	net_device = ndev_ctx->nvdev;
1085	rdev = net_device->extension;
1086
1087	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
1088	if (time_is_after_jiffies(next_reconfig)) {
1089		/* link_watch only sends one notification with the current
1090		 * state per second; avoid doing reconfig more frequently.
1091		 * Handle jiffies wrap-around.
1092		 */
1093		delay = next_reconfig - jiffies;
1094		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
1095		schedule_delayed_work(&ndev_ctx->dwork, delay);
1096		goto out_unlock;
1097	}
1098	ndev_ctx->last_reconfig = jiffies;
1099
1100	spin_lock_irqsave(&ndev_ctx->lock, flags);
1101	if (!list_empty(&ndev_ctx->reconfig_events)) {
1102		event = list_first_entry(&ndev_ctx->reconfig_events,
1103					 struct netvsc_reconfig, list);
1104		list_del(&event->list);
1105		reschedule = !list_empty(&ndev_ctx->reconfig_events);
1106	}
1107	spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1108
1109	if (!event)
1110		goto out_unlock;
1111
1112	switch (event->event) {
1113		/* Only the following events are possible due to the check in
1114		 * netvsc_linkstatus_callback()
1115		 */
1116	case RNDIS_STATUS_MEDIA_CONNECT:
1117		if (rdev->link_state) {
1118			rdev->link_state = false;
1119			netif_carrier_on(net);
1120			netif_tx_wake_all_queues(net);
1121		} else {
1122			notify = true;
1123		}
1124		kfree(event);
1125		break;
1126	case RNDIS_STATUS_MEDIA_DISCONNECT:
1127		if (!rdev->link_state) {
1128			rdev->link_state = true;
1129			netif_carrier_off(net);
1130			netif_tx_stop_all_queues(net);
1131		}
1132		kfree(event);
1133		break;
1134	case RNDIS_STATUS_NETWORK_CHANGE:
1135		/* Only makes sense if carrier is present */
1136		if (!rdev->link_state) {
1137			rdev->link_state = true;
1138			netif_carrier_off(net);
1139			netif_tx_stop_all_queues(net);
1140			event->event = RNDIS_STATUS_MEDIA_CONNECT;
1141			spin_lock_irqsave(&ndev_ctx->lock, flags);
1142			list_add(&event->list, &ndev_ctx->reconfig_events);
1143			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1144			reschedule = true;
1145		}
1146		break;
1147	}
1148
1149	rtnl_unlock();
1150
1151	if (notify)
1152		netdev_notify_peers(net);
1153
1154	/* link_watch only sends one notification with the current state per
1155	 * second; handle the next reconfig event in 2 seconds.
1156	 */
1157	if (reschedule)
1158		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
1159
1160	return;
1161
1162out_unlock:
1163	rtnl_unlock();
1164}
1165
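/* Release the per-cpu statistics before handing the net_device back. */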
1166static void netvsc_free_netdev(struct net_device *netdev)
1167{
1168	struct net_device_context *net_device_ctx = netdev_priv(netdev);
1169
1170	free_percpu(net_device_ctx->tx_stats);
1171	free_percpu(net_device_ctx->rx_stats);
1172	free_netdev(netdev);
1173}
1174
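/*
 * Walk the initial network namespace and return the netvsc net_device whose
 * permanent MAC address matches @mac, or NULL if none does. The caller must
 * hold the RTNL lock (see ASSERT_RTNL()).
 */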
1175static struct net_device *get_netvsc_bymac(const u8 *mac)
1176{
1177	struct net_device *dev;
1178
1179	ASSERT_RTNL();
1180
1181	for_each_netdev(&init_net, dev) {
1182		if (dev->netdev_ops != &device_ops)
1183			continue;	/* not a netvsc device */
1184
1185		if (ether_addr_equal(mac, dev->perm_addr))
1186			return dev;
1187	}
1188
1189	return NULL;
1190}
1191
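/*
 * Map a VF net_device back to the synthetic netvsc device that has already
 * claimed it through its vf_netdev pointer; returns NULL if no netvsc device
 * references this VF. The caller must hold the RTNL lock.
 */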
1192static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
1193{
1194	struct net_device *dev;
1195
1196	ASSERT_RTNL();
1197
1198	for_each_netdev(&init_net, dev) {
1199		struct net_device_context *net_device_ctx;
1200
1201		if (dev->netdev_ops != &device_ops)
1202			continue;	/* not a netvsc device */
1203
1204		net_device_ctx = netdev_priv(dev);
1205		if (net_device_ctx->nvdev == NULL)
1206			continue;	/* device is removed */
1207
1208		if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
1209			return dev;	/* a match */
1210	}
1211
1212	return NULL;
1213}
1214
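/*
 * NETDEV_REGISTER handler: pair a newly registered VF with its synthetic
 * counterpart (matched by permanent MAC), pin the module and the VF
 * net_device, and record the association in vf_netdev.
 */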
1215static int netvsc_register_vf(struct net_device *vf_netdev)
1216{
1217	struct net_device *ndev;
1218	struct net_device_context *net_device_ctx;
1219	struct netvsc_device *netvsc_dev;
1220
1221	if (vf_netdev->addr_len != ETH_ALEN)
1222		return NOTIFY_DONE;
1223
1224	/*
1225	 * We will use the MAC address to locate the synthetic interface to
1226	 * associate with the VF interface. If we don't find a matching
1227	 * synthetic interface, move on.
1228	 */
1229	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
1230	if (!ndev)
1231		return NOTIFY_DONE;
1232
1233	net_device_ctx = netdev_priv(ndev);
1234	netvsc_dev = net_device_ctx->nvdev;
1235	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
1236		return NOTIFY_DONE;
1237
1238	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
1239	/*
1240	 * Take a reference on the module.
1241	 */
1242	try_module_get(THIS_MODULE);
1243
1244	dev_hold(vf_netdev);
1245	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
1246	return NOTIFY_OK;
1247}
1248
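/*
 * NETDEV_UP handler for an associated VF: open the RNDIS filter, ask the host
 * to steer traffic through the VF, take the synthetic carrier down and notify
 * peers through the VF device.
 */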
1249static int netvsc_vf_up(struct net_device *vf_netdev)
1250{
1251	struct net_device *ndev;
1252	struct netvsc_device *netvsc_dev;
1253	struct net_device_context *net_device_ctx;
1254
1255	ndev = get_netvsc_byref(vf_netdev);
1256	if (!ndev)
1257		return NOTIFY_DONE;
1258
1259	net_device_ctx = netdev_priv(ndev);
1260	netvsc_dev = net_device_ctx->nvdev;
1261
1262	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
1263
1264	/*
1265	 * Open the device before switching data path.
1266	 */
1267	rndis_filter_open(netvsc_dev);
1268
1269	/*
1270	 * Notify the host to switch the data path.
1271	 */
1272	netvsc_switch_datapath(ndev, true);
1273	netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);
1274
1275	netif_carrier_off(ndev);
1276
1277	/* Now notify peers through VF device. */
1278	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
1279
1280	return NOTIFY_OK;
1281}
1282
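/*
 * NETDEV_DOWN handler for an associated VF: switch the data path back to the
 * synthetic NIC, close the RNDIS filter, restore the carrier and notify peers
 * through the netvsc device again.
 */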
1283static int netvsc_vf_down(struct net_device *vf_netdev)
1284{
1285	struct net_device *ndev;
1286	struct netvsc_device *netvsc_dev;
1287	struct net_device_context *net_device_ctx;
1288
1289	ndev = get_netvsc_byref(vf_netdev);
1290	if (!ndev)
1291		return NOTIFY_DONE;
1292
1293	net_device_ctx = netdev_priv(ndev);
1294	netvsc_dev = net_device_ctx->nvdev;
1295
1296	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
1297	netvsc_switch_datapath(ndev, false);
1298	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
1299	rndis_filter_close(netvsc_dev);
1300	netif_carrier_on(ndev);
1301
1302	/* Now notify peers through netvsc device. */
1303	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
1304
1305	return NOTIFY_OK;
1306}
1307
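/*
 * NETDEV_UNREGISTER handler: break the VF association and release the
 * references taken in netvsc_register_vf().
 */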
1308static int netvsc_unregister_vf(struct net_device *vf_netdev)
1309{
1310	struct net_device *ndev;
1311	struct netvsc_device *netvsc_dev;
1312	struct net_device_context *net_device_ctx;
1313
1314	ndev = get_netvsc_byref(vf_netdev);
1315	if (!ndev)
1316		return NOTIFY_DONE;
1317
1318	net_device_ctx = netdev_priv(ndev);
1319	netvsc_dev = net_device_ctx->nvdev;
1320
1321	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
1322
1323	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
1324	dev_put(vf_netdev);
1325	module_put(THIS_MODULE);
1326	return NOTIFY_OK;
1327}
1328
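/*
 * VMBus probe: allocate the multi-queue net_device and its per-cpu stats,
 * initialize the work items and feature flags, bring up the RNDIS/netvsc
 * layers, size the queues to the channel count and register the netdev.
 */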
1329static int netvsc_probe(struct hv_device *dev,
1330			const struct hv_vmbus_device_id *dev_id)
1331{
1332	struct net_device *net = NULL;
1333	struct net_device_context *net_device_ctx;
1334	struct netvsc_device_info device_info;
1335	struct netvsc_device *nvdev;
1336	int ret;
1337
1338	net = alloc_etherdev_mq(sizeof(struct net_device_context),
1339				num_online_cpus());
1340	if (!net)
1341		return -ENOMEM;
1342
1343	netif_carrier_off(net);
1344
1345	netvsc_init_settings(net);
1346
1347	net_device_ctx = netdev_priv(net);
1348	net_device_ctx->device_ctx = dev;
1349	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
1350	if (netif_msg_probe(net_device_ctx))
1351		netdev_dbg(net, "netvsc msg_enable: %d\n",
1352			   net_device_ctx->msg_enable);
1353
1354	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
1355	if (!net_device_ctx->tx_stats) {
1356		free_netdev(net);
1357		return -ENOMEM;
1358	}
1359	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
1360	if (!net_device_ctx->rx_stats) {
1361		free_percpu(net_device_ctx->tx_stats);
1362		free_netdev(net);
1363		return -ENOMEM;
1364	}
1365
1366	hv_set_drvdata(dev, net);
1367
1368	net_device_ctx->start_remove = false;
1369
1370	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
1371	INIT_WORK(&net_device_ctx->work, do_set_multicast);
1372
1373	spin_lock_init(&net_device_ctx->lock);
1374	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
1375
1376	net->netdev_ops = &device_ops;
1377
1378	net->hw_features = NETVSC_HW_FEATURES;
1379	net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;
1380
1381	net->ethtool_ops = &ethtool_ops;
1382	SET_NETDEV_DEV(net, &dev->device);
1383
1384	/* We always need headroom for rndis header */
1385	net->needed_headroom = RNDIS_AND_PPI_SIZE;
1386
1387	/* Notify the netvsc driver of the new device */
1388	memset(&device_info, 0, sizeof(device_info));
1389	device_info.ring_size = ring_size;
1390	device_info.max_num_vrss_chns = max_num_vrss_chns;
1391	ret = rndis_filter_device_add(dev, &device_info);
1392	if (ret != 0) {
1393		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
1394		netvsc_free_netdev(net);
1395		hv_set_drvdata(dev, NULL);
1396		return ret;
1397	}
1398	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
1399
1400	nvdev = net_device_ctx->nvdev;
1401	netif_set_real_num_tx_queues(net, nvdev->num_chn);
1402	netif_set_real_num_rx_queues(net, nvdev->num_chn);
1403	netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
1404
1405	/* MTU range: 68 - 1500 or 65521 */
1406	net->min_mtu = NETVSC_MTU_MIN;
1407	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
1408		net->max_mtu = NETVSC_MTU - ETH_HLEN;
1409	else
1410		net->max_mtu = ETH_DATA_LEN;
1411
1412	ret = register_netdev(net);
1413	if (ret != 0) {
1414		pr_err("Unable to register netdev.\n");
1415		rndis_filter_device_remove(dev);
1416		netvsc_free_netdev(net);
1417	}
1418
1419	return ret;
1420}
1421
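/*
 * VMBus remove: flag the removal to block racing reconfiguration, flush the
 * pending work items, stop and unregister the netdev, then tear down the
 * RNDIS device and free the net_device.
 */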
1422static int netvsc_remove(struct hv_device *dev)
1423{
1424	struct net_device *net;
1425	struct net_device_context *ndev_ctx;
1426	struct netvsc_device *net_device;
1427
1428	net = hv_get_drvdata(dev);
1429
1430	if (net == NULL) {
1431		dev_err(&dev->device, "No net device to remove\n");
1432		return 0;
1433	}
1434
1435	ndev_ctx = netdev_priv(net);
1436	net_device = ndev_ctx->nvdev;
1437
1438	/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
1439	 * removing the device.
1440	 */
1441	rtnl_lock();
1442	ndev_ctx->start_remove = true;
1443	rtnl_unlock();
1444
1445	cancel_delayed_work_sync(&ndev_ctx->dwork);
1446	cancel_work_sync(&ndev_ctx->work);
1447
1448	/* Stop outbound asap */
1449	netif_tx_disable(net);
1450
1451	unregister_netdev(net);
1452
1453	/*
1454	 * Call into the vsc driver to let it know that the device is being
1455	 * removed.
1456	 */
1457	rndis_filter_device_remove(dev);
1458
1459	hv_set_drvdata(dev, NULL);
1460
1461	netvsc_free_netdev(net);
1462	return 0;
1463}
1464
1465static const struct hv_vmbus_device_id id_table[] = {
1466	/* Network guid */
1467	{ HV_NIC_GUID, },
1468	{ },
1469};
1470
1471MODULE_DEVICE_TABLE(vmbus, id_table);
1472
1473/* The one and only driver instance. */
1474static struct  hv_driver netvsc_drv = {
1475	.name = KBUILD_MODNAME,
1476	.id_table = id_table,
1477	.probe = netvsc_probe,
1478	.remove = netvsc_remove,
1479};
1480
1481/*
1482 * On Hyper-V, every VF interface is matched with a corresponding
1483 * synthetic interface. The synthetic interface is presented first
1484 * to the guest. When the corresponding VF instance is registered,
1485 * we will take care of switching the data path.
1486 */
1487static int netvsc_netdev_event(struct notifier_block *this,
1488			       unsigned long event, void *ptr)
1489{
1490	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
1491
1492	/* Skip our own events */
1493	if (event_dev->netdev_ops == &device_ops)
1494		return NOTIFY_DONE;
1495
1496	/* Avoid non-Ethernet type devices */
1497	if (event_dev->type != ARPHRD_ETHER)
1498		return NOTIFY_DONE;
1499
1500	/* Avoid a VLAN dev with the same MAC registering as a VF */
1501	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
1502		return NOTIFY_DONE;
1503
1504	/* Avoid a bonding master dev with the same MAC registering as a VF */
1505	if ((event_dev->priv_flags & IFF_BONDING) &&
1506	    (event_dev->flags & IFF_MASTER))
1507		return NOTIFY_DONE;
1508
1509	switch (event) {
1510	case NETDEV_REGISTER:
1511		return netvsc_register_vf(event_dev);
1512	case NETDEV_UNREGISTER:
1513		return netvsc_unregister_vf(event_dev);
1514	case NETDEV_UP:
1515		return netvsc_vf_up(event_dev);
1516	case NETDEV_DOWN:
1517		return netvsc_vf_down(event_dev);
1518	default:
1519		return NOTIFY_DONE;
1520	}
1521}
1522
1523static struct notifier_block netvsc_netdev_notifier = {
1524	.notifier_call = netvsc_netdev_event,
1525};
1526
1527static void __exit netvsc_drv_exit(void)
1528{
1529	unregister_netdevice_notifier(&netvsc_netdev_notifier);
1530	vmbus_driver_unregister(&netvsc_drv);
1531}
1532
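/*
 * Module init: clamp ring_size to the minimum, register the VMBus driver and
 * then the netdevice notifier used for VF tracking. The ring_size module
 * parameter can be set at load time, e.g. (assuming the module is built as
 * hv_netvsc):
 *
 *	modprobe hv_netvsc ring_size=256
 */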
1533static int __init netvsc_drv_init(void)
1534{
1535	int ret;
1536
1537	if (ring_size < RING_SIZE_MIN) {
1538		ring_size = RING_SIZE_MIN;
1539		pr_info("Increased ring_size to %d (min allowed)\n",
1540			ring_size);
1541	}
1542	ret = vmbus_driver_register(&netvsc_drv);
1543
1544	if (ret)
1545		return ret;
1546
1547	register_netdevice_notifier(&netvsc_netdev_notifier);
1548	return 0;
1549}
1550
1551MODULE_LICENSE("GPL");
1552MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
1553
1554module_init(netvsc_drv_init);
1555module_exit(netvsc_drv_exit);