/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

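/* Per-netdevice private state, stored in netdev_priv() of the net_device */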
struct net_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct delayed_work dwork;
	struct work_struct work;
};


static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

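/*
 * Workqueue handler that programs the RNDIS packet filter to match the
 * current interface flags (promiscuous vs. broadcast/multicast/directed).
 */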
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

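/*
 * ndo_set_rx_mode is called in atomic context, but programming the RNDIS
 * packet filter may sleep, so the update is deferred to do_set_multicast().
 */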
static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret = 0;

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_start_queue(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0)
		netdev_err(net, "unable to close device (ret %d).\n", ret);

	return ret;
}

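/*
 * Completion callback run once the host has consumed a transmitted packet;
 * frees the netvsc packet and the originating skb.
 */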
static void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->completion.send.send_completion_tid;

	kfree(packet);

	if (skb)
		dev_kfree_skb_any(skb);
}

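/*
 * Transmit path: map the skb's linear data and page fragments into a
 * hv_page_buffer array and hand the packet to the RNDIS filter layer.
 */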
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet;
	int ret;
	unsigned int i, num_pages, npg_data;

	/* Add multipages for skb->data and additional 2 for RNDIS */
	npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
		>> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
	num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;

	/* Allocate a netvsc packet based on # of frags. */
	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
			 (num_pages * sizeof(struct hv_page_buffer)) +
			 sizeof(struct rndis_filter_packet) +
			 NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
	if (!packet) {
		/* out of memory, drop packet */
		netdev_err(net, "unable to allocate hv_netvsc_packet\n");

		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	packet->vlan_tci = skb->vlan_tci;

	packet->extension = (void *)(unsigned long)packet +
			sizeof(struct hv_netvsc_packet) +
			(num_pages * sizeof(struct hv_page_buffer));

	/* If the rndis msg goes beyond 1 page, we will add 1 later */
	packet->page_buf_cnt = num_pages - 1;

	/* Initialize it from the skb */
	packet->total_data_buflen = skb->len;

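	/*
	 * page_buf[0] is reserved for the RNDIS message header, which the
	 * RNDIS filter fills in before the packet is sent to the host.
	 */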
	/* Start filling in the page buffers starting after RNDIS buffer. */
	packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
	packet->page_buf[1].offset
		= (unsigned long)skb->data & (PAGE_SIZE - 1);
	if (npg_data == 1)
		packet->page_buf[1].len = skb_headlen(skb);
	else
		packet->page_buf[1].len = PAGE_SIZE
			- packet->page_buf[1].offset;

	for (i = 2; i <= npg_data; i++) {
		packet->page_buf[i].pfn = virt_to_phys(skb->data
			+ PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
		packet->page_buf[i].offset = 0;
		packet->page_buf[i].len = PAGE_SIZE;
	}
	if (npg_data > 1)
		packet->page_buf[npg_data].len = (((unsigned long)skb->data
			+ skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;

	/* Additional fragments are after SKB data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		packet->page_buf[i+npg_data+1].pfn =
			page_to_pfn(skb_frag_page(f));
		packet->page_buf[i+npg_data+1].offset = f->page_offset;
		packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
	}

	/* Set the completion routine */
	packet->completion.send.send_completion = netvsc_xmit_completion;
	packet->completion.send.send_completion_ctx = packet;
	packet->completion.send.send_completion_tid = (unsigned long)skb;

	ret = rndis_filter_send(net_device_ctx->device_ctx,
				packet);
	if (ret == 0) {
		net->stats.tx_bytes += skb->len;
		net->stats.tx_packets++;
	} else {
		kfree(packet);
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				unsigned int status)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device_obj);
	net = net_device->ndev;

	if (!net) {
		netdev_err(net, "got link status but net device "
				"not initialized yet\n");
		return;
	}

	if (status == 1) {
		netif_carrier_on(net);
		netif_wake_queue(net);
		ndev_ctx = netdev_priv(net);
		schedule_delayed_work(&ndev_ctx->dwork, 0);
		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
	} else {
		netif_carrier_off(net);
		netif_tx_disable(net);
	}
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
				struct hv_netvsc_packet *packet)
{
	struct net_device *net;
	struct sk_buff *skb;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net) {
		netdev_err(net, "got receive callback but net device"
			" not initialized yet\n");
		return 0;
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
		packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	skb->ip_summed = CHECKSUM_NONE;
	skb->vlan_tci = packet->vlan_tci;

	net->stats.rx_packets++;
	net->stats.rx_bytes += packet->total_data_buflen;

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strcpy(info->driver, KBUILD_MODNAME);
	strcpy(info->version, HV_DRV_VERSION);
	strcpy(info->fw_version, "N/A");
}

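/*
 * Changing the MTU tears down the underlying RNDIS device and re-creates
 * it so the device comes back up with the new size.
 */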
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU;

	if (mtu < 68 || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_delayed_work_sync(&ndevctx->dwork);
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_wake_queue(ndev);

	return 0;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static const struct net_device_ops device_ops = {
	.ndo_open =		netvsc_open,
	.ndo_stop =		netvsc_close,
	.ndo_start_xmit =	netvsc_start_xmit,
	.ndo_set_rx_mode =	netvsc_set_multicast_list,
	.ndo_change_mtu =	netvsc_change_mtu,
	.ndo_validate_addr =	eth_validate_addr,
	.ndo_set_mac_address =	eth_mac_addr,
};

/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, we
 * schedule another netif_notify_peers() from a delayed work; otherwise the
 * GARP packet is not sent after quick migration, causing a network
 * disconnection.
 */
static void netvsc_send_garp(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	net = net_device->ndev;
	netif_notify_peers(net);
}


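/*
 * Probe a new VMBus network device: allocate and register the net_device
 * and bring up the underlying RNDIS/netvsc device.
 */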
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	int ret;

	net = alloc_etherdev(sizeof(struct net_device_context));
	if (!net)
		return -ENOMEM;

	/* Set initial state */
	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	/* TODO: Add GSO and Checksum offload */
	net->hw_features = NETIF_F_SG;
	net->features = NETIF_F_SG | NETIF_F_HW_VLAN_TX;

	SET_ETHTOOL_OPS(net, &ethtool_ops);
	SET_NETDEV_DEV(net, &dev->device);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		free_netdev(net);
		goto out;
	}

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		unregister_netdev(net);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	netif_carrier_on(net);

out:
	return ret;
}

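/*
 * Tear down the network device: cancel any pending work, unregister the
 * net_device and remove the underlying RNDIS device.
 */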
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
		       0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);