/*
 * Copyright (C) 2017 - Cambridge Greys Limited
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Licensed under the GPL.
 */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/virtio_net.h>
#include <linux/virtio_net.h>
#include <linux/virtio_byteorder.h>
#include <linux/netdev_features.h>
#include "vector_user.h"
#include "vector_kern.h"

#define GOOD_LINEAR 512
#define GSO_ERROR "Incoming GSO frames and GRO disabled on the interface"

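/*
 * First 32 bits of a GRE header as seen on the wire: the flags/version
 * word followed by the protocol (EtherType) field.
 */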
struct gre_minimal_header {
        uint16_t header;
        uint16_t arptype;
};

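/*
 * Per-device GRE transport state: rx/tx keys, the running sequence
 * counter, feature flags and the byte offsets of the optional fields
 * within the encapsulation header.
 */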
struct uml_gre_data {
        uint32_t rx_key;
        uint32_t tx_key;
        uint32_t sequence;

        bool ipv6;
        bool has_sequence;
        bool pin_sequence;
        bool checksum;
        bool key;
        struct gre_minimal_header expected_header;

        uint32_t checksum_offset;
        uint32_t key_offset;
        uint32_t sequence_offset;
};

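/*
 * Per-device L2TPv3 transport state: rx/tx cookies and session ids,
 * the data counter, feature flags and the byte offsets of the optional
 * fields within the encapsulation header.
 */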
struct uml_l2tpv3_data {
        uint64_t rx_cookie;
        uint64_t tx_cookie;
        uint64_t rx_session;
        uint64_t tx_session;
        uint32_t counter;

        bool udp;
        bool ipv6;
        bool has_counter;
        bool pin_counter;
        bool cookie;
        bool cookie_is_64;

        uint32_t cookie_offset;
        uint32_t session_offset;
        uint32_t counter_offset;
};

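/*
 * Build the L2TPv3 encapsulation in front of an outgoing frame: the
 * "data packet" marker (UDP mode only), the tx session id and, if
 * configured, the cookie and the (optionally pinned) counter.
 */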
static int l2tpv3_form_header(uint8_t *header,
        struct sk_buff *skb, struct vector_private *vp)
{
        struct uml_l2tpv3_data *td = vp->transport_data;
        uint32_t *counter;

        if (td->udp)
                *(uint32_t *) header = cpu_to_be32(L2TPV3_DATA_PACKET);
        (*(uint32_t *) (header + td->session_offset)) = td->tx_session;

        if (td->cookie) {
                if (td->cookie_is_64)
                        (*(uint64_t *)(header + td->cookie_offset)) =
                                td->tx_cookie;
                else
                        (*(uint32_t *)(header + td->cookie_offset)) =
                                td->tx_cookie;
        }
        if (td->has_counter) {
                counter = (uint32_t *)(header + td->counter_offset);
                if (td->pin_counter) {
                        *counter = 0;
                } else {
                        td->counter++;
                        *counter = cpu_to_be32(td->counter);
                }
        }
        return 0;
}

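/*
 * Build the GRE encapsulation in front of an outgoing frame: the
 * expected flags/protocol word, plus the tx key and the (optionally
 * pinned) sequence number when those are configured.
 */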
static int gre_form_header(uint8_t *header,
        struct sk_buff *skb, struct vector_private *vp)
{
        struct uml_gre_data *td = vp->transport_data;
        uint32_t *sequence;

        *((uint32_t *) header) = *((uint32_t *) &td->expected_header);
        if (td->key)
                (*(uint32_t *) (header + td->key_offset)) = td->tx_key;
        if (td->has_sequence) {
                sequence = (uint32_t *)(header + td->sequence_offset);
                if (td->pin_sequence)
                        *sequence = 0;
                else
                        *sequence = cpu_to_be32(++td->sequence);
        }
        return 0;
}

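/*
 * Raw/tap transports prepend a virtio_net header describing the
 * checksum and GSO state of the outgoing skb.
 */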
static int raw_form_header(uint8_t *header,
        struct sk_buff *skb, struct vector_private *vp)
{
        struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

        virtio_net_hdr_from_skb(
                skb,
                vheader,
                virtio_legacy_is_little_endian(),
                false
        );

        return 0;
}

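/*
 * Check the L2TPv3 encapsulation of an incoming frame: skip the IPv4
 * header for the raw IPv4 transport, then verify the cookie (if
 * configured) and the session id.
 */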
static int l2tpv3_verify_header(
        uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
        struct uml_l2tpv3_data *td = vp->transport_data;
        uint32_t *session;
        uint64_t cookie;

        if ((!td->udp) && (!td->ipv6))
                header += sizeof(struct iphdr) /* fix for ipv4 raw */;

        /* We do not strictly check for "data" packets as per the RFC,
         * since the IP encapsulation variant of L2TPv3 does not carry
         * that marker anyway.
         */

        if (td->cookie) {
                if (td->cookie_is_64)
                        cookie = *(uint64_t *)(header + td->cookie_offset);
                else
                        cookie = *(uint32_t *)(header + td->cookie_offset);
                if (cookie != td->rx_cookie) {
                        if (net_ratelimit())
                                netdev_err(vp->dev, "uml_l2tpv3: unknown cookie id");
                        return -1;
                }
        }
        session = (uint32_t *) (header + td->session_offset);
        if (*session != td->rx_session) {
                if (net_ratelimit())
                        netdev_err(vp->dev, "uml_l2tpv3: session mismatch");
                return -1;
        }
        return 0;
}

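/*
 * Check the GRE encapsulation of an incoming frame: skip the IPv4
 * header for the raw IPv4 transport, then verify the flags/protocol
 * word and, if configured, the key.
 */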
static int gre_verify_header(
        uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
        uint32_t key;
        struct uml_gre_data *td = vp->transport_data;

        if (!td->ipv6)
                header += sizeof(struct iphdr) /* fix for ipv4 raw */;

        if (*((uint32_t *) header) != *((uint32_t *) &td->expected_header)) {
                if (net_ratelimit())
                        netdev_err(vp->dev, "header type disagreement, expecting %0x, got %0x",
                                *((uint32_t *) &td->expected_header),
                                *((uint32_t *) header)
                        );
                return -1;
        }

        if (td->key) {
                key = (*(uint32_t *)(header + td->key_offset));
                if (key != td->rx_key) {
                        if (net_ratelimit())
                                netdev_err(vp->dev, "unknown key id %0x, expecting %0x",
                                        key, td->rx_key);
                        return -1;
                }
        }
        return 0;
}

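/*
 * Inspect the virtio_net header of an incoming frame.  Returns 1 if the
 * sender marked the checksum as already validated; otherwise applies the
 * header to the skb and returns 0.
 */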
static int raw_verify_header(
        uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
        struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

        if ((vheader->gso_type != VIRTIO_NET_HDR_GSO_NONE) &&
                (vp->req_size != 65536)) {
                if (net_ratelimit())
                        netdev_err(
                                vp->dev,
                                GSO_ERROR
                        );
        }
        if ((vheader->flags & VIRTIO_NET_HDR_F_DATA_VALID) > 0)
                return 1;

        virtio_net_hdr_to_skb(skb, vheader, virtio_legacy_is_little_endian());
        return 0;
}

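/* Helpers to fetch optional numeric parameters from the parsed command line. */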
static bool get_uint_param(
        struct arglist *def, char *param, unsigned int *result)
{
        char *arg = uml_vector_fetch_arg(def, param);

        if (arg != NULL) {
                if (kstrtouint(arg, 0, result) == 0)
                        return true;
        }
        return false;
}

static bool get_ulong_param(
        struct arglist *def, char *param, unsigned long *result)
{
        char *arg = uml_vector_fetch_arg(def, param);

        if (arg != NULL) {
                if (kstrtoul(arg, 0, result) == 0)
                        return true;
        }
        return false;
}

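/*
 * Parse the GRE transport options ("v6", "rx_key"/"tx_key", "sequence",
 * "pin_sequence") and compute the resulting header layout and offsets.
 */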
static int build_gre_transport_data(struct vector_private *vp)
{
        struct uml_gre_data *td;
        unsigned int temp_int;
        unsigned int temp_rx;
        unsigned int temp_tx;

        vp->transport_data = kzalloc(sizeof(struct uml_gre_data), GFP_KERNEL);
        if (vp->transport_data == NULL)
                return -ENOMEM;
        td = vp->transport_data;
        td->sequence = 0;

        td->expected_header.arptype = GRE_IRB;
        td->expected_header.header = 0;

        vp->form_header = &gre_form_header;
        vp->verify_header = &gre_verify_header;
        vp->header_size = 4;
        td->key_offset = 4;
        td->sequence_offset = 4;
        td->checksum_offset = 4;

        td->ipv6 = false;
        if (get_uint_param(vp->parsed, "v6", &temp_int)) {
                if (temp_int > 0)
                        td->ipv6 = true;
        }
        td->key = false;
        if (get_uint_param(vp->parsed, "rx_key", &temp_rx)) {
                if (get_uint_param(vp->parsed, "tx_key", &temp_tx)) {
                        td->key = true;
                        td->expected_header.header |= GRE_MODE_KEY;
                        td->rx_key = cpu_to_be32(temp_rx);
                        td->tx_key = cpu_to_be32(temp_tx);
                        vp->header_size += 4;
                        td->sequence_offset += 4;
                } else {
                        return -EINVAL;
                }
        }

        td->has_sequence = false;
        if (get_uint_param(vp->parsed, "sequence", &temp_int)) {
                if (temp_int > 0) {
                        vp->header_size += 4;
                        td->has_sequence = true;
                        td->expected_header.header |= GRE_MODE_SEQUENCE;
                        if (get_uint_param(
                                vp->parsed, "pin_sequence", &temp_int)) {
                                if (temp_int > 0)
                                        td->pin_sequence = true;
                        }
                }
        }
        vp->rx_header_size = vp->header_size;
        if (!td->ipv6)
                vp->rx_header_size += sizeof(struct iphdr);
        return 0;
}

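/*
 * Parse the L2TPv3 transport options ("rx_session"/"tx_session" are
 * mandatory; "v6", "cookie64", "rx_cookie"/"tx_cookie", "counter",
 * "pin_counter" and "udp" are optional) and compute the header layout.
 */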
static int build_l2tpv3_transport_data(struct vector_private *vp)
{
        struct uml_l2tpv3_data *td;
        unsigned int temp_int, temp_rxs, temp_txs;
        unsigned long temp_rx;
        unsigned long temp_tx;

        vp->transport_data = kzalloc(
                sizeof(struct uml_l2tpv3_data), GFP_KERNEL);

        if (vp->transport_data == NULL)
                return -ENOMEM;

        td = vp->transport_data;

        vp->form_header = &l2tpv3_form_header;
        vp->verify_header = &l2tpv3_verify_header;
        td->counter = 0;

        vp->header_size = 4;
        td->session_offset = 0;
        td->cookie_offset = 4;
        td->counter_offset = 4;

        td->ipv6 = false;
        if (get_uint_param(vp->parsed, "v6", &temp_int)) {
                if (temp_int > 0)
                        td->ipv6 = true;
        }

        if (get_uint_param(vp->parsed, "rx_session", &temp_rxs)) {
                if (get_uint_param(vp->parsed, "tx_session", &temp_txs)) {
                        td->tx_session = cpu_to_be32(temp_txs);
                        td->rx_session = cpu_to_be32(temp_rxs);
                } else {
                        return -EINVAL;
                }
        } else {
                return -EINVAL;
        }

        td->cookie_is_64 = false;
        if (get_uint_param(vp->parsed, "cookie64", &temp_int)) {
                if (temp_int > 0)
                        td->cookie_is_64 = true;
        }
        td->cookie = false;
        if (get_ulong_param(vp->parsed, "rx_cookie", &temp_rx)) {
                if (get_ulong_param(vp->parsed, "tx_cookie", &temp_tx)) {
                        td->cookie = true;
                        if (td->cookie_is_64) {
                                td->rx_cookie = cpu_to_be64(temp_rx);
                                td->tx_cookie = cpu_to_be64(temp_tx);
                                vp->header_size += 8;
                                td->counter_offset += 8;
                        } else {
                                td->rx_cookie = cpu_to_be32(temp_rx);
                                td->tx_cookie = cpu_to_be32(temp_tx);
                                vp->header_size += 4;
                                td->counter_offset += 4;
                        }
                } else {
                        return -EINVAL;
                }
        }

        td->has_counter = false;
        if (get_uint_param(vp->parsed, "counter", &temp_int)) {
                if (temp_int > 0) {
                        td->has_counter = true;
                        vp->header_size += 4;
                        if (get_uint_param(
                                vp->parsed, "pin_counter", &temp_int)) {
                                if (temp_int > 0)
                                        td->pin_counter = true;
                        }
                }
        }

        if (get_uint_param(vp->parsed, "udp", &temp_int)) {
                if (temp_int > 0) {
                        td->udp = true;
                        vp->header_size += 4;
                        td->counter_offset += 4;
                        td->session_offset += 4;
                        td->cookie_offset += 4;
                }
        }

        vp->rx_header_size = vp->header_size;
        if ((!td->ipv6) && (!td->udp))
                vp->rx_header_size += sizeof(struct iphdr);

        return 0;
}

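/*
 * For the raw transport, try to enable vnet headers on both file
 * descriptors; if that works, advertise TSO/GRO and checksum offloads.
 */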
static int build_raw_transport_data(struct vector_private *vp)
{
        if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
                if (!uml_raw_enable_vnet_headers(vp->fds->tx_fd))
                        return -1;
                vp->form_header = &raw_form_header;
                vp->verify_header = &raw_verify_header;
                vp->header_size = sizeof(struct virtio_net_hdr);
                vp->rx_header_size = sizeof(struct virtio_net_hdr);
                vp->dev->hw_features |= (NETIF_F_TSO | NETIF_F_GRO);
                vp->dev->features |=
                        (NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
                                NETIF_F_TSO | NETIF_F_GRO);
                netdev_info(
                        vp->dev,
                        "raw: using vnet headers for tso and tx/rx checksum"
                );
        }
        return 0;
}

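/*
 * Same as the raw case, but the tx side is a tap fd and vnet headers
 * are enabled on it via uml_tap_enable_vnet_headers() instead.
 */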
static int build_tap_transport_data(struct vector_private *vp)
{
        if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
                vp->form_header = &raw_form_header;
                vp->verify_header = &raw_verify_header;
                vp->header_size = sizeof(struct virtio_net_hdr);
                vp->rx_header_size = sizeof(struct virtio_net_hdr);
                vp->dev->hw_features |=
                        (NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
                vp->dev->features |=
                        (NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
                                NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
                netdev_info(
                        vp->dev,
                        "tap/raw: using vnet headers for tso and tx/rx checksum"
                );
        } else {
                return 0; /* do not try to enable tap too if raw failed */
        }
        if (uml_tap_enable_vnet_headers(vp->fds->tx_fd))
                return 0;
        return -1;
}

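/* Dispatch on the "transport=" argument to the matching builder above. */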
int build_transport_data(struct vector_private *vp)
{
        char *transport = uml_vector_fetch_arg(vp->parsed, "transport");

        if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
                return build_gre_transport_data(vp);
        if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
                return build_l2tpv3_transport_data(vp);
        if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
                return build_raw_transport_data(vp);
        if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
                return build_tap_transport_data(vp);
        return 0;
}
458