// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Limited
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/virtio_net.h>
#include <linux/virtio_net.h>
#include <linux/virtio_byteorder.h>
#include <linux/netdev_features.h>
#include "vector_user.h"
#include "vector_kern.h"

#define GOOD_LINEAR 512
#define GSO_ERROR "Incoming GSO frames and GRO disabled on the interface"

struct gre_minimal_header {
	uint16_t header;
	uint16_t arptype;
};

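/*
 * Per-device state for the GRE transport: rx/tx keys, optional sequence
 * number handling and the offsets of those fields within the
 * encapsulation header built and verified below.
 */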
struct uml_gre_data {
	uint32_t rx_key;
	uint32_t tx_key;
	uint32_t sequence;

	bool ipv6;
	bool has_sequence;
	bool pin_sequence;
	bool checksum;
	bool key;
	struct gre_minimal_header expected_header;

	uint32_t checksum_offset;
	uint32_t key_offset;
	uint32_t sequence_offset;
};

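/*
 * Per-device state for the L2TPv3 transport: rx/tx session IDs, optional
 * 32/64 bit cookies, the optional data counter, and the offsets of each
 * field within the encapsulation header.
 */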
struct uml_l2tpv3_data {
	uint64_t rx_cookie;
	uint64_t tx_cookie;
	uint64_t rx_session;
	uint64_t tx_session;
	uint32_t counter;

	bool udp;
	bool ipv6;
	bool has_counter;
	bool pin_counter;
	bool cookie;
	bool cookie_is_64;

	uint32_t cookie_offset;
	uint32_t session_offset;
	uint32_t counter_offset;
};

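/*
 * Build the L2TPv3 data header for an outgoing frame: the data-packet
 * marker (UDP encapsulation only), the tx session ID, the optional cookie
 * and, if enabled, a counter that is either pinned to zero or incremented
 * per packet.
 */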
static int l2tpv3_form_header(uint8_t *header,
	struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_l2tpv3_data *td = vp->transport_data;
	uint32_t *counter;

	if (td->udp)
		*(uint32_t *) header = cpu_to_be32(L2TPV3_DATA_PACKET);
	(*(uint32_t *) (header + td->session_offset)) = td->tx_session;

	if (td->cookie) {
		if (td->cookie_is_64)
			(*(uint64_t *)(header + td->cookie_offset)) =
				td->tx_cookie;
		else
			(*(uint32_t *)(header + td->cookie_offset)) =
				td->tx_cookie;
	}
	if (td->has_counter) {
		counter = (uint32_t *)(header + td->counter_offset);
		if (td->pin_counter) {
			*counter = 0;
		} else {
			td->counter++;
			*counter = cpu_to_be32(td->counter);
		}
	}
	return 0;
}

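/*
 * Build the minimal GRE header: flags/protocol as pre-computed in
 * expected_header, the optional tx key and, if enabled, a sequence number
 * that is either pinned to zero or incremented per packet.
 */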
static int gre_form_header(uint8_t *header,
		struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_gre_data *td = vp->transport_data;
	uint32_t *sequence;

	*((uint32_t *) header) = *((uint32_t *) &td->expected_header);
	if (td->key)
		(*(uint32_t *) (header + td->key_offset)) = td->tx_key;
	if (td->has_sequence) {
		sequence = (uint32_t *)(header + td->sequence_offset);
		if (td->pin_sequence)
			*sequence = 0;
		else
			*sequence = cpu_to_be32(++td->sequence);
	}
	return 0;
}

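/*
 * The raw, tap and hybrid transports prepend a virtio_net_hdr so that
 * checksum and GSO metadata can be exchanged with the host side.
 */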
static int raw_form_header(uint8_t *header,
		struct sk_buff *skb, struct vector_private *vp)
{
	struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

	virtio_net_hdr_from_skb(
		skb,
		vheader,
		virtio_legacy_is_little_endian(),
		false,
		0
	);

	return 0;
}

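/*
 * Validate the L2TPv3 header on an incoming frame: skip the outer IPv4
 * header for raw IP sockets, then check the cookie (if configured) and
 * the session ID against the receive-side values.
 */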
static int l2tpv3_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_l2tpv3_data *td = vp->transport_data;
	uint32_t *session;
	uint64_t cookie;

	if ((!td->udp) && (!td->ipv6))
		header += sizeof(struct iphdr) /* fix for ipv4 raw */;

	/* we do not do a strict check for "data" packets as per
	 * the RFC spec because the pure IP spec does not have
	 * that anyway.
	 */

	if (td->cookie) {
		if (td->cookie_is_64)
			cookie = *(uint64_t *)(header + td->cookie_offset);
		else
			cookie = *(uint32_t *)(header + td->cookie_offset);
		if (cookie != td->rx_cookie) {
			if (net_ratelimit())
				netdev_err(vp->dev, "uml_l2tpv3: unknown cookie id");
			return -1;
		}
	}
	session = (uint32_t *) (header + td->session_offset);
	if (*session != td->rx_session) {
		if (net_ratelimit())
			netdev_err(vp->dev, "uml_l2tpv3: session mismatch");
		return -1;
	}
	return 0;
}

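/*
 * Validate the GRE header on an incoming frame: skip the outer IPv4
 * header for raw IP sockets, then check the flags/protocol word and, if
 * keying is enabled, the receive key.
 */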
static int gre_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	uint32_t key;
	struct uml_gre_data *td = vp->transport_data;

	if (!td->ipv6)
		header += sizeof(struct iphdr) /* fix for ipv4 raw */;

	if (*((uint32_t *) header) != *((uint32_t *) &td->expected_header)) {
		if (net_ratelimit())
			netdev_err(vp->dev, "header type disagreement, expecting %0x, got %0x",
				*((uint32_t *) &td->expected_header),
				*((uint32_t *) header)
			);
		return -1;
	}

	if (td->key) {
		key = (*(uint32_t *)(header + td->key_offset));
		if (key != td->rx_key) {
			if (net_ratelimit())
				netdev_err(vp->dev, "unknown key id %0x, expecting %0x",
						key, td->rx_key);
			return -1;
		}
	}
	return 0;
}

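/*
 * Parse the incoming virtio_net_hdr. Complain (ratelimited) if a GSO
 * frame arrives while the receive requests are not sized at 64K; return
 * 1 when VIRTIO_NET_HDR_F_DATA_VALID is set, i.e. the sender has already
 * validated the checksum.
 */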
static int raw_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

	if ((vheader->gso_type != VIRTIO_NET_HDR_GSO_NONE) &&
		(vp->req_size != 65536)) {
		if (net_ratelimit())
			netdev_err(
				vp->dev,
				GSO_ERROR
		);
	}
	if ((vheader->flags & VIRTIO_NET_HDR_F_DATA_VALID) > 0)
		return 1;

	virtio_net_hdr_to_skb(skb, vheader, virtio_legacy_is_little_endian());
	return 0;
}

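/*
 * Command-line option helpers: look up "param" in the parsed device
 * arguments and convert it to an unsigned int (below, an unsigned long).
 * Return false when the option is absent or does not parse.
 */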
static bool get_uint_param(
	struct arglist *def, char *param, unsigned int *result)
{
	char *arg = uml_vector_fetch_arg(def, param);

	if (arg != NULL) {
		if (kstrtoint(arg, 0, result) == 0)
			return true;
	}
	return false;
}

static bool get_ulong_param(
	struct arglist *def, char *param, unsigned long *result)
{
	char *arg = uml_vector_fetch_arg(def, param);

	if (arg != NULL) {
		if (kstrtoul(arg, 0, result) == 0)
			return true;
	}
	return false;
}

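/*
 * Set up the GRE transport from the command-line options: optional IPv6
 * outer transport ("v6"), rx/tx keys, and a sequence number that can be
 * pinned to zero. Header size and field offsets grow as optional fields
 * are enabled; raw IPv4 sockets also deliver the outer IP header, hence
 * the larger rx_header_size.
 */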
static int build_gre_transport_data(struct vector_private *vp)
{
	struct uml_gre_data *td;
	int temp_int;
	int temp_rx;
	int temp_tx;

	vp->transport_data = kmalloc(sizeof(struct uml_gre_data), GFP_KERNEL);
	if (vp->transport_data == NULL)
		return -ENOMEM;
	td = vp->transport_data;
	td->sequence = 0;

	td->expected_header.arptype = GRE_IRB;
	td->expected_header.header = 0;

	vp->form_header = &gre_form_header;
	vp->verify_header = &gre_verify_header;
	vp->header_size = 4;
	td->key_offset = 4;
	td->sequence_offset = 4;
	td->checksum_offset = 4;

	td->ipv6 = false;
	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
		if (temp_int > 0)
			td->ipv6 = true;
	}
	td->key = false;
	if (get_uint_param(vp->parsed, "rx_key", &temp_rx)) {
		if (get_uint_param(vp->parsed, "tx_key", &temp_tx)) {
			td->key = true;
			td->expected_header.header |= GRE_MODE_KEY;
			td->rx_key = cpu_to_be32(temp_rx);
			td->tx_key = cpu_to_be32(temp_tx);
			vp->header_size += 4;
			td->sequence_offset += 4;
		} else {
			return -EINVAL;
		}
	}

	td->has_sequence = false;
	if (get_uint_param(vp->parsed, "sequence", &temp_int)) {
		if (temp_int > 0) {
			vp->header_size += 4;
			td->has_sequence = true;
			td->expected_header.header |= GRE_MODE_SEQUENCE;
			if (get_uint_param(
				vp->parsed, "pin_sequence", &temp_int)) {
				if (temp_int > 0)
					td->pin_sequence = true;
			}
		}
	}
	vp->rx_header_size = vp->header_size;
	if (!td->ipv6)
		vp->rx_header_size += sizeof(struct iphdr);
	return 0;
}

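/*
 * Set up the L2TPv3 transport from the command-line options: mandatory
 * rx/tx session IDs, optional 32/64 bit cookies, an optional counter
 * (optionally pinned to zero) and UDP vs. raw IP encapsulation. Field
 * offsets and header sizes are adjusted as options are enabled.
 */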
static int build_l2tpv3_transport_data(struct vector_private *vp)
{
	struct uml_l2tpv3_data *td;
	int temp_int, temp_rxs, temp_txs;
	unsigned long temp_rx;
	unsigned long temp_tx;

	vp->transport_data = kmalloc(
		sizeof(struct uml_l2tpv3_data), GFP_KERNEL);

	if (vp->transport_data == NULL)
		return -ENOMEM;

	td = vp->transport_data;

	vp->form_header = &l2tpv3_form_header;
	vp->verify_header = &l2tpv3_verify_header;
	td->counter = 0;

	vp->header_size = 4;
	td->session_offset = 0;
	td->cookie_offset = 4;
	td->counter_offset = 4;

	td->ipv6 = false;
	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
		if (temp_int > 0)
			td->ipv6 = true;
	}

	if (get_uint_param(vp->parsed, "rx_session", &temp_rxs)) {
		if (get_uint_param(vp->parsed, "tx_session", &temp_txs)) {
			td->tx_session = cpu_to_be32(temp_txs);
			td->rx_session = cpu_to_be32(temp_rxs);
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	td->cookie_is_64 = false;
	if (get_uint_param(vp->parsed, "cookie64", &temp_int)) {
		if (temp_int > 0)
			td->cookie_is_64 = true;
	}
	td->cookie = false;
	if (get_ulong_param(vp->parsed, "rx_cookie", &temp_rx)) {
		if (get_ulong_param(vp->parsed, "tx_cookie", &temp_tx)) {
			td->cookie = true;
			if (td->cookie_is_64) {
				td->rx_cookie = cpu_to_be64(temp_rx);
				td->tx_cookie = cpu_to_be64(temp_tx);
				vp->header_size += 8;
				td->counter_offset += 8;
			} else {
				td->rx_cookie = cpu_to_be32(temp_rx);
				td->tx_cookie = cpu_to_be32(temp_tx);
				vp->header_size += 4;
				td->counter_offset += 4;
			}
		} else {
			return -EINVAL;
		}
	}

	td->has_counter = false;
	if (get_uint_param(vp->parsed, "counter", &temp_int)) {
		if (temp_int > 0) {
			td->has_counter = true;
			vp->header_size += 4;
			if (get_uint_param(
				vp->parsed, "pin_counter", &temp_int)) {
				if (temp_int > 0)
					td->pin_counter = true;
			}
		}
	}

	if (get_uint_param(vp->parsed, "udp", &temp_int)) {
		if (temp_int > 0) {
			td->udp = true;
			vp->header_size += 4;
			td->counter_offset += 4;
			td->session_offset += 4;
			td->cookie_offset += 4;
		}
	}

	vp->rx_header_size = vp->header_size;
	if ((!td->ipv6) && (!td->udp))
		vp->rx_header_size += sizeof(struct iphdr);

	return 0;
}

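/*
 * Raw socket transport: if vnet headers can be enabled on both the rx and
 * tx sockets, advertise TSO/GRO and checksum offloads and use the
 * virtio_net_hdr form/verify helpers above; otherwise run without
 * offloads.
 */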
static int build_raw_transport_data(struct vector_private *vp)
{
	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
		if (!uml_raw_enable_vnet_headers(vp->fds->tx_fd))
			return -1;
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |= (NETIF_F_TSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
				NETIF_F_TSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"raw: using vnet headers for tso and tx/rx checksum"
		);
	}
	return 0;
}

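/*
 * Hybrid transport (raw rx socket plus tap tx fd): vnet headers must be
 * enabled on both sides before offloads are advertised; if the raw side
 * refuses them the tap side is not tried.
 */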
static int build_hybrid_transport_data(struct vector_private *vp)
{
	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |=
			(NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
				NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"tap/raw hybrid: using vnet headers for tso and tx/rx checksum"
		);
	} else {
		return 0; /* do not try to enable tap too if raw failed */
	}
	if (uml_tap_enable_vnet_headers(vp->fds->tx_fd))
		return 0;
	return -1;
}

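/*
 * Plain tap transport: enable vnet headers on the single tap fd and
 * advertise the same offloads as the raw/hybrid variants.
 */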
static int build_tap_transport_data(struct vector_private *vp)
{
	/* "Pure" tap uses the same fd for rx and tx */
	if (uml_tap_enable_vnet_headers(vp->fds->tx_fd)) {
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |=
			(NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
				NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"tap: using vnet headers for tso and tx/rx checksum"
		);
		return 0;
	}
	return -1;
}

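/* The BESS transport adds no encapsulation header of its own. */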
static int build_bess_transport_data(struct vector_private *vp)
{
	vp->form_header = NULL;
	vp->verify_header = NULL;
	vp->header_size = 0;
	vp->rx_header_size = 0;
	return 0;
}

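/*
 * Dispatch on the "transport=" device option to the matching
 * per-transport setup routine.
 */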
int build_transport_data(struct vector_private *vp)
{
	char *transport = uml_vector_fetch_arg(vp->parsed, "transport");

	if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
		return build_gre_transport_data(vp);
	if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
		return build_l2tpv3_transport_data(vp);
	if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
		return build_raw_transport_data(vp);
	if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
		return build_tap_transport_data(vp);
	if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
		return build_hybrid_transport_data(vp);
	if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0)
		return build_bess_transport_data(vp);
	return 0;
}

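/*
 * Illustrative sketch of how the options parsed above can appear on a UML
 * vector device command line. Only option names handled in this file are
 * shown; endpoint addresses, ports and file descriptors are parsed in
 * vector_user.c, and the exact device syntax is assumed here rather than
 * taken from this file:
 *
 *   vec0:transport=gre,rx_key=100,tx_key=200,sequence=1,pin_sequence=1
 *   vec1:transport=l2tpv3,udp=1,rx_session=10,tx_session=20,cookie64=1,rx_cookie=0xaabb,tx_cookie=0xccdd,counter=1
 */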