v4.17
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * RMNET Data MAP protocol
 *
 */

#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"

#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 const void *txporthdr)
{
	__sum16 *check = NULL;

	switch (protocol) {
	case IPPROTO_TCP:
		check = &(((struct tcphdr *)txporthdr)->check);
		break;

	case IPPROTO_UDP:
		check = &(((struct udphdr *)txporthdr)->check);
		break;

	default:
		check = NULL;
		break;
	}

	return check;
}

static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer)
{
	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
	u16 csum_value, csum_value_final;
	struct iphdr *ip4h;
	void *txporthdr;
	__be16 addend;

	ip4h = (struct iphdr *)(skb->data);
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
		return -EOPNOTSUPP;

	txporthdr = skb->data + ip4h->ihl * 4;

	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);

	if (!csum_field)
		return -EPROTONOSUPPORT;

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
		return 0;

	csum_value = ~ntohs(csum_trailer->csum_value);
	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
				     (__force __be16)hdr_csum);

	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					 ip4h->protocol, 0);
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
			csum_value_final = ~csum_value_final;
			break;

		case IPPROTO_TCP:
			/* DL4 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field))
		return 0;
	else
		return -EINVAL;
}
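
/* Editor's sketch of the arithmetic above (not part of the original file):
 * all values are 16-bit one's-complement sums.  The trailer carries a
 * hardware-computed checksum over the whole IP packet.  The function
 * removes the IP header's contribution, folds in the pseudo-header sum,
 * and backs out the checksum stored in the transport header; for an
 * undamaged packet the reconstructed value matches the stored transport
 * checksum and the function returns 0.
 */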

#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer)
{
	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
	u16 csum_value, csum_value_final;
	__be16 ip6_hdr_csum, addend;
	struct ipv6hdr *ip6h;
	void *txporthdr;
	u32 length;

	ip6h = (struct ipv6hdr *)(skb->data);

	txporthdr = skb->data + sizeof(struct ipv6hdr);
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);

	if (!csum_field)
		return -EPROTONOSUPPORT;

	csum_value = ~ntohs(csum_trailer->csum_value);
	ip6_hdr_csum = (__force __be16)
			~ntohs((__force __be16)ip_compute_csum(ip6h,
			       (int)(txporthdr - (void *)(skb->data))));
	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
				      ip6_hdr_csum);

	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
			     length, ip6h->nexthdr, 0));
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip6_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1
			 * DL6 One's complement rule for UDP checksum 0
			 */
			csum_value_final = ~csum_value_final;
			break;

		case IPPROTO_TCP:
			/* DL6 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field))
		return 0;
	else
		return -EINVAL;
}
#endif

static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	void *txphdr;
	u16 *csum;

	txphdr = iphdr + ip4h->ihl * 4;

	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)iphdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ip4_ind = 1;
	else
		ul_header->udp_ip4_ind = 0;

	/* Changing remaining fields to network order */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}
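
/* Editor's note on the layout assumed above: rmnet_map_ul_csum_header is
 * a 4-byte header (defined in rmnet_map.h), roughly:
 *
 *	__be16 csum_start_offset;	// offset of the transport header
 *	u16    csum_insert_offset:14;	// skb->csum_offset
 *	u16    udp_ip4_ind:1;		// set for UDP over IPv4
 *	u16    csum_enabled:1;		// checksum offload requested
 *
 * Only the first word is stored with htons() directly; the bitfield word
 * is converted to network order afterwards by the "hdr++; *hdr = htons()"
 * step seen in the function above.
 */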

#if IS_ENABLED(CONFIG_IPV6)
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	void *txphdr;
	u16 *csum;

	txphdr = ip6hdr + sizeof(struct ipv6hdr);

	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)ip6hdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	ul_header->udp_ip4_ind = 0;

	/* Changing remaining fields to network order */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
#endif

/* Adds MAP header to front of skb->data
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 */
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen, int pad)
{
	struct rmnet_map_header *map_header;
	u32 padding, map_datalen;
	u8 *padbytes;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header *)
			skb_push(skb, sizeof(struct rmnet_map_header));
	memset(map_header, 0, sizeof(struct rmnet_map_header));

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return NULL;

	padbytes = (u8 *)skb_put(skb, padding);
	memset(padbytes, 0, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	map_header->pad_len = padding & 0x3F;

	return map_header;
}
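
/* Editor's illustration (not in the original file): for a MAP payload of
 * 61 bytes, ALIGN(61, 4) is 64, so 3 zero pad bytes are appended and the
 * header is written with pkt_len = htons(64) and pad_len = 3.  Payloads
 * that are already 4-byte aligned take the "padding == 0" path and get
 * pad_len = 0.
 */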

/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct rmnet_map_header *maph;
	struct sk_buff *skbn;
	u32 packet_len;

	if (skb->len == 0)
		return NULL;

	maph = (struct rmnet_map_header *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);

	if (((int)skb->len - (int)packet_len) < 0)
		return NULL;

	/* Some hardware can send us empty frames. Catch them */
	if (ntohs(maph->pkt_len) == 0)
		return NULL;

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	return skbn;
}
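
/* Editor's illustration of the expected calling pattern (a sketch based
 * on the ingress handler in rmnet_handlers.c):
 *
 *	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
 *		__rmnet_map_ingress_handler(skbn, port);
 *	consume_skb(skb);
 */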

/* Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the IP payload +
 * padding + checksum trailer.
 * Only IPv4 and IPv6 are supported along with TCP & UDP.
 * Fragmented or tunneled packets are not supported.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
	struct rmnet_map_dl_csum_trailer *csum_trailer;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
		return -EOPNOTSUPP;

	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);

	if (!csum_trailer->valid)
		return -EINVAL;

	if (skb->protocol == htons(ETH_P_IP))
		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
	else if (skb->protocol == htons(ETH_P_IPV6))
#if IS_ENABLED(CONFIG_IPV6)
		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
#else
		return -EPROTONOSUPPORT;
#endif

	return 0;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct net_device *orig_dev)
{
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	if (unlikely(!(orig_dev->features &
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_csum_header);

		if (skb->protocol == htons(ETH_P_IP)) {
			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
			return;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
			return;
#else
			goto sw_csum;
#endif
		}
	}

sw_csum:
	ul_header->csum_start_offset = 0;
	ul_header->csum_insert_offset = 0;
	ul_header->csum_enabled = 0;
	ul_header->udp_ip4_ind = 0;
}
v6.2
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
 *
 * RMNET Data MAP protocol
 */

#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/bitfield.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"

#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 const void *txporthdr)
{
	if (protocol == IPPROTO_TCP)
		return &((struct tcphdr *)txporthdr)->check;

	if (protocol == IPPROTO_UDP)
		return &((struct udphdr *)txporthdr)->check;

	return NULL;
}

static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	struct iphdr *ip4h = (struct iphdr *)skb->data;
	void *txporthdr = skb->data + ip4h->ihl * 4;
	__sum16 *csum_field, pseudo_csum;
	__sum16 ip_payload_csum;

	/* Computing the checksum over just the IPv4 header--including its
	 * checksum field--should yield 0.  If it doesn't, the IP header
	 * is bad, so return an error and let the IP layer drop it.
	 */
	if (ip_fast_csum(ip4h, ip4h->ihl)) {
		priv->stats.csum_ip4_header_bad++;
		return -EINVAL;
	}

	/* We don't support checksum offload on IPv4 fragments */
	if (ip_is_fragment(ip4h)) {
		priv->stats.csum_fragmented_pkt++;
		return -EOPNOTSUPP;
	}

	/* Checksum offload is only supported for UDP and TCP protocols */
	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* RFC 768: UDP checksum is optional for IPv4, and is 0 if unused */
	if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
		priv->stats.csum_skipped++;
		return 0;
	}

	/* The checksum value in the trailer is computed over the entire
	 * IP packet, including the IP header and payload.  To derive the
	 * transport checksum from this, we first subtract the contribution
	 * of the IP header from the trailer checksum.  We then add the
	 * checksum computed over the pseudo header.
	 *
	 * We verified above that the IP header contributes zero to the
	 * trailer checksum.  Therefore the checksum in the trailer is
	 * just the checksum computed over the IP payload.
	 *
	 * If the IP payload arrives intact, adding the pseudo header
	 * checksum to the IP payload checksum will yield 0xffff (negative
	 * zero).  This means the trailer checksum and the pseudo checksum
	 * are additive inverses of each other.  Put another way, the
	 * message passes the checksum test if the trailer checksum value
	 * is the negated pseudo header checksum.
	 *
	 * Knowing this, we don't even need to examine the transport
	 * header checksum value; it is already accounted for in the
	 * checksum value found in the trailer.
	 */
	ip_payload_csum = csum_trailer->csum_value;

	pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					ip4h->protocol, 0);

	/* The cast is required to ensure only the low 16 bits are examined */
	if (ip_payload_csum != (__sum16)~pseudo_csum) {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}

	priv->stats.csum_ok++;
	return 0;
}
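
/* Editor's illustration (not in the original file): if the pseudo header
 * sum for a packet is 0x1234, an intact payload produces a trailer
 * csum_value of (__sum16)~0x1234 == 0xedcb, and the test above passes.
 * A damaged payload will (with the usual one's-complement checksum
 * caveats) change the sum and fail the comparison.
 */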

#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
	void *txporthdr = skb->data + sizeof(*ip6h);
	__sum16 *csum_field, pseudo_csum;
	__sum16 ip6_payload_csum;
	__be16 ip_header_csum;

	/* Checksum offload is only supported for UDP and TCP protocols;
	 * the packet cannot include any IPv6 extension headers
	 */
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* The checksum value in the trailer is computed over the entire
	 * IP packet, including the IP header and payload.  To derive the
	 * transport checksum from this, we first subtract the contribution
	 * of the IP header from the trailer checksum.  We then add the
	 * checksum computed over the pseudo header.
	 */
	ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
	ip6_payload_csum = csum16_sub(csum_trailer->csum_value, ip_header_csum);

	pseudo_csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ntohs(ip6h->payload_len),
				      ip6h->nexthdr, 0);

	/* It's sufficient to compare the IP payload checksum with the
	 * negated pseudo checksum to determine whether the packet
	 * checksum was good.  (See further explanation in comments
	 * in rmnet_map_ipv4_dl_csum_trailer()).
	 *
	 * The cast is required to ensure only the low 16 bits are
	 * examined.
	 */
	if (ip6_payload_csum != (__sum16)~pseudo_csum) {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}

	priv->stats.csum_ok++;
	return 0;
}
#else
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	return 0;
}
#endif

static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
{
	void *txphdr;
	u16 *csum;

	txphdr = (void *)ip4h + ip4h->ihl * 4;

	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	u16 val;

	val = MAP_CSUM_UL_ENABLED_FLAG;
	if (iphdr->protocol == IPPROTO_UDP)
		val |= MAP_CSUM_UL_UDP_FLAG;
	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;

	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
	ul_header->csum_info = htons(val);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}

#if IS_ENABLED(CONFIG_IPV6)
static void
rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
{
	void *txphdr;
	u16 *csum;

	txphdr = ip6h + 1;

	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	u16 val;

	val = MAP_CSUM_UL_ENABLED_FLAG;
	if (ipv6hdr->nexthdr == IPPROTO_UDP)
		val |= MAP_CSUM_UL_UDP_FLAG;
	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;

	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
	ul_header->csum_info = htons(val);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
}
#else
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
}
#endif

static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
						struct rmnet_port *port,
						struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_v5_csum_header *ul_header;

	ul_header = skb_push(skb, sizeof(*ul_header));
	memset(ul_header, 0, sizeof(*ul_header));
	ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
						MAPV5_HDRINFO_HDR_TYPE_FMASK);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		void *iph = ip_hdr(skb);
		__sum16 *check;
		void *trans;
		u8 proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			u16 ip_len = ((struct iphdr *)iph)->ihl * 4;

			proto = ((struct iphdr *)iph)->protocol;
			trans = iph + ip_len;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			u16 ip_len = sizeof(struct ipv6hdr);

			proto = ((struct ipv6hdr *)iph)->nexthdr;
			trans = iph + ip_len;
		} else {
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
		}

		check = rmnet_map_get_csum_field(proto, trans);
		if (check) {
			skb->ip_summed = CHECKSUM_NONE;
			/* Ask for checksum offloading */
			ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
			priv->stats.csum_hw++;
			return;
		}
	}

sw_csum:
	priv->stats.csum_sw++;
}
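
/* Editor's note (not in the original file): unlike the MAPv4 path in
 * rmnet_map_v4_checksum_uplink_packet() below, MAPv5 does not carry
 * start/insert offsets.  Setting MAPV5_CSUMINFO_VALID_FLAG simply asks
 * the hardware to compute and insert the transport checksum itself.
 */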

/* Adds MAP header to front of skb->data
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 */
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen,
						  struct rmnet_port *port,
						  int pad)
{
	struct rmnet_map_header *map_header;
	u32 padding, map_datalen;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header *)
			skb_push(skb, sizeof(struct rmnet_map_header));
	memset(map_header, 0, sizeof(struct rmnet_map_header));

	/* Set next_hdr bit for csum offload packets */
	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
		map_header->flags |= MAP_NEXT_HEADER_FLAG;

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

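	/* Editor's note: padding can only ever be 0-3 bytes here; the
	 * BUILD_BUG_ON below guards that MAP_PAD_LEN_MASK is wide enough
	 * to record it.
	 */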
	BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3);
	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return NULL;

	skb_put_zero(skb, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	/* This is a data packet, so the CMD bit is 0 */
	map_header->flags = padding & MAP_PAD_LEN_MASK;

	return map_header;
}

/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct rmnet_map_v5_csum_header *next_hdr = NULL;
	struct rmnet_map_header *maph;
	void *data = skb->data;
	struct sk_buff *skbn;
	u8 nexthdr_type;
	u32 packet_len;

	if (skb->len == 0)
		return NULL;

	maph = (struct rmnet_map_header *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(*maph);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
		if (!(maph->flags & MAP_CMD_FLAG)) {
			packet_len += sizeof(*next_hdr);
			if (maph->flags & MAP_NEXT_HEADER_FLAG)
				next_hdr = data + sizeof(*maph);
			else
				/* Mapv5 data pkt without csum hdr is invalid */
				return NULL;
		}
	}

	if (((int)skb->len - (int)packet_len) < 0)
		return NULL;

	/* Some hardware can send us empty frames. Catch them */
	if (!maph->pkt_len)
		return NULL;

	if (next_hdr) {
		nexthdr_type = u8_get_bits(next_hdr->header_info,
					   MAPV5_HDRINFO_HDR_TYPE_FMASK);
		if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
			return NULL;
	}

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	return skbn;
}
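
/* Editor's sketch of the framing handled above (illustrative):
 *
 *	MAPv5: [ rmnet_map_header | v5 csum header (data pkts) | payload | pad ]
 *
 * For MAPv4 the per-packet checksum metadata is instead a trailer that
 * follows the payload and padding.
 */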

/* Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the IP payload +
 * padding + checksum trailer.
 * Only IPv4 and IPv6 are supported along with TCP & UDP.
 * Fragmented or tunneled packets are not supported.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_dl_csum_trailer *csum_trailer;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
		return -EOPNOTSUPP;
	}

	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);

	if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) {
		priv->stats.csum_valid_unset++;
		return -EINVAL;
	}

	if (skb->protocol == htons(ETH_P_IP))
		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);

	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);

	priv->stats.csum_err_invalid_ip_version++;

	return -EPROTONOSUPPORT;
}

static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
						struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	if (unlikely(!(orig_dev->features &
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto sw_csum;

	iphdr = (char *)ul_header +
		sizeof(struct rmnet_map_ul_csum_header);

	if (skb->protocol == htons(ETH_P_IP)) {
		rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
		priv->stats.csum_hw++;
		return;
	}

	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
		rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
		priv->stats.csum_hw++;
		return;
	}

	priv->stats.csum_err_invalid_ip_version++;

sw_csum:
	memset(ul_header, 0, sizeof(*ul_header));

	priv->stats.csum_sw++;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct net_device *orig_dev,
				      int csum_type)
{
	switch (csum_type) {
	case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
		break;
	case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
		rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
		break;
	default:
		break;
	}
}
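
/* Editor's illustration (a sketch; the real caller lives in
 * rmnet_handlers.c): csum_type is expected to be the egress
 * data-format flag in effect, e.g.
 *
 *	int csum_type = 0;
 *
 *	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
 *		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
 *	else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
 *		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
 *
 *	if (csum_type)
 *		rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
 *						 csum_type);
 */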

/* Process a MAPv5 packet header */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_v5_csum_header *next_hdr;
	u8 nexthdr_type;

	next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
			sizeof(struct rmnet_map_header));

	nexthdr_type = u8_get_bits(next_hdr->header_info,
				   MAPV5_HDRINFO_HDR_TYPE_FMASK);

	if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
		return -EINVAL;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
	} else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
		priv->stats.csum_ok++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		priv->stats.csum_valid_unset++;
	}

	/* Pull csum v5 header */
	skb_pull(skb, sizeof(*next_hdr));

	return 0;
}