v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
  3 *
  4 * RMNET Data MAP protocol
  5 */
  6
  7#include <linux/netdevice.h>
  8#include <linux/ip.h>
  9#include <linux/ipv6.h>
 10#include <net/ip6_checksum.h>
 11#include <linux/bitfield.h>
 12#include "rmnet_config.h"
 13#include "rmnet_map.h"
 14#include "rmnet_private.h"
 15#include "rmnet_vnd.h"
 16
 17#define RMNET_MAP_DEAGGR_SPACING  64
 18#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
 19
 20static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
 21					 const void *txporthdr)
 22{
 23	if (protocol == IPPROTO_TCP)
 24		return &((struct tcphdr *)txporthdr)->check;
 25
 26	if (protocol == IPPROTO_UDP)
 27		return &((struct udphdr *)txporthdr)->check;
 28
 29	return NULL;
 30}
 31
 32static int
 33rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
 34			       struct rmnet_map_dl_csum_trailer *csum_trailer,
 35			       struct rmnet_priv *priv)
 36{
 37	struct iphdr *ip4h = (struct iphdr *)skb->data;
 38	void *txporthdr = skb->data + ip4h->ihl * 4;
 39	__sum16 *csum_field, pseudo_csum;
 40	__sum16 ip_payload_csum;
 41
 42	/* Computing the checksum over just the IPv4 header--including its
 43	 * checksum field--should yield 0.  If it doesn't, the IP header
 44	 * is bad, so return an error and let the IP layer drop it.
 45	 */
 46	if (ip_fast_csum(ip4h, ip4h->ihl)) {
 47		priv->stats.csum_ip4_header_bad++;
 48		return -EINVAL;
 49	}
 50
 51	/* We don't support checksum offload on IPv4 fragments */
 52	if (ip_is_fragment(ip4h)) {
 53		priv->stats.csum_fragmented_pkt++;
 54		return -EOPNOTSUPP;
 55	}
 56
 57	/* Checksum offload is only supported for UDP and TCP protocols */
 58	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
 59	if (!csum_field) {
 60		priv->stats.csum_err_invalid_transport++;
 61		return -EPROTONOSUPPORT;
 62	}
 63
 64	/* RFC 768: UDP checksum is optional for IPv4, and is 0 if unused */
 65	if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
 66		priv->stats.csum_skipped++;
 67		return 0;
 68	}
 69
 70	/* The checksum value in the trailer is computed over the entire
 71	 * IP packet, including the IP header and payload.  To derive the
 72	 * transport checksum from this, we first subtract the contribution
 73	 * of the IP header from the trailer checksum.  We then add the
 74	 * checksum computed over the pseudo header.
 75	 *
 76	 * We verified above that the IP header contributes zero to the
 77	 * trailer checksum.  Therefore the checksum in the trailer is
 78	 * just the checksum computed over the IP payload.
 79	 *
 80	 * If the IP payload arrives intact, adding the pseudo header
 81	 * checksum to the IP payload checksum will yield 0xffff (negative
 82	 * zero).  This means the trailer checksum and the pseudo checksum
 83	 * are additive inverses of each other.  Put another way, the
 84	 * message passes the checksum test if the trailer checksum value
 85	 * is the negated pseudo header checksum.
 86	 *
 87	 * Knowing this, we don't even need to examine the transport
 88	 * header checksum value; it is already accounted for in the
 89	 * checksum value found in the trailer.
 90	 */
 91	ip_payload_csum = csum_trailer->csum_value;
 92
 93	pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
 94					ntohs(ip4h->tot_len) - ip4h->ihl * 4,
 95					ip4h->protocol, 0);
 96
 97	/* The cast is required to ensure only the low 16 bits are examined */
 98	if (ip_payload_csum != (__sum16)~pseudo_csum) {
 99		priv->stats.csum_validation_failed++;
100		return -EINVAL;
101	}
102
103	priv->stats.csum_ok++;
104	return 0;
105}
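/* A minimal stand-alone sketch of the one's-complement identity relied on
 * above: for an intact payload, adding the trailer checksum and the pseudo
 * header checksum (with end-around carry) yields 0xffff, which restates the
 * driver's test that the trailer value is the bitwise complement of the
 * pseudo header checksum.  The helpers below are illustrations only, not
 * kernel APIs, and are never built with the driver.
 */
#if 0
static u16 example_csum16_fold_add(u16 a, u16 b)
{
	u32 sum = (u32)a + (u32)b;

	return (u16)((sum & 0xffff) + (sum >> 16));	/* end-around carry */
}

static bool example_trailer_matches_pseudo(u16 trailer_csum, u16 pseudo_csum)
{
	/* Restates the check above: trailer_csum == (__sum16)~pseudo_csum */
	return example_csum16_fold_add(trailer_csum, pseudo_csum) == 0xffff;
}
#endif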
106
107#if IS_ENABLED(CONFIG_IPV6)
108static int
109rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
110			       struct rmnet_map_dl_csum_trailer *csum_trailer,
111			       struct rmnet_priv *priv)
112{
113	struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
114	void *txporthdr = skb->data + sizeof(*ip6h);
115	__sum16 *csum_field, pseudo_csum;
116	__sum16 ip6_payload_csum;
117	__be16 ip_header_csum;
118
119	/* Checksum offload is only supported for UDP and TCP protocols;
120	 * the packet cannot include any IPv6 extension headers
121	 */
122	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
123	if (!csum_field) {
124		priv->stats.csum_err_invalid_transport++;
125		return -EPROTONOSUPPORT;
126	}
127
128	/* The checksum value in the trailer is computed over the entire
129	 * IP packet, including the IP header and payload.  To derive the
130	 * transport checksum from this, we first subtract the contribution
131	 * of the IP header from the trailer checksum.  We then add the
132	 * checksum computed over the pseudo header.
133	 */
134	ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
135	ip6_payload_csum = csum16_sub(csum_trailer->csum_value, ip_header_csum);
136
137	pseudo_csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
138				      ntohs(ip6h->payload_len),
139				      ip6h->nexthdr, 0);
140
141	/* It's sufficient to compare the IP payload checksum with the
142	 * negated pseudo checksum to determine whether the packet
143	 * checksum was good.  (See further explanation in comments
144	 * in rmnet_map_ipv4_dl_csum_trailer()).
145	 *
146	 * The cast is required to ensure only the low 16 bits are
147	 * examined.
148	 */
149	if (ip6_payload_csum != (__sum16)~pseudo_csum) {
150		priv->stats.csum_validation_failed++;
151		return -EINVAL;
152	}
153
154	priv->stats.csum_ok++;
155	return 0;
156}
157#else
158static int
159rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
160			       struct rmnet_map_dl_csum_trailer *csum_trailer,
161			       struct rmnet_priv *priv)
162{
163	return 0;
164}
165#endif
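/* The IPv6 path cannot assume the IP header sums to negative zero (there is
 * no IPv6 header checksum), so it computes the header's one's-complement sum
 * and removes it from the trailer value with csum16_sub() before comparing
 * against the pseudo header checksum.  The sketch below shows what that
 * subtraction amounts to; it is an illustration only, not the kernel helper,
 * and is never built with the driver.
 */
#if 0
static u16 example_csum16_add(u16 csum, u16 addend)
{
	u16 res = csum + addend;

	return res + (res < addend);	/* fold the end-around carry back in */
}

static u16 example_csum16_sub(u16 csum, u16 addend)
{
	/* One's-complement subtraction is addition of the complement, so
	 * this strips the IPv6 header's contribution from the trailer
	 * checksum and leaves the sum over the payload alone.
	 */
	return example_csum16_add(csum, (u16)~addend);
}
#endif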
166
167static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
168{
169	void *txphdr;
170	u16 *csum;
171
172	txphdr = (void *)ip4h + ip4h->ihl * 4;
173
174	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
175		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
176		*csum = ~(*csum);
177	}
178}
179
180static void
181rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
182			      struct rmnet_map_ul_csum_header *ul_header,
183			      struct sk_buff *skb)
184{
185	u16 val;
186
187	val = MAP_CSUM_UL_ENABLED_FLAG;
188	if (iphdr->protocol == IPPROTO_UDP)
189		val |= MAP_CSUM_UL_UDP_FLAG;
190	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
191
192	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
193	ul_header->csum_info = htons(val);
194
195	skb->ip_summed = CHECKSUM_NONE;
196
197	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
198}
199
200#if IS_ENABLED(CONFIG_IPV6)
201static void
202rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
203{
204	void *txphdr;
205	u16 *csum;
206
207	txphdr = ip6h + 1;
208
209	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
210		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
211		*csum = ~(*csum);
212	}
213}
214
215static void
216rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
217			      struct rmnet_map_ul_csum_header *ul_header,
218			      struct sk_buff *skb)
219{
220	u16 val;
221
222	val = MAP_CSUM_UL_ENABLED_FLAG;
223	if (ipv6hdr->nexthdr == IPPROTO_UDP)
224		val |= MAP_CSUM_UL_UDP_FLAG;
225	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
226
227	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
228	ul_header->csum_info = htons(val);
229
230	skb->ip_summed = CHECKSUM_NONE;
231
232	rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
233}
234#else
235static void
236rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
237			      struct rmnet_map_ul_csum_header *ul_header,
238			      struct sk_buff *skb)
239{
240}
241#endif
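/* A sketch of the uplink checksum header produced for a TCP segment carried
 * over IPv4 with no IP options: the transport checksum region starts right
 * after the 20 byte IP header, and the checksum field sits 16 bytes into the
 * TCP header (offsetof(struct tcphdr, check)).  This mirrors what
 * rmnet_map_ipv4_ul_csum_header() derives from skb state; it is an
 * illustration only and is never built with the driver.
 */
#if 0
static void example_fill_ul_csum_header(struct rmnet_map_ul_csum_header *hdr)
{
	u16 val = MAP_CSUM_UL_ENABLED_FLAG;	/* not UDP, so no UDP flag */

	val |= offsetof(struct tcphdr, check) & MAP_CSUM_UL_OFFSET_MASK;

	hdr->csum_start_offset = htons(sizeof(struct iphdr));	/* 20 bytes */
	hdr->csum_info = htons(val);
}
#endif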
242
243static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
244						struct rmnet_port *port,
245						struct net_device *orig_dev)
246{
247	struct rmnet_priv *priv = netdev_priv(orig_dev);
248	struct rmnet_map_v5_csum_header *ul_header;
249
250	ul_header = skb_push(skb, sizeof(*ul_header));
251	memset(ul_header, 0, sizeof(*ul_header));
252	ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
253						MAPV5_HDRINFO_HDR_TYPE_FMASK);
254
255	if (skb->ip_summed == CHECKSUM_PARTIAL) {
256		void *iph = ip_hdr(skb);
257		__sum16 *check;
258		void *trans;
259		u8 proto;
260
261		if (skb->protocol == htons(ETH_P_IP)) {
262			u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
263
264			proto = ((struct iphdr *)iph)->protocol;
265			trans = iph + ip_len;
266		} else if (IS_ENABLED(CONFIG_IPV6) &&
267			   skb->protocol == htons(ETH_P_IPV6)) {
268			u16 ip_len = sizeof(struct ipv6hdr);
269
270			proto = ((struct ipv6hdr *)iph)->nexthdr;
271			trans = iph + ip_len;
272		} else {
273			priv->stats.csum_err_invalid_ip_version++;
274			goto sw_csum;
275		}
276
277		check = rmnet_map_get_csum_field(proto, trans);
278		if (check) {
279			skb->ip_summed = CHECKSUM_NONE;
280			/* Ask for checksum offloading */
281			ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
282			priv->stats.csum_hw++;
283			return;
284		}
285	}
286
287sw_csum:
288	priv->stats.csum_sw++;
289}
290
291/* Adds MAP header to front of skb->data
292 * Padding is calculated and set appropriately in MAP header. Mux ID is
293 * initialized to 0.
294 */
295struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
296						  int hdrlen,
297						  struct rmnet_port *port,
298						  int pad)
299{
300	struct rmnet_map_header *map_header;
301	u32 padding, map_datalen;
302
303	map_datalen = skb->len - hdrlen;
304	map_header = (struct rmnet_map_header *)
305			skb_push(skb, sizeof(struct rmnet_map_header));
306	memset(map_header, 0, sizeof(struct rmnet_map_header));
307
308	/* Set next_hdr bit for csum offload packets */
309	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
310		map_header->flags |= MAP_NEXT_HEADER_FLAG;
311
312	if (pad == RMNET_MAP_NO_PAD_BYTES) {
313		map_header->pkt_len = htons(map_datalen);
314		return map_header;
315	}
316
317	BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3);
318	padding = ALIGN(map_datalen, 4) - map_datalen;
319
320	if (padding == 0)
321		goto done;
322
323	if (skb_tailroom(skb) < padding)
324		return NULL;
325
326	skb_put_zero(skb, padding);
327
328done:
329	map_header->pkt_len = htons(map_datalen + padding);
330	/* This is a data packet, so the CMD bit is 0 */
331	map_header->flags = padding & MAP_PAD_LEN_MASK;
332
333	return map_header;
334}
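/* Worked example of the padding arithmetic above: the MAP payload is padded
 * out to a 4 byte multiple and pkt_len reports payload plus padding, so a
 * 61 byte payload takes 3 pad bytes and is reported as 64, while a 64 byte
 * payload needs none.  The helper is an illustration only and is never
 * built with the driver.
 */
#if 0
static u32 example_map_padding(u32 map_datalen)
{
	/* ALIGN(61, 4) - 61 == 3;  ALIGN(64, 4) - 64 == 0 */
	return ALIGN(map_datalen, 4) - map_datalen;
}
#endif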
335
336/* Deaggregates a single packet
337 * A whole new buffer is allocated for each portion of an aggregated frame.
338 * Caller should keep calling deaggregate() on the source skb until NULL is
339 * returned, indicating that there are no more packets to deaggregate. Caller
340 * is responsible for freeing the original skb.
341 */
342struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
343				      struct rmnet_port *port)
344{
345	struct rmnet_map_v5_csum_header *next_hdr = NULL;
346	struct rmnet_map_header *maph;
347	void *data = skb->data;
348	struct sk_buff *skbn;
349	u8 nexthdr_type;
350	u32 packet_len;
351
352	if (skb->len == 0)
353		return NULL;
354
355	maph = (struct rmnet_map_header *)skb->data;
356	packet_len = ntohs(maph->pkt_len) + sizeof(*maph);
357
358	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
359		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
360	} else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
361		if (!(maph->flags & MAP_CMD_FLAG)) {
362			packet_len += sizeof(*next_hdr);
363			if (maph->flags & MAP_NEXT_HEADER_FLAG)
364				next_hdr = data + sizeof(*maph);
365			else
366				/* MAPv5 data pkt without csum hdr is invalid */
367				return NULL;
368		}
369	}
370
371	if (((int)skb->len - (int)packet_len) < 0)
372		return NULL;
373
374	/* Some hardware can send us empty frames. Catch them */
375	if (!maph->pkt_len)
376		return NULL;
377
378	if (next_hdr) {
379		nexthdr_type = u8_get_bits(next_hdr->header_info,
380					   MAPV5_HDRINFO_HDR_TYPE_FMASK);
381		if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
382			return NULL;
383	}
384
385	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
386	if (!skbn)
387		return NULL;
388
389	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
390	skb_put(skbn, packet_len);
391	memcpy(skbn->data, skb->data, packet_len);
392	skb_pull(skb, packet_len);
393
394	return skbn;
395}
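/* A sketch of the calling pattern the comment above describes: the ingress
 * path keeps calling rmnet_map_deaggregate() on the aggregated frame until
 * it returns NULL, hands each extracted packet on, and then frees the
 * original skb itself.  The consume_one() hook is a placeholder, not a
 * driver function, and this sketch is never built with the driver.
 */
#if 0
static void example_deaggregate_loop(struct sk_buff *skb,
				     struct rmnet_port *port,
				     void (*consume_one)(struct sk_buff *))
{
	struct sk_buff *skbn;

	/* Each call peels one MAP packet off the front of the frame */
	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
		consume_one(skbn);

	/* Caller is responsible for freeing the original skb */
	consume_skb(skb);
}
#endif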
396
397/* Validates packet checksums. Function takes a pointer to
398 * the beginning of a buffer which contains the IP payload +
399 * padding + checksum trailer.
400 * Only IPv4 and IPv6 are supported along with TCP & UDP.
401 * Fragmented or tunneled packets are not supported.
402 */
403int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
404{
405	struct rmnet_priv *priv = netdev_priv(skb->dev);
406	struct rmnet_map_dl_csum_trailer *csum_trailer;
407
408	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
409		priv->stats.csum_sw++;
410		return -EOPNOTSUPP;
411	}
412
413	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
414
415	if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) {
416		priv->stats.csum_valid_unset++;
417		return -EINVAL;
418	}
419
420	if (skb->protocol == htons(ETH_P_IP))
421		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
422
423	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
424		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
425
426	priv->stats.csum_err_invalid_ip_version++;
427
428	return -EPROTONOSUPPORT;
429}
430
431static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
432						struct net_device *orig_dev)
433{
434	struct rmnet_priv *priv = netdev_priv(orig_dev);
435	struct rmnet_map_ul_csum_header *ul_header;
436	void *iphdr;
437
438	ul_header = (struct rmnet_map_ul_csum_header *)
439		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
440
441	if (unlikely(!(orig_dev->features &
442		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
443		goto sw_csum;
444
445	if (skb->ip_summed != CHECKSUM_PARTIAL)
446		goto sw_csum;
447
448	iphdr = (char *)ul_header +
449		sizeof(struct rmnet_map_ul_csum_header);
450
451	if (skb->protocol == htons(ETH_P_IP)) {
452		rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
453		priv->stats.csum_hw++;
454		return;
455	}
456
457	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
458		rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
459		priv->stats.csum_hw++;
460		return;
461	}
462
463	priv->stats.csum_err_invalid_ip_version++;
464
465sw_csum:
466	memset(ul_header, 0, sizeof(*ul_header));
467
468	priv->stats.csum_sw++;
469}
470
471/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
472 * packets that are supported for UL checksum offload.
473 */
474void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
475				      struct rmnet_port *port,
476				      struct net_device *orig_dev,
477				      int csum_type)
478{
479	switch (csum_type) {
480	case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
481		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
482		break;
483	case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
484		rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
485		break;
486	default:
487		break;
488	}
489}
490
491/* Process a MAPv5 packet header */
492int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
493				      u16 len)
494{
495	struct rmnet_priv *priv = netdev_priv(skb->dev);
496	struct rmnet_map_v5_csum_header *next_hdr;
497	u8 nexthdr_type;
498
499	next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
500			sizeof(struct rmnet_map_header));
501
502	nexthdr_type = u8_get_bits(next_hdr->header_info,
503				   MAPV5_HDRINFO_HDR_TYPE_FMASK);
504
505	if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
506		return -EINVAL;
507
508	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
509		priv->stats.csum_sw++;
510	} else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
511		priv->stats.csum_ok++;
512		skb->ip_summed = CHECKSUM_UNNECESSARY;
513	} else {
514		priv->stats.csum_valid_unset++;
515	}
516
517	/* Pull csum v5 header */
518	skb_pull(skb, sizeof(*next_hdr));
519
520	return 0;
521}
522
523#define RMNET_AGG_BYPASS_TIME_NSEC 10000000L
524
525static void reset_aggr_params(struct rmnet_port *port)
526{
527	port->skbagg_head = NULL;
528	port->agg_count = 0;
529	port->agg_state = 0;
530	memset(&port->agg_time, 0, sizeof(struct timespec64));
531}
532
533static void rmnet_send_skb(struct rmnet_port *port, struct sk_buff *skb)
534{
535	if (skb_needs_linearize(skb, port->dev->features)) {
536		if (unlikely(__skb_linearize(skb))) {
537			struct rmnet_priv *priv;
538
539			priv = netdev_priv(port->rmnet_dev);
540			this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
541			dev_kfree_skb_any(skb);
542			return;
543		}
544	}
545
546	dev_queue_xmit(skb);
547}
548
549static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
550{
551	struct sk_buff *skb = NULL;
552	struct rmnet_port *port;
553
554	port = container_of(work, struct rmnet_port, agg_wq);
555
556	spin_lock_bh(&port->agg_lock);
557	if (likely(port->agg_state == -EINPROGRESS)) {
558		/* Buffer may have already been shipped out */
559		if (likely(port->skbagg_head)) {
560			skb = port->skbagg_head;
561			reset_aggr_params(port);
562		}
563		port->agg_state = 0;
564	}
565
566	spin_unlock_bh(&port->agg_lock);
567	if (skb)
568		rmnet_send_skb(port, skb);
569}
570
571static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
572{
573	struct rmnet_port *port;
574
575	port = container_of(t, struct rmnet_port, hrtimer);
576
577	schedule_work(&port->agg_wq);
578
579	return HRTIMER_NORESTART;
580}
581
582unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
583				    struct net_device *orig_dev)
584{
585	struct timespec64 diff, last;
586	unsigned int len = skb->len;
587	struct sk_buff *agg_skb;
588	int size;
589
590	spin_lock_bh(&port->agg_lock);
591	memcpy(&last, &port->agg_last, sizeof(struct timespec64));
592	ktime_get_real_ts64(&port->agg_last);
593
594	if (!port->skbagg_head) {
595		/* Check to see if we should agg first. If the traffic is very
596		 * sparse, don't aggregate.
597		 */
598new_packet:
599		diff = timespec64_sub(port->agg_last, last);
600		size = port->egress_agg_params.bytes - skb->len;
601
602		if (size < 0) {
603			/* dropped */
604			spin_unlock_bh(&port->agg_lock);
605			return 0;
606		}
607
608		if (diff.tv_sec > 0 || diff.tv_nsec > RMNET_AGG_BYPASS_TIME_NSEC ||
609		    size == 0)
610			goto no_aggr;
611
612		port->skbagg_head = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
613		if (!port->skbagg_head)
614			goto no_aggr;
615
616		dev_kfree_skb_any(skb);
617		port->skbagg_head->protocol = htons(ETH_P_MAP);
618		port->agg_count = 1;
619		ktime_get_real_ts64(&port->agg_time);
620		skb_frag_list_init(port->skbagg_head);
621		goto schedule;
622	}
623	diff = timespec64_sub(port->agg_last, port->agg_time);
624	size = port->egress_agg_params.bytes - port->skbagg_head->len;
625
626	if (skb->len > size) {
627		agg_skb = port->skbagg_head;
628		reset_aggr_params(port);
629		spin_unlock_bh(&port->agg_lock);
630		hrtimer_cancel(&port->hrtimer);
631		rmnet_send_skb(port, agg_skb);
632		spin_lock_bh(&port->agg_lock);
633		goto new_packet;
634	}
635
636	if (skb_has_frag_list(port->skbagg_head))
637		port->skbagg_tail->next = skb;
638	else
639		skb_shinfo(port->skbagg_head)->frag_list = skb;
640
641	port->skbagg_head->len += skb->len;
642	port->skbagg_head->data_len += skb->len;
643	port->skbagg_head->truesize += skb->truesize;
644	port->skbagg_tail = skb;
645	port->agg_count++;
646
647	if (diff.tv_sec > 0 || diff.tv_nsec > port->egress_agg_params.time_nsec ||
648	    port->agg_count >= port->egress_agg_params.count ||
649	    port->skbagg_head->len == port->egress_agg_params.bytes) {
650		agg_skb = port->skbagg_head;
651		reset_aggr_params(port);
652		spin_unlock_bh(&port->agg_lock);
653		hrtimer_cancel(&port->hrtimer);
654		rmnet_send_skb(port, agg_skb);
655		return len;
656	}
657
658schedule:
659	if (!hrtimer_active(&port->hrtimer) && port->agg_state != -EINPROGRESS) {
660		port->agg_state = -EINPROGRESS;
661		hrtimer_start(&port->hrtimer,
662			      ns_to_ktime(port->egress_agg_params.time_nsec),
663			      HRTIMER_MODE_REL);
664	}
665	spin_unlock_bh(&port->agg_lock);
666
667	return len;
668
669no_aggr:
670	spin_unlock_bh(&port->agg_lock);
671	skb->protocol = htons(ETH_P_MAP);
672	dev_queue_xmit(skb);
673
674	return len;
675}
676
677void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
678				    u32 count, u32 time)
679{
680	spin_lock_bh(&port->agg_lock);
681	port->egress_agg_params.bytes = size;
682	WRITE_ONCE(port->egress_agg_params.count, count);
683	port->egress_agg_params.time_nsec = time * NSEC_PER_USEC;
684	spin_unlock_bh(&port->agg_lock);
685}
686
687void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
688{
689	hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
690	port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
691	spin_lock_init(&port->agg_lock);
692	rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
693	INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
694}
695
696void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
697{
698	hrtimer_cancel(&port->hrtimer);
699	cancel_work_sync(&port->agg_wq);
700
701	spin_lock_bh(&port->agg_lock);
702	if (port->agg_state == -EINPROGRESS) {
703		if (port->skbagg_head) {
704			dev_kfree_skb_any(port->skbagg_head);
705			reset_aggr_params(port);
706		}
707
708		port->agg_state = 0;
709	}
710	spin_unlock_bh(&port->agg_lock);
711}
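/* A sketch of how the aggregation helpers above fit together over a port's
 * lifetime: initialize once, feed every uplink skb through
 * rmnet_map_tx_aggregate(), optionally retune the limits at runtime, and
 * tear down on removal.  The example_* wrappers are placeholders, not
 * driver entry points, and this sketch is never built with the driver.
 */
#if 0
static void example_port_setup(struct rmnet_port *port)
{
	/* Installs the flush hrtimer/work item and the defaults set in
	 * rmnet_map_tx_aggregate_init(): 4096 bytes, 1 packet, 800 usec.
	 */
	rmnet_map_tx_aggregate_init(port);

	/* Retune to aggregate up to 16 KiB or 32 packets, flushing after
	 * at most 1000 usec.
	 */
	rmnet_map_update_ul_agg_config(port, 16384, 32, 1000);
}

static void example_port_teardown(struct rmnet_port *port)
{
	/* Cancels pending timer/work and drops any unsent aggregate */
	rmnet_map_tx_aggregate_exit(port);
}
#endif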