v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

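/* Choose which output segment inherits the software-timestamp request:
 * walk the segment chain, advancing seq by mss per segment, and mark the
 * segment that contains the byte with sequence number ts_seq.
 */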
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

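/* IPv4-specific entry point for TCP segmentation: validate the gso_type,
 * make sure the checksum pseudo header is set up, then hand off to the
 * protocol-independent tcp_gso_segment().
 */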
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo header; usually the stack is
		 * expected to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

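/* Core TCP GSO routine, shared by IPv4 and IPv6: splits one large skb into
 * MSS-sized segments with skb_segment(), then patches each segment's
 * sequence number, flags and checksum, and transfers the socket's memory
 * accounting from the original skb to the segments.
 */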
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

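	/* skb->len here covers the TCP header plus payload.  Keeping its
	 * one's complement lets the code below adjust each segment's
	 * checksum incrementally (RFC 1624) when the length changes.
	 */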
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only the first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

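	/* Checksum delta for going from the original super-packet length
	 * (folded into oldlen above) to each full segment's length of
	 * thlen + mss bytes.
	 */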
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

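	/* Walk all segments but the last: clear FIN/PSH (they stay on the
	 * final segment only), install the precomputed checksum, advance
	 * the sequence number by mss, and clear CWR on every segment after
	 * the first.
	 */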
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called when the last frag
	 * is freed at TX completion, and not right now when gso_skb is
	 * freed by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

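	/* The last segment keeps whatever payload remained, so recompute
	 * its checksum delta from its actual length rather than from mss.
	 */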
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

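/* GRO receive for TCP: look for a held packet of the same flow and try to
 * coalesce the incoming skb with it.  A non-NULL return tells the GRO core
 * to flush that held packet up the stack.
 */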
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

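	/* Find a held packet of the same flow: source and destination ports
	 * are adjacent 16-bit fields, so both compare in one 32-bit XOR.
	 */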
	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed ID
	 * or use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

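	/* Refuse to merge segments larger than the held packet's gso_size,
	 * or whose sequence number does not line up with the end of the
	 * data already aggregated.
	 */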
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

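	/* A sub-mss segment or any flag that ends or alters the flow means
	 * no further packets can be merged onto this one.
	 */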
out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

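/* GRO complete for TCP: turn the coalesced skb back into a valid GSO
 * packet, so the stack (or a later re-segmentation) sees consistent
 * checksum-offload state and gso_segs.
 */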
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

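/* Seed th->check with the IPv4 pseudo-header sum (length, addresses,
 * protocol) so that CHECKSUM_PARTIAL completion, in hardware or software,
 * can finish the checksum over the merged payload.
 */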
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}
v4.10.11
 
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo header; usually the stack is
		 * expected to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only the first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called when the last frag
	 * is freed at TX completion, and not right now when gso_skb is
	 * freed by the GSO engine.
	 */
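	/* Transfer the truesize accounting for all segments to the socket
	 * in one atomic_add(); the delta is the segments' total truesize
	 * minus the original skb's, which was already charged.
	 */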
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}

	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed ID
	 * or use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}