v3.15
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

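/* tcp_gso_segment() splits a TCP super-packet into mss-sized segments
 * in software.  The checksum of each segment is patched incrementally
 * rather than recomputed from scratch: the pseudo-header sum covers the
 * TCP length, so stashing ~skb->len up front (oldlen) and later adding
 * the new per-segment length (thlen + mss) in 1's complement arithmetic
 * yields the delta to fold into th->check.  FIN/PSH are cleared on all
 * but the last segment, CWR on all but the first, and only the first
 * segment may keep ooo_okay set.
 */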
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = tcp_skb_mss(skb);
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_IPIP |
			       SKB_GSO_SIT |
			       SKB_GSO_MPLS |
			       SKB_GSO_UDP_TUNNEL |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	do {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called at the time the
	 * last frag is freed at TX completion, and not right now when
	 * gso_skb is freed by the GSO engine.
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}

	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));
out:
	return segs;
}

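/* tcp_gro_receive() tries to coalesce an incoming skb with a packet
 * already held on the GRO list.  The source and destination ports sit
 * back to back in struct tcphdr, so a single 32-bit load of &th->source
 * compares both at once.  A merge is refused (flushed) on CWR, on any
 * difference in the flag word outside CWR/FIN/PSH, on differing ack_seq
 * or TCP options, when the payload exceeds the flow's MSS, or when the
 * sequence number is not contiguous with the held packet.
 */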
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = tcp_skb_mss(p);

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

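/* Prepare an outgoing skb for GSO: zero th->check and seed it with the
 * IPv4 pseudo-header sum so that tcp_gso_segment() only has to apply
 * incremental deltas per segment.
 */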
static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

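/* IPv4 entry point for TCP GRO: validate the TCP checksum against the
 * pseudo-header before attempting a merge.  CHECKSUM_NONE packets are
 * summed in full first, while CHECKSUM_COMPLETE packets reuse the sum
 * provided by the device; a bad checksum flushes the packet instead of
 * merging it.
 */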
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Use the IP hdr immediately preceding this transport header */
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip_csum;

	wsum = NAPI_GRO_CB(skb)->csum;

	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
				    0);

		/* fall through */

	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  wsum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

skip_csum:
	return tcp_gro_receive(head, skb);
}

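/* Finish a merged IPv4 GRO packet: re-seed th->check with the
 * pseudo-header sum and mark the skb as TCPv4 GSO so it can be
 * resegmented if it is later forwarded.
 */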
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_send_check	=	tcp_v4_gso_send_check,
		.gso_segment	=	tcp_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}
v4.17
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

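/* A software TX timestamp request targets one byte of the stream, so
 * after segmentation the SKBTX_SW_TSTAMP flag is moved onto the one
 * segment whose sequence range covers the requested key (tskey).
 */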
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo-header; we usually expect
		 * the stack to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

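/* Relative to the v3.15 version above, tcp_gso_segment() no longer
 * validates gso_type itself (the SKB_GSO_TCPV4 check moved into
 * tcp4_gso_segment()), scales the MSS when skb_segment() hands back a
 * GSO skb (GSO partial or frag_list segmentation), propagates software
 * timestamps via tcp_gso_tstamp(), and transfers truesize with refcount
 * helpers because the delta can be negative in pathological cases.
 */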
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called at the time the
	 * last frag is freed at TX completion, and not right now when
	 * gso_skb is freed by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

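/* As in v3.15, except for the IP ID handling: flush_id is no longer
 * OR'ed into flush unconditionally.  On the second frame of a flow the
 * code decides whether the flow keeps a fixed ("atomic") IP ID or an
 * incrementing one; tcp4_gro_complete() later reports an atomic flow as
 * SKB_GSO_TCP_FIXEDID.
 */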
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on whether
	 * we continue this flow as an atomic flow with a fixed ID or if we use
	 * an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

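/* The open-coded checksum switch from v3.15 is gone; the
 * skb_gro_checksum_validate() helper performs the equivalent
 * CHECKSUM_NONE / CHECKSUM_COMPLETE handling against the IPv4
 * pseudo-header.
 */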
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

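/* Unlike v3.15, gso_type is OR'ed rather than assigned, and a flow
 * that kept a constant IP ID is tagged SKB_GSO_TCP_FIXEDID so that
 * resegmentation preserves the fixed ID.
 */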
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}
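
/* In both versions the handlers are registered for IPPROTO_TCP via
 * inet_add_offload(); tcpv4_offload_init() is called during IPv4 stack
 * initialization (from ipv4_offload_init() in net/ipv4/af_inet.c).
 */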