// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dccp/output.c
 *
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

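/*
 * An Ack has just been sent (possibly piggybacked on a data packet), so any
 * pending delayed-Ack timer can be stopped.
 */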
static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
	return skb_clone(sk->sk_send_head, gfp_any());
}

/*
 * All SKBs seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48-bit sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					     dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;
		/*
		 * Increment GSS here already in case the option code needs it.
		 * Update GSS for real only if option processing below succeeds.
		 */
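		/*
		 * Note: ADD48() wraps modulo 2^48, e.g. ADD48(0xffffffffffff, 1)
		 * yields 0, matching the circular sequence space of RFC 4340, 7.1.
		 */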
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			fallthrough;
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			fallthrough;

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			fallthrough;
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck, and Reset packets should come
			 * through here with skb->sk set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type = dcb->dccpd_type;
		dh->dccph_sport = inet->inet_sport;
		dh->dccph_dport = inet->inet_dport;
		dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval = dcb->dccpd_ccval;
		dh->dccph_cscov = dp->dccps_pcslen;
		/* XXX For now we're using only 48-bit sequence numbers */
		dh->dccph_x = 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

/**
 * dccp_determine_ccmps - Find out about CCID-specific packet-size limits
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
	const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

	if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
		return 0;
	return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

	/* Account for header lengths and IPv4/v6 option overhead */
	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

	/*
	 * Leave enough headroom for common DCCP header options.
	 * This only considers options which may appear on DCCP-Data packets, as
	 * per table 3 in RFC 4340, 5.8. When running out of space for other
	 * options (e.g. Ack Vector which can take up to 255 bytes), it is better
	 * to schedule a separate Ack. Thus we leave headroom for the following:
	 *  - 1 byte for Slow Receiver (11.6)
	 *  - 6 bytes for Timestamp (13.1)
	 *  - 10 bytes for Timestamp Echo (13.3)
	 *  - 8 bytes for NDP count (7.7, when activated)
	 *  - 6 bytes for Data Checksum (9.3)
	 *  - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
	 */
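	/*
	 * Illustrative example, assuming IPv4 without IP options and with both
	 * NDP count and Ack Vectors disabled: 1500 - (20 + 12 + 4) = 1464,
	 * minus roundup(1 + 6 + 10 + 6, 4) = 24, gives an MPS of 1440 bytes.
	 */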
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

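/*
 * Callback to wake up writers: wakes threads sleeping on the socket wait
 * queue (e.g. in poll()) and signals async waiters once write space is
 * available again.
 */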
void dccp_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

	rcu_read_unlock();
}

/**
 * dccp_wait_for_ccid - Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 *
 * This is used by CCIDs which need to delay the send time in process context.
 */
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
	DEFINE_WAIT(wait);
	long remaining;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	sk->sk_write_pending++;
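	/*
	 * Drop the socket lock while sleeping so that incoming packets
	 * (e.g. Acks updating CCID state) can still be processed.
	 */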
	release_sock(sk);

	remaining = schedule_timeout(delay);

	lock_sock(sk);
	sk->sk_write_pending--;
	finish_wait(sk_sleep(sk), &wait);

	if (signal_pending(current) || sk->sk_err)
		return -1;
	return remaining;
}

/**
 * dccp_xmit_packet - Send data packet under control of CCID
 * Transmits next-queued payload and informs CCID to account for the packet.
 */
static void dccp_xmit_packet(struct sock *sk)
{
	int err, len;
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb = dccp_qpolicy_pop(sk);

	if (unlikely(skb == NULL))
		return;
	len = skb->len;

	if (sk->sk_state == DCCP_PARTOPEN) {
		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
		/*
		 * See 8.1.5 - Handshake Completion.
		 *
		 * For robustness we resend Confirm options until the client has
		 * entered OPEN. During the initial feature negotiation, the MPS
		 * is smaller than usual, reduced by the Change/Confirm options.
		 */
		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
			dccp_send_ack(sk);
			dccp_feat_list_purge(&dp->dccps_featneg);
		}

		inet_csk_schedule_ack(sk);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else if (dccp_ack_pending(sk)) {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
	}

	err = dccp_transmit_skb(sk, skb);
	if (err)
		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
	/*
	 * Register this one as sent even if an error occurred. To the remote
	 * end a local packet drop is indistinguishable from network loss, i.e.
	 * any local drop will eventually be reported via receiver feedback.
	 */
	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

	/*
	 * If the CCID needs to transfer additional header options out-of-band
	 * (e.g. Ack Vectors or feature-negotiation options), it activates this
	 * flag to schedule a Sync. The Sync will automatically incorporate all
	 * currently pending header options, thus clearing the backlog.
	 */
	if (dp->dccps_sync_scheduled)
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}

/**
 * dccp_flush_write_queue - Drain queue at end of connection
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long delay, rc;

	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			/*
			 * If the CCID determines when to send, the next sending
			 * time is unknown or the CCID may not even send again
			 * (e.g. remote host crashes or lost Ack packets).
			 */
			DCCP_WARN("CCID did not manage to send all packets\n");
			return;
		case CCID_PACKET_DELAY:
			delay = msecs_to_jiffies(rc);
			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			if (rc < 0)
				return;
			*time_budget -= (delay - rc);
			/* check again if we can send now */
			break;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
		}
	}
}

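/*
 * dccp_write_xmit - Transmit queued payload as permitted by the TX CCID
 * Dequeues and sends packets while the CCID grants permission; when the
 * CCID asks for a delay, dccps_xmit_timer is armed so sending is retried
 * once the delay has elapsed.
 */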
void dccp_write_xmit(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = dccp_qpolicy_top(sk))) {
		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			return;
		case CCID_PACKET_DELAY:
			sk_reset_timer(sk, &dp->dccps_xmit_timer,
				       jiffies + msecs_to_jiffies(rc));
			return;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			dccp_qpolicy_drop(sk, skb);
			dccp_pr_debug("packet discarded due to err=%d\n", rc);
		}
	}
}

/**
 * dccp_retransmit_skb - Retransmit Request, Close, or CloseReq packets
 * There are only four retransmittable packet types in DCCP:
 * - Request in client-REQUEST state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close in node-CLOSING state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
	WARN_ON(sk->sk_send_head == NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* this count is used to distinguish original and retransmitted skb */
	inet_csk(sk)->icsk_retransmits++;

	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}

struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb;

	/* sk is marked const to clearly express that we don't hold the socket
	 * lock. sock_wmalloc() changes sk->sk_wmem_alloc atomically, so it is
	 * safe to promote sk to non-const.
	 */
	skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
			   GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, MAX_DCCP_HEADER);

	skb_dst_set(skb, dst_clone(dst));

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase GSS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_gss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_gss;

	/* Resolve feature dependencies resulting from choice of CCID */
	if (dccp_feat_server_ccid_dependencies(dreq))
		goto response_failed;

	if (dccp_insert_options_rsk(dreq, skb))
		goto response_failed;

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport = htons(inet_rsk(req)->ir_num);
	dh->dccph_dport = inet_rsk(req)->ir_rmt_port;
	dh->dccph_doff = (dccp_header_size +
			  DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type = DCCP_PKT_RESPONSE;
	dh->dccph_x = 1;
	dccp_hdr_set_seq(dh, dreq->dreq_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
response_failed:
	kfree_skb(skb);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer offending packet in @rcv_skb with Reset from control socket @sk */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;
	struct sk_buff *skb;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, sk->sk_prot->max_header);

	/* Swap the send and the receive. */
	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
	dh->dccph_type = DCCP_PKT_RESET;
	dh->dccph_sport = rxdh->dccph_dport;
	dh->dccph_dport = rxdh->dccph_sport;
	dh->dccph_doff = dccp_hdr_reset_len / 4;
	dh->dccph_x = 1;

	dhr = dccp_hdr_reset(skb);
	dhr->dccph_reset_code = dcb->dccpd_reset_code;

	switch (dcb->dccpd_reset_code) {
	case DCCP_RESET_CODE_PACKET_ERROR:
		dhr->dccph_reset_data[0] = rxdh->dccph_type;
		break;
	case DCCP_RESET_CODE_OPTION_ERROR:
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
		break;
	}
	/*
	 * From RFC 4340, 8.3.1:
	 *   If P.ackno exists, set R.seqno := P.ackno + 1.
	 *   Else set R.seqno := 0.
	 */
	if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

	dccp_csum_outgoing(skb);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	if (err != 0)
		return err;

	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOBUFS;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_reset_code = code;

	return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/* do not connect if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dccp_sk(sk)))
		return -EPROTO;

	/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
	dp->dccps_gar = dp->dccps_iss;

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	icsk->icsk_retransmits = 0;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. Elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL) {
		/* FIXME: how to make sure the sync is sent? */
		DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

	/*
	 * Clear the flag in case the Sync was scheduled for out-of-band data,
	 * such as carrying a long Ack Vector.
	 */
	dccp_sk(sk)->dccps_sync_scheduled = 0;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;
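	/*
	 * Note: active close runs in process context and may sleep, while the
	 * passive case may be reached from the receive path, hence GFP_ATOMIC.
	 */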

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
	else
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

	if (active) {
		skb = dccp_skb_entail(sk, skb);
		/*
		 * Retransmission timer for active-close: RFC 4340, 8.3 requires
		 * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
		 * state can be left. The initial timeout is 2 RTTs.
		 * Since RTT measurement is done by the CCIDs, there is no easy
		 * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
		 * is too low (200ms); we use a high value to avoid unnecessary
		 * retransmissions when the link RTT is > 0.2 seconds.
		 * FIXME: Let main module sample RTTs and use that instead.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
	}
	dccp_transmit_skb(sk, skb);
}