/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
        skb_set_owner_w(skb, sk);
        WARN_ON(sk->sk_send_head);
        sk->sk_send_head = skb;
        return skb_clone(sk->sk_send_head, gfp_any());
}

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                struct inet_sock *inet = inet_sk(sk);
                const struct inet_connection_sock *icsk = inet_csk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
                /* XXX For now we're using only 48 bits sequence numbers */
                const u32 dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                             dccp_packet_hdr_len(dcb->dccpd_type);
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;
                /*
                 * Increment GSS here already in case the option code needs it.
                 * Update GSS for real only if option processing below succeeds.
                 */
                dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        /* fall through */
                case DCCP_PKT_DATAACK:
                case DCCP_PKT_RESET:
                        break;

                case DCCP_PKT_REQUEST:
                        set_ack = 0;
                        /* Use ISS on the first (non-retransmitted) Request. */
                        if (icsk->icsk_retransmits == 0)
                                dcb->dccpd_seq = dp->dccps_iss;
                        /* fall through */

                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_ack_seq;
                        /* fall through */
                default:
                        /*
                         * Set owner/destructor: some skbs are allocated via
                         * alloc_skb (e.g. when retransmission may happen).
                         * Only Data, DataAck, and Reset packets should come
                         * through here with skb->sk set.
                         */
                        WARN_ON(skb->sk);
                        skb_set_owner_w(skb, sk);
                        break;
                }

                if (dccp_insert_options(sk, skb)) {
                        kfree_skb(skb);
                        return -EPROTO;
                }

                /* Build DCCP header and checksum it. */
                dh = dccp_zeroed_hdr(skb, dccp_header_size);
                dh->dccph_type  = dcb->dccpd_type;
                dh->dccph_sport = inet->inet_sport;
                dh->dccph_dport = inet->inet_dport;
                dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                dh->dccph_ccval = dcb->dccpd_ccval;
                dh->dccph_cscov = dp->dccps_pcslen;
                /* XXX For now we're using only 48 bits sequence numbers */
                dh->dccph_x     = 1;

                dccp_update_gss(sk, dcb->dccpd_seq);
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dp->dccps_service;
                        /*
                         * Limit Ack window to ISS <= P.ackno <= GSS, so that
                         * only Responses to Requests we sent are considered.
                         */
                        dp->dccps_awl = dp->dccps_iss;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }

                icsk->icsk_af_ops->send_check(sk, skb);

                if (set_ack)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
                return net_xmit_eval(err);
        }
        return -ENOBUFS;
}

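/*
 * Illustration only: layout of the generic header built above, as specified
 * in RFC 4340, 5.1 for X = 1 (48-bit sequence numbers, cf. dccph_x above):
 *
 *    0                   1                   2                   3
 *   +---------------+---------------+---------------+---------------+
 *   |          Source Port          |           Dest Port           |
 *   +---------------+---------------+---------------+---------------+
 *   |  Data Offset  | CCVal | CsCov |           Checksum            |
 *   +---------------+---------------+---------------+---------------+
 *   | Res | Type  |X|   Reserved    |  Sequence Number (high bits)  |
 *   +---------------+---------------+---------------+---------------+
 *   |                  Sequence Number (low bits)                   |
 *   +---------------+---------------+---------------+---------------+
 *
 * The type-specific header (e.g. Service Code on Request/Response) and any
 * options added by dccp_insert_options() follow.
 */
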
/**
 * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
        const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

        if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
                return 0;
        return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        u32 ccmps = dccp_determine_ccmps(dp);
        u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

        /* Account for header lengths and IPv4/v6 option overhead */
        cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
                    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

        /*
         * Leave enough headroom for common DCCP header options.
         * This only considers options which may appear on DCCP-Data packets, as
         * per table 3 in RFC 4340, 5.8. When running out of space for other
         * options (eg. Ack Vector which can take up to 255 bytes), it is better
         * to schedule a separate Ack. Thus we leave headroom for the following:
         *  - 1 byte for Slow Receiver (11.6)
         *  - 6 bytes for Timestamp (13.1)
         *  - 10 bytes for Timestamp Echo (13.3)
         *  - 8 bytes for NDP count (7.7, when activated)
         *  - 6 bytes for Data Checksum (9.3)
         *  - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
         */
        cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
                           (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

        /* And store cached results */
        icsk->icsk_pmtu_cookie = pmtu;
        dp->dccps_mss_cache = cur_mps;

        return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

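/*
 * Worked example (illustrative; assumes IPv4 without IP options, no CCMPS
 * limit, NDP counts off and no Ack Vectors): for pmtu = 1500,
 *
 *   cur_mps = 1500 - (20 + 0 + 12 + 4)          = 1464  (header overhead)
 *   cur_mps = 1464 - roundup(1 + 6 + 10 + 6, 4) = 1440  (option headroom)
 *
 * so dccps_mss_cache ends up as 1440 bytes, using sizeof(struct dccp_hdr) = 12
 * and sizeof(struct dccp_hdr_ext) = 4.
 */
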
void dccp_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible(&wq->wait);
        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

        rcu_read_unlock();
}

/**
 * dccp_wait_for_ccid  -  Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 *
 * This is used by CCIDs which need to delay the send time in process context.
 */
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
        DEFINE_WAIT(wait);
        long remaining;

        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        sk->sk_write_pending++;
        release_sock(sk);

        remaining = schedule_timeout(delay);

        lock_sock(sk);
        sk->sk_write_pending--;
        finish_wait(sk_sleep(sk), &wait);

        if (signal_pending(current) || sk->sk_err)
                return -1;
        return remaining;
}

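/*
 * Note on the return value (sketch): a non-negative result is the remainder
 * of @delay left over from schedule_timeout(), while -1 means the wait was
 * aborted by a signal or a socket error. dccp_flush_write_queue() below uses
 * the remainder to update its time budget.
 */
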
/*
 * dccp_xmit_packet  -  Send data packet under control of CCID
 * Transmits next-queued payload and informs CCID to account for the packet.
 */
static void dccp_xmit_packet(struct sock *sk)
{
        int err, len;
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb = dccp_qpolicy_pop(sk);

        if (unlikely(skb == NULL))
                return;
        len = skb->len;

        if (sk->sk_state == DCCP_PARTOPEN) {
                const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
                /*
                 * See 8.1.5 - Handshake Completion.
                 *
                 * For robustness we resend Confirm options until the client has
                 * entered OPEN. During the initial feature negotiation, the MPS
                 * is smaller than usual, reduced by the Change/Confirm options.
                 */
                if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
                        DCCP_WARN("Payload too large (%d) for featneg.\n", len);
                        dccp_send_ack(sk);
                        dccp_feat_list_purge(&dp->dccps_featneg);
                }

                inet_csk_schedule_ack(sk);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                          inet_csk(sk)->icsk_rto,
                                          DCCP_RTO_MAX);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
        } else if (dccp_ack_pending(sk)) {
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
        } else {
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
        }

        err = dccp_transmit_skb(sk, skb);
        if (err)
                dccp_pr_debug("transmit_skb() returned err=%d\n", err);
        /*
         * Register this one as sent even if an error occurred. To the remote
         * end a local packet drop is indistinguishable from network loss, i.e.
         * any local drop will eventually be reported via receiver feedback.
         */
        ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

        /*
         * If the CCID needs to transfer additional header options out-of-band
         * (e.g. Ack Vectors or feature-negotiation options), it activates this
         * flag to schedule a Sync. The Sync will automatically incorporate all
         * currently pending header options, thus clearing the backlog.
         */
        if (dp->dccps_sync_scheduled)
                dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}

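/*
 * Note: while the socket is still in PARTOPEN, every payload goes out as
 * DataAck and the delayed-Ack timer is (re)armed above, so the Confirm
 * options keep being carried (and a pure Ack still follows if no further
 * payload is queued) until the peer completes the handshake, cf. RFC 4340,
 * 8.1.5.
 */
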
/**
 * dccp_flush_write_queue  -  Drain queue at end of connection
 * @sk: socket to be drained
 * @time_budget: time allowed to drain the queue
 *
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        long delay, rc;

        while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
                rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

                switch (ccid_packet_dequeue_eval(rc)) {
                case CCID_PACKET_WILL_DEQUEUE_LATER:
                        /*
                         * If the CCID determines when to send, the next sending
                         * time is unknown or the CCID may not even send again
                         * (e.g. remote host crashes or lost Ack packets).
                         */
                        DCCP_WARN("CCID did not manage to send all packets\n");
                        return;
                case CCID_PACKET_DELAY:
                        delay = msecs_to_jiffies(rc);
                        if (delay > *time_budget)
                                return;
                        rc = dccp_wait_for_ccid(sk, delay);
                        if (rc < 0)
                                return;
                        *time_budget -= (delay - rc);
                        /* check again if we can send now */
                        break;
                case CCID_PACKET_SEND_AT_ONCE:
                        dccp_xmit_packet(sk);
                        break;
                case CCID_PACKET_ERR:
                        skb_dequeue(&sk->sk_write_queue);
                        kfree_skb(skb);
                        dccp_pr_debug("packet discarded due to err=%ld\n", rc);
                }
        }
}

void dccp_write_xmit(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;

        while ((skb = dccp_qpolicy_top(sk))) {
                int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

                switch (ccid_packet_dequeue_eval(rc)) {
                case CCID_PACKET_WILL_DEQUEUE_LATER:
                        return;
                case CCID_PACKET_DELAY:
                        sk_reset_timer(sk, &dp->dccps_xmit_timer,
                                       jiffies + msecs_to_jiffies(rc));
                        return;
                case CCID_PACKET_SEND_AT_ONCE:
                        dccp_xmit_packet(sk);
                        break;
                case CCID_PACKET_ERR:
                        dccp_qpolicy_drop(sk, skb);
                        dccp_pr_debug("packet discarded due to err=%d\n", rc);
                }
        }
}

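/*
 * Summary of the CCID verdicts dispatched above (sketch; the constants come
 * from ccid_packet_dequeue_eval() in net/dccp/ccid.h):
 *   CCID_PACKET_SEND_AT_ONCE       - transmit the packet immediately
 *   CCID_PACKET_DELAY              - rate-based pacing, retry after rc msecs
 *   CCID_PACKET_WILL_DEQUEUE_LATER - the CCID restarts transmission itself
 *   CCID_PACKET_ERR                - give up on this packet and drop it
 */
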
/**
 * dccp_retransmit_skb  -  Retransmit Request, Close, or CloseReq packets
 * There are only four retransmittable packet types in DCCP:
 * - Request  in client-REQUEST  state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close    in   node-CLOSING  state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
        WARN_ON(sk->sk_send_head == NULL);

        if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
                return -EHOSTUNREACH; /* Routing failure or similar. */

        /* this count is used to distinguish original and retransmitted skb */
        inet_csk(sk)->icsk_retransmits++;

        return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}

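/*
 * Note: sk_send_head keeps the original skb, so each retransmission attempt
 * consumes a fresh clone; the icsk_retransmits count then lets
 * dccp_transmit_skb() tell a retransmitted Request apart from the original
 * (ISS handling in its first switch statement).
 */
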
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        struct dccp_request_sock *dreq;
        const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
                                           GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        skb_dst_set(skb, dst_clone(dst));

        dreq = dccp_rsk(req);
        if (inet_rsk(req)->acked)       /* increase ISS upon retransmission */
                dccp_inc_seqno(&dreq->dreq_iss);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

        /* Resolve feature dependencies resulting from choice of CCID */
        if (dccp_feat_server_ccid_dependencies(dreq))
                goto response_failed;

        if (dccp_insert_options_rsk(dreq, skb))
                goto response_failed;

        /* Build and checksum header */
        dh = dccp_zeroed_hdr(skb, dccp_header_size);

        dh->dccph_sport = inet_rsk(req)->loc_port;
        dh->dccph_dport = inet_rsk(req)->rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
        dh->dccph_x     = 1;
        dccp_hdr_set_seq(dh, dreq->dreq_iss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
        dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

        dccp_csum_outgoing(skb);

        /* We use `acked' to remember that a Response was already sent. */
        inet_rsk(req)->acked = 1;
        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;

response_failed:
        kfree_skb(skb);
        return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer offending packet in @rcv_skb with Reset from control socket @ctl */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
        struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
        const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
                                       sizeof(struct dccp_hdr_ext) +
                                       sizeof(struct dccp_hdr_reset);
        struct dccp_hdr_reset *dhr;
        struct sk_buff *skb;

        skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        skb_reserve(skb, sk->sk_prot->max_header);

        /* Swap the send and the receive. */
        dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_sport = rxdh->dccph_dport;
        dh->dccph_dport = rxdh->dccph_sport;
        dh->dccph_doff  = dccp_hdr_reset_len / 4;
        dh->dccph_x     = 1;

        dhr = dccp_hdr_reset(skb);
        dhr->dccph_reset_code = dcb->dccpd_reset_code;

        switch (dcb->dccpd_reset_code) {
        case DCCP_RESET_CODE_PACKET_ERROR:
                dhr->dccph_reset_data[0] = rxdh->dccph_type;
                break;
        case DCCP_RESET_CODE_OPTION_ERROR:      /* fall through */
        case DCCP_RESET_CODE_MANDATORY_ERROR:
                memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
                break;
        }
        /*
         * From RFC 4340, 8.3.1:
         *   If P.ackno exists, set R.seqno := P.ackno + 1.
         *   Else set R.seqno := 0.
         */
        if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

        dccp_csum_outgoing(skb);
        return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
        struct sk_buff *skb;
        /*
         * FIXME: what if rebuild_header fails?
         * Should we be doing a rebuild_header here?
         */
        int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

        if (err != 0)
                return err;

        skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOBUFS;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type       = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_reset_code = code;

        return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
int dccp_connect(struct sock *sk)
{
        struct sk_buff *skb;
        struct dccp_sock *dp = dccp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk->sk_err = 0;
        sock_reset_flag(sk, SOCK_DONE);

        dccp_sync_mss(sk, dst_mtu(dst));

        /* do not connect if feature negotiation setup fails */
        if (dccp_feat_finalise_settings(dccp_sk(sk)))
                return -EPROTO;

        /* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
        dp->dccps_gar = dp->dccps_iss;

        skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
        if (unlikely(skb == NULL))
                return -ENOBUFS;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

        dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
        DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

        /* Timer for repeating the REQUEST until an answer. */
        icsk->icsk_retransmits = 0;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  icsk->icsk_rto, DCCP_RTO_MAX);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

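/*
 * Sketch of the active-open path that starts here: dccp_connect() entails and
 * sends the initial Request; while in REQUEST state the retransmission timer
 * armed above fires into dccp_retransmit_skb(), which re-clones sk_send_head.
 * Handling of the peer's Response (and the move to PARTOPEN) lives in the
 * input path, not in this file.
 */
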
void dccp_send_ack(struct sock *sk)
{
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != DCCP_CLOSED) {
                struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
                                                GFP_ATOMIC);

                if (skb == NULL) {
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX,
                                                  DCCP_RTO_MAX);
                        return;
                }

                /* Reserve space for headers */
                skb_reserve(skb, sk->sk_prot->max_header);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                dccp_transmit_skb(sk, skb);
        }
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        /*
         * FIXME: tune this timer. elapsed time fixes the skew, so no problem
         * with using 2s, and active senders also piggyback the ACK into a
         * DATAACK packet, so this is really for quiescent senders.
         */
        unsigned long timeout = jiffies + 2 * HZ;

        /* Use new timeout only if there wasn't an older one earlier. */
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If delack timer was blocked or is about to expire,
                 * send ACK now.
                 *
                 * FIXME: check the "about to expire" part
                 */
                if (icsk->icsk_ack.blocked) {
                        dccp_send_ack(sk);
                        return;
                }

                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

void dccp_send_sync(struct sock *sk, const u64 ackno,
                    const enum dccp_pkt_type pkt_type)
{
        /*
         * We are not putting this on the write queue, so
         * dccp_transmit_skb() will set the ownership to this
         * sock.
         */
        struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

        if (skb == NULL) {
                /* FIXME: how to make sure the sync is sent? */
                DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
                return;
        }

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
        DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

        /*
         * Clear the flag in case the Sync was scheduled for out-of-band data,
         * such as carrying a long Ack Vector.
         */
        dccp_sk(sk)->dccps_sync_scheduled = 0;

        dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

        skb = alloc_skb(sk->sk_prot->max_header, prio);
        if (skb == NULL)
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
        else
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

        if (active) {
                skb = dccp_skb_entail(sk, skb);
                /*
                 * Retransmission timer for active-close: RFC 4340, 8.3 requires
                 * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
                 * state can be left. The initial timeout is 2 RTTs.
                 * Since RTT measurement is done by the CCIDs, there is no easy
                 * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
                 * is too low (200ms); we use a high value to avoid unnecessary
                 * retransmissions when the link RTT is > 0.2 seconds.
                 * FIXME: Let main module sample RTTs and use that instead.
                 */
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
        }
        dccp_transmit_skb(sk, skb);
}