// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
	return skb_clone(sk->sk_send_head, gfp_any());
}

/*
 * All SKBs seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					     dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;
		/*
		 * Increment GSS here already in case the option code needs it.
		 * Update GSS for real only if option processing below succeeds.
		 */
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			/* fall through */
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck, and Reset packets should come
			 * through here with skb->sk set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->inet_sport;
		dh->dccph_dport	= inet->inet_dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		dh->dccph_cscov	= dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}
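
#if 0
/*
 * Illustrative sketch only (not compiled): 48-bit sequence arithmetic.
 * dccp_transmit_skb() pre-increments GSS with ADD48() so that the option
 * code already sees the sequence number of the packet being built. ADD48()
 * is addition modulo 2^48 (see dccp.h), hence the sequence space wraps:
 */
static void dccp_seqno_wrap_example(void)
{
	u64 gss = UINT48_MAX;		/* largest 48-bit sequence number */

	gss = ADD48(gss, 1);		/* wraps around to 0 */
	WARN_ON(gss != 0);
}
#endif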

/**
 * dccp_determine_ccmps - Find out about CCID-specific packet-size limits
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
	const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

	if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
		return 0;
	return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

	/* Account for header lengths and IPv4/v6 option overhead */
	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

	/*
	 * Leave enough headroom for common DCCP header options.
	 * This only considers options which may appear on DCCP-Data packets, as
	 * per table 3 in RFC 4340, 5.8. When running out of space for other
	 * options (e.g. Ack Vector which can take up to 255 bytes), it is better
	 * to schedule a separate Ack. Thus we leave headroom for the following:
	 *  - 1 byte for Slow Receiver (11.6)
	 *  - 6 bytes for Timestamp (13.1)
	 *  - 10 bytes for Timestamp Echo (13.3)
	 *  - 8 bytes for NDP count (7.7, when activated)
	 *  - 6 bytes for Data Checksum (9.3)
	 *  - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
	 */
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);
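
/*
 * Illustrative worked example for dccp_sync_mss(), assuming IPv4 without IP
 * options (net_header_len = 20, icsk_ext_hdr_len = 0), pmtu = 1500, no CCMPS
 * limit, and NDP counts as well as Ack Vectors disabled:
 *
 *	1500 - 20 - (12 + 4)              = 1464  (network + DCCP headers)
 *	1464 - roundup(1 + 6 + 10 + 6, 4) = 1440  (option headroom, 23 -> 24)
 *
 * i.e. dccps_mss_cache would end up as 1440 bytes of payload per packet.
 */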

void dccp_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

	rcu_read_unlock();
}

/**
 * dccp_wait_for_ccid - Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 *
 * This is used by CCIDs which need to delay the send time in process context.
 */
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
	DEFINE_WAIT(wait);
	long remaining;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	sk->sk_write_pending++;
	release_sock(sk);

	remaining = schedule_timeout(delay);

	lock_sock(sk);
	sk->sk_write_pending--;
	finish_wait(sk_sleep(sk), &wait);

	if (signal_pending(current) || sk->sk_err)
		return -1;
	return remaining;
}

/**
 * dccp_xmit_packet - Send data packet under control of CCID
 * Transmits next-queued payload and informs CCID to account for the packet.
 */
static void dccp_xmit_packet(struct sock *sk)
{
	int err, len;
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb = dccp_qpolicy_pop(sk);

	if (unlikely(skb == NULL))
		return;
	len = skb->len;

	if (sk->sk_state == DCCP_PARTOPEN) {
		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
		/*
		 * See 8.1.5 - Handshake Completion.
		 *
		 * For robustness we resend Confirm options until the client has
		 * entered OPEN. During the initial feature negotiation, the MPS
		 * is smaller than usual, reduced by the Change/Confirm options.
		 */
		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
			dccp_send_ack(sk);
			dccp_feat_list_purge(&dp->dccps_featneg);
		}

		inet_csk_schedule_ack(sk);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else if (dccp_ack_pending(sk)) {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
	}

	err = dccp_transmit_skb(sk, skb);
	if (err)
		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
	/*
	 * Register this one as sent even if an error occurred. To the remote
	 * end a local packet drop is indistinguishable from network loss, i.e.
	 * any local drop will eventually be reported via receiver feedback.
	 */
	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

	/*
	 * If the CCID needs to transfer additional header options out-of-band
	 * (e.g. Ack Vectors or feature-negotiation options), it activates this
	 * flag to schedule a Sync. The Sync will automatically incorporate all
	 * currently pending header options, thus clearing the backlog.
	 */
	if (dp->dccps_sync_scheduled)
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}
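
/*
 * Note (sketch) on the CCID send-permission convention assumed by the two
 * queue drivers below, cf. ccid_packet_dequeue_eval() in ccid.h: a negative
 * return from ccid_hc_tx_send_packet() means "drop the packet" (reported as
 * CCID_PACKET_ERR), zero means "send at once", and a positive value is a
 * delay in milliseconds - hence the msecs_to_jiffies(rc) conversions in both
 * callers. CCID_PACKET_WILL_DEQUEUE_LATER indicates that the CCID itself
 * will dequeue and transmit when it is ready.
 */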

/**
 * dccp_flush_write_queue - Drain queue at end of connection
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long delay, rc;

	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			/*
			 * If the CCID determines when to send, the next sending
			 * time is unknown or the CCID may not even send again
			 * (e.g. remote host crashes or lost Ack packets).
			 */
			DCCP_WARN("CCID did not manage to send all packets\n");
			return;
		case CCID_PACKET_DELAY:
			delay = msecs_to_jiffies(rc);
			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			if (rc < 0)
				return;
			*time_budget -= (delay - rc);
			/* check again if we can send now */
			break;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
		}
	}
}

void dccp_write_xmit(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = dccp_qpolicy_top(sk))) {
		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			return;
		case CCID_PACKET_DELAY:
			sk_reset_timer(sk, &dp->dccps_xmit_timer,
				       jiffies + msecs_to_jiffies(rc));
			return;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			dccp_qpolicy_drop(sk, skb);
			dccp_pr_debug("packet discarded due to err=%d\n", rc);
		}
	}
}

/**
 * dccp_retransmit_skb - Retransmit Request, Close, or CloseReq packets
 * There are only four retransmittable packet types in DCCP:
 *  - Request  in client-REQUEST  state (sec. 8.1.1),
 *  - CloseReq in server-CLOSEREQ state (sec. 8.3),
 *  - Close    in   node-CLOSING  state (sec. 8.3),
 *  - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
	WARN_ON(sk->sk_send_head == NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* this count is used to distinguish original and retransmitted skb */
	inet_csk(sk)->icsk_retransmits++;

	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}
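
/*
 * Sketch of the entail/clone pattern assumed by dccp_retransmit_skb(): the
 * pristine skb stays parked on sk->sk_send_head (via dccp_skb_entail()),
 * and every transmission - including the first - hands a fresh clone to
 * dccp_transmit_skb(). The icsk_retransmits counter then tells the original
 * (== 0) apart from retransmissions (> 0), cf. the Request/ISS special case
 * in dccp_transmit_skb().
 */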

struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb;

	/* sk is marked const to clearly express we don't hold the socket lock.
	 * sock_wmalloc() will atomically change sk->sk_wmem_alloc,
	 * so it is safe to promote sk to non-const.
	 */
	skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
			   GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, MAX_DCCP_HEADER);

	skb_dst_set(skb, dst_clone(dst));

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase GSS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_gss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_gss;

	/* Resolve feature dependencies resulting from choice of CCID */
	if (dccp_feat_server_ccid_dependencies(dreq))
		goto response_failed;

	if (dccp_insert_options_rsk(dreq, skb))
		goto response_failed;

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= htons(inet_rsk(req)->ir_num);
	dh->dccph_dport	= inet_rsk(req)->ir_rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dreq->dreq_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
response_failed:
	kfree_skb(skb);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer offending packet in @rcv_skb with Reset from control socket @sk */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;
	struct sk_buff *skb;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, sk->sk_prot->max_header);

	/* Swap the send and the receive. */
	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_reset_len / 4;
	dh->dccph_x	= 1;

	dhr = dccp_hdr_reset(skb);
	dhr->dccph_reset_code = dcb->dccpd_reset_code;

	switch (dcb->dccpd_reset_code) {
	case DCCP_RESET_CODE_PACKET_ERROR:
		dhr->dccph_reset_data[0] = rxdh->dccph_type;
		break;
	case DCCP_RESET_CODE_OPTION_ERROR:	/* fall through */
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
		break;
	}
	/*
	 * From RFC 4340, 8.3.1:
	 *   If P.ackno exists, set R.seqno := P.ackno + 1.
	 *   Else set R.seqno := 0.
	 */
	if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

	dccp_csum_outgoing(skb);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);
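
/*
 * Illustrative example for the Reset sequencing above: if the offending
 * packet carried P.seqno = 100 and P.ackno = 50, the generated Reset has
 * R.seqno = ADD48(50, 1) = 51 and R.ackno = 100. If P.ackno was absent
 * (DCCP_PKT_WITHOUT_ACK_SEQ), R.seqno stays 0, courtesy of the pre-zeroed
 * header from dccp_zeroed_hdr().
 */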

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	if (err != 0)
		return err;

	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOBUFS;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_reset_code = code;

	return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF-independently.
 */
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/* do not connect if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dccp_sk(sk)))
		return -EPROTO;

	/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
	dp->dccps_gar = dp->dccps_iss;

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer arrives. */
	icsk->icsk_retransmits = 0;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);
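
/*
 * Sketch of the client-side handshake that dccp_connect() initiates
 * (RFC 4340, 8.1); names in parentheses refer to functions in this file:
 *
 *	CLIENT                              SERVER
 *	Request, seq = ISS  -------->       (answered via dccp_make_response())
 *	      <--------  Response, ack = ISS
 *	Ack / DataAck       -------->       (PARTOPEN, cf. dccp_xmit_packet())
 *
 * The Request sits on sk_send_head, so the ICSK_TIME_RETRANS timer armed
 * above can resend it via dccp_retransmit_skb() until a Response arrives.
 */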

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL) {
		/* FIXME: how to make sure the sync is sent? */
		DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type	= pkt_type;
	DCCP_SKB_CB(skb)->dccpd_ack_seq	= ackno;

	/*
	 * Clear the flag in case the Sync was scheduled for out-of-band data,
	 * such as carrying a long Ack Vector.
	 */
	dccp_sk(sk)->dccps_sync_scheduled = 0;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
	else
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

	if (active) {
		skb = dccp_skb_entail(sk, skb);
		/*
		 * Retransmission timer for active-close: RFC 4340, 8.3 requires
		 * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
		 * state can be left. The initial timeout is 2 RTTs.
		 * Since RTT measurement is done by the CCIDs, there is no easy
		 * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
		 * is too low (200ms); we use a high value to avoid unnecessary
		 * retransmissions when the link RTT is > 0.2 seconds.
		 * FIXME: Let main module sample RTTs and use that instead.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
	}
	dccp_transmit_skb(sk, skb);
}
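
/*
 * Usage note (sketch): dccp_send_close(sk, 1) performs an active close - the
 * Close/CloseReq is entailed on sk_send_head and retransmitted per RFC 4340,
 * 8.3 until the peer answers, starting from DCCP_TIMEOUT_INIT. With
 * active == 0 the packet is sent exactly once, using GFP_ATOMIC since the
 * caller may not be able to sleep.
 */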