/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
}

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;
		/*
		 * Increment GSS here already in case the option code needs it.
		 * Update GSS for real only if option processing below succeeds.
		 */
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			/* fall through */
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck, and Reset packets should come
			 * through here with skb->sk set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}


		/* Build DCCP header and checksum it. */
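		/*
		 * Illustrative note: dccph_doff counts 32-bit words, hence
		 * the division by 4 below; dccp_insert_options() is expected
		 * to have padded dccpd_opt_len to a multiple of 4, since
		 * RFC 4340, 5.8 requires the options area to end on a 32-bit
		 * boundary.
		 */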
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->inet_sport;
		dh->dccph_dport	= inet->inet_dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		dh->dccph_cscov	= dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(skb);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

/**
 * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
	const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

	if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
		return 0;
	return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

	/* Account for header lengths and IPv4/v6 option overhead */
	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

	/*
	 * Leave enough headroom for common DCCP header options.
	 * This only considers options which may appear on DCCP-Data packets,
	 * as per table 3 in RFC 4340, 5.8. When running out of space for other
	 * options (e.g. Ack Vector which can take up to 255 bytes), it is
	 * better to schedule a separate Ack. Thus we leave headroom for the
	 * following:
	 *  - 1 byte for Slow Receiver (11.6)
	 *  - 6 bytes for Timestamp (13.1)
	 *  - 10 bytes for Timestamp Echo (13.3)
	 *  - 8 bytes for NDP count (7.7, when activated)
	 *  - 6 bytes for Data Checksum (9.3)
	 *  - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
	 */
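	/*
	 * Worked example (illustrative): with NDP counts active and no Ack
	 * Vector, the sum below is 1 + 6 + 10 + 8 + 6 = 31 bytes, which
	 * roundup() turns into 32 bytes of reserved headroom; without NDP
	 * counts it is 23 bytes, rounded up to 24.
	 */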
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

	rcu_read_unlock();
}

/**
 * dccp_wait_for_ccid  -  Wait for ccid to tell us we can send a packet
 * @sk:    socket to wait for
 * @skb:   current skb to pass on for waiting
 * @delay: sleep timeout in milliseconds (> 0)
 * This function is called by default when the socket is closed, and
 * when a non-zero linger time is set on the socket. For consistency
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	unsigned long jiffdelay;
	int rc;

	do {
		dccp_pr_debug("delayed send by %d msec\n", delay);
		jiffdelay = msecs_to_jiffies(delay);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		sk->sk_write_pending++;
		release_sock(sk);
		schedule_timeout(jiffdelay);
		lock_sock(sk);
		sk->sk_write_pending--;

		if (sk->sk_err)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
	} while ((delay = rc) > 0);
out:
	finish_wait(sk_sleep(sk), &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_interrupted:
	rc = -EINTR;
	goto out;
}
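
/*
 * Note: ccid_hc_tx_send_packet() follows the convention visible in both
 * dccp_wait_for_ccid() above and dccp_write_xmit() below: a return value of
 * 0 means the packet may be sent now, a positive value is a delay in
 * milliseconds before sending, and a negative value is an error, in which
 * case the packet is discarded.
 */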
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
						msecs_to_jiffies(err)+jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb, err);
			if (err && err != -EINTR)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
				/*
				 * See 8.1.5 - Handshake Completion.
				 *
				 * For robustness we resend Confirm options until the client has
				 * entered OPEN. During the initial feature negotiation, the MPS
				 * is smaller than usual, reduced by the Change/Confirm options.
				 */
				if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
					DCCP_WARN("Payload too large (%d) for featneg.\n", len);
					dccp_send_ack(sk);
					dccp_feat_list_purge(&dp->dccps_featneg);
				}

				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						inet_csk(sk)->icsk_rto,
						DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else {
			dccp_pr_debug("packet discarded due to err=%d\n", err);
			kfree_skb(skb);
		}
	}
}

/**
 * dccp_retransmit_skb  -  Retransmit Request, Close, or CloseReq packets
 * There are only four retransmittable packet types in DCCP:
 * - Request  in client-REQUEST  state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close    in node-CLOSING    state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
	WARN_ON(sk->sk_send_head == NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* this count is used to distinguish original and retransmitted skb */
	inet_csk(sk)->icsk_retransmits++;

	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}
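
/*
 * Note: only a clone of the skb parked at sk->sk_send_head is handed to
 * dccp_transmit_skb() above, so the original stays queued and can be
 * retransmitted again if this attempt is also lost.
 */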
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb_dst_set(skb, dst_clone(dst));

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_iss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

	/* Resolve feature dependencies resulting from choice of CCID */
	if (dccp_feat_server_ccid_dependencies(dreq))
		goto response_failed;

	if (dccp_insert_options_rsk(dreq, skb))
		goto response_failed;

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_rsk(req)->loc_port;
	dh->dccph_dport	= inet_rsk(req)->rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
response_failed:
	kfree_skb(skb);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer offending packet in @rcv_skb with Reset from control socket @ctl */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;
	struct sk_buff *skb;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, sk->sk_prot->max_header);

	/* Swap the send and the receive. */
	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_reset_len / 4;
	dh->dccph_x	= 1;

	dhr = dccp_hdr_reset(skb);
	dhr->dccph_reset_code = dcb->dccpd_reset_code;

	switch (dcb->dccpd_reset_code) {
	case DCCP_RESET_CODE_PACKET_ERROR:
		dhr->dccph_reset_data[0] = rxdh->dccph_type;
		break;
	case DCCP_RESET_CODE_OPTION_ERROR:	/* fall through */
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
		break;
	}
	/*
	 * From RFC 4340, 8.3.1:
	 *   If P.ackno exists, set R.seqno := P.ackno + 1.
	 *   Else set R.seqno := 0.
	 */
	if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

	dccp_csum_outgoing(skb);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);
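
/*
 * Unlike dccp_ctl_make_reset() above, which builds a Reset for packets that
 * belong to no known connection (answered via the control socket), the Reset
 * generated below is for an established socket and therefore travels through
 * the normal dccp_transmit_skb() path.
 */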
/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	if (err != 0)
		return err;

	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOBUFS;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_reset_code = code;

	return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
	dp->dccps_gar = dp->dccps_iss;

	icsk->icsk_retransmits = 0;
}

int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* do not connect if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dccp_sk(sk)))
		return -EPROTO;

	dccp_connect_init(sk);

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_skb_entail(sk, skb);
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);
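
/*
 * Note: dccp_connect() above entails the Request skb at sk->sk_send_head and
 * transmits only a clone, so dccp_retransmit_skb() can resend the same
 * Request when the ICSK_TIME_RETRANS timer expires; the icsk_retransmits
 * counter is what lets dccp_transmit_skb() use ISS only for the original
 * (non-retransmitted) Request.
 */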
void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL) {
		/* FIXME: how to make sure the sync is sent? */
		DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type	= pkt_type;
	DCCP_SKB_CB(skb)->dccpd_ack_seq	= ackno;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
	else
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

	if (active) {
		dccp_write_xmit(sk, 1);
		dccp_skb_entail(sk, skb);
		dccp_transmit_skb(sk, skb_clone(skb, prio));
		/*
		 * Retransmission timer for active-close: RFC 4340, 8.3
		 * requires retransmitting the Close/CloseReq until the
		 * CLOSING/CLOSEREQ state can be left. The initial timeout is
		 * 2 RTTs.
		 * Since RTT measurement is done by the CCIDs, there is no easy
		 * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
		 * is too low (200ms); we use a high value to avoid unnecessary
		 * retransmissions when the link RTT is > 0.2 seconds.
		 * FIXME: Let main module sample RTTs and use that instead.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
	} else
		dccp_transmit_skb(sk, skb);
}