// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/udp.h>
#include "ar-internal.h"

extern int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);

static ssize_t do_udp_sendmsg(struct socket *socket, struct msghdr *msg, size_t len)
{
	struct sockaddr *sa = msg->msg_name;
	struct sock *sk = socket->sk;

	if (IS_ENABLED(CONFIG_AF_RXRPC_IPV6)) {
		if (sa->sa_family == AF_INET6) {
			if (sk->sk_family != AF_INET6) {
				pr_warn("AF_INET6 address on AF_INET socket\n");
				return -ENOPROTOOPT;
			}
			return udpv6_sendmsg(sk, msg, len);
		}
	}
	return udp_sendmsg(sk, msg, len);
}

struct rxrpc_abort_buffer {
	struct rxrpc_wire_header whdr;
	__be32 abort_code;
};

static const char rxrpc_keepalive_string[] = "";

/*
 * Increase Tx backoff on transmission failure and clear it on success.
 */
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
	if (ret < 0) {
		u16 tx_backoff = READ_ONCE(call->tx_backoff);

		if (tx_backoff < HZ)
			WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
	} else {
		WRITE_ONCE(call->tx_backoff, 0);
	}
}
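
/* A worked example of the backoff arithmetic above: the value is in
 * jiffies, grows by one jiffy per consecutive failed transmission and is
 * capped at HZ (i.e. one second), so assuming HZ=250 the cap is reached
 * after 250 straight failures.  A single successful transmission resets
 * it to zero.
 */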

/*
 * Arrange for a keepalive ping a certain time after we last transmitted.  This
 * lets the far side know we're still interested in this call and helps keep
 * the route through any intervening firewall open.
 *
 * Receiving a response to the ping will prevent the ->expect_rx_by timer from
 * expiring.
 */
static void rxrpc_set_keepalive(struct rxrpc_call *call)
{
	unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;

	keepalive_at += now;
	WRITE_ONCE(call->keepalive_at, keepalive_at);
	rxrpc_reduce_call_timer(call, keepalive_at, now,
				rxrpc_timer_set_for_keepalive);
}

/*
 * Fill out an ACK packet.
 */
static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
				 struct rxrpc_call *call,
				 struct rxrpc_txbuf *txb)
{
	struct rxrpc_ackinfo ackinfo;
	unsigned int qsize;
	rxrpc_seq_t window, wtop, wrap_point, ix, first;
	int rsize;
	u64 wtmp;
	u32 mtu, jmax;
	u8 *ackp = txb->acks;
	u8 sack_buffer[sizeof(call->ackr_sack_table)] __aligned(8);

	atomic_set(&call->ackr_nr_unacked, 0);
	atomic_set(&call->ackr_nr_consumed, 0);
	rxrpc_inc_stat(call->rxnet, stat_tx_ack_fill);

	/* Barrier against rxrpc_input_data(). */
retry:
	wtmp   = atomic64_read_acquire(&call->ackr_window);
	window = lower_32_bits(wtmp);
	wtop   = upper_32_bits(wtmp);
	txb->ack.firstPacket = htonl(window);
	txb->ack.nAcks = 0;

	if (after(wtop, window)) {
		/* Try to copy the SACK ring locklessly.  We can use the copy
		 * only if the now-current top of the window didn't go past the
		 * previously read base - otherwise we can't know whether we
		 * have old data or new data.
		 */
		memcpy(sack_buffer, call->ackr_sack_table, sizeof(sack_buffer));
		wrap_point = window + RXRPC_SACK_SIZE - 1;
		wtmp   = atomic64_read_acquire(&call->ackr_window);
		window = lower_32_bits(wtmp);
		wtop   = upper_32_bits(wtmp);
		if (after(wtop, wrap_point)) {
			cond_resched();
			goto retry;
		}

		/* The buffer is maintained as a ring with an invariant mapping
		 * between bit position and sequence number, so we'll probably
		 * need to rotate it.
		 */
		txb->ack.nAcks = wtop - window;
		ix = window % RXRPC_SACK_SIZE;
		first = sizeof(sack_buffer) - ix;

		if (ix + txb->ack.nAcks <= RXRPC_SACK_SIZE) {
			memcpy(txb->acks, sack_buffer + ix, txb->ack.nAcks);
		} else {
			memcpy(txb->acks, sack_buffer + ix, first);
			memcpy(txb->acks + first, sack_buffer,
			       txb->ack.nAcks - first);
		}

		ackp += txb->ack.nAcks;
	} else if (before(wtop, window)) {
		pr_warn("ack window backward %x %x\n", window, wtop);
	} else if (txb->ack.reason == RXRPC_ACK_DELAY) {
		txb->ack.reason = RXRPC_ACK_IDLE;
	}

	mtu = conn->peer->if_mtu;
	mtu -= conn->peer->hdrsize;
	jmax = rxrpc_rx_jumbo_max;
	qsize = (window - 1) - call->rx_consumed;
	rsize = max_t(int, call->rx_winsize - qsize, 0);
	ackinfo.rxMTU		= htonl(rxrpc_rx_mtu);
	ackinfo.maxMTU		= htonl(mtu);
	ackinfo.rwind		= htonl(rsize);
	ackinfo.jumbo_max	= htonl(jmax);

	*ackp++ = 0;
	*ackp++ = 0;
	*ackp++ = 0;
	memcpy(ackp, &ackinfo, sizeof(ackinfo));
	return txb->ack.nAcks + 3 + sizeof(ackinfo);
}
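
/* A worked example of the ring rotation above, assuming RXRPC_SACK_SIZE
 * is 256: with window = 0x1fe and wtop = 0x204, nAcks = 6, ix = 0xfe and
 * first = 2, so ix + nAcks exceeds the ring size and the two-memcpy path
 * is taken - two SACK bytes come from ring slots 0xfe-0xff and the
 * remaining four wrap around to slots 0-3.  The returned length counts
 * the SACK bytes, three bytes of padding and the trailing rxrpc_ackinfo.
 */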

/*
 * Record the beginning of an RTT probe.
 */
static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
				 enum rxrpc_rtt_tx_trace why)
{
	unsigned long avail = call->rtt_avail;
	int rtt_slot = 9;

	if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
		goto no_slot;

	rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
	if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
		goto no_slot;

	call->rtt_serial[rtt_slot] = serial;
	call->rtt_sent_at[rtt_slot] = ktime_get_real();
	smp_wmb(); /* Write data before avail bit */
	set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);

	trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
	return rtt_slot;

no_slot:
	trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
	return -1;
}

/*
 * Cancel an RTT probe.
 */
static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
				   rxrpc_serial_t serial, int rtt_slot)
{
	if (rtt_slot != -1) {
		clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
		smp_wmb(); /* Clear pending bit before setting slot */
		set_bit(rtt_slot, &call->rtt_avail);
		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
	}
}
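
/* The probe machinery above keeps two bitfields in ->rtt_avail: the low
 * bits (RXRPC_CALL_RTT_AVAIL_MASK) mark free slots and the bits from
 * RXRPC_CALL_RTT_PEND_SHIFT upwards mark probes in flight.  Claiming a
 * slot is clear-avail-then-set-pending, with a write barrier so the
 * serial and timestamp are visible before the pending bit; cancelling
 * reverses that order.  The initial rtt_slot value of 9 is only a
 * sentinel for the no-slot tracepoint - real slot indices come from
 * __ffs().
 */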

/*
 * Transmit an ACK packet.
 */
int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	struct rxrpc_connection *conn;
	struct msghdr msg;
	struct kvec iov[1];
	rxrpc_serial_t serial;
	size_t len, n;
	int ret, rtt_slot = -1;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	conn = call->conn;

	msg.msg_name	= &call->peer->srx.transport;
	msg.msg_namelen	= call->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	if (txb->ack.reason == RXRPC_ACK_PING)
		txb->wire.flags |= RXRPC_REQUEST_ACK;

	n = rxrpc_fill_out_ack(conn, call, txb);
	if (n == 0)
		return 0;

	iov[0].iov_base	= &txb->wire;
	iov[0].iov_len	= sizeof(txb->wire) + sizeof(txb->ack) + n;
	len = iov[0].iov_len;

	serial = atomic_inc_return(&conn->serial);
	txb->wire.serial = htonl(serial);
	trace_rxrpc_tx_ack(call->debug_id, serial,
			   ntohl(txb->ack.firstPacket),
			   ntohl(txb->ack.serial), txb->ack.reason, txb->ack.nAcks);

	if (txb->ack.reason == RXRPC_ACK_PING)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);

	rxrpc_inc_stat(call->rxnet, stat_tx_ack_send);

	/* Grab the highest received seq as late as possible */
	txb->ack.previousPacket = htonl(call->rx_highest_seq);

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
	ret = do_udp_sendmsg(conn->local->socket, &msg, len);
	call->peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_ack);
	else
		trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
				      rxrpc_tx_point_call_ack);
	rxrpc_tx_backoff(call, ret);

	if (call->state < RXRPC_CALL_COMPLETE) {
		if (ret < 0)
			rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
		rxrpc_set_keepalive(call);
	}

	return ret;
}
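
/* The single kvec above covers the whole ACK datagram, laid out as:
 *
 *	wire header (txb->wire)
 *	ACK header  (txb->ack)
 *	nAcks SACK/NAK bytes (txb->acks)
 *	3 bytes of padding
 *	struct rxrpc_ackinfo trailer
 *
 * where the last three items are the length returned by
 * rxrpc_fill_out_ack().  A PING ACK additionally carries
 * RXRPC_REQUEST_ACK so that the peer's response can serve as an RTT
 * probe.
 */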

/*
 * Send an ABORT call packet.
 */
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_abort_buffer pkt;
	struct msghdr msg;
	struct kvec iov[1];
	rxrpc_serial_t serial;
	int ret;

	/* Don't bother sending aborts for a client call once the server has
	 * hard-ACK'd all of its request data.  After that point, we're not
	 * going to stop the operation proceeding, and whilst we might limit
	 * the reply, it's not worth it if we can send a new call on the same
	 * channel instead, thereby closing off this call.
	 */
	if (rxrpc_is_client_call(call) &&
	    test_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags))
		return 0;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	conn = call->conn;

	msg.msg_name	= &call->peer->srx.transport;
	msg.msg_namelen	= call->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	pkt.whdr.epoch		= htonl(conn->proto.epoch);
	pkt.whdr.cid		= htonl(call->cid);
	pkt.whdr.callNumber	= htonl(call->call_id);
	pkt.whdr.seq		= 0;
	pkt.whdr.type		= RXRPC_PACKET_TYPE_ABORT;
	pkt.whdr.flags		= conn->out_clientflag;
	pkt.whdr.userStatus	= 0;
	pkt.whdr.securityIndex	= call->security_ix;
	pkt.whdr._rsvd		= 0;
	pkt.whdr.serviceId	= htons(call->dest_srx.srx_service);
	pkt.abort_code		= htonl(call->abort_code);

	iov[0].iov_base	= &pkt;
	iov[0].iov_len	= sizeof(pkt);

	serial = atomic_inc_return(&conn->serial);
	pkt.whdr.serial = htonl(serial);

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, sizeof(pkt));
	ret = do_udp_sendmsg(conn->local->socket, &msg, sizeof(pkt));
	conn->peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_abort);
	else
		trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_abort);
	rxrpc_tx_backoff(call, ret);
	return ret;
}
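
/* Note that an ABORT datagram is just the wire header plus a 4-byte
 * network-order abort code (struct rxrpc_abort_buffer above) and is sent
 * once, best-effort: a failure here only bumps the Tx backoff rather
 * than queueing a retransmission.
 */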

/*
 * Send a packet through the transport endpoint.
 */
int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	enum rxrpc_req_ack_trace why;
	struct rxrpc_connection *conn = call->conn;
	struct msghdr msg;
	struct kvec iov[1];
	rxrpc_serial_t serial;
	size_t len;
	int ret, rtt_slot = -1;

	_enter("%x,{%d}", txb->seq, txb->len);

	/* Each transmission of a Tx packet needs a new serial number */
	serial = atomic_inc_return(&conn->serial);
	txb->wire.serial = htonl(serial);

	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
	    txb->seq == 1)
		txb->wire.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;

	iov[0].iov_base	= &txb->wire;
	iov[0].iov_len	= sizeof(txb->wire) + txb->len;
	len = iov[0].iov_len;
	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);

	msg.msg_name	= &call->peer->srx.transport;
	msg.msg_namelen	= call->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	/* If our RTT cache needs working on, request an ACK.  Also request
	 * ACKs if a DATA packet appears to have been lost.
	 *
	 * However, we mustn't request an ACK on the last reply packet of a
	 * service call, lest OpenAFS incorrectly send us an ACK with some
	 * soft-ACKs in it and then never follow up with a proper hard ACK.
	 */
	if (txb->wire.flags & RXRPC_REQUEST_ACK)
		why = rxrpc_reqack_already_on;
	else if (test_bit(RXRPC_TXBUF_LAST, &txb->flags) && rxrpc_sending_to_client(txb))
		why = rxrpc_reqack_no_srv_last;
	else if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
		why = rxrpc_reqack_ack_lost;
	else if (test_bit(RXRPC_TXBUF_RESENT, &txb->flags))
		why = rxrpc_reqack_retrans;
	else if (call->cong_mode == RXRPC_CALL_SLOW_START && call->cong_cwnd <= 2)
		why = rxrpc_reqack_slow_start;
	else if (call->tx_winsize <= 2)
		why = rxrpc_reqack_small_txwin;
	else if (call->peer->rtt_count < 3 && txb->seq & 1)
		why = rxrpc_reqack_more_rtt;
	else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), ktime_get_real()))
		why = rxrpc_reqack_old_rtt;
	else
		goto dont_set_request_ack;

	rxrpc_inc_stat(call->rxnet, stat_why_req_ack[why]);
	trace_rxrpc_req_ack(call->debug_id, txb->seq, why);
	if (why != rxrpc_reqack_no_srv_last)
		txb->wire.flags |= RXRPC_REQUEST_ACK;
dont_set_request_ack:
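
	/* CONFIG_AF_RXRPC_INJECT_LOSS is a test hook: when enabled, the
	 * block below silently drops every eighth DATA packet - the packet
	 * is traced as transmitted-and-lost and a success return is faked
	 * so that the retransmission machinery gets exercised.
	 */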
	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;

		if ((lose++ & 7) == 7) {
			ret = 0;
			trace_rxrpc_tx_data(call, txb->seq, serial,
					    txb->wire.flags,
					    test_bit(RXRPC_TXBUF_RESENT, &txb->flags),
					    true);
			goto done;
		}
	}

	trace_rxrpc_tx_data(call, txb->seq, serial, txb->wire.flags,
			    test_bit(RXRPC_TXBUF_RESENT, &txb->flags), false);

	/* Track what we've attempted to transmit at least once so that the
	 * retransmission algorithm doesn't try to resend what we haven't sent
	 * yet.  However, this can race as we can receive an ACK before we get
	 * to this point.  But, OTOH, we won't get an ACK mentioning this
	 * packet unless the far side received it (though it could have
	 * discarded it anyway and NAK'd it).
	 */
	cmpxchg(&call->tx_transmitted, txb->seq - 1, txb->seq);

	/* Send the packet with the don't fragment bit set if we currently
	 * think it's small enough.
	 */
	if (txb->len >= call->peer->maxdata)
		goto send_fragmentable;

	down_read(&conn->local->defrag_sem);

	txb->last_sent = ktime_get_real();
	if (txb->wire.flags & RXRPC_REQUEST_ACK)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);

	/* Send the packet by UDP
	 * - returns -EMSGSIZE if UDP would have to fragment the packet
	 *   to go out of the interface
	 *   - in which case, we'll have processed the ICMP error
	 *     message and updated the peer record
	 */
	rxrpc_inc_stat(call->rxnet, stat_tx_data_send);
	ret = do_udp_sendmsg(conn->local->socket, &msg, len);
	conn->peer->last_tx_at = ktime_get_seconds();

	up_read(&conn->local->defrag_sem);
	if (ret < 0) {
		rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
		rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_data_nofrag);
	} else {
		trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
				      rxrpc_tx_point_call_data_nofrag);
	}

	rxrpc_tx_backoff(call, ret);
	if (ret == -EMSGSIZE)
		goto send_fragmentable;

done:
	if (ret >= 0) {
		call->tx_last_sent = txb->last_sent;
		if (txb->wire.flags & RXRPC_REQUEST_ACK) {
			call->peer->rtt_last_req = txb->last_sent;
			if (call->peer->rtt_count > 1) {
				unsigned long nowj = jiffies, ack_lost_at;

				ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
				ack_lost_at += nowj;
				WRITE_ONCE(call->ack_lost_at, ack_lost_at);
				rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
							rxrpc_timer_set_for_lost_ack);
			}
		}

		if (txb->seq == 1 &&
		    !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
				      &call->flags)) {
			unsigned long nowj = jiffies, expect_rx_by;

			expect_rx_by = nowj + call->next_rx_timo;
			WRITE_ONCE(call->expect_rx_by, expect_rx_by);
			rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
						rxrpc_timer_set_for_normal);
		}

		rxrpc_set_keepalive(call);
	} else {
		/* Cancel the call if the initial transmission fails,
		 * particularly if that's due to network routing issues that
		 * aren't going away anytime soon.  The layer above can arrange
		 * the retransmission.
		 */
		if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
						  RX_USER_ABORT, ret);
	}

	_leave(" = %d [%u]", ret, call->peer->maxdata);
	return ret;

send_fragmentable:
	/* attempt to send this message with fragmentation enabled */
	_debug("send fragment");

	down_write(&conn->local->defrag_sem);

	txb->last_sent = ktime_get_real();
	if (txb->wire.flags & RXRPC_REQUEST_ACK)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);

	switch (conn->local->srx.transport.family) {
	case AF_INET6:
	case AF_INET:
		ip_sock_set_mtu_discover(conn->local->socket->sk,
					 IP_PMTUDISC_DONT);
		rxrpc_inc_stat(call->rxnet, stat_tx_data_send_frag);
		ret = do_udp_sendmsg(conn->local->socket, &msg, len);
		conn->peer->last_tx_at = ktime_get_seconds();

		ip_sock_set_mtu_discover(conn->local->socket->sk,
					 IP_PMTUDISC_DO);
		break;

	default:
		BUG();
	}

	if (ret < 0) {
		rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
		rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_data_frag);
	} else {
		trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
				      rxrpc_tx_point_call_data_frag);
	}
	rxrpc_tx_backoff(call, ret);

	up_write(&conn->local->defrag_sem);
	goto done;
}

/*
 * Reject a packet through the local endpoint.
 */
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct msghdr msg;
	struct kvec iov[2];
	size_t size;
	__be32 code;
	int ret, ioc;

	rxrpc_see_skb(skb, rxrpc_skb_see_reject);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = &code;
	iov[1].iov_len = sizeof(code);

	msg.msg_name = &srx.transport;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	memset(&whdr, 0, sizeof(whdr));

	switch (skb->mark) {
	case RXRPC_SKB_MARK_REJECT_BUSY:
		whdr.type = RXRPC_PACKET_TYPE_BUSY;
		size = sizeof(whdr);
		ioc = 1;
		break;
	case RXRPC_SKB_MARK_REJECT_ABORT:
		whdr.type = RXRPC_PACKET_TYPE_ABORT;
		code = htonl(skb->priority);
		size = sizeof(whdr) + sizeof(code);
		ioc = 2;
		break;
	default:
		return;
	}

	if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
		msg.msg_namelen = srx.transport_len;

		whdr.epoch	= htonl(sp->hdr.epoch);
		whdr.cid	= htonl(sp->hdr.cid);
		whdr.callNumber	= htonl(sp->hdr.callNumber);
		whdr.serviceId	= htons(sp->hdr.serviceId);
		whdr.flags	= sp->hdr.flags;
		whdr.flags	^= RXRPC_CLIENT_INITIATED;
		whdr.flags	&= RXRPC_CLIENT_INITIATED;

		iov_iter_kvec(&msg.msg_iter, WRITE, iov, ioc, size);
		ret = do_udp_sendmsg(local->socket, &msg, size);
		if (ret < 0)
			trace_rxrpc_tx_fail(local->debug_id, 0, ret,
					    rxrpc_tx_point_reject);
		else
			trace_rxrpc_tx_packet(local->debug_id, &whdr,
					      rxrpc_tx_point_reject);
	}
}
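
/* The flag juggling above takes the offending packet's flags, inverts the
 * RXRPC_CLIENT_INITIATED bit with XOR and then masks everything else off,
 * so the BUSY or ABORT reply travels in the opposite direction to the
 * packet that provoked it and carries no other flag bits.
 */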

/*
 * Send a VERSION reply to a peer as a keepalive.
 */
void rxrpc_send_keepalive(struct rxrpc_peer *peer)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	size_t len;
	int ret;

	_enter("");

	msg.msg_name	= &peer->srx.transport;
	msg.msg_namelen	= peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	whdr.epoch	= htonl(peer->local->rxnet->epoch);
	whdr.cid	= 0;
	whdr.callNumber	= 0;
	whdr.seq	= 0;
	whdr.serial	= 0;
	whdr.type	= RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
	whdr.flags	= RXRPC_LAST_PACKET;
	whdr.userStatus	= 0;
	whdr.securityIndex = 0;
	whdr._rsvd	= 0;
	whdr.serviceId	= 0;

	iov[0].iov_base	= &whdr;
	iov[0].iov_len	= sizeof(whdr);
	iov[1].iov_base	= (char *)rxrpc_keepalive_string;
	iov[1].iov_len	= sizeof(rxrpc_keepalive_string);

	len = iov[0].iov_len + iov[1].iov_len;

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
	ret = do_udp_sendmsg(peer->local->socket, &msg, len);
	if (ret < 0)
		trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
				    rxrpc_tx_point_version_keepalive);
	else
		trace_rxrpc_tx_packet(peer->debug_id, &whdr,
				      rxrpc_tx_point_version_keepalive);

	peer->last_tx_at = ktime_get_seconds();
	_leave("");
}

/*
 * Schedule an instant Tx resend.
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call,
					struct rxrpc_txbuf *txb)
{
	if (call->state < RXRPC_CALL_COMPLETE)
		kdebug("resend");
}

/*
 * Transmit one packet.
 */
void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	int ret;

	ret = rxrpc_send_data_packet(call, txb);
	if (ret < 0) {
		switch (ret) {
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
						  0, ret);
			break;
		default:
			_debug("need instant resend %d", ret);
			rxrpc_instant_resend(call, txb);
		}
	} else {
		unsigned long now = jiffies;
		unsigned long resend_at = now + call->peer->rto_j;

		WRITE_ONCE(call->resend_at, resend_at);
		rxrpc_reduce_call_timer(call, resend_at, now,
					rxrpc_timer_set_for_send);
	}
}