// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/udp.h>
#include "ar-internal.h"

extern int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);

static ssize_t do_udp_sendmsg(struct socket *socket, struct msghdr *msg, size_t len)
{
	struct sockaddr *sa = msg->msg_name;
	struct sock *sk = socket->sk;

	if (IS_ENABLED(CONFIG_AF_RXRPC_IPV6)) {
		if (sa->sa_family == AF_INET6) {
			if (sk->sk_family != AF_INET6) {
				pr_warn("AF_INET6 address on AF_INET socket\n");
				return -ENOPROTOOPT;
			}
			return udpv6_sendmsg(sk, msg, len);
		}
	}
	return udp_sendmsg(sk, msg, len);
}

struct rxrpc_abort_buffer {
	struct rxrpc_wire_header whdr;
	__be32 abort_code;
};

static const char rxrpc_keepalive_string[] = "";

/*
 * Increase Tx backoff on transmission failure and clear it on success.
 */
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
	if (ret < 0) {
		u16 tx_backoff = READ_ONCE(call->tx_backoff);

		if (tx_backoff < HZ)
			WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
	} else {
		WRITE_ONCE(call->tx_backoff, 0);
	}
}

/*
 * Arrange for a keepalive ping a certain time after we last transmitted.  This
 * lets the far side know we're still interested in this call and helps keep
 * the route through any intervening firewall open.
 *
 * Receiving a response to the ping will prevent the ->expect_rx_by timer from
 * expiring.
 */
static void rxrpc_set_keepalive(struct rxrpc_call *call)
{
	unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;

	keepalive_at += now;
	WRITE_ONCE(call->keepalive_at, keepalive_at);
	rxrpc_reduce_call_timer(call, keepalive_at, now,
				rxrpc_timer_set_for_keepalive);
}
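
/*
 * Worked example (illustrative): scheduling the ping at a sixth of the
 * expected-Rx timeout leaves room for several keepalives to go unanswered
 * before ->expect_rx_by fires.  If, say, next_rx_timo were 6 * HZ, a ping
 * would become due roughly one second after the last transmission, allowing
 * repeated attempts within the six-second window.
 */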

/*
 * Fill out an ACK packet.
 */
static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
				 struct rxrpc_call *call,
				 struct rxrpc_txbuf *txb)
{
	struct rxrpc_ackinfo ackinfo;
	unsigned int qsize;
	rxrpc_seq_t window, wtop, wrap_point, ix, first;
	int rsize;
	u64 wtmp;
	u32 mtu, jmax;
	u8 *ackp = txb->acks;
	u8 sack_buffer[sizeof(call->ackr_sack_table)] __aligned(8);

	atomic_set(&call->ackr_nr_unacked, 0);
	atomic_set(&call->ackr_nr_consumed, 0);
	rxrpc_inc_stat(call->rxnet, stat_tx_ack_fill);

	/* Barrier against rxrpc_input_data(). */
retry:
	wtmp = atomic64_read_acquire(&call->ackr_window);
	window = lower_32_bits(wtmp);
	wtop = upper_32_bits(wtmp);
	txb->ack.firstPacket = htonl(window);
	txb->ack.nAcks = 0;

	if (after(wtop, window)) {
		/* Try to copy the SACK ring locklessly.  We can use the copy,
		 * but only if the now-current top of the window didn't go past
		 * the previously read base - otherwise we can't know whether
		 * we have old data or new data.
		 */
		memcpy(sack_buffer, call->ackr_sack_table, sizeof(sack_buffer));
		wrap_point = window + RXRPC_SACK_SIZE - 1;
		wtmp = atomic64_read_acquire(&call->ackr_window);
		window = lower_32_bits(wtmp);
		wtop = upper_32_bits(wtmp);
		if (after(wtop, wrap_point)) {
			cond_resched();
			goto retry;
		}

		/* The buffer is maintained as a ring with an invariant mapping
		 * between bit position and sequence number, so we'll probably
		 * need to rotate it.
		 */
		txb->ack.nAcks = wtop - window;
		ix = window % RXRPC_SACK_SIZE;
		first = sizeof(sack_buffer) - ix;

		if (ix + txb->ack.nAcks <= RXRPC_SACK_SIZE) {
			memcpy(txb->acks, sack_buffer + ix, txb->ack.nAcks);
		} else {
			memcpy(txb->acks, sack_buffer + ix, first);
			memcpy(txb->acks + first, sack_buffer,
			       txb->ack.nAcks - first);
		}

		ackp += txb->ack.nAcks;
	} else if (before(wtop, window)) {
		pr_warn("ack window backward %x %x\n", window, wtop);
	} else if (txb->ack.reason == RXRPC_ACK_DELAY) {
		txb->ack.reason = RXRPC_ACK_IDLE;
	}

	mtu = conn->params.peer->if_mtu;
	mtu -= conn->params.peer->hdrsize;
	jmax = rxrpc_rx_jumbo_max;
	qsize = (window - 1) - call->rx_consumed;
	rsize = max_t(int, call->rx_winsize - qsize, 0);
	ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
	ackinfo.maxMTU = htonl(mtu);
	ackinfo.rwind = htonl(rsize);
	ackinfo.jumbo_max = htonl(jmax);

	*ackp++ = 0;
	*ackp++ = 0;
	*ackp++ = 0;
	memcpy(ackp, &ackinfo, sizeof(ackinfo));
	return txb->ack.nAcks + 3 + sizeof(ackinfo);
}
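
/*
 * Worked example (illustrative), assuming RXRPC_SACK_SIZE - and hence
 * sizeof(sack_buffer) - is 256: with window = 253 and wtop = 258,
 *
 *	nAcks = wtop - window = 5
 *	ix    = 253 % 256     = 253
 *	first = 256 - 253     = 3
 *
 * ix + nAcks = 258 exceeds the ring size, so the rotation takes the split
 * path: three bytes are copied from sack_buffer[253..255] and the remaining
 * two from sack_buffer[0..1], yielding a linear run of five sACK bytes in
 * txb->acks.
 */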

/*
 * Record the beginning of an RTT probe.
 */
static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
				 enum rxrpc_rtt_tx_trace why)
{
	unsigned long avail = call->rtt_avail;
	int rtt_slot = 9;

	if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
		goto no_slot;

	rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
	if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
		goto no_slot;

	call->rtt_serial[rtt_slot] = serial;
	call->rtt_sent_at[rtt_slot] = ktime_get_real();
	smp_wmb(); /* Write data before avail bit */
	set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);

	trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
	return rtt_slot;

no_slot:
	trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
	return -1;
}

/*
 * Cancel an RTT probe.
 */
static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
				   rxrpc_serial_t serial, int rtt_slot)
{
	if (rtt_slot != -1) {
		clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
		smp_wmb(); /* Clear pending bit before setting slot */
		set_bit(rtt_slot, &call->rtt_avail);
		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
	}
}
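
/*
 * Note on the slot lifecycle (illustrative): a probe claims a slot by
 * clearing its bit in the available half of call->rtt_avail, records the
 * serial and timestamp, and only then sets the corresponding bit in the
 * pending half; the smp_wmb() publishes the data before the pending bit
 * becomes visible to whoever matches the reply.  Cancellation reverses the
 * order, so a slot never appears pending with stale contents.
 */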

/*
 * Send an ACK call packet.
 */
static int rxrpc_send_ack_packet(struct rxrpc_local *local, struct rxrpc_txbuf *txb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_call *call = txb->call;
	struct msghdr msg;
	struct kvec iov[1];
	rxrpc_serial_t serial;
	size_t len, n;
	int ret, rtt_slot = -1;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	conn = call->conn;

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	if (txb->ack.reason == RXRPC_ACK_PING)
		txb->wire.flags |= RXRPC_REQUEST_ACK;

	if (txb->ack.reason == RXRPC_ACK_DELAY)
		clear_bit(RXRPC_CALL_DELAY_ACK_PENDING, &call->flags);
	if (txb->ack.reason == RXRPC_ACK_IDLE)
		clear_bit(RXRPC_CALL_IDLE_ACK_PENDING, &call->flags);

	n = rxrpc_fill_out_ack(conn, call, txb);
	if (n == 0)
		return 0;

	iov[0].iov_base = &txb->wire;
	iov[0].iov_len = sizeof(txb->wire) + sizeof(txb->ack) + n;
	len = iov[0].iov_len;

	serial = atomic_inc_return(&conn->serial);
	txb->wire.serial = htonl(serial);
	trace_rxrpc_tx_ack(call->debug_id, serial,
			   ntohl(txb->ack.firstPacket),
			   ntohl(txb->ack.serial), txb->ack.reason, txb->ack.nAcks);
	if (txb->ack_why == rxrpc_propose_ack_ping_for_lost_ack)
		call->acks_lost_ping = serial;

	if (txb->ack.reason == RXRPC_ACK_PING)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);

	rxrpc_inc_stat(call->rxnet, stat_tx_ack_send);

	/* Grab the highest received seq as late as possible */
	txb->ack.previousPacket = htonl(call->rx_highest_seq);

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
	ret = do_udp_sendmsg(conn->params.local->socket, &msg, len);
	call->peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_ack);
	else
		trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
				      rxrpc_tx_point_call_ack);
	rxrpc_tx_backoff(call, ret);

	if (call->state < RXRPC_CALL_COMPLETE) {
		if (ret < 0)
			rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
		rxrpc_set_keepalive(call);
	}

	return ret;
}
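
/*
 * Worked example (illustrative): the length returned by rxrpc_fill_out_ack()
 * is the soft-ACK bytes plus the three pad bytes (the *ackp++ = 0 stores)
 * plus the trailing ackinfo, so for a hypothetical ACK carrying five
 * soft-ACKs iov[0] spans
 *
 *	sizeof(txb->wire) + sizeof(txb->ack) + 5 + 3 + sizeof(struct rxrpc_ackinfo)
 *
 * bytes and goes out as a single UDP datagram.
 */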

/*
 * ACK transmitter for a local endpoint.  The UDP socket locks around each
 * transmission, so we can only transmit one packet at a time, ACK, DATA or
 * otherwise.
 */
void rxrpc_transmit_ack_packets(struct rxrpc_local *local)
{
	LIST_HEAD(queue);
	int ret;

	trace_rxrpc_local(local->debug_id, rxrpc_local_tx_ack,
			  refcount_read(&local->ref), NULL);

	if (list_empty(&local->ack_tx_queue))
		return;

	spin_lock_bh(&local->ack_tx_lock);
	list_splice_tail_init(&local->ack_tx_queue, &queue);
	spin_unlock_bh(&local->ack_tx_lock);

	while (!list_empty(&queue)) {
		struct rxrpc_txbuf *txb =
			list_entry(queue.next, struct rxrpc_txbuf, tx_link);

		ret = rxrpc_send_ack_packet(local, txb);
		if (ret < 0 && ret != -ECONNRESET) {
			spin_lock_bh(&local->ack_tx_lock);
			list_splice_init(&queue, &local->ack_tx_queue);
			spin_unlock_bh(&local->ack_tx_lock);
			break;
		}

		list_del_init(&txb->tx_link);
		rxrpc_put_call(txb->call, rxrpc_call_put);
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
	}
}

/*
 * Send an ABORT call packet.
 */
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_abort_buffer pkt;
	struct msghdr msg;
	struct kvec iov[1];
	rxrpc_serial_t serial;
	int ret;

	/* Don't bother sending aborts for a client call once the server has
	 * hard-ACK'd all of its request data.  After that point, we're not
	 * going to stop the operation proceeding, and whilst we might limit
	 * the reply, it's not worth it if we can send a new call on the same
	 * channel instead, thereby closing off this call.
	 */
	if (rxrpc_is_client_call(call) &&
	    test_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags))
		return 0;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	conn = call->conn;

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	pkt.whdr.epoch = htonl(conn->proto.epoch);
	pkt.whdr.cid = htonl(call->cid);
	pkt.whdr.callNumber = htonl(call->call_id);
	pkt.whdr.seq = 0;
	pkt.whdr.type = RXRPC_PACKET_TYPE_ABORT;
	pkt.whdr.flags = conn->out_clientflag;
	pkt.whdr.userStatus = 0;
	pkt.whdr.securityIndex = call->security_ix;
	pkt.whdr._rsvd = 0;
	pkt.whdr.serviceId = htons(call->service_id);
	pkt.abort_code = htonl(call->abort_code);

	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt);

	serial = atomic_inc_return(&conn->serial);
	pkt.whdr.serial = htonl(serial);

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, sizeof(pkt));
	ret = do_udp_sendmsg(conn->params.local->socket, &msg, sizeof(pkt));
	conn->params.peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_abort);
	else
		trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_abort);
	rxrpc_tx_backoff(call, ret);
	return ret;
}
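
/*
 * Note (illustrative): an ABORT is the smallest call packet sent from this
 * file - just struct rxrpc_abort_buffer, i.e. the wire header immediately
 * followed by one network-order abort code, carried in a single kvec.  The
 * serial is the last field filled in, right before transmission, so each
 * send gets a fresh serial number.
 */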

/*
 * Send a packet through the transport endpoint.
 */
int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	enum rxrpc_req_ack_trace why;
	struct rxrpc_connection *conn = call->conn;
	struct msghdr msg;
	struct kvec iov[1];
	rxrpc_serial_t serial;
	size_t len;
	int ret, rtt_slot = -1;

	_enter("%x,{%d}", txb->seq, txb->len);

	if (hlist_unhashed(&call->error_link)) {
		spin_lock_bh(&call->peer->lock);
		hlist_add_head_rcu(&call->error_link, &call->peer->error_targets);
		spin_unlock_bh(&call->peer->lock);
	}

	/* Each transmission of a Tx packet needs a new serial number */
	serial = atomic_inc_return(&conn->serial);
	txb->wire.serial = htonl(serial);

	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
	    txb->seq == 1)
		txb->wire.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;

	iov[0].iov_base = &txb->wire;
	iov[0].iov_len = sizeof(txb->wire) + txb->len;
	len = iov[0].iov_len;
	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* If our RTT cache needs working on, request an ACK.  Also request
	 * ACKs if a DATA packet appears to have been lost.
	 *
	 * However, we mustn't request an ACK on the last reply packet of a
	 * service call, lest OpenAFS incorrectly send us an ACK with some
	 * soft-ACKs in it and then never follow up with a proper hard ACK.
	 */
	if (txb->wire.flags & RXRPC_REQUEST_ACK)
		why = rxrpc_reqack_already_on;
	else if (test_bit(RXRPC_TXBUF_LAST, &txb->flags) && rxrpc_sending_to_client(txb))
		why = rxrpc_reqack_no_srv_last;
	else if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
		why = rxrpc_reqack_ack_lost;
	else if (test_bit(RXRPC_TXBUF_RESENT, &txb->flags))
		why = rxrpc_reqack_retrans;
	else if (call->cong_mode == RXRPC_CALL_SLOW_START && call->cong_cwnd <= 2)
		why = rxrpc_reqack_slow_start;
	else if (call->tx_winsize <= 2)
		why = rxrpc_reqack_small_txwin;
	else if (call->peer->rtt_count < 3 && txb->seq & 1)
		why = rxrpc_reqack_more_rtt;
	else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), ktime_get_real()))
		why = rxrpc_reqack_old_rtt;
	else
		goto dont_set_request_ack;

	rxrpc_inc_stat(call->rxnet, stat_why_req_ack[why]);
	trace_rxrpc_req_ack(call->debug_id, txb->seq, why);
	if (why != rxrpc_reqack_no_srv_last)
		txb->wire.flags |= RXRPC_REQUEST_ACK;
dont_set_request_ack:

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;

		if ((lose++ & 7) == 7) {
			ret = 0;
			trace_rxrpc_tx_data(call, txb->seq, serial,
					    txb->wire.flags,
					    test_bit(RXRPC_TXBUF_RESENT, &txb->flags),
					    true);
			goto done;
		}
	}

	trace_rxrpc_tx_data(call, txb->seq, serial, txb->wire.flags,
			    test_bit(RXRPC_TXBUF_RESENT, &txb->flags), false);
	cmpxchg(&call->tx_transmitted, txb->seq - 1, txb->seq);

	/* Send the packet with the don't-fragment bit set if we currently
	 * think it's small enough.
	 */
	if (txb->len >= call->peer->maxdata)
		goto send_fragmentable;

	down_read(&conn->params.local->defrag_sem);

	txb->last_sent = ktime_get_real();
	if (txb->wire.flags & RXRPC_REQUEST_ACK)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);

	/* Send the packet by UDP
	 * - returns -EMSGSIZE if UDP would have to fragment the packet
	 *   to go out of the interface
	 *   - in which case, we'll have processed the ICMP error
	 *     message and updated the peer record
	 */
	rxrpc_inc_stat(call->rxnet, stat_tx_data_send);
	ret = do_udp_sendmsg(conn->params.local->socket, &msg, len);
	conn->params.peer->last_tx_at = ktime_get_seconds();

	up_read(&conn->params.local->defrag_sem);
	if (ret < 0) {
		rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_data_nofrag);
	} else {
		trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
				      rxrpc_tx_point_call_data_nofrag);
	}

	rxrpc_tx_backoff(call, ret);
	if (ret == -EMSGSIZE)
		goto send_fragmentable;

done:
	if (ret >= 0) {
		call->tx_last_sent = txb->last_sent;
		if (txb->wire.flags & RXRPC_REQUEST_ACK) {
			call->peer->rtt_last_req = txb->last_sent;
			if (call->peer->rtt_count > 1) {
				unsigned long nowj = jiffies, ack_lost_at;

				ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
				ack_lost_at += nowj;
				WRITE_ONCE(call->ack_lost_at, ack_lost_at);
				rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
							rxrpc_timer_set_for_lost_ack);
			}
		}

		if (txb->seq == 1 &&
		    !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
				      &call->flags)) {
			unsigned long nowj = jiffies, expect_rx_by;

			expect_rx_by = nowj + call->next_rx_timo;
			WRITE_ONCE(call->expect_rx_by, expect_rx_by);
			rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
						rxrpc_timer_set_for_normal);
		}

		rxrpc_set_keepalive(call);
	} else {
		/* Cancel the call if the initial transmission fails,
		 * particularly if that's due to network routing issues that
		 * aren't going away anytime soon.  The layer above can arrange
		 * the retransmission.
		 */
		if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
						  RX_USER_ABORT, ret);
	}

	_leave(" = %d [%u]", ret, call->peer->maxdata);
	return ret;

send_fragmentable:
	/* Attempt to send this message with fragmentation enabled */
	_debug("send fragment");

	down_write(&conn->params.local->defrag_sem);

	txb->last_sent = ktime_get_real();
	if (txb->wire.flags & RXRPC_REQUEST_ACK)
		rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);

	switch (conn->params.local->srx.transport.family) {
	case AF_INET6:
	case AF_INET:
		ip_sock_set_mtu_discover(conn->params.local->socket->sk,
					 IP_PMTUDISC_DONT);
		rxrpc_inc_stat(call->rxnet, stat_tx_data_send_frag);
		ret = do_udp_sendmsg(conn->params.local->socket, &msg, len);
		conn->params.peer->last_tx_at = ktime_get_seconds();

		ip_sock_set_mtu_discover(conn->params.local->socket->sk,
					 IP_PMTUDISC_DO);
		break;

	default:
		BUG();
	}

	if (ret < 0) {
		rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_data_frag);
	} else {
		trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
				      rxrpc_tx_point_call_data_frag);
	}
	rxrpc_tx_backoff(call, ret);

	up_write(&conn->params.local->defrag_sem);
	goto done;
}
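
/*
 * Note (illustrative): the DATA path above tries an unfragmentable send
 * first, under the read side of defrag_sem; only if UDP reports -EMSGSIZE
 * does it take the write side, temporarily switch the socket to
 * IP_PMTUDISC_DONT, and resend with fragmentation permitted - the write
 * lock keeping don't-fragment senders out while the socket setting is
 * relaxed.  With CONFIG_AF_RXRPC_INJECT_LOSS enabled, ((lose++ & 7) == 7)
 * also silently discards every eighth packet to exercise the
 * retransmission logic.
 */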

/*
 * Reject packets through the local endpoint.
 */
void rxrpc_reject_packets(struct rxrpc_local *local)
{
	struct sockaddr_rxrpc srx;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_wire_header whdr;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iov[2];
	size_t size;
	__be32 code;
	int ret, ioc;

	_enter("%d", local->debug_id);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = &code;
	iov[1].iov_len = sizeof(code);

	msg.msg_name = &srx.transport;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	memset(&whdr, 0, sizeof(whdr));

	while ((skb = skb_dequeue(&local->reject_queue))) {
		rxrpc_see_skb(skb, rxrpc_skb_seen);
		sp = rxrpc_skb(skb);

		switch (skb->mark) {
		case RXRPC_SKB_MARK_REJECT_BUSY:
			whdr.type = RXRPC_PACKET_TYPE_BUSY;
			size = sizeof(whdr);
			ioc = 1;
			break;
		case RXRPC_SKB_MARK_REJECT_ABORT:
			whdr.type = RXRPC_PACKET_TYPE_ABORT;
			code = htonl(skb->priority);
			size = sizeof(whdr) + sizeof(code);
			ioc = 2;
			break;
		default:
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			continue;
		}

		if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
			msg.msg_namelen = srx.transport_len;

			whdr.epoch = htonl(sp->hdr.epoch);
			whdr.cid = htonl(sp->hdr.cid);
			whdr.callNumber = htonl(sp->hdr.callNumber);
			whdr.serviceId = htons(sp->hdr.serviceId);
			whdr.flags = sp->hdr.flags;
			/* Reverse the direction bit and drop all other flags
			 * so the reply flows the opposite way to the
			 * offending packet.
			 */
			whdr.flags ^= RXRPC_CLIENT_INITIATED;
			whdr.flags &= RXRPC_CLIENT_INITIATED;

			iov_iter_kvec(&msg.msg_iter, WRITE, iov, ioc, size);
			ret = do_udp_sendmsg(local->socket, &msg, size);
			if (ret < 0)
				trace_rxrpc_tx_fail(local->debug_id, 0, ret,
						    rxrpc_tx_point_reject);
			else
				trace_rxrpc_tx_packet(local->debug_id, &whdr,
						      rxrpc_tx_point_reject);
		}

		rxrpc_free_skb(skb, rxrpc_skb_freed);
	}

	_leave("");
}

/*
 * Send a VERSION reply to a peer as a keepalive.
 */
void rxrpc_send_keepalive(struct rxrpc_peer *peer)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	size_t len;
	int ret;

	_enter("");

	msg.msg_name = &peer->srx.transport;
	msg.msg_namelen = peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	whdr.epoch = htonl(peer->local->rxnet->epoch);
	whdr.cid = 0;
	whdr.callNumber = 0;
	whdr.seq = 0;
	whdr.serial = 0;
	whdr.type = RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
	whdr.flags = RXRPC_LAST_PACKET;
	whdr.userStatus = 0;
	whdr.securityIndex = 0;
	whdr._rsvd = 0;
	whdr.serviceId = 0;

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = (char *)rxrpc_keepalive_string;
	iov[1].iov_len = sizeof(rxrpc_keepalive_string);

	len = iov[0].iov_len + iov[1].iov_len;

	_proto("Tx VERSION (keepalive)");

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
	ret = do_udp_sendmsg(peer->local->socket, &msg, len);
	if (ret < 0)
		trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
				    rxrpc_tx_point_version_keepalive);
	else
		trace_rxrpc_tx_packet(peer->debug_id, &whdr,
				      rxrpc_tx_point_version_keepalive);

	peer->last_tx_at = ktime_get_seconds();
	_leave("");
}