// SPDX-License-Identifier: GPL-2.0-or-later
/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Propose a PING ACK be sent.
 */
static void rxrpc_propose_ping(struct rxrpc_call *call,
			       bool immediate, bool background)
{
	if (immediate) {
		if (background &&
		    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
			rxrpc_queue_call(call);
	} else {
		unsigned long now = jiffies;
		unsigned long ping_at = now + rxrpc_idle_ack_delay;

		if (time_before(ping_at, call->ping_at)) {
			WRITE_ONCE(call->ping_at, ping_at);
			rxrpc_reduce_call_timer(call, ping_at, now,
						rxrpc_timer_set_for_ping);
		}
	}
}

/*
 * Propose an ACK be sent.
 */
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
				u32 serial, bool immediate, bool background,
				enum rxrpc_propose_ack_trace why)
{
	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
	unsigned long expiry = rxrpc_soft_ack_delay;
	s8 prior = rxrpc_ack_priority[ack_reason];

	/* Pings are handled specially because we don't want to accidentally
	 * lose a ping response by subsuming it into a ping.
	 */
	if (ack_reason == RXRPC_ACK_PING) {
		rxrpc_propose_ping(call, immediate, background);
		goto trace;
	}

	/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers, but we don't alter the timeout.
	 */
	_debug("prior %u %u vs %u %u",
	       ack_reason, prior,
	       call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
	if (ack_reason == call->ackr_reason) {
		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
			outcome = rxrpc_propose_ack_update;
			call->ackr_serial = serial;
		}
		if (!immediate)
			goto trace;
	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		call->ackr_reason = ack_reason;
		call->ackr_serial = serial;
	} else {
		outcome = rxrpc_propose_ack_subsume;
	}
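
	/* Work out how long a deferrable ACK (REQUESTED, DELAY or IDLE) may
	 * be deferred for; any other reason is sent immediately.
	 */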
	switch (ack_reason) {
	case RXRPC_ACK_REQUESTED:
		if (rxrpc_requested_ack_delay < expiry)
			expiry = rxrpc_requested_ack_delay;
		if (serial == 1)
			immediate = false;
		break;

	case RXRPC_ACK_DELAY:
		if (rxrpc_soft_ack_delay < expiry)
			expiry = rxrpc_soft_ack_delay;
		break;

	case RXRPC_ACK_IDLE:
		if (rxrpc_idle_ack_delay < expiry)
			expiry = rxrpc_idle_ack_delay;
		break;

	default:
		immediate = true;
		break;
	}

	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		_debug("already scheduled");
	} else if (immediate || expiry == 0) {
		_debug("immediate ACK %lx", call->events);
		if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
		    background)
			rxrpc_queue_call(call);
	} else {
		unsigned long now = jiffies, ack_at;

		if (call->peer->rtt_usage > 0)
			ack_at = nsecs_to_jiffies(call->peer->rtt);
		else
			ack_at = expiry;

		ack_at += READ_ONCE(call->tx_backoff);
		ack_at += now;
		if (time_before(ack_at, call->ack_at)) {
			WRITE_ONCE(call->ack_at, ack_at);
			rxrpc_reduce_call_timer(call, ack_at, now,
						rxrpc_timer_set_for_ack);
		}
	}

trace:
	trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
				background, outcome);
}

/*
 * Propose an ACK be sent, locking the call structure.
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
		       u32 serial, bool immediate, bool background,
		       enum rxrpc_propose_ack_trace why)
{
	spin_lock_bh(&call->lock);
	__rxrpc_propose_ACK(call, ack_reason, serial,
			    immediate, background, why);
	spin_unlock_bh(&call->lock);
}

/*
 * Handle congestion being detected by the retransmit timeout.
 */
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
	set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}

/*
 * Perform retransmission of NAK'd and unack'd packets.
 */
static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
{
	struct sk_buff *skb;
	unsigned long resend_at;
	rxrpc_seq_t cursor, seq, top;
	ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo;
	int ix;
	u8 annotation, anno_type, retrans = 0, unacked = 0;

	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
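
	/* Base the retransmission timeout on 1.5 times the peer's measured
	 * RTT if at least two samples have been collected, and otherwise on
	 * the fixed rxrpc_resend_timeout; clamp it below to four clock
	 * ticks' worth of time, presumably to avoid over-eager
	 * retransmission on very fast links.
	 */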
	if (call->peer->rtt_usage > 1)
		timeout = ns_to_ktime(call->peer->rtt * 3 / 2);
	else
		timeout = ms_to_ktime(rxrpc_resend_timeout);
	min_timeo = ns_to_ktime((1000000000 / HZ) * 4);
	if (ktime_before(timeout, min_timeo))
		timeout = min_timeo;

	now = ktime_get_real();
	max_age = ktime_sub(now, timeout);

	spin_lock_bh(&call->lock);

	cursor = call->tx_hard_ack;
	top = call->tx_top;
	ASSERT(before_eq(cursor, top));
	if (cursor == top)
		goto out_unlock;

	/* Scan the packet list without dropping the lock and decide which of
	 * the packets in the Tx buffer we're going to resend and what the new
	 * resend timeout will be.
	 */
	trace_rxrpc_resend(call, (cursor + 1) & RXRPC_RXTX_BUFF_MASK);
	oldest = now;
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		if (anno_type == RXRPC_TX_ANNO_ACK)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_see_skb(skb, rxrpc_skb_seen);

		if (anno_type == RXRPC_TX_ANNO_UNACK) {
			if (ktime_after(skb->tstamp, max_age)) {
				if (ktime_before(skb->tstamp, oldest))
					oldest = skb->tstamp;
				continue;
			}
			if (!(annotation & RXRPC_TX_ANNO_RESENT))
				unacked++;
		}

		/* Okay, we need to retransmit a packet. */
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
		retrans++;
		trace_rxrpc_retransmit(call, seq, annotation | anno_type,
				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
	}

	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
	resend_at += jiffies + rxrpc_resend_timeout;
	WRITE_ONCE(call->resend_at, resend_at);

	if (unacked)
		rxrpc_congestion_timeout(call);

	/* If there was nothing that needed retransmission then it's likely
	 * that an ACK got lost somewhere.  Send a ping to find out instead of
	 * retransmitting data.
	 */
	if (!retrans) {
		rxrpc_reduce_call_timer(call, resend_at, now_j,
					rxrpc_timer_set_for_resend);
		spin_unlock_bh(&call->lock);
		ack_ts = ktime_sub(now, call->acks_latest_ts);
		if (ktime_to_ns(ack_ts) < call->peer->rtt)
			goto out;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		rxrpc_send_ack_packet(call, true, NULL);
		goto out;
	}

	/* Now go through the Tx window and perform the retransmissions.  We
	 * have to drop the lock for each send.  If an ACK comes in whilst the
	 * lock is dropped, it may clear some of the retransmission markers for
	 * packets that it soft-ACKs.
	 */
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		if (anno_type != RXRPC_TX_ANNO_RETRANS)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_get_skb(skb, rxrpc_skb_got);
		spin_unlock_bh(&call->lock);

		if (rxrpc_send_data_packet(call, skb, true) < 0) {
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			return;
		}

		if (rxrpc_is_client_call(call))
			rxrpc_expose_client_call(call);

		rxrpc_free_skb(skb, rxrpc_skb_freed);
		spin_lock_bh(&call->lock);

		/* We need to clear the retransmit state, but there are two
		 * things we need to be aware of: A new ACK/NAK might have been
		 * received and the packet might have been hard-ACK'd (in which
		 * case it will no longer be in the buffer).
		 */
		if (after(seq, call->tx_hard_ack)) {
			annotation = call->rxtx_annotations[ix];
			anno_type = annotation & RXRPC_TX_ANNO_MASK;
			if (anno_type == RXRPC_TX_ANNO_RETRANS ||
			    anno_type == RXRPC_TX_ANNO_NAK) {
				annotation &= ~RXRPC_TX_ANNO_MASK;
				annotation |= RXRPC_TX_ANNO_UNACK;
			}
			annotation |= RXRPC_TX_ANNO_RESENT;
			call->rxtx_annotations[ix] = annotation;
		}
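
		/* The hard-ACK point may also have advanced past seq whilst
		 * the lock was dropped; if so, jump forward so that the loop
		 * resumes at the first packet still held in the buffer.
		 */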
		if (after(call->tx_hard_ack, seq))
			seq = call->tx_hard_ack;
	}

out_unlock:
	spin_unlock_bh(&call->lock);
out:
	_leave("");
}

/*
 * Handle retransmission and deferred ACK/abort generation.
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	rxrpc_serial_t *send_ack;
	unsigned long now, next, t;
	unsigned int iterations = 0;

	rxrpc_see_call(call);

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx}",
	       call->debug_id, rxrpc_call_states[call->state], call->events);

recheck_state:
	/* Limit the number of times we do this before returning to the manager */
	iterations++;
	if (iterations > 5)
		goto requeue;

	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		rxrpc_send_abort_packet(call);
		goto recheck_state;
	}

	if (call->state == RXRPC_CALL_COMPLETE) {
		del_timer_sync(&call->timer);
		rxrpc_notify_socket(call);
		goto out_put;
	}

	/* Work out if any timeouts tripped */
	now = jiffies;
	t = READ_ONCE(call->expect_rx_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_req_by);
	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
	    time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_term_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->ack_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
		cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK, &call->events);
	}

	t = READ_ONCE(call->ack_lost_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
		cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
	}

	t = READ_ONCE(call->keepalive_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
				  rxrpc_propose_ack_ping_for_keepalive);
		set_bit(RXRPC_CALL_EV_PING, &call->events);
	}

	t = READ_ONCE(call->ping_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_PING, &call->events);
	}

	t = READ_ONCE(call->resend_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}

	/* Process events */
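	/* A call that has expired is aborted.  If we have heard from the
	 * peer on this call, but the connection has since seen higher serial
	 * numbers than the last packet received here, the peer presumably
	 * still lives and the call was probably lost at the far side, so
	 * this is reported as a reset rather than a plain timeout.
	 */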
	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
		if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
		    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
			trace_rxrpc_call_reset(call);
			rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ECONNRESET);
		} else {
			rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
		}
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		goto recheck_state;
	}

	send_ack = NULL;
	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
		call->acks_lost_top = call->tx_top;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		send_ack = &call->acks_lost_ping;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
	    send_ack) {
		if (call->ackr_reason) {
			rxrpc_send_ack_packet(call, false, send_ack);
			goto recheck_state;
		}
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
		rxrpc_send_ack_packet(call, true, NULL);
		goto recheck_state;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
		rxrpc_resend(call, now);
		goto recheck_state;
	}

	/* Make sure the timer is restarted */
	next = call->expect_rx_by;

#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }

	set(call->expect_req_by);
	set(call->expect_term_by);
	set(call->ack_at);
	set(call->ack_lost_at);
	set(call->resend_at);
	set(call->keepalive_at);
	set(call->ping_at);

	now = jiffies;
	if (time_after_eq(now, next))
		goto recheck_state;

	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);

	/* other events may have been raised since we started checking */
	if (call->events && call->state < RXRPC_CALL_COMPLETE)
		goto requeue;

out_put:
	rxrpc_put_call(call, rxrpc_call_put);
out:
	_leave("");
	return;

requeue:
	__rxrpc_queue_call(call);
	goto out;
}