/*
 *  net/dccp/ccids/lib/packet_history.c
 *
 *  Copyright (c) 2007   The University of Aberdeen, Scotland, UK
 *  Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
 *
 *  An implementation of the DCCP protocol
 *
 *  This code has been developed by the University of Waikato WAND
 *  research group. For further information please see http://www.wand.net.nz/
 *  or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
 *
 *  This code also uses code from Lulea University, rereleased as GPL by its
 *  authors:
 *  Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
 *
 *  Changes to meet Linux coding standards, to make it meet latest ccid3 draft
 *  and to make it work as a loadable module in the DCCP stack written by
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
 *
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/string.h>
#include <linux/slab.h>
#include "packet_history.h"
#include "../../dccp.h"

/**
 *  tfrc_tx_hist_entry  -  Simple singly-linked TX history list
 *  @next:  next oldest entry (LIFO order)
 *  @seqno: sequence number of this entry
 *  @stamp: send time of packet with sequence number @seqno
 */
struct tfrc_tx_hist_entry {
        struct tfrc_tx_hist_entry *next;
        u64                       seqno;
        ktime_t                   stamp;
};

/*
 * Transmitter History Routines
 */
static struct kmem_cache *tfrc_tx_hist_slab;

int __init tfrc_tx_packet_history_init(void)
{
        tfrc_tx_hist_slab = kmem_cache_create("tfrc_tx_hist",
                                              sizeof(struct tfrc_tx_hist_entry),
                                              0, SLAB_HWCACHE_ALIGN, NULL);
        return tfrc_tx_hist_slab == NULL ? -ENOBUFS : 0;
}
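
/*
 * Usage sketch (illustrative only; `hist', `seqno', `ackno' and `rtt' are
 * hypothetical caller state, not part of this file): a CCID sender records
 * each packet at transmit time and converts the stored timestamp into an
 * RTT sample once the packet is acknowledged:
 *
 *      struct tfrc_tx_hist_entry *hist = NULL;
 *
 *      tfrc_tx_hist_add(&hist, seqno);                 // at transmit time
 *      ...
 *      rtt = tfrc_tx_hist_rtt(hist, ackno, ktime_get_real());
 *      ...                                             // rtt == 0: no sample
 *      tfrc_tx_hist_purge(&hist);                      // at teardown
 *
 * Note that tfrc_tx_hist_rtt() also discards all entries older than the
 * acknowledged one, which the list - being LIFO - treats as irrelevant.
 */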

void tfrc_tx_packet_history_exit(void)
{
        if (tfrc_tx_hist_slab != NULL) {
                kmem_cache_destroy(tfrc_tx_hist_slab);
                tfrc_tx_hist_slab = NULL;
        }
}

static struct tfrc_tx_hist_entry *
        tfrc_tx_hist_find_entry(struct tfrc_tx_hist_entry *head, u64 seqno)
{
        while (head != NULL && head->seqno != seqno)
                head = head->next;

        return head;
}

int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno)
{
        struct tfrc_tx_hist_entry *entry = kmem_cache_alloc(tfrc_tx_hist_slab, gfp_any());

        if (entry == NULL)
                return -ENOBUFS;
        entry->seqno = seqno;
        entry->stamp = ktime_get_real();
        entry->next  = *headp;
        *headp       = entry;
        return 0;
}

void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp)
{
        struct tfrc_tx_hist_entry *head = *headp;

        while (head != NULL) {
                struct tfrc_tx_hist_entry *next = head->next;

                kmem_cache_free(tfrc_tx_hist_slab, head);
                head = next;
        }

        *headp = NULL;
}

u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno,
                     const ktime_t now)
{
        u32 rtt = 0;
        struct tfrc_tx_hist_entry *packet = tfrc_tx_hist_find_entry(head, seqno);

        if (packet != NULL) {
                rtt = ktime_us_delta(now, packet->stamp);
                /*
                 * Garbage-collect older (irrelevant) entries:
                 */
                tfrc_tx_hist_purge(&packet->next);
        }

        return rtt;
}


/*
 * Receiver History Routines
 */
static struct kmem_cache *tfrc_rx_hist_slab;

int __init tfrc_rx_packet_history_init(void)
{
        tfrc_rx_hist_slab = kmem_cache_create("tfrc_rxh_cache",
                                              sizeof(struct tfrc_rx_hist_entry),
                                              0, SLAB_HWCACHE_ALIGN, NULL);
        return tfrc_rx_hist_slab == NULL ? -ENOBUFS : 0;
}

void tfrc_rx_packet_history_exit(void)
{
        if (tfrc_rx_hist_slab != NULL) {
                kmem_cache_destroy(tfrc_rx_hist_slab);
                tfrc_rx_hist_slab = NULL;
        }
}

static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry,
                                               const struct sk_buff *skb,
                                               const u64 ndp)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);

        entry->tfrchrx_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
        entry->tfrchrx_ccval = dh->dccph_ccval;
        entry->tfrchrx_type  = dh->dccph_type;
        entry->tfrchrx_ndp   = ndp;
        entry->tfrchrx_tstamp = ktime_get_real();
}

void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
                             const struct sk_buff *skb,
                             const u64 ndp)
{
        struct tfrc_rx_hist_entry *entry = tfrc_rx_hist_last_rcv(h);

        tfrc_rx_hist_entry_from_skb(entry, skb, ndp);
}

/* has the packet contained in skb been seen before? */
int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb)
{
        const u64 seq = DCCP_SKB_CB(skb)->dccpd_seq;
        int i;

        if (dccp_delta_seqno(tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, seq) <= 0)
                return 1;

        for (i = 1; i <= h->loss_count; i++)
                if (tfrc_rx_hist_entry(h, i)->tfrchrx_seqno == seq)
                        return 1;

        return 0;
}

static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b)
{
        const u8 idx_a = tfrc_rx_hist_index(h, a),
                 idx_b = tfrc_rx_hist_index(h, b);
        struct tfrc_rx_hist_entry *tmp = h->ring[idx_a];

        h->ring[idx_a] = h->ring[idx_b];
        h->ring[idx_b] = tmp;
}
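
/*
 * Note on the ring layout (descriptive; tfrc_rx_hist_index() itself lives
 * in packet_history.h): the RX history keeps TFRC_NDUPACK + 1 = 4 entries.
 * Logical entry 0 is "loss_prev" - the highest-numbered packet received
 * before the current hole - and logical entries 1..loss_count hold packets
 * received after it. Logical index n maps to ring slot
 * (loss_start + n) & TFRC_NDUPACK, so a loss record is recycled simply by
 * advancing loss_start instead of copying entries; tfrc_rx_hist_swap()
 * above exchanges two slots when packets arrive reordered.
 */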

/*
 * Private helper functions for loss detection.
 *
 * In the descriptions, `Si' refers to the sequence number of entry number i,
 * whose NDP count is `Ni' (lower case is used for variables).
 * Note: All __xxx_loss functions expect that a test against duplicates has
 * been performed already: the seqno of the skb must not be less than the
 * seqno of loss_prev; and it must not equal that of any valid history entry.
 */
static void __do_track_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n1)
{
        u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
            s1 = DCCP_SKB_CB(skb)->dccpd_seq;

        if (!dccp_loss_free(s0, s1, n1)) {      /* gap between S0 and S1 */
                h->loss_count = 1;
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n1);
        }
}

static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n2)
{
        u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
            s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
            s2 = DCCP_SKB_CB(skb)->dccpd_seq;

        if (likely(dccp_delta_seqno(s1, s2) > 0)) {     /* S1  <  S2 */
                h->loss_count = 2;
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n2);
                return;
        }

        /* S0  <  S2  <  S1 */

        if (dccp_loss_free(s0, s2, n2)) {
                u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;

                if (dccp_loss_free(s2, s1, n1)) {
                        /* hole is filled: S0, S2, and S1 are consecutive */
                        h->loss_count = 0;
                        h->loss_start = tfrc_rx_hist_index(h, 1);
                } else
                        /* gap between S2 and S1: just update loss_prev */
                        tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n2);

        } else {        /* gap between S0 and S2 */
                /*
                 * Reorder history to insert S2 between S0 and S1
                 */
                tfrc_rx_hist_swap(h, 0, 3);
                h->loss_start = tfrc_rx_hist_index(h, 3);
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n2);
                h->loss_count = 2;
        }
}

/* return 1 if a new loss event has been identified */
static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n3)
{
        u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
            s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
            s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
            s3 = DCCP_SKB_CB(skb)->dccpd_seq;

        if (likely(dccp_delta_seqno(s2, s3) > 0)) {     /* S2  <  S3 */
                h->loss_count = 3;
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 3), skb, n3);
                return 1;
        }

        /* S3  <  S2 */

        if (dccp_delta_seqno(s1, s3) > 0) {             /* S1  <  S3  <  S2 */
                /*
                 * Reorder history to insert S3 between S1 and S2
                 */
                tfrc_rx_hist_swap(h, 2, 3);
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n3);
                h->loss_count = 3;
                return 1;
        }

        /* S0  <  S3  <  S1 */

        if (dccp_loss_free(s0, s3, n3)) {
                u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;

                if (dccp_loss_free(s3, s1, n1)) {
                        /* hole between S0 and S1 filled by S3 */
                        u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp;

                        if (dccp_loss_free(s1, s2, n2)) {
                                /* entire hole filled by S0, S3, S1, S2 */
                                h->loss_start = tfrc_rx_hist_index(h, 2);
                                h->loss_count = 0;
                        } else {
                                /* gap remains between S1 and S2 */
                                h->loss_start = tfrc_rx_hist_index(h, 1);
                                h->loss_count = 1;
                        }

                } else  /* gap exists between S3 and S1, loss_count stays at 2 */
                        tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n3);

                return 0;
        }

        /*
         * The remaining case:  S0 < S3 < S1 < S2;  gap between S0 and S3
         * Reorder history to insert S3 between S0 and S1.
         */
        tfrc_rx_hist_swap(h, 0, 3);
        h->loss_start = tfrc_rx_hist_index(h, 3);
        tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n3);
        h->loss_count = 3;

        return 1;
}
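
/*
 * Worked example (illustrative, assuming NDP counts of 0 throughout):
 * sequence numbers 1, 2, 5 arrive in order. After 2, loss_prev holds
 * S0 = 2 and loss_count == 0. Packet 5 opens a hole (3 and 4 missing),
 * so __do_track_loss() stores S1 = 5 and sets loss_count = 1. If 4
 * arrives next, __one_after_loss() reorders it in between (S0 = 2,
 * S1 = 4, S2 = 5) and sets loss_count = 2. If 3 then arrives,
 * __two_after_loss() finds S0, S3, S1, S2 consecutive: the hole is
 * filled and loss_count drops back to 0, i.e. no loss is declared.
 * Conversely, had a third packet past the hole arrived instead, the
 * loss would have been confirmed (loss_count reaching TFRC_NDUPACK = 3),
 * analogous to TCP's three-duplicate-ACK rule.
 */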

/* recycle RX history records to continue loss detection if necessary */
static void __three_after_loss(struct tfrc_rx_hist *h)
{
        /*
         * At this stage we know already that there is a gap between S0 and S1
         * (since S0 was the highest sequence number received before detecting
         * the loss). To recycle the loss record, it is thus only necessary to
         * check for other possible gaps between S1/S2 and between S2/S3.
         */
        u64 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
            s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
            s3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_seqno;
        u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp,
            n3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_ndp;

        if (dccp_loss_free(s1, s2, n2)) {

                if (dccp_loss_free(s2, s3, n3)) {
                        /* no gap between S2 and S3: entire hole is filled */
                        h->loss_start = tfrc_rx_hist_index(h, 3);
                        h->loss_count = 0;
                } else {
                        /* gap between S2 and S3 */
                        h->loss_start = tfrc_rx_hist_index(h, 2);
                        h->loss_count = 1;
                }

        } else {        /* gap between S1 and S2 */
                h->loss_start = tfrc_rx_hist_index(h, 1);
                h->loss_count = 2;
        }
}
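
/*
 * Dispatch summary (descriptive): tfrc_rx_handle_loss() below selects one
 * of the helpers according to h->loss_count - __do_track_loss() at 0,
 * __one_after_loss() at 1 and __two_after_loss() at 2. Once the latter
 * confirms a loss (loss_count has reached 3), the Loss Intervals database
 * is updated and __three_after_loss() recycles the records so that
 * detection can continue behind any gap that remains.
 */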

/**
 * tfrc_rx_handle_loss  -  Loss detection and further processing
 * @h:              The non-empty RX history object
 * @lh:             Loss Intervals database to update
 * @skb:            Currently received packet
 * @ndp:            The NDP count belonging to @skb
 * @calc_first_li:  Caller-dependent computation of first loss interval in @lh
 * @sk:             Used by @calc_first_li (see tfrc_lh_interval_add)
 *
 * Chooses action according to pending loss, updates LI database when a new
 * loss was detected, and does required post-processing. Returns 1 when caller
 * should send feedback, 0 otherwise.
 * Since it also takes care of reordering during loss detection and updates the
 * records accordingly, the caller should not perform any more RX history
 * operations when loss_count is greater than 0 after calling this function.
 */
int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
                        struct tfrc_loss_hist *lh,
                        struct sk_buff *skb, const u64 ndp,
                        u32 (*calc_first_li)(struct sock *), struct sock *sk)
{
        int is_new_loss = 0;

        if (h->loss_count == 0) {
                __do_track_loss(h, skb, ndp);
        } else if (h->loss_count == 1) {
                __one_after_loss(h, skb, ndp);
        } else if (h->loss_count != 2) {
                DCCP_BUG("invalid loss_count %d", h->loss_count);
        } else if (__two_after_loss(h, skb, ndp)) {
                /*
                 * Update Loss Interval database and recycle RX records
                 */
                is_new_loss = tfrc_lh_interval_add(lh, h, calc_first_li, sk);
                __three_after_loss(h);
        }
        return is_new_loss;
}

int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h)
{
        int i;

        for (i = 0; i <= TFRC_NDUPACK; i++) {
                h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC);
                if (h->ring[i] == NULL)
                        goto out_free;
        }

        h->loss_count = h->loss_start = 0;
        return 0;

out_free:
        while (i-- != 0) {
                kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
                h->ring[i] = NULL;
        }
        return -ENOBUFS;
}

void tfrc_rx_hist_purge(struct tfrc_rx_hist *h)
{
        int i;

        for (i = 0; i <= TFRC_NDUPACK; ++i)
                if (h->ring[i] != NULL) {
                        kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
                        h->ring[i] = NULL;
                }
}

/**
 * tfrc_rx_hist_rtt_last_s  -  reference entry to compute RTT samples against
 */
static inline struct tfrc_rx_hist_entry *
                        tfrc_rx_hist_rtt_last_s(const struct tfrc_rx_hist *h)
{
        return h->ring[0];
}

/**
 * tfrc_rx_hist_rtt_prev_s  -  previously suitable (wrt rtt_last_s) RTT-sampling entry
 */
static inline struct tfrc_rx_hist_entry *
                        tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h)
{
        return h->ring[h->rtt_sample_prev];
}
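
/*
 * Background for the RTT sampling below (cf. RFC 4342, 8.1): the sender
 * increments the window counter CCVal every quarter RTT. A CCVal distance
 * of exactly 4 between two received packets hence spans about one RTT,
 * distances of 1..3 a proportional fraction of it (which the "4 / sample"
 * scaling compensates for), while distances of 0 or more than 4 carry no
 * usable timing information.
 */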

/**
 * tfrc_rx_hist_sample_rtt  -  Sample RTT from timestamp / CCVal
 *
 * Based on ideas presented in RFC 4342, 8.1. Returns 0 if it was not able
 * to compute a sample with given data - calling function should check this.
 */
u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, const struct sk_buff *skb)
{
        u32 sample = 0,
            delta_v = SUB16(dccp_hdr(skb)->dccph_ccval,
                            tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);

        if (delta_v < 1 || delta_v > 4) {       /* unsuitable CCVal delta */
                if (h->rtt_sample_prev == 2) {  /* previous candidate stored */
                        sample = SUB16(tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval,
                                       tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);
                        if (sample)
                                sample = 4 / sample *
                                        ktime_us_delta(tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_tstamp,
                                                       tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp);
                        else    /*
                                 * FIXME: This condition is in principle not
                                 * possible but occurs when CCID is used for
                                 * two-way data traffic. I have tried to trace
                                 * it, but the cause does not seem to be here.
                                 */
                                DCCP_BUG("please report to dccp@vger.kernel.org"
                                         " => prev = %u, last = %u",
                                         tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval,
                                         tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);
                } else if (delta_v < 1) {
                        h->rtt_sample_prev = 1;
                        goto keep_ref_for_next_time;
                }

        } else if (delta_v == 4) /* optimal match */
                sample = ktime_to_us(net_timedelta(tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp));
        else {                   /* suboptimal match */
                h->rtt_sample_prev = 2;
                goto keep_ref_for_next_time;
        }

        if (unlikely(sample > DCCP_SANE_RTT_MAX)) {
                DCCP_WARN("RTT sample %u too large, using max\n", sample);
                sample = DCCP_SANE_RTT_MAX;
        }

        h->rtt_sample_prev = 0; /* use current entry as next reference */
keep_ref_for_next_time:

        return sample;
}
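
/*
 * Note on the CCVal arithmetic above (illustrative): CCVal is a 4-bit
 * counter, so distances are taken modulo 16 via SUB16() from tfrc.h,
 * i.e. (a + 16 - b) & 0xf. A wrap-around such as last = 14, current = 2
 * therefore still yields SUB16(2, 14) == 4 - an "optimal match".
 */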