/*
 *
 *   YeAH TCP
 *
 * For further details look at:
 *    http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
 *
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

#include "tcp_vegas.h"

#define TCP_YEAH_ALPHA       80 /* lin: number of packets queued at the bottleneck */
#define TCP_YEAH_GAMMA        1 /* lin: fraction of queue to be removed per rtt */
#define TCP_YEAH_DELTA        3 /* log: minimum fraction of cwnd to be removed on loss */
#define TCP_YEAH_EPSILON      1 /* log: maximum fraction to be removed on early decongestion */
#define TCP_YEAH_PHY          8 /* lin: maximum delta from base */
#define TCP_YEAH_RHO         16 /* lin: minimum number of consecutive rtts to consider competition on loss */
#define TCP_YEAH_ZETA        50 /* lin: minimum number of state switches to reset reno_count */

#define TCP_SCALABLE_AI_CNT 100U

/* YeAH variables */
struct yeah {
	struct vegas vegas;	/* must be first */

	/* YeAH */
	u32 lastQ;
	u32 doing_reno_now;

	u32 reno_count;
	u32 fast_count;

	u32 pkts_acked;
};

static void tcp_yeah_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	tcp_vegas_init(sk);

	yeah->doing_reno_now = 0;
	yeah->lastQ = 0;

	yeah->reno_count = 2;

	/* Ensure the MD (multiplicative decrease) arithmetic works.
	 * This is somewhat pedantic, since I don't think we will see
	 * a cwnd this large. :)
	 */
	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff / 128);
}

static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (icsk->icsk_ca_state == TCP_CA_Open)
		yeah->pkts_acked = pkts_acked;

	tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
}
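/* Worked example (ours, illustrative numbers only): in "fast" mode the
 * additive increase in tcp_yeah_cong_avoid() below accumulates acked
 * packets in snd_cwnd_cnt and grows cwnd by one segment once the count
 * exceeds min(cwnd, TCP_SCALABLE_AI_CNT).  For a large window, say
 * cwnd = 5000, that is one increment per 100 acked packets, i.e.
 * roughly cwnd / 100 = 50 segments of growth per RTT, versus Reno's
 * single segment per RTT.
 */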
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct yeah *yeah = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);
	else if (!yeah->doing_reno_now) {
		/* Scalable */
		tp->snd_cwnd_cnt += yeah->pkts_acked;
		if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		}

		yeah->pkts_acked = 1;
	} else {
		/* Reno */
		if (tp->snd_cwnd_cnt < tp->snd_cwnd)
			tp->snd_cwnd_cnt++;

		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
			tp->snd_cwnd++;
			tp->snd_cwnd_cnt = 0;
		}
	}

	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
	 *
	 * These are so named because they represent the approximate values
	 * of snd_una and snd_nxt at the beginning of the current RTT. More
	 * precisely, they represent the amount of data sent during the RTT.
	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
	 * we will calculate that (v_beg_snd_nxt - v_vegas.beg_snd_una)
	 * outstanding bytes of data have been ACKed during the course of
	 * the RTT, giving an "actual" rate of:
	 *
	 *     (v_beg_snd_nxt - v_vegas.beg_snd_una) / (rtt duration)
	 *
	 * Unfortunately, v_vegas.beg_snd_una is not exactly equal to snd_una,
	 * because delayed ACKs can cover more than one segment, so they
	 * don't line up nicely with the boundaries of RTTs.
	 *
	 * Another unfortunate fact of life is that delayed ACKs delay the
	 * advance of the left edge of our send window, so that the number
	 * of bytes we send in an RTT is often less than our cwnd will allow.
	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
	 */

	if (after(ack, yeah->vegas.beg_snd_nxt)) {
		/* We do the Vegas calculations only if we got enough RTT
		 * samples that we can be reasonably sure that we got
		 * at least one RTT sample that wasn't from a delayed ACK.
		 * If we only had 2 samples total, that means we're getting
		 * only 1 ACK per RTT, which means they're almost certainly
		 * delayed ACKs. If we have 3 samples, we should be OK.
		 */

		if (yeah->vegas.cntRTT > 2) {
			u32 rtt, queue;
			u64 bw;

			/* We have enough RTT samples, so, using the Vegas
			 * algorithm, we determine if we should increase or
			 * decrease cwnd, and by how much.
			 */

			/* Pluck out the RTT we are using for the Vegas
			 * calculations. This is the min RTT seen during the
			 * last RTT. Taking the min filters out the effects
			 * of delayed ACKs, at the cost of noticing congestion
			 * a bit later.
			 */
			rtt = yeah->vegas.minRTT;

			/* Compute the excess number of packets queued above
			 * the available bandwidth.  Avoid a full 64-bit divide.
			 */
			bw = tp->snd_cwnd;
			bw *= rtt - yeah->vegas.baseRTT;
			do_div(bw, rtt);
			queue = bw;

			if (queue > TCP_YEAH_ALPHA ||
			    rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
				if (queue > TCP_YEAH_ALPHA &&
				    tp->snd_cwnd > yeah->reno_count) {
					u32 reduction = min(queue / TCP_YEAH_GAMMA,
							    tp->snd_cwnd >> TCP_YEAH_EPSILON);

					tp->snd_cwnd -= reduction;

					tp->snd_cwnd = max(tp->snd_cwnd,
							   yeah->reno_count);

					tp->snd_ssthresh = tp->snd_cwnd;
				}

				if (yeah->reno_count <= 2)
					yeah->reno_count = max(tp->snd_cwnd >> 1, 2U);
				else
					yeah->reno_count++;

				yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
							   0xffffffU);
			} else {
				yeah->fast_count++;

				if (yeah->fast_count > TCP_YEAH_ZETA) {
					yeah->reno_count = 2;
					yeah->fast_count = 0;
				}

				yeah->doing_reno_now = 0;
			}

			yeah->lastQ = queue;
		}

		/* Save the extent of the current window so we can use this
		 * at the end of the next RTT.
		 */
		yeah->vegas.beg_snd_una = yeah->vegas.beg_snd_nxt;
		yeah->vegas.beg_snd_nxt = tp->snd_nxt;
		yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;

		/* Wipe the slate clean for the next RTT. */
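/* Illustration only (not compiled, hence the #if 0 guard): a plain
 * user-space sketch of the backlog estimate computed in
 * tcp_yeah_cong_avoid() above.  The stand-alone form and the function
 * name are ours, not part of the module or the paper.
 */
#if 0
#include <stdint.h>

static uint32_t yeah_queue_estimate(uint32_t cwnd, uint32_t rtt_us,
				    uint32_t base_rtt_us)
{
	/* Q = cwnd * (rtt - baseRTT) / rtt, in packets.  The 64-bit
	 * intermediate mirrors the do_div() arithmetic above and avoids
	 * overflowing the cwnd * (rtt - baseRTT) product.
	 */
	uint64_t bw = (uint64_t)cwnd * (rtt_us - base_rtt_us);

	return (uint32_t)(bw / rtt_us);
}

/* E.g. cwnd = 880 packets, baseRTT = 100000 us, rtt = 110000 us:
 * Q = 880 * 10000 / 110000 = 80 packets, exactly TCP_YEAH_ALPHA, so
 * such a flow would be on the edge of leaving "fast" mode.
 */
#endif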
*/ 210 yeah->vegas.cntRTT = 0; 211 yeah->vegas.minRTT = 0x7fffffff; 212 } 213 } 214 215 static u32 tcp_yeah_ssthresh(struct sock *sk) { 216 const struct tcp_sock *tp = tcp_sk(sk); 217 struct yeah *yeah = inet_csk_ca(sk); 218 u32 reduction; 219 220 if (yeah->doing_reno_now < TCP_YEAH_RHO) { 221 reduction = yeah->lastQ; 222 223 reduction = min( reduction, max(tp->snd_cwnd>>1, 2U) ); 224 225 reduction = max( reduction, tp->snd_cwnd >> TCP_YEAH_DELTA); 226 } else 227 reduction = max(tp->snd_cwnd>>1,2U); 228 229 yeah->fast_count = 0; 230 yeah->reno_count = max(yeah->reno_count>>1, 2U); 231 232 return tp->snd_cwnd - reduction; 233 } 234 235 static struct tcp_congestion_ops tcp_yeah = { 236 .flags = TCP_CONG_RTT_STAMP, 237 .init = tcp_yeah_init, 238 .ssthresh = tcp_yeah_ssthresh, 239 .cong_avoid = tcp_yeah_cong_avoid, 240 .min_cwnd = tcp_reno_min_cwnd, 241 .set_state = tcp_vegas_state, 242 .cwnd_event = tcp_vegas_cwnd_event, 243 .get_info = tcp_vegas_get_info, 244 .pkts_acked = tcp_yeah_pkts_acked, 245 246 .owner = THIS_MODULE, 247 .name = "yeah", 248 }; 249 250 static int __init tcp_yeah_register(void) 251 { 252 BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE); 253 tcp_register_congestion_control(&tcp_yeah); 254 return 0; 255 } 256 257 static void __exit tcp_yeah_unregister(void) 258 { 259 tcp_unregister_congestion_control(&tcp_yeah); 260 } 261 262 module_init(tcp_yeah_register); 263 module_exit(tcp_yeah_unregister); 264 265 MODULE_AUTHOR("Angelo P. Castellani"); 266 MODULE_LICENSE("GPL"); 267 MODULE_DESCRIPTION("YeAH TCP"); 268