/*
 *
 *   YeAH TCP
 *
 * For further details look at:
 *    http://wil.cs.caltech.edu/pfldnet2007/paper/YeAH_TCP.pdf
 *
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>

#include <net/tcp.h>

#include "tcp_vegas.h"

#define TCP_YEAH_ALPHA       80 //lin number of packets queued at the bottleneck
#define TCP_YEAH_GAMMA        1 //lin fraction of queue to be removed per rtt
#define TCP_YEAH_DELTA        3 //log minimum fraction of cwnd to be removed on loss
#define TCP_YEAH_EPSILON      1 //log maximum fraction to be removed on early decongestion
#define TCP_YEAH_PHY          8 //lin maximum delta from base
#define TCP_YEAH_RHO         16 //lin minimum number of consecutive RTTs to consider competition on loss
#define TCP_YEAH_ZETA        50 //lin minimum number of state switches to reset reno_count

#define TCP_SCALABLE_AI_CNT  100U

/* YeAH variables */
struct yeah {
        struct vegas vegas;     /* must be first */

        /* YeAH */
        u32 lastQ;
        u32 doing_reno_now;

        u32 reno_count;
        u32 fast_count;

        u32 pkts_acked;
};

static void tcp_yeah_init(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct yeah *yeah = inet_csk_ca(sk);

        tcp_vegas_init(sk);

        yeah->doing_reno_now = 0;
        yeah->lastQ = 0;

        yeah->reno_count = 2;

        /* Ensure the MD arithmetic works.  This is somewhat pedantic,
         * since I don't think we will see a cwnd this large. :) */
        tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff / 128);
}

static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct yeah *yeah = inet_csk_ca(sk);

        if (icsk->icsk_ca_state == TCP_CA_Open)
                yeah->pkts_acked = pkts_acked;

        tcp_vegas_pkts_acked(sk, pkts_acked, last);
}

static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
                                u32 seq_rtt, u32 in_flight, int flag)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct yeah *yeah = inet_csk_ca(sk);

        if (!tcp_is_cwnd_limited(sk, in_flight))
                return;

        if (tp->snd_cwnd <= tp->snd_ssthresh)
                tcp_slow_start(tp);
        else if (!yeah->doing_reno_now) {
                /* Scalable */

                tp->snd_cwnd_cnt += yeah->pkts_acked;
                if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
                        if (tp->snd_cwnd < tp->snd_cwnd_clamp)
                                tp->snd_cwnd++;
                        tp->snd_cwnd_cnt = 0;
                }

                yeah->pkts_acked = 1;
        } else {
                /* Reno */

                if (tp->snd_cwnd_cnt < tp->snd_cwnd)
                        tp->snd_cwnd_cnt++;

                if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
                        tp->snd_cwnd++;
                        tp->snd_cwnd_cnt = 0;
                }
        }
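
        /* A note on the two AI modes above: in "fast" mode the Scalable-style
         * rule adds one segment to cwnd for every min(cwnd, TCP_SCALABLE_AI_CNT)
         * packets acked (roughly 1% growth per RTT once cwnd exceeds 100),
         * while in "reno" mode cwnd grows by at most one segment per RTT.
         */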

        /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
         *
         * These are so named because they represent the approximate values
         * of snd_una and snd_nxt at the beginning of the current RTT. More
         * precisely, they represent the amount of data sent during the RTT.
         * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
         * we will calculate that (v_beg_snd_nxt - v_vegas.beg_snd_una) outstanding
         * bytes of data have been ACKed during the course of the RTT, giving
         * an "actual" rate of:
         *
         *      (v_beg_snd_nxt - v_vegas.beg_snd_una) / (rtt duration)
         *
         * Unfortunately, v_vegas.beg_snd_una is not exactly equal to snd_una,
         * because delayed ACKs can cover more than one segment, so they
         * don't line up exactly with the boundaries of RTTs.
         *
         * Another unfortunate fact of life is that delayed ACKs delay the
         * advance of the left edge of our send window, so that the number
         * of bytes we send in an RTT is often less than our cwnd will allow.
         * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
         */

        if (after(ack, yeah->vegas.beg_snd_nxt)) {

                /* We do the Vegas calculations only if we got enough RTT
                 * samples that we can be reasonably sure that we got
                 * at least one RTT sample that wasn't from a delayed ACK.
                 * If we only had 2 samples total,
                 * then that means we're getting only 1 ACK per RTT, which
                 * means they're almost certainly delayed ACKs.
                 * If we have 3 samples, we should be OK.
                 */

                if (yeah->vegas.cntRTT > 2) {
                        u32 rtt, queue;
                        u64 bw;

                        /* We have enough RTT samples, so, using the Vegas
                         * algorithm, we determine if we should increase or
                         * decrease cwnd, and by how much.
                         */

                        /* Pluck out the RTT we are using for the Vegas
                         * calculations. This is the min RTT seen during the
                         * last RTT. Taking the min filters out the effects
                         * of delayed ACKs, at the cost of noticing congestion
                         * a bit later.
                         */
                        rtt = yeah->vegas.minRTT;

                        /* Compute excess number of packets above bandwidth.
                         * Avoid doing full 64 bit divide.
                         */
                        bw = tp->snd_cwnd;
                        bw *= rtt - yeah->vegas.baseRTT;
                        do_div(bw, rtt);
                        queue = bw;

                        if (queue > TCP_YEAH_ALPHA ||
                            rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
                                if (queue > TCP_YEAH_ALPHA &&
                                    tp->snd_cwnd > yeah->reno_count) {
                                        u32 reduction = min(queue / TCP_YEAH_GAMMA,
                                                            tp->snd_cwnd >> TCP_YEAH_EPSILON);

                                        tp->snd_cwnd -= reduction;

                                        tp->snd_cwnd = max(tp->snd_cwnd,
                                                           yeah->reno_count);

                                        tp->snd_ssthresh = tp->snd_cwnd;
                                }

                                if (yeah->reno_count <= 2)
                                        yeah->reno_count = max(tp->snd_cwnd >> 1, 2U);
                                else
                                        yeah->reno_count++;

                                yeah->doing_reno_now = min(yeah->doing_reno_now + 1,
                                                           0xffffffU);
                        } else {
                                yeah->fast_count++;

                                if (yeah->fast_count > TCP_YEAH_ZETA) {
                                        yeah->reno_count = 2;
                                        yeah->fast_count = 0;
                                }

                                yeah->doing_reno_now = 0;
                        }

                        yeah->lastQ = queue;
                }

                /* Save the extent of the current window so we can use this
                 * at the end of the next RTT.
                 */
                yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
                yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
                yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;

                /* Wipe the slate clean for the next RTT. */
                yeah->vegas.cntRTT = 0;
                yeah->vegas.minRTT = 0x7fffffff;
        }
}
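
/* Worked example of the backlog estimate in tcp_yeah_cong_avoid() above,
 * with purely illustrative numbers: snd_cwnd = 200 packets, baseRTT = 100 ms
 * and minRTT = 150 ms over the last window give
 *
 *      queue = cwnd * (rtt - baseRTT) / rtt = 200 * 50 / 150 = 66 packets.
 *
 * Here queue <= TCP_YEAH_ALPHA (80), so no precautionary decongestion is
 * applied, but rtt - baseRTT = 50 ms exceeds baseRTT / TCP_YEAH_PHY = 12.5 ms,
 * so doing_reno_now is incremented and the sender falls back to Reno-style
 * additive increase until the queueing delay drains.
 */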

static u32 tcp_yeah_ssthresh(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct yeah *yeah = inet_csk_ca(sk);
        u32 reduction;

        if (yeah->doing_reno_now < TCP_YEAH_RHO) {
                reduction = yeah->lastQ;

                reduction = min(reduction, max(tp->snd_cwnd >> 1, 2U));

                reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
        } else {
                reduction = max(tp->snd_cwnd >> 1, 2U);
        }

        yeah->fast_count = 0;
        yeah->reno_count = max(yeah->reno_count >> 1, 2U);

        return tp->snd_cwnd - reduction;
}

static struct tcp_congestion_ops tcp_yeah = {
        .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_yeah_init,
        .ssthresh       = tcp_yeah_ssthresh,
        .cong_avoid     = tcp_yeah_cong_avoid,
        .min_cwnd       = tcp_reno_min_cwnd,
        .set_state      = tcp_vegas_state,
        .cwnd_event     = tcp_vegas_cwnd_event,
        .get_info       = tcp_vegas_get_info,
        .pkts_acked     = tcp_yeah_pkts_acked,

        .owner          = THIS_MODULE,
        .name           = "yeah",
};

static int __init tcp_yeah_register(void)
{
        BUG_ON(sizeof(struct yeah) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&tcp_yeah);
}

static void __exit tcp_yeah_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_yeah);
}

module_init(tcp_yeah_register);
module_exit(tcp_yeah_unregister);

MODULE_AUTHOR("Angelo P. Castellani");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("YeAH TCP");
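
/*
 * Usage note: with this module loaded (e.g. "modprobe tcp_yeah"), YeAH can
 * be selected system-wide via
 *
 *      sysctl -w net.ipv4.tcp_congestion_control=yeah
 *
 * or per socket from userspace with
 *
 *      setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "yeah", 4);
 */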