/* DataCenter TCP (DCTCP) congestion control.
 *
 * http://simula.stanford.edu/~alizade/Site/DCTCP.html
 *
 * This is an implementation of DCTCP over Reno, an enhancement to the
 * TCP congestion control algorithm designed for data centers. DCTCP
 * leverages Explicit Congestion Notification (ECN) in the network to
 * provide multi-bit feedback to the end hosts. DCTCP's goal is to meet
 * the following three data center transport requirements:
 *
 *  - High burst tolerance (incast due to partition/aggregate)
 *  - Low latency (short flows, queries)
 *  - High throughput (continuous data updates, large file transfers)
 *    with commodity shallow buffered switches
 *
 * The algorithm is described in detail in the following two papers:
 *
 * 1) Mohammad Alizadeh, Albert Greenberg, David A. Maltz, Jitendra Padhye,
 *    Parveen Patel, Balaji Prabhakar, Sudipta Sengupta, and Murari Sridharan:
 *      "Data Center TCP (DCTCP)", Data Center Networks session
 *      Proc. ACM SIGCOMM, New Delhi, 2010.
 *   http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf
 *
 * 2) Mohammad Alizadeh, Adel Javanmard, and Balaji Prabhakar:
 *      "Analysis of DCTCP: Stability, Convergence, and Fairness"
 *      Proc. ACM SIGMETRICS, San Jose, 2011.
 *   http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp_analysis-full.pdf
 *
 * Initial prototype from Abdul Kabbani, Masato Yasuda and Mohammad Alizadeh.
 *
 * Authors:
 *
 *	Daniel Borkmann <dborkman@redhat.com>
 *	Florian Westphal <fw@strlen.de>
 *	Glenn Judd <glenn.judd@morganstanley.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
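/* Summary of the control law implemented below (see the papers above):
 * once per observation window of roughly one RTT, the sender computes
 * the fraction F of delivered packets that carried an ECN CE mark and
 * folds it into a moving average
 *
 *	alpha <- (1 - g) * alpha + g * F,	g = 1/2^dctcp_shift_g
 *
 * On entering CWR/Recovery, the congestion window is reduced in
 * proportion to alpha rather than always halved as in Reno:
 *
 *	cwnd <- cwnd * (1 - alpha / 2)
 *
 * so light marking gives a small back-off while persistent marking
 * converges towards Reno's halving.
 */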

#include <linux/module.h>
#include <linux/mm.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
#include "tcp_dctcp.h"

#define DCTCP_MAX_ALPHA	1024U

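/* struct dctcp - per-socket DCTCP state (kept in the CA private area)
 * @old_delivered:	tp->delivered snapshot at the start of the window
 * @old_delivered_ce:	tp->delivered_ce snapshot at the start of the window
 * @prior_rcv_nxt:	rcv_nxt recorded for dctcp_ece_ack_update() so a
 *			delayed ACK can still report the previous CE state
 * @dctcp_alpha:	EWMA of the CE-marked fraction, fixed point scaled
 *			by DCTCP_MAX_ALPHA (1024 == 100% marked)
 * @next_seq:		sequence number closing the current observation window
 * @ce_state:		whether the most recent incoming segment was CE marked
 * @loss_cwnd:		cwnd saved at the last reduction, for undo
 */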
struct dctcp {
	u32 old_delivered;
	u32 old_delivered_ce;
	u32 prior_rcv_nxt;
	u32 dctcp_alpha;
	u32 next_seq;
	u32 ce_state;
	u32 loss_cwnd;
};

static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
module_param(dctcp_shift_g, uint, 0644);
MODULE_PARM_DESC(dctcp_shift_g, "parameter g for updating dctcp_alpha");

static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
module_param(dctcp_alpha_on_init, uint, 0644);
MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");

static struct tcp_congestion_ops dctcp_reno;

static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
{
	ca->next_seq = tp->snd_nxt;

	ca->old_delivered = tp->delivered;
	ca->old_delivered_ce = tp->delivered_ce;
}

static void dctcp_init(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if ((tp->ecn_flags & TCP_ECN_OK) ||
	    (sk->sk_state == TCP_LISTEN ||
	     sk->sk_state == TCP_CLOSE)) {
		struct dctcp *ca = inet_csk_ca(sk);

		ca->prior_rcv_nxt = tp->rcv_nxt;

		ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);

		ca->loss_cwnd = 0;
		ca->ce_state = 0;

		dctcp_reset(tp, ca);
		return;
	}

	/* No ECN support? Fall back to Reno. Also need to clear
	 * ECT from sk since it is set during 3WHS for DCTCP.
	 */
	inet_csk(sk)->icsk_ca_ops = &dctcp_reno;
	INET_ECN_dontxmit(sk);
}

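/* cwnd reduction on entering CWR/Recovery: scale cwnd back by alpha/2
 * instead of Reno's fixed 1/2. alpha is scaled by DCTCP_MAX_ALPHA
 * (2^10), so cwnd * (alpha / 2) / 2^10 becomes (cwnd * alpha) >> 11.
 * For example, with cwnd = 100 segments:
 *	alpha = 1024 (every delivered packet CE marked) -> ssthresh = 50
 *	alpha =  102 (~10% marked)			-> ssthresh = 96
 * The result is never brought below 2 segments.
 */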
static u32 dctcp_ssthresh(struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}

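/* Per-RTT alpha update, in fixed point scaled by DCTCP_MAX_ALPHA (2^10):
 * "alpha -= alpha >> dctcp_shift_g" is the (1 - g) * alpha term with
 * g = 1/2^dctcp_shift_g, and "delivered_ce << (10 - shift) / delivered"
 * is g * F with F expressed on the same 0..1024 scale.
 *
 * Worked example with the default g = 1/16 (dctcp_shift_g = 4): if 30
 * of 100 packets delivered in the window were CE marked, then
 * g * F = (30 << 6) / 100 = 19, i.e. alpha moves ~19/1024 towards full
 * marking while the old alpha decays by 1/16th. min_not_zero() lets
 * alpha drop all the way to zero once alpha >> dctcp_shift_g would
 * round down to 0.
 */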
static void dctcp_update_alpha(struct sock *sk, u32 flags)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct dctcp *ca = inet_csk_ca(sk);

	/* Expired RTT */
	if (!before(tp->snd_una, ca->next_seq)) {
		u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
		u32 alpha = ca->dctcp_alpha;

		/* alpha = (1 - g) * alpha + g * F */

		alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
		if (delivered_ce) {
			u32 delivered = tp->delivered - ca->old_delivered;

			/* If dctcp_shift_g == 1, a 32bit value would overflow
			 * after 8 M packets.
			 */
			delivered_ce <<= (10 - dctcp_shift_g);
			delivered_ce /= max(1U, delivered);

			alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
		}
		/* dctcp_alpha can be read from dctcp_get_info() without
		 * synchronization, so we ask the compiler not to use
		 * dctcp_alpha as a temporary variable in the prior operations.
		 */
		WRITE_ONCE(ca->dctcp_alpha, alpha);
		dctcp_reset(tp, ca);
	}
}

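/* On packet loss (as opposed to ECN marks), behave like Reno: halve
 * cwnd via ssthresh and remember the pre-loss cwnd so dctcp_cwnd_undo()
 * can restore it if the reduction turns out to be spurious.
 */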
static void dctcp_react_to_loss(struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}

static void dctcp_state(struct sock *sk, u8 new_state)
{
	if (new_state == TCP_CA_Recovery &&
	    new_state != inet_csk(sk)->icsk_ca_state)
		dctcp_react_to_loss(sk);
	/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
	 * one loss-adjustment per RTT.
	 */
}

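/* CA_EVENT_ECN_IS_CE/NO_CE are raised by the receive path whenever the
 * CE codepoint seen on incoming segments changes. dctcp_ece_ack_update()
 * (tcp_dctcp.h) implements the DCTCP receiver state machine: if an ACK
 * was delayed it is sent first with the old ECE value, then an immediate
 * ACK is forced, so runs of marked and unmarked segments are reported
 * accurately despite delayed ACKs. RTO-triggered loss is handled here
 * via CA_EVENT_LOSS.
 */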
static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
	struct dctcp *ca = inet_csk_ca(sk);

	switch (ev) {
	case CA_EVENT_ECN_IS_CE:
	case CA_EVENT_ECN_NO_CE:
		dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
		break;
	case CA_EVENT_LOSS:
		dctcp_react_to_loss(sk);
		break;
	default:
		/* Don't care about the rest. */
		break;
	}
}

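/* Export DCTCP state via inet_diag (this is what diagnostic tools such
 * as ss display for dctcp sockets). dctcp_ab_ecn/dctcp_ab_tot
 * approximate the CE-marked and total bytes delivered in the current
 * observation window as packet-count deltas times mss_cache.
 */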
static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
			     union tcp_cc_info *info)
{
	const struct dctcp *ca = inet_csk_ca(sk);
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Fill it also in case of VEGASINFO due to req struct limits.
	 * We can still correctly retrieve it later.
	 */
	if (ext & (1 << (INET_DIAG_DCTCPINFO - 1)) ||
	    ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		memset(&info->dctcp, 0, sizeof(info->dctcp));
		if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) {
			info->dctcp.dctcp_enabled = 1;
			info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
			info->dctcp.dctcp_alpha = ca->dctcp_alpha;
			info->dctcp.dctcp_ab_ecn = tp->mss_cache *
						   (tp->delivered_ce - ca->old_delivered_ce);
			info->dctcp.dctcp_ab_tot = tp->mss_cache *
						   (tp->delivered - ca->old_delivered);
		}

		*attr = INET_DIAG_DCTCPINFO;
		return sizeof(info->dctcp);
	}
	return 0;
}

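/* If a cwnd reduction turns out to have been spurious, restore at least
 * the cwnd that was recorded when the reduction was taken.
 */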
static u32 dctcp_cwnd_undo(struct sock *sk)
{
	const struct dctcp *ca = inet_csk_ca(sk);

	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

static struct tcp_congestion_ops dctcp __read_mostly = {
	.init		= dctcp_init,
	.in_ack_event   = dctcp_update_alpha,
	.cwnd_event	= dctcp_cwnd_event,
	.ssthresh	= dctcp_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= dctcp_cwnd_undo,
	.set_state	= dctcp_state,
	.get_info	= dctcp_get_info,
	.flags		= TCP_CONG_NEEDS_ECN,
	.owner		= THIS_MODULE,
	.name		= "dctcp",
};

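/* Fallback ops installed by dctcp_init() when the peer did not
 * negotiate ECN: plain Reno behaviour, but dctcp_get_info() is kept so
 * diag still reports dctcp_enabled == 0 for such sockets.
 */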
static struct tcp_congestion_ops dctcp_reno __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.get_info	= dctcp_get_info,
	.owner		= THIS_MODULE,
	.name		= "dctcp-reno",
};

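/* struct dctcp lives in the socket's icsk_ca_priv[] area, so it must
 * not exceed ICSK_CA_PRIV_SIZE; the BUILD_BUG_ON() below turns any
 * violation into a compile-time error.
 */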
static int __init dctcp_register(void)
{
	BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&dctcp);
}

static void __exit dctcp_unregister(void)
{
	tcp_unregister_congestion_control(&dctcp);
}

module_init(dctcp_register);
module_exit(dctcp_unregister);

MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_AUTHOR("Glenn Judd <glenn.judd@morganstanley.com>");

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DataCenter TCP (DCTCP)");