/*
 * TCP Illinois congestion control.
 * Home page:
 *	http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
 *
 * The algorithm is described in:
 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
 *  for High-Speed Networks"
 * http://www.ews.uiuc.edu/~shaoliu/papersandslides/liubassri06perf.pdf
 *
 * Implemented from description in paper and ns-2 simulation.
 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
 */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <asm/div64.h>
#include <net/tcp.h>

#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
#define U32_MAX		((u32)~0U)
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */

#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)		/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)		/* 0.5 */
#define BETA_BASE	BETA_MAX

static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTT's before full growth");
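
/* For illustration only: both parameters use permission 0, so they are not
 * exported via sysfs and can only be set at module load time, e.g.
 *
 *	modprobe tcp_illinois win_thresh=15 theta=5
 *
 * The algorithm is then selected system-wide with
 *	sysctl -w net.ipv4.tcp_congestion_control=illinois
 * or per socket via setsockopt(TCP_CONGESTION).
 */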

/* TCP Illinois Parameters */
struct illinois {
	u64	sum_rtt;	/* sum of rtt's measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase */
	u32	beta;		/* Multiplicative decrease */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of rtt measurements below threshold */
};

static void rtt_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	ca->end_seq = tp->snd_nxt;
	ca->cnt_rtt = 0;
	ca->sum_rtt = 0;

	/* TODO: age max_rtt? */
}

static void tcp_illinois_init(struct sock *sk)
{
	struct illinois *ca = inet_csk_ca(sk);

	ca->alpha = ALPHA_MAX;
	ca->beta = BETA_BASE;
	ca->base_rtt = 0x7fffffff;
	ca->max_rtt = 0;

	ca->acked = 0;
	ca->rtt_low = 0;
	ca->rtt_above = 0;

	rtt_reset(sk);
}

/* Measure RTT for each ack. */
static void tcp_illinois_rtt_sample(struct sock *sk, u32 rtt)
{
	struct illinois *ca = inet_csk_ca(sk);

	/* ignore bogus values, this prevents wraparound in alpha math */
	if (rtt > RTT_MAX)
		rtt = RTT_MAX;

	/* keep track of minimum RTT seen so far */
	if (ca->base_rtt > rtt)
		ca->base_rtt = rtt;

	/* and max */
	if (ca->max_rtt < rtt)
		ca->max_rtt = rtt;

	++ca->cnt_rtt;
	ca->sum_rtt += rtt;
}

/* Capture count of packets covered by ack, to adjust for delayed acks */
static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked)
{
	struct illinois *ca = inet_csk_ca(sk);

	ca->acked = pkts_acked;
}

/* Maximum queuing delay */
static inline u32 max_delay(const struct illinois *ca)
{
	return ca->max_rtt - ca->base_rtt;
}

/* Average queuing delay */
static inline u32 avg_delay(const struct illinois *ca)
{
	u64 t = ca->sum_rtt;

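	/* update_params() only calls this when cnt_rtt > 0, so the
	 * divide below cannot be a divide by zero.
	 */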
	do_div(t, ca->cnt_rtt);
	return t - ca->base_rtt;
}

/*
 * Compute value of alpha used for additive increase.
 * If small window then use 1.0, equivalent to Reno.
 *
 * For larger windows, adjust based on average delay.
 * A. If average delay is at minimum (we are uncongested),
 *    then use large alpha (10.0) to increase faster.
 * B. If average delay is at maximum (getting congested),
 *    then use small alpha (0.3).
 *
 * The result is a convex window growth curve.
 */
static u32 alpha(struct illinois *ca, u32 da, u32 dm)
{
	u32 d1 = dm / 100;	/* Low threshold */

	if (da <= d1) {
		/* If never got out of low delay zone, then use max */
		if (!ca->rtt_above)
			return ALPHA_MAX;

		/* Wait for theta (default 5) good RTT's before allowing
		 * alpha to go to alpha max.  This prevents one good RTT
		 * from causing a sudden window increase.
		 */
		if (++ca->rtt_low < theta)
			return ca->alpha;

		ca->rtt_low = 0;
		ca->rtt_above = 0;
		return ALPHA_MAX;
	}

	ca->rtt_above = 1;

	/*
	 * Based on:
	 *
	 *      (dm - d1) amin amax
	 * k1 = -------------------
	 *         amax - amin
	 *
	 *       (dm - d1) amin
	 * k2 = ----------------  - d1
	 *        amax - amin
	 *
	 *             k1
	 * alpha = ----------
	 *          k2 + da
	 */

	dm -= d1;
	da -= d1;
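	/* With d1 already subtracted from both dm and da above, the
	 * k1/(k2 + da) form reduces to the expression returned below:
	 *
	 *      k1        dm * amin * amax / (amax - amin)
	 *   -------- = ------------------------------------
	 *    k2 + da     dm * amin / (amax - amin) + da
	 *
	 *                          dm * amax
	 *            = --------------------------------
	 *               dm + da * (amax - amin) / amin
	 */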
	return (dm * ALPHA_MAX) /
		(dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
}

/*
 * Beta used for multiplicative decrease.
 * For small window sizes returns same value as Reno (0.5)
 *
 * If delay is small (<= 10% of max) then beta = 1/8
 * If delay is 80% of max or more then beta = 1/2
 * In between is a linear function
 */
static u32 beta(u32 da, u32 dm)
{
	u32 d2, d3;

	d2 = dm / 10;
	if (da <= d2)
		return BETA_MIN;

	d3 = (8 * dm) / 10;
	if (da >= d3 || d3 <= d2)
		return BETA_MAX;

	/*
	 * Based on:
	 *
	 *       bmin d3 - bmax d2
	 * k3 = -------------------
	 *         d3 - d2
	 *
	 *       bmax - bmin
	 * k4 = -------------
	 *         d3 - d2
	 *
	 * b = k3 + k4 da
	 */
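	/* Sanity check (values in BETA_SCALE units, i.e. 1/64ths):
	 * with bmin = 8 (1/8) and bmax = 32 (1/2), a delay halfway between
	 * the thresholds, da = (d2 + d3)/2, gives
	 *	b = (8*d3 - 32*d2 + 24*(d2 + d3)/2) / (d3 - d2) = 20
	 * i.e. 20/64 ~= 0.31, halfway between 1/8 and 1/2 as expected.
	 */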
	return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
		/ (d3 - d2);
}

/* Update alpha and beta values once per RTT */
static void update_params(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (tp->snd_cwnd < win_thresh) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
	} else if (ca->cnt_rtt > 0) {
		u32 dm = max_delay(ca);
		u32 da = avg_delay(ca);

		ca->alpha = alpha(ca, da, dm);
		ca->beta = beta(da, dm);
	}

	rtt_reset(sk);
}

/*
 * In case of loss, reset to default values
 */
static void tcp_illinois_state(struct sock *sk, u8 new_state)
{
	struct illinois *ca = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
		ca->rtt_low = 0;
		ca->rtt_above = 0;
		rtt_reset(sk);
	}
}

/*
 * Increase window in response to successful acknowledgment.
 */
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
				    u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (after(ack, ca->end_seq))
		update_params(sk);

	/* RFC2861: only increase cwnd if the window is fully utilized */
	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In slow start */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);
	else {
		u32 delta;

		/* snd_cwnd_cnt is # of packets since last cwnd increment */
		tp->snd_cwnd_cnt += ca->acked;
		ca->acked = 1;

		/* This is a close approximation of:
		 *	tp->snd_cwnd += alpha/tp->snd_cwnd
		 */
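		/* e.g. with alpha == ALPHA_BASE (1.0 in fixed point, i.e. 128),
		 * delta equals snd_cwnd_cnt, so delta only reaches snd_cwnd
		 * after about one window's worth of packets has been acked
		 * and cwnd grows by one per RTT, as in Reno.
		 */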
		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
		if (delta >= tp->snd_cwnd) {
			tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
					   (u32) tp->snd_cwnd_clamp);
			tp->snd_cwnd_cnt = 0;
		}
	}
}

static u32 tcp_illinois_ssthresh(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	/* Multiplicative decrease */
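	/* e.g. when beta is at BETA_MAX (0.5) this halves cwnd, as Reno does;
	 * with small queueing delay the cut can be as gentle as 1/8.
	 */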
	return max((tp->snd_cwnd * ca->beta) >> BETA_SHIFT, 2U);
}

/* Extract info for TCP socket info provided via netlink. */
static void tcp_illinois_info(struct sock *sk, u32 ext,
			      struct sk_buff *skb)
{
	const struct illinois *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		struct tcpvegas_info info = {
			.tcpv_enabled = 1,
			.tcpv_rttcnt = ca->cnt_rtt,
			.tcpv_minrtt = ca->base_rtt,
		};

		/* avoid divide by zero when no RTT samples have been taken yet */
		if (info.tcpv_rttcnt > 0) {
			u64 t = ca->sum_rtt;

			do_div(t, info.tcpv_rttcnt);
			info.tcpv_rtt = t;
		}

		nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
	}
}

static struct tcp_congestion_ops tcp_illinois = {
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.min_cwnd	= tcp_reno_min_cwnd,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.rtt_sample	= tcp_illinois_rtt_sample,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};

static int __init tcp_illinois_register(void)
{
	BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_illinois);
}

static void __exit tcp_illinois_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_illinois);
}

module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");