/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 * Implemented on Linux by:
 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */
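
/* For the algorithm itself, see Nichols & Jacobson, "Controlling Queue
 * Delay", ACM Queue, May 2012, and RFC 8289 ("Controlled Delay Active
 * Queue Management"). The CoDel building blocks come from the headers
 * included below (net/codel.h and friends); this file only wires them
 * into a standalone qdisc.
 */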

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

#define DEFAULT_CODEL_LIMIT 1000
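
/* Example setup via the standard tc(8) front end (illustrative values,
 * not a recommendation; the attribute names match the netlink policy
 * below):
 *
 *	tc qdisc add dev eth0 root codel limit 1000 target 5ms \
 *		interval 100ms ecn
 */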

struct codel_sched_data {
	struct codel_params	params;	/* tunables: target, interval, ecn, ... */
	struct codel_vars	vars;	/* control law state, updated at dequeue */
	struct codel_stats	stats;	/* counters exported by codel_dump_stats() */
	u32			drop_overlimit;	/* tail-drops due to sch->limit */
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

/* Called by codel_dequeue() for each packet it decides to drop. */
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
					  q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		/* Stamp the packet so dequeue can compute its sojourn time */
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};
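
/* Netlink carries TARGET, INTERVAL and CE_THRESHOLD in microseconds;
 * internally they are kept in codel time units, i.e. nanoseconds
 * right-shifted by CODEL_SHIFT, hence the conversions in codel_change()
 * below and the codel_time_to_us() calls on the dump path.
 */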

static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);

	if (tb[TCA_CODEL_ECN])
		q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);

	/* If the limit was lowered, trim the queue down to it now */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}
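
/* Parameters can also be updated on a live qdisc, e.g. (illustrative):
 *
 *	tc qdisc change dev eth0 root codel target 10ms ecn
 */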

static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

	/* Bypassing an empty qdisc is only safe if packets may actually
	 * be queued, i.e. the limit is nonzero.
	 */
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(q->params.target)) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(q->params.interval)) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			q->params.ecn))
		goto nla_put_failure;
	if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->params.ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit	= q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		/* Report drop_next relative to now; negative means overdue */
		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
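
/* These xstats appear in the regular statistics dump, e.g. via
 * "tc -s qdisc show dev eth0" (illustrative; any reader of the
 * TCA_STATS_APP attribute will see them).
 */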

static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		= "codel",
	.priv_size	= sizeof(struct codel_sched_data),

	.enqueue	= codel_qdisc_enqueue,
	.dequeue	= codel_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= codel_init,
	.reset		= codel_reset,
	.change		= codel_change,
	.dump		= codel_dump,
	.dump_stats	= codel_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");