xref: /openbmc/linux/net/sched/sch_skbprio.c (revision 37744fee)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_skbprio.c  SKB Priority Queue.
 *
 * Authors:	Nishanth Devarajan, <ndev2021@gmail.com>
 *		Cody Doucette, <doucette@bu.edu>
 *		original idea by Michel Machado, Cody Doucette, and Qiaobin Fu
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/inet_ecn.h>

/*		SKB Priority Queue
 *	=================================
 *
 * Skbprio (SKB Priority Queue) is a queueing discipline that prioritizes
 * packets according to their skb->priority field. Under congestion,
 * Skbprio drops already-enqueued lower priority packets to make space
 * available for higher priority packets; it was conceived as a solution
 * for denial-of-service defenses that need to route packets of different
 * priorities as a means of withstanding DoS attacks.
 */

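/* A typical (illustrative) setup, assuming iproute2 support for skbprio;
 * the exact syntax may differ between iproute2 versions:
 *
 *	tc qdisc add dev eth0 root skbprio limit 64
 *
 * Packets are then scheduled purely by skb->priority, which applications
 * can set via the SO_PRIORITY socket option or which can be rewritten by
 * an earlier action such as skbedit.
 */
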
struct skbprio_sched_data {
	/* Queue state: one FIFO and one stats block per priority level. */
	struct sk_buff_head qdiscs[SKBPRIO_MAX_PRIORITY];
	struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY];
	/* Cached bounds of the non-empty priorities; reset to 0 and
	 * SKBPRIO_MAX_PRIORITY - 1 respectively when the qdisc is empty.
	 */
	u16 highest_prio;
	u16 lowest_prio;
};

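/* Scan downwards from the current highest priority for the next non-empty
 * FIFO.  Returns 0, the default highest-priority setting, when all of them
 * are empty.
 */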
static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
{
	int prio;

	for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) {
		if (!skb_queue_empty(&q->qdiscs[prio]))
			return prio;
	}

	/* SKB queue is empty, return 0 (default highest priority setting). */
	return 0;
}

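/* Counterpart of calc_new_high_prio(): scan upwards from the current lowest
 * priority for the next non-empty FIFO, falling back to
 * SKBPRIO_MAX_PRIORITY - 1 when all of them are empty.
 */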
static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
{
	int prio;

	for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) {
		if (!skb_queue_empty(&q->qdiscs[prio]))
			return prio;
	}

	/* SKB queue is empty, return SKBPRIO_MAX_PRIORITY - 1
	 * (default lowest priority setting).
	 */
	return SKBPRIO_MAX_PRIORITY - 1;
}

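/* Enqueue @skb on the FIFO that matches its (clamped) skb->priority.  If
 * the qdisc is full, @skb is dropped when nothing queued has a strictly
 * lower priority; otherwise @skb is admitted, the tail of the lowest
 * priority FIFO is dropped instead, and NET_XMIT_CN is returned to signal
 * congestion.
 */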
static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1;
	struct skbprio_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *qdisc;
	struct sk_buff_head *lp_qdisc;
	struct sk_buff *to_drop;
	u16 prio, lp;

	/* Obtain the priority of @skb. */
	prio = min(skb->priority, max_priority);

	qdisc = &q->qdiscs[prio];
	if (sch->q.qlen < sch->limit) {
		__skb_queue_tail(qdisc, skb);
		qdisc_qstats_backlog_inc(sch, skb);
		q->qstats[prio].backlog += qdisc_pkt_len(skb);

		/* Check to update highest and lowest priorities. */
		if (prio > q->highest_prio)
			q->highest_prio = prio;

		if (prio < q->lowest_prio)
			q->lowest_prio = prio;

		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}

	/* If this packet has the lowest priority, drop it. */
	lp = q->lowest_prio;
	if (prio <= lp) {
		q->qstats[prio].drops++;
		q->qstats[prio].overlimits++;
		return qdisc_drop(skb, sch, to_free);
	}

	__skb_queue_tail(qdisc, skb);
	qdisc_qstats_backlog_inc(sch, skb);
	q->qstats[prio].backlog += qdisc_pkt_len(skb);

	/* Drop the packet at the tail of the lowest priority qdisc. */
	lp_qdisc = &q->qdiscs[lp];
	to_drop = __skb_dequeue_tail(lp_qdisc);
	BUG_ON(!to_drop);
	qdisc_qstats_backlog_dec(sch, to_drop);
	qdisc_drop(to_drop, sch, to_free);

	q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);
	q->qstats[lp].drops++;
	q->qstats[lp].overlimits++;

	/* Check to update highest and lowest priorities. */
	if (skb_queue_empty(lp_qdisc)) {
		if (q->lowest_prio == q->highest_prio) {
			/* The incoming packet is the only packet in queue. */
			BUG_ON(sch->q.qlen != 1);
			q->lowest_prio = prio;
			q->highest_prio = prio;
		} else {
			q->lowest_prio = calc_new_low_prio(q);
		}
	}

	if (prio > q->highest_prio)
		q->highest_prio = prio;

	return NET_XMIT_CN;
}

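/* Dequeue from the head of the highest-priority non-empty FIFO and keep
 * the cached highest_prio/lowest_prio bounds consistent.
 */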
static struct sk_buff *skbprio_dequeue(struct Qdisc *sch)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio];
	struct sk_buff *skb = __skb_dequeue(hpq);

	if (unlikely(!skb))
		return NULL;

	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);

	/* Update highest priority field. */
	if (skb_queue_empty(hpq)) {
		if (q->lowest_prio == q->highest_prio) {
			BUG_ON(sch->q.qlen);
			q->highest_prio = 0;
			q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
		} else {
			q->highest_prio = calc_new_high_prio(q);
		}
	}
	return skb;
}

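/* The only tunable is the packet limit, carried in a struct tc_skbprio_qopt;
 * an options attribute of any other size is rejected with -EINVAL.
 */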
static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct tc_skbprio_qopt *ctl = nla_data(opt);

	if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch->limit = ctl->limit;
	return 0;
}

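/* Start with every per-priority FIFO empty and a default limit of 64
 * packets; an options attribute, if present, overrides the limit via
 * skbprio_change().
 */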
static int skbprio_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	int prio;

	/* Initialise all queues, one for each possible priority. */
	for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
		__skb_queue_head_init(&q->qdiscs[prio]);

	memset(&q->qstats, 0, sizeof(q->qstats));
	q->highest_prio = 0;
	q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
	sch->limit = 64;
	if (!opt)
		return 0;

	return skbprio_change(sch, opt, extack);
}

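/* Report the current configuration (only the limit) back to userspace. */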
static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tc_skbprio_qopt opt;

	opt.limit = sch->limit;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		return -1;

	return skb->len;
}

static void skbprio_reset(struct Qdisc *sch)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	int prio;

	sch->qstats.backlog = 0;
	sch->q.qlen = 0;

	for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
		__skb_queue_purge(&q->qdiscs[prio]);

	memset(&q->qstats, 0, sizeof(q->qstats));
	q->highest_prio = 0;
	q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
}

static void skbprio_destroy(struct Qdisc *sch)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
		__skb_queue_purge(&q->qdiscs[prio]);
}

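/* Skbprio has no configurable classes.  The hooks below merely expose each
 * priority level as a pseudo-class (minor numbers 1..SKBPRIO_MAX_PRIORITY)
 * so that per-priority queue statistics can be dumped.
 */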
static struct Qdisc *skbprio_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long skbprio_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static int skbprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

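/* Class minor numbers are 1-based, so class @cl maps to index cl - 1 in
 * the per-priority qstats array.
 */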
static int skbprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);

	if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1],
				  q->qstats[cl - 1].qlen) < 0)
		return -1;
	return 0;
}

static void skbprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < SKBPRIO_MAX_PRIORITY; i++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops skbprio_class_ops = {
	.leaf		=	skbprio_leaf,
	.find		=	skbprio_find,
	.dump		=	skbprio_dump_class,
	.dump_stats	=	skbprio_dump_class_stats,
	.walk		=	skbprio_walk,
};

static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = {
	.cl_ops		=	&skbprio_class_ops,
	.id		=	"skbprio",
	.priv_size	=	sizeof(struct skbprio_sched_data),
	.enqueue	=	skbprio_enqueue,
	.dequeue	=	skbprio_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	skbprio_init,
	.reset		=	skbprio_reset,
	.change		=	skbprio_change,
	.dump		=	skbprio_dump,
	.destroy	=	skbprio_destroy,
	.owner		=	THIS_MODULE,
};

static int __init skbprio_module_init(void)
{
	return register_qdisc(&skbprio_qdisc_ops);
}

static void __exit skbprio_module_exit(void)
{
	unregister_qdisc(&skbprio_qdisc_ops);
}

module_init(skbprio_module_init)
module_exit(skbprio_module_exit)

MODULE_LICENSE("GPL");