// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: -  Bug fix with grio mode
 *		       - a better single AvgQ mode with grio (WRED)
 *		       - A finer grained VQ dequeue based on suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 *  For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

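/*
 * GRED runs plain per-VQ RED, optionally coupled in one of two modes
 * (this summarizes the logic in gred_change_table_def() and
 * gred_enqueue() below):
 *
 * RIO mode: set when the user configures VQ priorities ("grio"). On
 * enqueue, a VQ adds the queue averages of all non-idling VQs with a
 * numerically smaller prio to its own average before running
 * red_action().
 *
 * WRED mode: additionally set by gred_wred_mode_check() when two VQs
 * share the same priority. All VQs then share a single set of
 * averaging state (gred_sched.wred_set) and use the Qdisc-wide
 * backlog, see gred_backlog().
 */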
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

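/*
 * Worked example for the check below: with DPs = 3 and prios
 * {tab[0]->prio = 2, tab[1]->prio = 2, tab[2]->prio = 3}, the scan
 * pairs tab[0] with tab[1] (both prio 2) and returns 1, so the caller
 * switches the table into WRED mode.
 */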
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be called too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

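/*
 * The VQ (DP) index rides in the low bits of skb->tc_index; with
 * MAX_DPs == 16 the mask is 0xf, so e.g. a tc_index of 0x1234 maps to
 * DP 0x1234 & 0xf == 4.
 */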
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

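/*
 * In WRED mode all VQs run against one shared average: the state lives
 * in table->wred_set and is copied into the active VQ before
 * red_calc_qavg() and copied back afterwards, see gred_enqueue().
 */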
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}

static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Per-VQ flags can only have been set if the global flags are 0 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}

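/*
 * Enqueue outline: map the skb to a VQ (falling back to the default
 * DP, or passing the packet through if no default is configured),
 * update the average via red_calc_qavg(), then act on red_action():
 * RED_DONT_MARK queues the packet, RED_PROB_MARK/RED_HARD_MARK either
 * ECN-mark it or drop it. Congestion drops return NET_XMIT_CN rather
 * than NET_XMIT_DROP so callers can tell policy drops from overflow.
 */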
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
					sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

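/*
 * Hardware offload: TC_GRED_REPLACE mirrors the whole VQ table to the
 * driver via ndo_setup_tc(). q->parms.qth_min/qth_max are kept scaled
 * by 2^Wlog (the fixed point used for the average, cf. red_set_parms()
 * in include/net/red.h), hence the shift down by Wlog to recover byte
 * thresholds for the device.
 */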
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload opt = {
		.command	= command,
		.handle		= sch->handle,
		.parent		= sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt.set.grio_on = gred_rio_mode(table);
		opt.set.wred_on = gred_wred_mode(table);
		opt.set.dp_cnt = table->DPs;
		opt.set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt.set.tab[i].present = true;
			opt.set.tab[i].limit = q->limit;
			opt.set.tab[i].prio = q->prio;
			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt.set.tab[i].is_ecn = gred_use_ecn(q);
			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt.set.tab[i].probability = q->parms.max_P;
			opt.set.tab[i].backlog = &q->backlog;
		}
		opt.set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}

static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	u64 bytes = 0, packets = 0;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++) {
		gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;
	}

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if the driver returns a failure, fold in the stats anyway;
	 * the offload may have ended, but the driver may still want to
	 * adjust the values.
	 */
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}
	_bstats_update(&sch->bstats, bytes, packets);

	kfree(hw_stats);
	return ret;
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

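/*
 * Table-level (re)configuration. Illustrative userspace trigger (tc
 * syntax not verified against any particular iproute2 version):
 *
 *	tc qdisc add dev eth0 root gred setup DPs 4 default 1 grio
 *
 * which arrives here as a TCA_GRED_DPS attribute carrying
 * struct tc_gred_sopt { .DPs = 4, .def_DP = 1, .grio = 1, ... }.
 */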
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}

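/*
 * Per-VQ (re)configuration; one DP's RED parameters arrive in
 * TCA_GRED_PARMS. Illustrative tc invocation (syntax not verified
 * against any particular iproute2 version):
 *
 *	tc qdisc change dev eth0 root gred limit 60KB min 15K max 45K \
 *		burst 20 avpkt 1000 bandwidth 10Mbit DP 1 probability 0.02 prio 2
 */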
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
};

static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}

static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}

static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}

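/*
 * If no TCA_GRED_LIMIT is given, the byte limit defaults to
 * tx_queue_len * psched_mtu(); e.g. txqueuelen 1000 on an Ethernet
 * device (psched_mtu = 1500 byte MTU + 14 byte hard_header_len = 1514)
 * gives a limit of 1514000 bytes, roughly 1.5 MB.
 */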
static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len *
			     psched_mtu(qdisc_dev(sch));

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}

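/*
 * The dump emits the VQ state twice: once as the legacy all-in-one
 * TCA_GRED_PARMS array of MAX_DPs entries, where an unused slot is
 * flagged by the out-of-range DP value MAX_DPs + i, and once as the
 * structured per-VQ TCA_GRED_VQ_LIST nest.
 */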
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack: fix at some point with a proper message.
			 * This is how we indicate to tc that there is no
			 * VQ at this DP.
			 */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in a more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
	gred_offload(sch, TC_GRED_DESTROY);
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");