/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
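
/* A classifier's ->walk() implementation typically drives this structure.
 * A minimal sketch, assuming a hypothetical classifier that keeps its
 * filters on a list ("my_cls_head"/"my_cls_filter" are illustrative):
 *
 *	static void my_cls_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *				bool rtnl_held)
 *	{
 *		struct my_cls_head *head = rtnl_dereference(tp->root);
 *		struct my_cls_filter *f;
 *
 *		list_for_each_entry(f, &head->filters, link) {
 *			if (arg->count < arg->skip) {
 *				arg->count++;
 *				continue;
 *			}
 *			if (arg->fn(tp, f, arg) < 0) {
 *				arg->stop = 1;
 *				break;
 *			}
 *			arg->count++;
 *		}
 *	}
 */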

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
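
/* Typical lifecycle sketch, modelled on how classful qdiscs use these
 * helpers ("my_sched_data" and the my_* functions are hypothetical):
 *
 *	struct my_sched_data {
 *		struct tcf_block *block;
 *		struct tcf_proto __rcu *filter_list;
 *	};
 *
 *	static int my_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void my_qdisc_destroy(struct Qdisc *sch)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */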

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
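
/* Sketch of the common call site in a qdisc's classify path (my_* names
 * are illustrative; real callers also handle TC_ACT_STOLEN, TC_ACT_QUEUED
 * and friends):
 *
 *	static struct my_class *my_classify(struct sk_buff *skb,
 *					    struct Qdisc *sch)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *		struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *		struct tcf_result res;
 *
 *		if (tcf_classify(skb, fl, &res, false) == TC_ACT_SHOT)
 *			return NULL;
 *		return my_find_class(q, res.classid);
 *	}
 */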

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}
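
/* Sketch of the usual pairing in a classifier's ->change()/->delete()
 * paths (TCA_MYCLS_CLASSID and "f" are hypothetical):
 *
 *	if (tb[TCA_MYCLS_CLASSID]) {
 *		f->res.classid = nla_get_u32(tb[TCA_MYCLS_CLASSID]);
 *		tcf_bind_filter(tp, &f->res, base);
 *	}
 *
 * with a matching tcf_unbind_filter(tp, &f->res) when the filter is
 * removed or replaced.
 */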

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
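
/* Sketch of the usual lifecycle in a classifier's ->change() (the
 * TCA_MYCLS_* constants and "f" are hypothetical; error paths trimmed
 * to the relevant calls):
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_MYCLS_ACT, TCA_MYCLS_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true,
 *				extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */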

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
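
/* Sketch of the deferred-free pattern described above, as used by the
 * in-tree classifiers (my_* names are illustrative):
 *
 *	static void my_cls_destroy_work(struct work_struct *work)
 *	{
 *		struct my_cls_filter *f = container_of(to_rcu_work(work),
 *						       struct my_cls_filter,
 *						       rwork);
 *		rtnl_lock();
 *		__my_cls_destroy(f);
 *		rtnl_unlock();
 *	}
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, my_cls_destroy_work);
 *	else
 *		__my_cls_destroy(f);
 *
 * where __my_cls_destroy() is expected to call tcf_exts_destroy() and
 * tcf_exts_put_net() before freeing the filter.
 */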

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
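
/* Usage sketch: walk the attached actions, e.g. to test for a gact drop
 * ("i" and "a" are caller-supplied iterator and cursor):
 *
 *	struct tc_action *a;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, a, exts) {
 *		if (is_tcf_gact_shot(a))
 *			return true;
 *	}
 *	return false;
 */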

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
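
/* Sketch of the canonical match path in a classifier's ->classify()
 * (mirrors cls_basic and friends; my_cls_match() is hypothetical):
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		if (!my_cls_match(skb, f))
 *			continue;
 *		*res = f->res;
 *		r = tcf_exts_exec(skb, &f->exts, res);
 *		if (r < 0)
 *			continue;	(negative: filter unmatched)
 *		return r;
 *	}
 *	return -1;
 */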

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the match was created in
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logical relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel;
 * otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
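
/* Usage sketch, following how cls_basic wires ematches in
 * (TCA_MYCLS_EMATCHES and the f->ematches field are hypothetical):
 *
 *	at configuration time:
 *		err = tcf_em_tree_validate(tp, tb[TCA_MYCLS_EMATCHES],
 *					   &f->ematches);
 *		if (err < 0)
 *			return err;
 *
 *	at classify time:
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *
 *	on destruction:
 *		tcf_em_tree_destroy(&f->ematches);
 */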

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while (0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
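
/* Sketch: resolve the device once at configuration time, then compare per
 * packet (TCA_MYCLS_INDEV and f->ifindex are hypothetical):
 *
 *	configuration:
 *		ret = tcf_change_indev(net, tb[TCA_MYCLS_INDEV], extack);
 *		if (ret < 0)
 *			return ret;
 *		f->ifindex = ret;
 *
 *	classification:
 *		if (!tcf_match_indev(skb, f->ifindex))
 *			continue;
 */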

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts, bool rtnl_held);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
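
/* Driver-side sketch (hypothetical my_* driver): guard the offload
 * request before touching hardware, then dispatch on the command:
 *
 *	static int my_setup_cls_u32(struct net_device *dev,
 *				    struct tc_cls_u32_offload *cls)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(dev, &cls->common))
 *			return -EOPNOTSUPP;
 *
 *		switch (cls->command) {
 *		case TC_CLSU32_NEW_KNODE:
 *		case TC_CLSU32_REPLACE_KNODE:
 *			return my_add_knode(dev, &cls->knode);
 *		case TC_CLSU32_DELETE_KNODE:
 *			return my_del_knode(dev, cls->knode.handle);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */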

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
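
/* Sketch: classifiers fill the common header right before each offload
 * call; this mirrors the cls_u32 hardware path (skip_sw, flags and
 * in_hw_count correspond to the filter's own state):
 *
 *	struct tc_cls_u32_offload cls_u32 = {};
 *
 *	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
 *	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32,
 *			      skip_sw, &flags, &in_hw_count, true);
 */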

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
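
/* Because struct tc_mqprio_qopt is the first member, the same type_data
 * pointer supports both views; a driver sketch (my_* is hypothetical):
 *
 *	static int my_setup_mqprio(struct net_device *dev, void *type_data)
 *	{
 *		struct tc_mqprio_qopt_offload *mqprio = type_data;
 *		struct tc_mqprio_qopt *qopt = type_data;	(legacy view)
 *
 *		return my_program_tcs(dev, qopt->num_tc, mqprio->mode);
 *	}
 */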

/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};
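
/* Driver-side sketch (hypothetical my_* helpers), showing which union
 * member is valid for each command:
 *
 *	static int my_setup_red(struct net_device *dev,
 *				struct tc_red_qopt_offload *opt)
 *	{
 *		switch (opt->command) {
 *		case TC_RED_REPLACE:
 *			return my_red_replace(dev, opt->handle, &opt->set);
 *		case TC_RED_DESTROY:
 *			return my_red_destroy(dev, opt->handle);
 *		case TC_RED_STATS:
 *			return my_red_stats(dev, opt->handle, &opt->stats);
 *		case TC_RED_XSTATS:
 *			return my_red_xstats(dev, opt->handle, opt->xstats);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */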

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
	};
};

#endif