/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* TC action not accessible from user space */
#define TC_ACT_REINSERT		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
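
/* Illustrative usage sketch (not part of the original header): a classifier
 * typically calls tcf_bind_filter() when a filter's classid is configured
 * and tcf_unbind_filter() when the filter is deleted.  The names below
 * (f, TCA_EXAMPLE_CLASSID) are hypothetical.
 *
 *	if (tb[TCA_EXAMPLE_CLASSID]) {
 *		f->res.classid = nla_get_u32(tb[TCA_EXAMPLE_CLASSID]);
 *		tcf_bind_filter(tp, &f->res, base);
 *	}
 *
 * and on deletion:
 *
 *	tcf_unbind_filter(tp, &f->res);
 */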

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
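
/* Illustrative usage sketch (not part of the original header): classifiers
 * initialize the exts of a freshly allocated filter before parsing actions
 * and must handle the -ENOMEM case.  my_filter is a hypothetical filter
 * struct; TCA_BASIC_ACT/TCA_BASIC_POLICE are the attribute IDs used by the
 * basic classifier.
 *
 *	struct my_filter *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	int err;
 *
 *	if (!f)
 *		return -ENOBUFS;
 *	err = tcf_exts_init(&f->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE);
 *	if (err < 0) {
 *		kfree(f);
 *		return err;
 *	}
 */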

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
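
/* Illustrative usage sketch (not part of the original header): the usual
 * pattern for deferring filter destruction via tcf_queue_work().  If the
 * netns is already being destroyed, cleanup must happen synchronously.
 * my_destroy_work and __my_destroy_filter (which ends by calling
 * tcf_exts_put_net()) are hypothetical helpers.
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, my_destroy_work);
 *	else
 *		__my_destroy_filter(f);
 */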

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
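
/* Illustrative usage sketch (not part of the original header): walking the
 * actions attached to a filter, e.g. when translating them for hardware
 * offload.  is_tcf_gact_shot() comes from <net/tc_act/tc_gact.h>;
 * my_setup_drop_rule() is hypothetical.
 *
 *	struct tc_action *a;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, a, exts) {
 *		if (is_tcf_gact_shot(a))
 *			return my_setup_drop_rule(priv, rule);
 *	}
 */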

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}
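
/* Illustrative usage sketch (not part of the original header): a driver
 * reporting hardware counters back to the software actions, typically from
 * its TC_CLSFLOWER_STATS handler.  my_hw_read_flow_counters() is a
 * hypothetical device helper.
 *
 *	u64 bytes, packets, lastuse;
 *
 *	my_hw_read_flow_counters(flow, &bytes, &packets, &lastuse);
 *	tcf_exts_stats_update(cls_flower->exts, bytes, packets, lastuse);
 */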

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->actions[0];
#else
	return NULL;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
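
/* Illustrative usage sketch (not part of the original header): the tail of a
 * classifier's ->classify() callback once a filter has matched.  A negative
 * return value means the filter is treated as unmatched and the next filter
 * is tried; any other value is propagated to the caller.
 *
 *	*res = f->res;
 *	err = tcf_exts_exec(skb, &f->exts, res);
 *	if (err < 0)
 *		continue;
 *	return err;
 */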

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logical relations and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel;
 * otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
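
/* Illustrative usage sketch (not part of the original header): how a
 * classifier wires up an ematch tree.  TCA_BASIC_EMATCHES is the attribute
 * used by the basic classifier; f is a hypothetical filter embedding a
 * struct tcf_ematch_tree.
 *
 * At configuration time:
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &f->ematches);
 *	if (err < 0)
 *		return err;
 *
 * At classification time:
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;
 */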

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
		case TCF_LAYER_LINK:
			return skb_mac_header(skb);
		case TCF_LAYER_NETWORK:
			return skb_network_header(skb);
		case TCF_LAYER_TRANSPORT:
			return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
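
/* Illustrative usage sketch (not part of the original header): an ematch
 * dereferencing packet data relative to one of the TCF_LAYER_* base pointers,
 * validating the offset first.  'off', 'len' and 'pattern' are hypothetical
 * values taken from the match configuration.
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + off;
 *
 *	if (!tcf_valid_offset(skb, ptr, len))
 *		return 0;
 *	return !memcmp(ptr, pattern, len);
 */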

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
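
/* Illustrative usage sketch (not part of the original header): a driver's
 * block callback rejecting offload requests early when TC offload is
 * disabled on the netdev or the rule is not on chain 0.  my_priv and
 * my_setup_flower() are hypothetical.
 *
 *	static int my_block_cb(enum tc_setup_type type, void *type_data,
 *			       void *cb_priv)
 *	{
 *		struct my_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER: {
 *			struct tc_cls_flower_offload *f = type_data;
 *
 *			if (!tc_cls_can_offload_and_chain0(priv->netdev,
 *							   &f->common))
 *				return -EOPNOTSUPP;
 *			return my_setup_flower(priv, f);
 *		}
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */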

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};
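
/* Illustrative usage sketch (not part of the original header): a classifier
 * filling in the common part of an offload request and passing it down to
 * the block callbacks.  The flower-style flow below mirrors a
 * TC_CLSFLOWER_REPLACE request; skip_sw would be tc_skip_sw(f->flags).
 *
 *	struct tc_cls_flower_offload cls_flower = {};
 *	int err;
 *
 *	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 *	cls_flower.command = TC_CLSFLOWER_REPLACE;
 *	cls_flower.cookie = (unsigned long) f;
 *
 *	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 *			       &cls_flower, skip_sw);
 */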

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that user space passes to the kernel
 * for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	struct tc_qopt_offload_stats stats;
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	bool is_ecn;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* If an offloaded prio qdisc is later changed to a non-offloadable
	 * configuration, it needs to update the backlog & qlen values to
	 * negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

#endif