/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* TC action not accessible from user space */
#define TC_ACT_REINSERT		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
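/* Typical usage (illustrative sketch, not part of this header): a
 * classifier module registers its ops table on load and unregisters it
 * on unload. "foo" and its callbacks are hypothetical placeholders.
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init init_foo(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit exit_foo(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 */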

enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
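
/* Illustrative sketch (hypothetical names): a classful qdisc acquires its
 * filter block in ->init() and releases it in ->destroy(), assuming its
 * private struct carries "block" and "filter_list" members:
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */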

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return tcf_block_q(block)->dev_queue->dev;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
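
/* Illustrative sketch (hypothetical names): a driver handling a block
 * bind/unbind typically registers one callback per block, using its priv
 * pointer as both the identity cookie and the callback argument:
 *
 *	static int foo_setup_tc_block(struct foo_priv *priv,
 *				      struct tc_block_offload *f)
 *	{
 *		switch (f->command) {
 *		case TC_BLOCK_BIND:
 *			return tcf_block_cb_register(f->block, foo_setup_cb,
 *						     priv, priv, f->extack);
 *		case TC_BLOCK_UNBIND:
 *			tcf_block_cb_unregister(f->block, foo_setup_cb, priv);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */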

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
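
/* Illustrative sketch: a classful qdisc's ->enqueue() path resolves the
 * target class with tcf_classify(). Hypothetical fragment; most action
 * codes and error handling trimmed, foo_find_class() is a placeholder:
 *
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	struct tcf_result res;
 *
 *	switch (tcf_classify(skb, fl, &res, false)) {
 *	case TC_ACT_SHOT:
 *		return qdisc_drop(skb, sch, to_free);
 *	case TC_ACT_OK:
 *	default:
 *		cl = foo_find_class(sch, res.classid);
 *		break;
 *	}
 */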

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
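
/* Illustrative sketch (hypothetical attribute names): a classifier's
 * ->change() path pairs tcf_exts_init() with tcf_exts_validate(),
 * declared further below, and destroys the exts on failure:
 *
 *	err = tcf_exts_init(&f->exts, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */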

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
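
/* Illustrative sketch (hypothetical names): classifiers use this pair to
 * choose between deferred and synchronous filter teardown:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, foo_delete_filter_work);
 *	else
 *		__foo_destroy_filter(f);
 *
 * ...where the work function calls tcf_exts_put_net() after releasing
 * the actions.
 */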

static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
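
/* Illustrative sketch (hypothetical names): a driver answering a *_STATS
 * offload command feeds its hardware counters back through this helper:
 *
 *	static int foo_flower_stats(struct foo_priv *priv,
 *				    struct tc_cls_flower_offload *f)
 *	{
 *		u64 bytes, packets, lastuse;
 *
 *		foo_hw_query_counters(priv, f->cookie, &bytes, &packets,
 *				      &lastuse);
 *		tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
 *		return 0;
 *	}
 */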

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on normal execution,
 * a negative number if the filter must be considered unmatched, or a
 * positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
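
/* Illustrative sketch: a classifier's ->classify() typically finishes a
 * successful match by publishing the filter's result and running its
 * extensions ("f" being the matched filter, a hypothetical name):
 *
 *	*res = f->res;
 *	return tcf_exts_exec(skb, &f->exts, res);
 */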

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace to operate in
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel;
 * otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
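
/* Illustrative sketch (hypothetical names): a classifier embedding an
 * ematch tree gates each candidate filter on it, much as cls_basic does;
 * @info may be NULL when no ematch needs packet offsets:
 *
 *	list_for_each_entry(f, &head->flist, link) {
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *		*res = f->res;
 *		return tcf_exts_exec(skb, &f->exts, res);
 *	}
 */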

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while (0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
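
/* Illustrative sketch (hypothetical names): drivers that only offload
 * chain 0 call this guard at the top of their classifier handlers:
 *
 *	static int foo_setup_tc_flower(struct net_device *dev,
 *				       struct tc_cls_flower_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(dev, &f->common))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */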

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
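
/* Illustrative sketch (hypothetical names, fields trimmed): a classifier
 * building a hardware offload request initializes the common part first,
 * then issues the command through the block callbacks:
 *
 *	struct tc_cls_flower_offload cls_flower = {};
 *
 *	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 *	cls_flower.command = TC_CLSFLOWER_REPLACE;
 *	cls_flower.cookie = (unsigned long)f;
 *	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 *			       &cls_flower, tc_skip_sw(f->flags));
 */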

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	struct tc_qopt_offload_stats stats;
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	bool is_ecn;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
	};
};
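
/* Illustrative sketch: sch_red-style qdiscs hand this descriptor to the
 * driver through ndo_setup_tc (fragment; hypothetical field sources,
 * error handling trimmed):
 *
 *	struct tc_red_qopt_offload opt = {
 *		.command	= TC_RED_REPLACE,
 *		.handle		= sch->handle,
 *		.parent		= sch->parent,
 *		.set		= {
 *			.min	= q->parms.qth_min >> q->parms.Wlog,
 *			.max	= q->parms.qth_max >> q->parms.Wlog,
 *			.is_ecn	= red_use_ecn(q),
 *			.qstats	= &sch->qstats,
 *		},
 *	};
 *
 *	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
 */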

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case a prio qdisc is offloaded and is now changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

#endif