/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/flow_offload.h>

/* TC action not accessible from user space */
#define TC_ACT_REINSERT		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

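/*
 * Example (illustrative sketch, not part of this API): a classifier's
 * ->walk() implementation drives the callback above, honouring the
 * skip/count/stop protocol.  struct my_filter and my_head() are
 * hypothetical:
 *
 *	static void my_cls_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *				bool rtnl_held)
 *	{
 *		struct my_filter *f;
 *
 *		list_for_each_entry(f, &my_head(tp)->filters, link) {
 *			if (arg->count < arg->skip)
 *				goto skip;
 *			if (arg->fn(tp, f, arg) < 0) {
 *				arg->stop = 1;
 *				return;
 *			}
 *	skip:
 *			arg->count++;
 *		}
 *	}
 */
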
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

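/*
 * Example (sketch): a classifier module registers its ops at load time
 * and unregisters them on exit; "mycls" and my_cls_ops are hypothetical:
 *
 *	static struct tcf_proto_ops my_cls_ops __read_mostly = {
 *		.kind	= "mycls",
 *		.owner	= THIS_MODULE,
 *		...
 *	};
 *
 *	static int __init my_cls_init(void)
 *	{
 *		return register_tcf_proto_ops(&my_cls_ops);
 *	}
 *
 *	static void __exit my_cls_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&my_cls_ops);
 *	}
 */
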
enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

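/*
 * Example (sketch): a classful qdisc typically acquires a block in its
 * ->init() and releases it in ->destroy(); struct my_sched_data is
 * hypothetical:
 *
 *	static int my_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void my_qdisc_destroy(struct Qdisc *sch)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */
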
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);

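/*
 * Example (sketch): a driver wires these up from its ndo_setup_tc()
 * handler when offered a block bind; my_setup_cb and struct my_priv are
 * hypothetical:
 *
 *	static int my_setup_block(struct my_priv *priv,
 *				  struct tc_block_offload *f)
 *	{
 *		if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case TC_BLOCK_BIND:
 *			return tcf_block_cb_register(f->block, my_setup_cb,
 *						     priv, priv, f->extack);
 *		case TC_BLOCK_UNBIND:
 *			tcf_block_cb_unregister(f->block, my_setup_cb, priv);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
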
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

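/*
 * Example (sketch, error handling abbreviated): a qdisc ->enqueue() path
 * classifying an skb and honouring the verdict; q->filter_list is assumed
 * to be the qdisc's filter chain:
 *
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	struct tcf_result res;
 *
 *	switch (tcf_classify(skb, fl, &res, false)) {
 *	case TC_ACT_SHOT:
 *		return qdisc_drop(skb, sch, to_free);
 *	case TC_ACT_OK:
 *		break;
 *	}
 */
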
#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	cl = __cls_set_class(&r->class, 0);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

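/*
 * Example (sketch): the usual life cycle in a classifier's ->change()
 * path; the TCA_MYCLS_* attribute names are hypothetical:
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, net, TCA_MYCLS_ACT, TCA_MYCLS_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
 *	if (err < 0)
 *		goto errout;
 *	...
 *	tcf_exts_change(&f->exts, &e);
 *	return 0;
 *
 *	errout:
 *	tcf_exts_destroy(&e);
 *	return err;
 */
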
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

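/*
 * Example (sketch): the common deferred-free pattern built on the two
 * helpers above plus tcf_queue_work(); struct my_filter, its rwork member
 * and the my_filter_* helpers are hypothetical:
 *
 *	static void my_filter_delete(struct tcf_proto *tp, struct my_filter *f)
 *	{
 *		tcf_unbind_filter(tp, &f->res);
 *		if (tcf_exts_get_net(&f->exts))
 *			tcf_queue_work(&f->rwork, my_filter_work);
 *		else
 *			my_filter_free(f);
 *	}
 *
 * my_filter_work() then calls tcf_exts_destroy() and tcf_exts_put_net()
 * and frees the filter outside the RCU/RTNL critical path.
 */
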
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

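/*
 * Example (sketch): after a TC_CLSFLOWER_STATS round trip through the
 * block callbacks, cls_flower feeds the counters the driver filled in
 * back into the actions, roughly:
 *
 *	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
 *	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
 *			      cls_flower.stats.pkts,
 *			      cls_flower.stats.lastused);
 */
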
/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

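/*
 * Example (sketch): a classifier's ->classify() typically returns the
 * result of tcf_exts_exec() once a filter matched; struct my_filter and
 * my_lookup() are hypothetical:
 *
 *	static int my_cls_classify(struct sk_buff *skb,
 *				   const struct tcf_proto *tp,
 *				   struct tcf_result *res)
 *	{
 *		struct my_filter *f = my_lookup(tp, skb);
 *
 *		if (!f)
 *			return -1;
 *		*res = f->res;
 *		return tcf_exts_exec(skb, &f->exts, res);
 *	}
 */
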
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the packet data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the match was created in
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logical relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not compiled into the kernel;
 * otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

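/*
 * Example (sketch): a classifier's match loop consults its ematch tree
 * before accepting a filter, as cls_basic does (the filter list and
 * struct members are illustrative):
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *		*res = f->res;
 *		return tcf_exts_exec(skb, &f->exts, res);
 *	}
 */
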
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while (0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	/* The last term guards against pointer arithmetic overflow. */
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

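/*
 * Example (sketch): a classifier resolves an INDEV attribute to an
 * ifindex while parsing netlink options, then tests it per packet; the
 * TCA_MYCLS_INDEV attribute name is hypothetical:
 *
 *	ret = tcf_change_indev(net, tb[TCA_MYCLS_INDEV], extack);
 *	if (ret < 0)
 *		return ret;
 *	f->ifindex = ret;
 *	...
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		continue;
 */
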
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

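/*
 * Example (sketch): a driver that only offloads chain 0 gates its
 * classifier setup path with the helper above; struct my_priv and the
 * helper name are hypothetical:
 *
 *	static int my_setup_tc_cls_flower(struct my_priv *priv,
 *					  struct tc_cls_flower_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */
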
static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	/* The XOR is zero only when both SKIP_HW and SKIP_SW are set. */
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

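/*
 * Example (sketch): a classifier populates the common header before
 * dispatching an offload request to the block callbacks, roughly as
 * cls_flower does:
 *
 *	struct tc_cls_flower_offload cls_flower = {};
 *
 *	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 *	cls_flower.command = TC_CLSFLOWER_REPLACE;
 *	cls_flower.cookie = (unsigned long)f;
 *	...
 *	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower,
 *			       tc_skip_sw(f->flags));
 */
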
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

static inline struct flow_rule *
tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
{
	return tc_flow_cmd->rule;
}

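/*
 * Example (sketch): the block callback of an offloading driver switches
 * on the flower command; the my_flower_* helpers are hypothetical:
 *
 *	static int my_setup_cb(enum tc_setup_type type, void *type_data,
 *			       void *cb_priv)
 *	{
 *		struct tc_cls_flower_offload *f = type_data;
 *
 *		if (type != TC_SETUP_CLSFLOWER)
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case TC_CLSFLOWER_REPLACE:
 *			return my_flower_replace(cb_priv, f);
 *		case TC_CLSFLOWER_DESTROY:
 *			my_flower_destroy(cb_priv, f);
 *			return 0;
 *		case TC_CLSFLOWER_STATS:
 *			return my_flower_stats(cb_priv, f);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
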
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

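/*
 * Example (sketch): a driver's ndo_setup_tc() handling an offloaded RED
 * qdisc (TC_SETUP_QDISC_RED); the my_red_* helpers are hypothetical:
 *
 *	static int my_setup_tc_red(struct net_device *dev,
 *				   struct tc_red_qopt_offload *p)
 *	{
 *		switch (p->command) {
 *		case TC_RED_REPLACE:
 *			return my_red_replace(dev, p->handle, &p->set);
 *		case TC_RED_DESTROY:
 *			my_red_destroy(dev, p->handle);
 *			return 0;
 *		case TC_RED_STATS:
 *			return my_red_stats(dev, p->handle, &p->stats);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
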
enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* If an offloaded prio qdisc is changed to a non-offloadable
	 * configuration, it needs to update the backlog & qlen values
	 * to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif /* __NET_PKT_CLS_H */