/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp, struct tcf_result *res,
			 bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

static inline int tcf_classify_ingress(struct sk_buff *skb,
				       const struct tcf_block *ingress_block,
				       const struct tcf_proto *tp,
				       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

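/* Illustrative sketch (not part of this header): a classifier's ->change()
 * callback typically pairs tcf_exts_init() with tcf_exts_validate() and
 * tcf_exts_destroy(). TCA_FOO_ACT and TCA_FOO_POLICE are hypothetical
 * per-classifier TLV types; tb, est, ovr and extack come from the caller.
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
 *	if (err < 0)
 *		goto errout;
 *	...
 * errout:
 *	tcf_exts_destroy(&e);
 *	return err;
 */
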
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

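/* Illustrative sketch of the contract above (foo_* names are hypothetical):
 * when tcf_exts_get_net() fails, the netns is already going away, so the
 * filter must be freed synchronously instead of via tcf_queue_work().
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, foo_destroy_filter_work);
 *	else
 *		foo_destroy_filter(f);
 *
 * The deferred work function is then expected to call tcf_exts_put_net()
 * once cleanup has finished.
 */
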
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 drops, u64 lastuse,
		      u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, drops,
					lastuse, true);
		a->used_hw_stats = used_hw_stats;
		a->used_hw_stats_valid = used_hw_stats_valid;
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

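/* Illustrative sketch of the return-value handling described above, as seen
 * from a classifier's ->classify() loop (f is a hypothetical filter). A
 * negative value means "treat this filter as unmatched and keep looking";
 * anything else is the TC_ACT_* verdict to hand back to the caller:
 *
 *	*res = f->res;
 *	err = tcf_exts_exec(skb, &f->exts, res);
 *	if (err < 0)
 *		continue;
 *	return err;
 */
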
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

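/* Illustrative sketch of an ematch module filling in these ops and
 * registering them (TCF_EM_FOO and em_foo_match are hypothetical):
 *
 *	static struct tcf_ematch_ops em_foo_ops = {
 *		.kind	= TCF_EM_FOO,
 *		.match	= em_foo_match,
 *		.owner	= THIS_MODULE,
 *		.link	= LIST_HEAD_INIT(em_foo_ops.link)
 *	};
 *
 *	return tcf_em_register(&em_foo_ops);
 */
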
int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logic relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

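/* Illustrative use from a classifier's match path (f is hypothetical);
 * a classifier may pass NULL for @info when none of its ematches need
 * packet offset information:
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;
 */
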
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
		case TCF_LAYER_LINK:
			return skb_mac_header(skb);
		case TCF_LAYER_NETWORK:
			return skb_network_header(skb);
		case TCF_LAYER_TRANSPORT:
			return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
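	/* The (ptr <= ptr + len) term guards against pointer-arithmetic
	 * overflow for very large len values.
	 */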
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
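	/* Setting both SKIP_HW and SKIP_SW would leave no place to run the
	 * filter, so reject that combination.
	 */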
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_htb_command {
	/* Root */
	TC_HTB_CREATE, /* Initialize HTB offload. */
	TC_HTB_DESTROY, /* Destroy HTB offload. */

	/* Classes */
	/* Allocate qid and create leaf. */
	TC_HTB_LEAF_ALLOC_QUEUE,
	/* Convert leaf to inner, preserve and return qid, create new leaf. */
	TC_HTB_LEAF_TO_INNER,
	/* Delete leaf, while siblings remain. */
	TC_HTB_LEAF_DEL,
	/* Delete leaf, convert parent to leaf, preserving qid. */
	TC_HTB_LEAF_DEL_LAST,
	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
	TC_HTB_LEAF_DEL_LAST_FORCE,
	/* Modify parameters of a node. */
	TC_HTB_NODE_MODIFY,

	/* Class qdisc */
	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
	struct netlink_ext_ack *extack;
	enum tc_htb_command command;
	u16 classid;
	u32 parent_classid;
	u16 qid;
	u16 moved_qid;
	u64 rate;
	u64 ceil;
};

#define TC_HTB_CLASSID_ROOT U32_MAX

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#endif