xref: /openbmc/linux/net/sched/act_api.c (revision e481ff3f)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

#ifdef CONFIG_INET
DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
#endif

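/* Note: when an action needs to (re)fragment the packets it transmits (the
 * conntrack action is one such user, to the best of our knowledge), it
 * enables the tcf_frag_xmit_count static key; transmissions then detour
 * through sch_frag_xmit_hook() instead of invoking the xmit callback
 * directly.
 */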
int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
#ifdef CONFIG_INET
	if (static_branch_unlikely(&tcf_frag_xmit_count))
		return sch_frag_xmit_hook(skb, xmit);
#endif

	return xmit(skb);
}
EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

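/* Swap in a new cookie atomically with xchg(); the old cookie, if any, is
 * freed only after an RCU grace period, so readers that fetched it under
 * rcu_read_lock() never see freed memory.
 */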
static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

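/* Validate a control action value. Plain verdicts must not exceed
 * TC_ACT_VALUE_MAX and extended opcodes must not exceed
 * TC_ACT_EXT_OPCODE_MAX. For goto-chain, additionally look up the target
 * chain and take a reference on it via tcf_chain_get_by_act().
 */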
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
{
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
	u32 chain_index;

	if (!opcode)
		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		ret = 0;
	if (ret) {
		NL_SET_ERR_MSG(extack, "invalid control action");
		goto end;
	}

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			ret = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
			goto end;
		}
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
		if (!*newchain) {
			ret = -ENOMEM;
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
		}
	}
end:
	return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);

struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
{
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
	return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);

/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Later readers cannot find us.
 */
static void free_tcf(struct tc_action *p)
{
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (chain)
		tcf_chain_put_by_act(chain);

	kfree(p);
}

static void tcf_action_cleanup(struct tc_action *p)
{
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

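/* Drop one reference. refcount_dec_and_mutex_lock() takes idrinfo->lock
 * only when the count hits zero, so the final holder removes the action
 * from the idr and frees it while concurrent lookups are excluded.
 * Returns 1 if the action was destroyed, 0 otherwise.
 */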
static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}

static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through the act
	 * API interface (classifiers always bind). The only case where an
	 * action with a positive reference count and a zero bind count can
	 * exist is when it was also created via the act API (unbinding the
	 * last classifier destroys an action only if it was created by a
	 * classifier). So the only way the bind count can change after the
	 * initial check is when an unbound action is destroyed by the act API
	 * while a classifier concurrently binds to an action with the same
	 * id. This results either in the creation of a new action (same
	 * behavior as before) or in reusing the existing action if the
	 * concurrent process increments the reference count before the action
	 * is deleted. Both scenarios are acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}

int tcf_idr_release(struct tc_action *a, bool bind)
{
	const struct tc_action_ops *ops = a->ops;
	int ret;

	ret = __tcf_idr_release(a, bind, false);
	if (ret == ACT_P_DELETED)
		module_put(ops->owner);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_release);

static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *act_cookie;
	u32 cookie_len = 0;

	rcu_read_lock();
	act_cookie = rcu_dereference(act->act_cookie);

	if (act_cookie)
		cookie_len = nla_total_size(act_cookie->len);
	rcu_read_unlock();

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN                     /* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)             /* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}

static int
tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	return 0;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

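/* Dump the actions of one idrinfo into skb. cb->args[0] carries the resume
 * index across dump calls, cb->args[2] carries the TCA_ROOT_FLAGS value and
 * cb->args[3] the "used since" jiffies filter, both set up by
 * tc_dump_action() below.
 */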
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, tmp, id) {
		index++;
		if (index < s_i)
			continue;
		if (IS_ERR(p))
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start_noflag(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
			tcf_action_dump_terse(skb, p, true) :
			tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

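/* "Unsafe" because the caller must already hold idrinfo->lock; this is the
 * flavor used by tcf_del_walker() while iterating the idr.
 */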
static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	nest = nla_nest_start_noflag(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		if (IS_ERR(p))
			continue;
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			mutex_unlock(&idrinfo->lock);
			goto nla_put_failure;
		}
	}
	mutex_unlock(&idrinfo->lock);

	ret = nla_put_u32(skb, TCA_FCNT, n_i);
	if (ret)
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (IS_ERR(p))
		p = NULL;
	else if (p)
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_search);

static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}

int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	__module_get(ops->owner);
	p->ops = ops;
	*a = p;
	return 0;
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
			      u32 flags)
{
	/* Set cpustats according to the action's flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Clean up an idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);

/* Check if an action with the specified index exists. If the action is
 * found, increment its reference and bind counters and return 1. Otherwise
 * insert a temporary error pointer (to prevent concurrent users from
 * inserting actions with the same index) and return 0.
 */
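
/* A minimal sketch of the typical calling sequence from an action's
 * ->init() callback (illustrative only, not lifted from any particular
 * action implementation; variable names are placeholders):
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (err == 1) {
 *		// action already existed; it is now referenced and bound
 *	} else if (!err) {
 *		// index reserved, actually create the action
 *		err = tcf_idr_create(tn, index, est, a, ops, bind,
 *				     false, flags);
 *		if (err) {
 *			tcf_idr_cleanup(tn, index);	// drop the ERR_PTR
 *			return err;
 *		}
 *	} else {
 *		return err;
 *	}
 */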

int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;

again:
	mutex_lock(&idrinfo->lock);
	if (*index) {
		p = idr_find(&idrinfo->action_idr, *index);
		if (IS_ERR(p)) {
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);
			goto again;
		}

		if (p) {
			refcount_inc(&p->tcfa_refcnt);
			if (bind)
				atomic_inc(&p->tcfa_bindcnt);
			*a = p;
			ret = 1;
		} else {
			*a = NULL;
			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
					    *index, GFP_KERNEL);
			if (!ret)
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		}
	} else {
		*index = 1;
		*a = NULL;
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
		if (!ret)
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
				    *index);
	}
	mutex_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);

void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;
	unsigned long tmp;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* TCA_ACT_MAX_PRIO is 32, so a jump can skip at most 32 actions */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
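
/* Run the action pipeline on skb. TC_ACT_PIPE continues to the next action,
 * TC_ACT_REPEAT re-runs the current action, TC_ACT_JUMP skips the encoded
 * number of actions (bounded by jmp_ttl to catch faulty graphs), and a
 * goto-chain verdict points res at the target chain's filter list. Any
 * other verdict stops the walk and is returned to the caller.
 */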
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				net_warn_ratelimited("can't go to NULL chain!\n");
				return TC_ACT_SHOT;
			}
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		actions[i] = NULL;
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}

/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops;

		if (!a)
			continue;
		ops = a->ops;
		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (tcf_action_dump_terse(skb, a, false))
		goto nla_put_failure;

	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->used_hw_stats_valid &&
	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->tcfa_flags &&
	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
			       a->tcfa_flags, a->tcfa_flags))
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref, bool terse)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		nest = nla_nest_start_noflag(skb, i + 1);
		if (nest == NULL)
			goto nla_put_failure;
		err = terse ? tcf_action_dump_terse(skb, a, false) :
			tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}

static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
{
	struct nla_bitfield32 hw_stats_bf;

	/* If the user did not pass the attr, that means they do not
	 * care about the type. Return "any" in that case, which means
	 * all supported types are set.
	 */
	if (!hw_stats_attr)
		return TCA_ACT_HW_STATS_ANY;
	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
	return hw_stats_bf.value;
}

static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND]		= { .type = NLA_STRING },
	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
				    .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};

void tcf_idr_insert_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		struct tcf_idrinfo *idrinfo;

		if (!a)
			continue;
		idrinfo = a->idrinfo;
		mutex_lock(&idrinfo->lock);
		/* Replace the ERR_PTR(-EBUSY) inserted by tcf_idr_check_alloc
		 * if the action was just created; otherwise this is a nop.
		 */
		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
		mutex_unlock(&idrinfo->lock);
	}
}

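/* Resolve an action kind to its registered ops, auto-loading the
 * "act_<kind>" module if needed. Because the module load may drop and
 * re-take RTNL, a successful load is reported as -EAGAIN so the caller
 * replays the whole request against a consistent state.
 */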
struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
					 bool rtnl_held,
					 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			return ERR_PTR(err);
		}
		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(err);
		}
	} else {
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(-EINVAL);
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			module_put(a_o->owner);
			return ERR_PTR(-EAGAIN);
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		return ERR_PTR(-ENOENT);
	}

	return a_o;
}

struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind,
				    struct tc_action_ops *a_o, int *init_res,
				    bool rtnl_held,
				    struct netlink_ext_ack *extack)
{
	struct nla_bitfield32 flags = { 0, 0 };
	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_cookie *cookie = NULL;
	struct tc_action *a;
	int err;

	/* backward compatibility for policer */
	if (name == NULL) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		if (tb[TCA_ACT_COOKIE]) {
			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
		if (tb[TCA_ACT_FLAGS])
			flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);

		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
				rtnl_held, tp, flags.value, extack);
	} else {
		err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
				tp, flags.value, extack);
	}
	if (err < 0)
		goto err_out;
	*init_res = err;

	if (!name && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	if (!name)
		a->hw_stats = hw_stats;

	return a;

err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}

/* Returns the number of initialized actions or a negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct tc_action *actions[], int init_res[], size_t *attr_size,
		    bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		struct tc_action_ops *a_o;

		a_o = tc_action_load_ops(name, tb[i], rtnl_held, extack);
		if (IS_ERR(a_o)) {
			err = PTR_ERR(a_o);
			goto err_mod;
		}
		ops[i - 1] = a_o;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
					ops[i - 1], &init_res[i - 1], rtnl_held,
					extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
	}

	/* We have to commit them all together, because if any error happened
	 * in between, we could not handle the failure gracefully.
	 */
	tcf_idr_insert_many(actions);

	*attr_size = tcf_action_full_attrs_size(sz);
	err = i - 1;
	goto err_mod;

err:
	tcf_action_destroy(actions, bind);
err_mod:
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		if (ops[i])
			module_put(ops[i]->owner);
	}
	return err;
}

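/* Update an action's byte/packet/drop counters. The lockless per-CPU
 * counters are used when they were allocated; otherwise the fields embedded
 * in the action itself are updated (callers are expected to serialize,
 * typically under the action's tcfa_lock).
 */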
void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, bool hw)
{
	if (a->cpu_bstats) {
		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		this_cpu_ptr(a->cpu_qstats)->drops += drops;

		if (hw)
			_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
					   bytes, packets);
		return;
	}

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	a->tcfa_qstats.drops += drops;
	if (hw)
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);

int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

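/* Build a TC action netlink message: a tcamsg header followed by a
 * TCA_ACT_TAB nest carrying one nested attribute per action.
 */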
static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct tc_action *actions[], int event,
	       struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 1) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
		goto err_out;
	}
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
		goto err_mod;
	}

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}

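/* Flush (delete) all actions of one kind, roughly what
 * "tc actions flush action <kind>" issues from userspace (command shown
 * for illustration). The notification built here reports the kind and the
 * number of actions removed (TCA_FCNT).
 */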
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* someone is trying to flush an unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently, so we must save their
		 * type and id to search again after the reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else  {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	tcf_action_put_many(actions);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int loop, ret, i;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
	int init_res[TCA_ACT_MAX_PRIO] = {};

	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
				      actions, init_res, &attr_size, true, extack);
		if (ret != -EAGAIN)
			break;
	}

	if (ret < 0)
		return ret;
	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);

	/* only put existing actions */
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
		if (init_res[i] == ACT_P_CREATED)
			actions[i] = NULL;
	tcf_action_put_many(actions);

	return ret;
}

static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
						 TCA_ACT_FLAG_TERSE_DUMP),
	[TCA_ROOT_TIME_DELTA]      = { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = NETLINK_CB(skb).portid;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We are going to assume all other flags
		 * imply create only if it doesn't exist.
		 * Note that CREATE | EXCL implies that,
		 * but since we want to avoid ambiguity (e.g. when flags
		 * is zero) we just set this.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
				     extack);
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}

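/* Pull the TCA_ACT_KIND attribute of the first action out of a dump
 * request, so that tc_dump_action() can route the dump to the matching
 * action ops.
 */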
static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);