xref: /openbmc/linux/net/sched/act_api.c (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   * net/sched/act_api.c	Packet action API.
4   *
5   * Author:	Jamal Hadi Salim
6   */
7  
8  #include <linux/types.h>
9  #include <linux/kernel.h>
10  #include <linux/string.h>
11  #include <linux/errno.h>
12  #include <linux/slab.h>
13  #include <linux/skbuff.h>
14  #include <linux/init.h>
15  #include <linux/kmod.h>
16  #include <linux/err.h>
17  #include <linux/module.h>
18  #include <net/net_namespace.h>
19  #include <net/sock.h>
20  #include <net/sch_generic.h>
21  #include <net/pkt_cls.h>
22  #include <net/tc_act/tc_pedit.h>
23  #include <net/act_api.h>
24  #include <net/netlink.h>
25  #include <net/flow_offload.h>
26  #include <net/tc_wrapper.h>
27  
28  #ifdef CONFIG_INET
29  DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
30  EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
31  #endif
32  
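/* tcf_dev_queue_xmit() is the common transmit wrapper for actions that
 * send packets out of a device (e.g. act_mirred). When any action has
 * enabled the tcf_frag_xmit_count static key (CONFIG_INET only), the
 * transmit is diverted through sch_frag_xmit_hook() so oversized packets
 * can be fragmented first; otherwise the supplied xmit() callback runs
 * directly.
 */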
33  int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
34  {
35  #ifdef CONFIG_INET
36  	if (static_branch_unlikely(&tcf_frag_xmit_count))
37  		return sch_frag_xmit_hook(skb, xmit);
38  #endif
39  
40  	return xmit(skb);
41  }
42  EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);
43  
44  static void tcf_action_goto_chain_exec(const struct tc_action *a,
45  				       struct tcf_result *res)
46  {
47  	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
48  
49  	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
50  }
51  
52  static void tcf_free_cookie_rcu(struct rcu_head *p)
53  {
54  	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
55  
56  	kfree(cookie->data);
57  	kfree(cookie);
58  }
59  
60  static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
61  				  struct tc_cookie *new_cookie)
62  {
63  	struct tc_cookie *old;
64  
65  	old = unrcu_pointer(xchg(old_cookie, RCU_INITIALIZER(new_cookie)));
66  	if (old)
67  		call_rcu(&old->rcu, tcf_free_cookie_rcu);
68  }
69  
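/* Validates the control-action part of an action's tcfa_action value:
 * plain values must not exceed TC_ACT_VALUE_MAX, and extended opcodes
 * must not exceed TC_ACT_EXT_OPCODE_MAX. For TC_ACT_GOTO_CHAIN the
 * destination chain index is decoded from the low bits and the chain is
 * looked up (or created) on the classifier's block via
 * tcf_chain_get_by_act().
 */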
70  int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
71  			     struct tcf_chain **newchain,
72  			     struct netlink_ext_ack *extack)
73  {
74  	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
75  	u32 chain_index;
76  
77  	if (!opcode)
78  		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
79  	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
80  		ret = 0;
81  	if (ret) {
82  		NL_SET_ERR_MSG(extack, "invalid control action");
83  		goto end;
84  	}
85  
86  	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
87  		chain_index = action & TC_ACT_EXT_VAL_MASK;
88  		if (!tp || !newchain) {
89  			ret = -EINVAL;
90  			NL_SET_ERR_MSG(extack,
91  				       "can't goto NULL proto/chain");
92  			goto end;
93  		}
94  		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
95  		if (!*newchain) {
96  			ret = -ENOMEM;
97  			NL_SET_ERR_MSG(extack,
98  				       "can't allocate goto_chain");
99  		}
100  	}
101  end:
102  	return ret;
103  }
104  EXPORT_SYMBOL(tcf_action_check_ctrlact);
105  
106  struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
107  					 struct tcf_chain *goto_chain)
108  {
109  	a->tcfa_action = action;
110  	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
111  	return goto_chain;
112  }
113  EXPORT_SYMBOL(tcf_action_set_ctrlact);
114  
115  /* XXX: For standalone actions, we don't need an RCU grace period either,
116   * because actions are always connected to filters and filters are already
117   * destroyed in RCU callbacks, so after an RCU grace period actions are
118   * already disconnected from filters. Later readers cannot find us.
119   */
120  static void free_tcf(struct tc_action *p)
121  {
122  	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
123  
124  	free_percpu(p->cpu_bstats);
125  	free_percpu(p->cpu_bstats_hw);
126  	free_percpu(p->cpu_qstats);
127  
128  	tcf_set_action_cookie(&p->user_cookie, NULL);
129  	if (chain)
130  		tcf_chain_put_by_act(chain);
131  
132  	kfree(p);
133  }
134  
135  static void offload_action_hw_count_set(struct tc_action *act,
136  					u32 hw_count)
137  {
138  	act->in_hw_count = hw_count;
139  }
140  
141  static void offload_action_hw_count_inc(struct tc_action *act,
142  					u32 hw_count)
143  {
144  	act->in_hw_count += hw_count;
145  }
146  
147  static void offload_action_hw_count_dec(struct tc_action *act,
148  					u32 hw_count)
149  {
150  	act->in_hw_count = act->in_hw_count > hw_count ?
151  			   act->in_hw_count - hw_count : 0;
152  }
153  
154  static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
155  {
156  	if (is_tcf_pedit(act))
157  		return tcf_pedit_nkeys(act);
158  	else
159  		return 1;
160  }
161  
162  static bool tc_act_skip_hw(u32 flags)
163  {
164  	return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
165  }
166  
167  static bool tc_act_skip_sw(u32 flags)
168  {
169  	return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
170  }
171  
172  /* SKIP_HW and SKIP_SW are mutually exclusive flags; the XOR below is zero (invalid) only when both are set. */
173  static bool tc_act_flags_valid(u32 flags)
174  {
175  	flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;
176  
177  	return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
178  }
179  
180  static int offload_action_init(struct flow_offload_action *fl_action,
181  			       struct tc_action *act,
182  			       enum offload_act_command  cmd,
183  			       struct netlink_ext_ack *extack)
184  {
185  	int err;
186  
187  	fl_action->extack = extack;
188  	fl_action->command = cmd;
189  	fl_action->index = act->tcfa_index;
190  	fl_action->cookie = (unsigned long)act;
191  
192  	if (act->ops->offload_act_setup) {
193  		spin_lock_bh(&act->tcfa_lock);
194  		err = act->ops->offload_act_setup(act, fl_action, NULL,
195  						  false, extack);
196  		spin_unlock_bh(&act->tcfa_lock);
197  		return err;
198  	}
199  
200  	return -EOPNOTSUPP;
201  }
202  
203  static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
204  				     u32 *hw_count)
205  {
206  	int err;
207  
208  	err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
209  					  fl_act, NULL, NULL);
210  	if (err < 0)
211  		return err;
212  
213  	if (hw_count)
214  		*hw_count = err;
215  
216  	return 0;
217  }
218  
219  static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
220  					u32 *hw_count,
221  					flow_indr_block_bind_cb_t *cb,
222  					void *cb_priv)
223  {
224  	int err;
225  
226  	err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
227  	if (err < 0)
228  		return err;
229  
230  	if (hw_count)
231  		*hw_count = 1;
232  
233  	return 0;
234  }
235  
236  static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
237  				  u32 *hw_count,
238  				  flow_indr_block_bind_cb_t *cb,
239  				  void *cb_priv)
240  {
241  	return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
242  						 cb, cb_priv) :
243  		    tcf_action_offload_cmd_ex(fl_act, hw_count);
244  }
245  
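/* Offload a single action to hardware. SKIP_HW actions return early
 * without contacting any driver. SKIP_SW actions must actually land in
 * hardware, so failing to offload them yields -EINVAL. With a callback
 * (the reoffload path) the hardware count is incremented; without one it
 * is set to the number of drivers that accepted the action.
 */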
246  static int tcf_action_offload_add_ex(struct tc_action *action,
247  				     struct netlink_ext_ack *extack,
248  				     flow_indr_block_bind_cb_t *cb,
249  				     void *cb_priv)
250  {
251  	bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
252  	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
253  		[0] = action,
254  	};
255  	struct flow_offload_action *fl_action;
256  	u32 in_hw_count = 0;
257  	int num, err = 0;
258  
259  	if (tc_act_skip_hw(action->tcfa_flags))
260  		return 0;
261  
262  	num = tcf_offload_act_num_actions_single(action);
263  	fl_action = offload_action_alloc(num);
264  	if (!fl_action)
265  		return -ENOMEM;
266  
267  	err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
268  	if (err)
269  		goto fl_err;
270  
271  	err = tc_setup_action(&fl_action->action, actions, 0, extack);
272  	if (err) {
273  		NL_SET_ERR_MSG_MOD(extack,
274  				   "Failed to setup tc actions for offload");
275  		goto fl_err;
276  	}
277  
278  	err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
279  	if (!err)
280  		cb ? offload_action_hw_count_inc(action, in_hw_count) :
281  		     offload_action_hw_count_set(action, in_hw_count);
282  
283  	if (skip_sw && !tc_act_in_hw(action))
284  		err = -EINVAL;
285  
286  	tc_cleanup_offload_action(&fl_action->action);
287  
288  fl_err:
289  	kfree(fl_action);
290  
291  	return err;
292  }
293  
294  /* offload the tc action after it is inserted */
295  static int tcf_action_offload_add(struct tc_action *action,
296  				  struct netlink_ext_ack *extack)
297  {
298  	return tcf_action_offload_add_ex(action, extack, NULL, NULL);
299  }
300  
301  int tcf_action_update_hw_stats(struct tc_action *action)
302  {
303  	struct flow_offload_action fl_act = {};
304  	int err;
305  
306  	err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
307  	if (err)
308  		return err;
309  
310  	err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
311  	if (!err) {
312  		preempt_disable();
313  		tcf_action_stats_update(action, fl_act.stats.bytes,
314  					fl_act.stats.pkts,
315  					fl_act.stats.drops,
316  					fl_act.stats.lastused,
317  					true);
318  		preempt_enable();
319  		action->used_hw_stats = fl_act.stats.used_hw_stats;
320  		action->used_hw_stats_valid = true;
321  	} else {
322  		return -EOPNOTSUPP;
323  	}
324  
325  	return 0;
326  }
327  EXPORT_SYMBOL(tcf_action_update_hw_stats);
328  
329  static int tcf_action_offload_del_ex(struct tc_action *action,
330  				     flow_indr_block_bind_cb_t *cb,
331  				     void *cb_priv)
332  {
333  	struct flow_offload_action fl_act = {};
334  	u32 in_hw_count = 0;
335  	int err = 0;
336  
337  	if (!tc_act_in_hw(action))
338  		return 0;
339  
340  	err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
341  	if (err)
342  		return err;
343  
344  	err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
345  	if (err < 0)
346  		return err;
347  
348  	if (!cb && action->in_hw_count != in_hw_count)
349  		return -EINVAL;
350  
351  	/* do not need to update hw state when deleting action */
352  	if (cb && in_hw_count)
353  		offload_action_hw_count_dec(action, in_hw_count);
354  
355  	return 0;
356  }
357  
358  static int tcf_action_offload_del(struct tc_action *action)
359  {
360  	return tcf_action_offload_del_ex(action, NULL, NULL);
361  }
362  
363  static void tcf_action_cleanup(struct tc_action *p)
364  {
365  	tcf_action_offload_del(p);
366  	if (p->ops->cleanup)
367  		p->ops->cleanup(p);
368  
369  	gen_kill_estimator(&p->tcfa_rate_est);
370  	free_tcf(p);
371  }
372  
373  static int __tcf_action_put(struct tc_action *p, bool bind)
374  {
375  	struct tcf_idrinfo *idrinfo = p->idrinfo;
376  
377  	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
378  		if (bind)
379  			atomic_dec(&p->tcfa_bindcnt);
380  		idr_remove(&idrinfo->action_idr, p->tcfa_index);
381  		mutex_unlock(&idrinfo->lock);
382  
383  		tcf_action_cleanup(p);
384  		return 1;
385  	}
386  
387  	if (bind)
388  		atomic_dec(&p->tcfa_bindcnt);
389  
390  	return 0;
391  }
392  
393  static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
394  {
395  	int ret = 0;
396  
397  	/* Release with strict==1 and bind==0 is only called through the act
398  	 * API interface (classifiers always bind). The only case when an
399  	 * action with a positive reference count and zero bind count can
400  	 * exist is when it was also created via the act API (unbinding the
401  	 * last classifier destroys the action if it was created by a
402  	 * classifier). So the only case when the bind count can change after
403  	 * the initial check is when an unbound action is destroyed by the act
404  	 * API while a classifier concurrently binds to an action with the
405  	 * same id. This results either in creation of a new action (same
406  	 * behavior as before) or in reuse of the existing action if the
407  	 * concurrent process increments the refcount first. Both scenarios are acceptable.
408  	 */
409  	if (p) {
410  		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
411  			return -EPERM;
412  
413  		if (__tcf_action_put(p, bind))
414  			ret = ACT_P_DELETED;
415  	}
416  
417  	return ret;
418  }
419  
420  int tcf_idr_release(struct tc_action *a, bool bind)
421  {
422  	const struct tc_action_ops *ops = a->ops;
423  	int ret;
424  
425  	ret = __tcf_idr_release(a, bind, false);
426  	if (ret == ACT_P_DELETED)
427  		module_put(ops->owner);
428  	return ret;
429  }
430  EXPORT_SYMBOL(tcf_idr_release);
431  
432  static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
433  {
434  	struct tc_cookie *user_cookie;
435  	u32 cookie_len = 0;
436  
437  	rcu_read_lock();
438  	user_cookie = rcu_dereference(act->user_cookie);
439  
440  	if (user_cookie)
441  		cookie_len = nla_total_size(user_cookie->len);
442  	rcu_read_unlock();
443  
444  	return  nla_total_size(0) /* action number nested */
445  		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
446  		+ cookie_len /* TCA_ACT_COOKIE */
447  		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
448  		+ nla_total_size(0) /* TCA_ACT_STATS nested */
449  		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
450  		/* TCA_STATS_BASIC */
451  		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
452  		/* TCA_STATS_PKT64 */
453  		+ nla_total_size_64bit(sizeof(u64))
454  		/* TCA_STATS_QUEUE */
455  		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
456  		+ nla_total_size(0) /* TCA_ACT_OPTIONS nested */
457  		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
458  }
459  
460  static size_t tcf_action_full_attrs_size(size_t sz)
461  {
462  	return NLMSG_HDRLEN                     /* struct nlmsghdr */
463  		+ sizeof(struct tcamsg)
464  		+ nla_total_size(0)             /* TCA_ACT_TAB nested */
465  		+ sz;
466  }
467  
468  static size_t tcf_action_fill_size(const struct tc_action *act)
469  {
470  	size_t sz = tcf_action_shared_attrs_size(act);
471  
472  	if (act->ops->get_fill_size)
473  		return act->ops->get_fill_size(act) + sz;
474  	return sz;
475  }
476  
477  static int
478  tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
479  {
480  	unsigned char *b = skb_tail_pointer(skb);
481  	struct tc_cookie *cookie;
482  
483  	if (nla_put_string(skb, TCA_ACT_KIND, a->ops->kind))
484  		goto nla_put_failure;
485  	if (tcf_action_copy_stats(skb, a, 0))
486  		goto nla_put_failure;
487  	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
488  		goto nla_put_failure;
489  
490  	rcu_read_lock();
491  	cookie = rcu_dereference(a->user_cookie);
492  	if (cookie) {
493  		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
494  			rcu_read_unlock();
495  			goto nla_put_failure;
496  		}
497  	}
498  	rcu_read_unlock();
499  
500  	return 0;
501  
502  nla_put_failure:
503  	nlmsg_trim(skb, b);
504  	return -1;
505  }
506  
507  static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
508  			   struct netlink_callback *cb)
509  {
510  	int err = 0, index = -1, s_i = 0, n_i = 0;
511  	u32 act_flags = cb->args[2];
512  	unsigned long jiffy_since = cb->args[3];
513  	struct nlattr *nest;
514  	struct idr *idr = &idrinfo->action_idr;
515  	struct tc_action *p;
516  	unsigned long id = 1;
517  	unsigned long tmp;
518  
519  	mutex_lock(&idrinfo->lock);
520  
521  	s_i = cb->args[0];
522  
523  	idr_for_each_entry_ul(idr, p, tmp, id) {
524  		index++;
525  		if (index < s_i)
526  			continue;
527  		if (IS_ERR(p))
528  			continue;
529  
530  		if (jiffy_since &&
531  		    time_after(jiffy_since,
532  			       (unsigned long)p->tcfa_tm.lastuse))
533  			continue;
534  
535  		tcf_action_update_hw_stats(p);
536  
537  		nest = nla_nest_start_noflag(skb, n_i);
538  		if (!nest) {
539  			index--;
540  			goto nla_put_failure;
541  		}
542  		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
543  			tcf_action_dump_terse(skb, p, true) :
544  			tcf_action_dump_1(skb, p, 0, 0);
545  		if (err < 0) {
546  			index--;
547  			nlmsg_trim(skb, nest);
548  			goto done;
549  		}
550  		nla_nest_end(skb, nest);
551  		n_i++;
552  		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
553  		    n_i >= TCA_ACT_MAX_PRIO)
554  			goto done;
555  	}
556  done:
557  	if (index >= 0)
558  		cb->args[0] = index + 1;
559  
560  	mutex_unlock(&idrinfo->lock);
561  	if (n_i) {
562  		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
563  			cb->args[1] = n_i;
564  	}
565  	return n_i;
566  
567  nla_put_failure:
568  	nla_nest_cancel(skb, nest);
569  	goto done;
570  }
571  
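/* The "unsafe" release variant touches the action idr without taking
 * idrinfo->lock itself; callers such as tcf_del_walker() below must
 * already hold that mutex.
 */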
572  static int tcf_idr_release_unsafe(struct tc_action *p)
573  {
574  	if (atomic_read(&p->tcfa_bindcnt) > 0)
575  		return -EPERM;
576  
577  	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
578  		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
579  		tcf_action_cleanup(p);
580  		return ACT_P_DELETED;
581  	}
582  
583  	return 0;
584  }
585  
586  static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
587  			  const struct tc_action_ops *ops,
588  			  struct netlink_ext_ack *extack)
589  {
590  	struct nlattr *nest;
591  	int n_i = 0;
592  	int ret = -EINVAL;
593  	struct idr *idr = &idrinfo->action_idr;
594  	struct tc_action *p;
595  	unsigned long id = 1;
596  	unsigned long tmp;
597  
598  	nest = nla_nest_start_noflag(skb, 0);
599  	if (nest == NULL)
600  		goto nla_put_failure;
601  	if (nla_put_string(skb, TCA_ACT_KIND, ops->kind))
602  		goto nla_put_failure;
603  
604  	ret = 0;
605  	mutex_lock(&idrinfo->lock);
606  	idr_for_each_entry_ul(idr, p, tmp, id) {
607  		if (IS_ERR(p))
608  			continue;
609  		ret = tcf_idr_release_unsafe(p);
610  		if (ret == ACT_P_DELETED)
611  			module_put(ops->owner);
612  		else if (ret < 0)
613  			break;
614  		n_i++;
615  	}
616  	mutex_unlock(&idrinfo->lock);
617  	if (ret < 0) {
618  		if (n_i)
619  			NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
620  		else
621  			goto nla_put_failure;
622  	}
623  
624  	ret = nla_put_u32(skb, TCA_FCNT, n_i);
625  	if (ret)
626  		goto nla_put_failure;
627  	nla_nest_end(skb, nest);
628  
629  	return n_i;
630  nla_put_failure:
631  	nla_nest_cancel(skb, nest);
632  	return ret;
633  }
634  
635  int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
636  		       struct netlink_callback *cb, int type,
637  		       const struct tc_action_ops *ops,
638  		       struct netlink_ext_ack *extack)
639  {
640  	struct tcf_idrinfo *idrinfo = tn->idrinfo;
641  
642  	if (type == RTM_DELACTION) {
643  		return tcf_del_walker(idrinfo, skb, ops, extack);
644  	} else if (type == RTM_GETACTION) {
645  		return tcf_dump_walker(idrinfo, skb, cb);
646  	} else {
647  		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
648  		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
649  		return -EINVAL;
650  	}
651  }
652  EXPORT_SYMBOL(tcf_generic_walker);
653  
654  int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
655  {
656  	struct tcf_idrinfo *idrinfo = tn->idrinfo;
657  	struct tc_action *p;
658  
659  	mutex_lock(&idrinfo->lock);
660  	p = idr_find(&idrinfo->action_idr, index);
661  	if (IS_ERR(p))
662  		p = NULL;
663  	else if (p)
664  		refcount_inc(&p->tcfa_refcnt);
665  	mutex_unlock(&idrinfo->lock);
666  
667  	if (p) {
668  		*a = p;
669  		return true;
670  	}
671  	return false;
672  }
673  EXPORT_SYMBOL(tcf_idr_search);
674  
675  static int __tcf_generic_walker(struct net *net, struct sk_buff *skb,
676  				struct netlink_callback *cb, int type,
677  				const struct tc_action_ops *ops,
678  				struct netlink_ext_ack *extack)
679  {
680  	struct tc_action_net *tn = net_generic(net, ops->net_id);
681  
682  	if (unlikely(ops->walk))
683  		return ops->walk(net, skb, cb, type, ops, extack);
684  
685  	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
686  }
687  
688  static int __tcf_idr_search(struct net *net,
689  			    const struct tc_action_ops *ops,
690  			    struct tc_action **a, u32 index)
691  {
692  	struct tc_action_net *tn = net_generic(net, ops->net_id);
693  
694  	if (unlikely(ops->lookup))
695  		return ops->lookup(net, a, index);
696  
697  	return tcf_idr_search(tn, a, index);
698  }
699  
700  static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
701  {
702  	struct tc_action *p;
703  	int ret = 0;
704  
705  	mutex_lock(&idrinfo->lock);
706  	p = idr_find(&idrinfo->action_idr, index);
707  	if (!p) {
708  		mutex_unlock(&idrinfo->lock);
709  		return -ENOENT;
710  	}
711  
712  	if (!atomic_read(&p->tcfa_bindcnt)) {
713  		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
714  			struct module *owner = p->ops->owner;
715  
716  			WARN_ON(p != idr_remove(&idrinfo->action_idr,
717  						p->tcfa_index));
718  			mutex_unlock(&idrinfo->lock);
719  
720  			tcf_action_cleanup(p);
721  			module_put(owner);
722  			return 0;
723  		}
724  		ret = 0;
725  	} else {
726  		ret = -EPERM;
727  	}
728  
729  	mutex_unlock(&idrinfo->lock);
730  	return ret;
731  }
732  
733  int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
734  		   struct tc_action **a, const struct tc_action_ops *ops,
735  		   int bind, bool cpustats, u32 flags)
736  {
737  	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
738  	struct tcf_idrinfo *idrinfo = tn->idrinfo;
739  	int err = -ENOMEM;
740  
741  	if (unlikely(!p))
742  		return -ENOMEM;
743  	refcount_set(&p->tcfa_refcnt, 1);
744  	if (bind)
745  		atomic_set(&p->tcfa_bindcnt, 1);
746  
747  	if (cpustats) {
748  		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
749  		if (!p->cpu_bstats)
750  			goto err1;
751  		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
752  		if (!p->cpu_bstats_hw)
753  			goto err2;
754  		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
755  		if (!p->cpu_qstats)
756  			goto err3;
757  	}
758  	gnet_stats_basic_sync_init(&p->tcfa_bstats);
759  	gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
760  	spin_lock_init(&p->tcfa_lock);
761  	p->tcfa_index = index;
762  	p->tcfa_tm.install = jiffies;
763  	p->tcfa_tm.lastuse = jiffies;
764  	p->tcfa_tm.firstuse = 0;
765  	p->tcfa_flags = flags;
766  	if (est) {
767  		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
768  					&p->tcfa_rate_est,
769  					&p->tcfa_lock, false, est);
770  		if (err)
771  			goto err4;
772  	}
773  
774  	p->idrinfo = idrinfo;
775  	__module_get(ops->owner);
776  	p->ops = ops;
777  	*a = p;
778  	return 0;
779  err4:
780  	free_percpu(p->cpu_qstats);
781  err3:
782  	free_percpu(p->cpu_bstats_hw);
783  err2:
784  	free_percpu(p->cpu_bstats);
785  err1:
786  	kfree(p);
787  	return err;
788  }
789  EXPORT_SYMBOL(tcf_idr_create);
790  
791  int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
792  			      struct nlattr *est, struct tc_action **a,
793  			      const struct tc_action_ops *ops, int bind,
794  			      u32 flags)
795  {
796  	/* Set cpustats according to actions flags. */
797  	return tcf_idr_create(tn, index, est, a, ops, bind,
798  			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
799  }
800  EXPORT_SYMBOL(tcf_idr_create_from_flags);
801  
802  /* Clean up an idr index that was allocated but not initialized. */
803  
804  void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
805  {
806  	struct tcf_idrinfo *idrinfo = tn->idrinfo;
807  
808  	mutex_lock(&idrinfo->lock);
809  	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
810  	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
811  	mutex_unlock(&idrinfo->lock);
812  }
813  EXPORT_SYMBOL(tcf_idr_cleanup);
814  
815  /* Check if an action with the specified index exists. If the action is
816   * found, increment its reference and bind counters and return 1. Otherwise
817   * insert a temporary error pointer (to prevent concurrent users from
818   * inserting actions with the same index) and return 0.
819   *
820   * May return -EAGAIN for binding actions in case of a parallel add/delete on
821   * the requested index.
822   */
823  
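/* Minimal usage sketch from an action's ->init() callback (illustrative
 * only; real actions add their own parameter parsing around this):
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (err < 0)
 *		return err;
 *	if (!err) {			// slot reserved: create the action
 *		err = tcf_idr_create(tn, index, est, a, ops, bind, false, flags);
 *		if (err) {
 *			tcf_idr_cleanup(tn, index);
 *			return err;
 *		}
 *	}				// err == 1: existing action was bound
 */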
824  int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
825  			struct tc_action **a, int bind)
826  {
827  	struct tcf_idrinfo *idrinfo = tn->idrinfo;
828  	struct tc_action *p;
829  	int ret;
830  	u32 max;
831  
832  	if (*index) {
833  		rcu_read_lock();
834  		p = idr_find(&idrinfo->action_idr, *index);
835  
836  		if (IS_ERR(p)) {
837  			/* This means that another process allocated
838  			 * index but did not assign the pointer yet.
839  			 */
840  			rcu_read_unlock();
841  			return -EAGAIN;
842  		}
843  
844  		if (!p) {
845  			/* Empty slot, try to allocate it */
846  			max = *index;
847  			rcu_read_unlock();
848  			goto new;
849  		}
850  
851  		if (!refcount_inc_not_zero(&p->tcfa_refcnt)) {
852  			/* Action was deleted in parallel */
853  			rcu_read_unlock();
854  			return -EAGAIN;
855  		}
856  
857  		if (bind)
858  			atomic_inc(&p->tcfa_bindcnt);
859  		*a = p;
860  
861  		rcu_read_unlock();
862  
863  		return 1;
864  	} else {
865  		/* Find a slot */
866  		*index = 1;
867  		max = UINT_MAX;
868  	}
869  
870  new:
871  	*a = NULL;
872  
873  	mutex_lock(&idrinfo->lock);
874  	ret = idr_alloc_u32(&idrinfo->action_idr, ERR_PTR(-EBUSY), index, max,
875  			    GFP_KERNEL);
876  	mutex_unlock(&idrinfo->lock);
877  
878  	/* N binds raced for action allocation,
879  	 * retry for all the ones that failed.
880  	 */
881  	if (ret == -ENOSPC && *index == max)
882  		ret = -EAGAIN;
883  
884  	return ret;
885  }
886  EXPORT_SYMBOL(tcf_idr_check_alloc);
887  
888  void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
889  			 struct tcf_idrinfo *idrinfo)
890  {
891  	struct idr *idr = &idrinfo->action_idr;
892  	struct tc_action *p;
893  	int ret;
894  	unsigned long id = 1;
895  	unsigned long tmp;
896  
897  	idr_for_each_entry_ul(idr, p, tmp, id) {
898  		ret = __tcf_idr_release(p, false, true);
899  		if (ret == ACT_P_DELETED)
900  			module_put(ops->owner);
901  		else if (ret < 0)
902  			return;
903  	}
904  	idr_destroy(&idrinfo->action_idr);
905  }
906  EXPORT_SYMBOL(tcf_idrinfo_destroy);
907  
908  static LIST_HEAD(act_base);
909  static DEFINE_RWLOCK(act_mod_lock);
910  /* Since the act ops id is stored in the pernet subsystem list, there
911   * is no way to walk through only the action subsystems, so we keep
912   * the tc action pernet ops ids in a separate list for reoffload to
913   * walk through.
914   */
915  static LIST_HEAD(act_pernet_id_list);
916  static DEFINE_MUTEX(act_id_mutex);
917  struct tc_act_pernet_id {
918  	struct list_head list;
919  	unsigned int id;
920  };
921  
922  static int tcf_pernet_add_id_list(unsigned int id)
923  {
924  	struct tc_act_pernet_id *id_ptr;
925  	int ret = 0;
926  
927  	mutex_lock(&act_id_mutex);
928  	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
929  		if (id_ptr->id == id) {
930  			ret = -EEXIST;
931  			goto err_out;
932  		}
933  	}
934  
935  	id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
936  	if (!id_ptr) {
937  		ret = -ENOMEM;
938  		goto err_out;
939  	}
940  	id_ptr->id = id;
941  
942  	list_add_tail(&id_ptr->list, &act_pernet_id_list);
943  
944  err_out:
945  	mutex_unlock(&act_id_mutex);
946  	return ret;
947  }
948  
949  static void tcf_pernet_del_id_list(unsigned int id)
950  {
951  	struct tc_act_pernet_id *id_ptr;
952  
953  	mutex_lock(&act_id_mutex);
954  	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
955  		if (id_ptr->id == id) {
956  			list_del(&id_ptr->list);
957  			kfree(id_ptr);
958  			break;
959  		}
960  	}
961  	mutex_unlock(&act_id_mutex);
962  }
963  
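/* Registration sketch for an action module (illustrative: the "foo" ops,
 * callbacks and pernet_operations below are assumptions, not part of this
 * file):
 *
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind	= "foo",
 *		.id	= TCA_ID_FOO,
 *		.owner	= THIS_MODULE,
 *		.size	= sizeof(struct tcf_foo),
 *		.act	= tcf_foo_act,
 *		.dump	= tcf_foo_dump,
 *		.init	= tcf_foo_init,
 *		.net_id	= &act_foo_net_id,
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 *
 * tcf_register_action() rejects ops that lack ->act, ->dump or ->init and
 * registers the pernet subsystem before making the ops visible, so
 * tcf_action_init_1() can never observe a partially initialized netns.
 */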
964  int tcf_register_action(struct tc_action_ops *act,
965  			struct pernet_operations *ops)
966  {
967  	struct tc_action_ops *a;
968  	int ret;
969  
970  	if (!act->act || !act->dump || !act->init)
971  		return -EINVAL;
972  
973  	/* We have to register pernet ops before making the action ops visible,
974  	 * otherwise tcf_action_init_1() could get a partially initialized
975  	 * netns.
976  	 */
977  	ret = register_pernet_subsys(ops);
978  	if (ret)
979  		return ret;
980  
981  	if (ops->id) {
982  		ret = tcf_pernet_add_id_list(*ops->id);
983  		if (ret)
984  			goto err_id;
985  	}
986  
987  	write_lock(&act_mod_lock);
988  	list_for_each_entry(a, &act_base, head) {
989  		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
990  			ret = -EEXIST;
991  			goto err_out;
992  		}
993  	}
994  	list_add_tail(&act->head, &act_base);
995  	write_unlock(&act_mod_lock);
996  
997  	return 0;
998  
999  err_out:
1000  	write_unlock(&act_mod_lock);
1001  	if (ops->id)
1002  		tcf_pernet_del_id_list(*ops->id);
1003  err_id:
1004  	unregister_pernet_subsys(ops);
1005  	return ret;
1006  }
1007  EXPORT_SYMBOL(tcf_register_action);
1008  
1009  int tcf_unregister_action(struct tc_action_ops *act,
1010  			  struct pernet_operations *ops)
1011  {
1012  	struct tc_action_ops *a;
1013  	int err = -ENOENT;
1014  
1015  	write_lock(&act_mod_lock);
1016  	list_for_each_entry(a, &act_base, head) {
1017  		if (a == act) {
1018  			list_del(&act->head);
1019  			err = 0;
1020  			break;
1021  		}
1022  	}
1023  	write_unlock(&act_mod_lock);
1024  	if (!err) {
1025  		unregister_pernet_subsys(ops);
1026  		if (ops->id)
1027  			tcf_pernet_del_id_list(*ops->id);
1028  	}
1029  	return err;
1030  }
1031  EXPORT_SYMBOL(tcf_unregister_action);
1032  
1033  /* lookup by name */
1034  static struct tc_action_ops *tc_lookup_action_n(char *kind)
1035  {
1036  	struct tc_action_ops *a, *res = NULL;
1037  
1038  	if (kind) {
1039  		read_lock(&act_mod_lock);
1040  		list_for_each_entry(a, &act_base, head) {
1041  			if (strcmp(kind, a->kind) == 0) {
1042  				if (try_module_get(a->owner))
1043  					res = a;
1044  				break;
1045  			}
1046  		}
1047  		read_unlock(&act_mod_lock);
1048  	}
1049  	return res;
1050  }
1051  
1052  /* lookup by nlattr */
1053  static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
1054  {
1055  	struct tc_action_ops *a, *res = NULL;
1056  
1057  	if (kind) {
1058  		read_lock(&act_mod_lock);
1059  		list_for_each_entry(a, &act_base, head) {
1060  			if (nla_strcmp(kind, a->kind) == 0) {
1061  				if (try_module_get(a->owner))
1062  					res = a;
1063  				break;
1064  			}
1065  		}
1066  		read_unlock(&act_mod_lock);
1067  	}
1068  	return res;
1069  }
1070  
1071  /* TCA_ACT_MAX_PRIO is 32, so the jump count goes up to 32 */
1072  #define TCA_ACT_MAX_PRIO_MASK 0x1FF
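/* tcf_action_exec() walks the per-filter action array. TC_ACT_REPEAT
 * re-executes the same action up to 32 times before the pipeline is
 * aborted; TC_ACT_JUMP skips the number of actions encoded in the low
 * bits of the return value; TC_ACT_GOTO_CHAIN diverts the packet to
 * another filter chain; any result other than TC_ACT_PIPE ends the walk.
 */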
1073  int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
1074  		    int nr_actions, struct tcf_result *res)
1075  {
1076  	u32 jmp_prgcnt = 0;
1077  	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
1078  	int i;
1079  	int ret = TC_ACT_OK;
1080  
1081  	if (skb_skip_tc_classify(skb))
1082  		return TC_ACT_OK;
1083  
1084  restart_act_graph:
1085  	for (i = 0; i < nr_actions; i++) {
1086  		const struct tc_action *a = actions[i];
1087  		int repeat_ttl;
1088  
1089  		if (jmp_prgcnt > 0) {
1090  			jmp_prgcnt -= 1;
1091  			continue;
1092  		}
1093  
1094  		if (tc_act_skip_sw(a->tcfa_flags))
1095  			continue;
1096  
1097  		repeat_ttl = 32;
1098  repeat:
1099  		ret = tc_act(skb, a, res);
1100  		if (unlikely(ret == TC_ACT_REPEAT)) {
1101  			if (--repeat_ttl != 0)
1102  				goto repeat;
1103  			/* suspicious opcode, stop pipeline */
1104  			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
1105  			return TC_ACT_OK;
1106  		}
1107  		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
1108  			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
1109  			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
1110  				/* faulty opcode, stop pipeline */
1111  				return TC_ACT_OK;
1112  			} else {
1113  				jmp_ttl -= 1;
1114  				if (jmp_ttl > 0)
1115  					goto restart_act_graph;
1116  				else /* faulty graph, stop pipeline */
1117  					return TC_ACT_OK;
1118  			}
1119  		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
1120  			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
1121  				net_warn_ratelimited("can't go to NULL chain!\n");
1122  				return TC_ACT_SHOT;
1123  			}
1124  			tcf_action_goto_chain_exec(a, res);
1125  		}
1126  
1127  		if (ret != TC_ACT_PIPE)
1128  			break;
1129  	}
1130  
1131  	return ret;
1132  }
1133  EXPORT_SYMBOL(tcf_action_exec);
1134  
1135  int tcf_action_destroy(struct tc_action *actions[], int bind)
1136  {
1137  	const struct tc_action_ops *ops;
1138  	struct tc_action *a;
1139  	int ret = 0, i;
1140  
1141  	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1142  		a = actions[i];
1143  		actions[i] = NULL;
1144  		ops = a->ops;
1145  		ret = __tcf_idr_release(a, bind, true);
1146  		if (ret == ACT_P_DELETED)
1147  			module_put(ops->owner);
1148  		else if (ret < 0)
1149  			return ret;
1150  	}
1151  	return ret;
1152  }
1153  
1154  static int tcf_action_put(struct tc_action *p)
1155  {
1156  	return __tcf_action_put(p, false);
1157  }
1158  
1159  /* Put all actions in this array, skipping NULL entries. */
1160  static void tcf_action_put_many(struct tc_action *actions[])
1161  {
1162  	int i;
1163  
1164  	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
1165  		struct tc_action *a = actions[i];
1166  		const struct tc_action_ops *ops;
1167  
1168  		if (!a)
1169  			continue;
1170  		ops = a->ops;
1171  		if (tcf_action_put(a))
1172  			module_put(ops->owner);
1173  	}
1174  }
1175  
1176  int
1177  tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
1178  {
1179  	return a->ops->dump(skb, a, bind, ref);
1180  }
1181  
1182  int
1183  tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
1184  {
1185  	int err = -EINVAL;
1186  	unsigned char *b = skb_tail_pointer(skb);
1187  	struct nlattr *nest;
1188  	u32 flags;
1189  
1190  	if (tcf_action_dump_terse(skb, a, false))
1191  		goto nla_put_failure;
1192  
1193  	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
1194  	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
1195  			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
1196  		goto nla_put_failure;
1197  
1198  	if (a->used_hw_stats_valid &&
1199  	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
1200  			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
1201  		goto nla_put_failure;
1202  
1203  	flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
1204  	if (flags &&
1205  	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
1206  			       flags, flags))
1207  		goto nla_put_failure;
1208  
1209  	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
1210  		goto nla_put_failure;
1211  
1212  	nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
1213  	if (nest == NULL)
1214  		goto nla_put_failure;
1215  	err = tcf_action_dump_old(skb, a, bind, ref);
1216  	if (err > 0) {
1217  		nla_nest_end(skb, nest);
1218  		return err;
1219  	}
1220  
1221  nla_put_failure:
1222  	nlmsg_trim(skb, b);
1223  	return -1;
1224  }
1225  EXPORT_SYMBOL(tcf_action_dump_1);
1226  
1227  int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
1228  		    int bind, int ref, bool terse)
1229  {
1230  	struct tc_action *a;
1231  	int err = -EINVAL, i;
1232  	struct nlattr *nest;
1233  
1234  	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1235  		a = actions[i];
1236  		nest = nla_nest_start_noflag(skb, i + 1);
1237  		if (nest == NULL)
1238  			goto nla_put_failure;
1239  		err = terse ? tcf_action_dump_terse(skb, a, false) :
1240  			tcf_action_dump_1(skb, a, bind, ref);
1241  		if (err < 0)
1242  			goto errout;
1243  		nla_nest_end(skb, nest);
1244  	}
1245  
1246  	return 0;
1247  
1248  nla_put_failure:
1249  	err = -EINVAL;
1250  errout:
1251  	nla_nest_cancel(skb, nest);
1252  	return err;
1253  }
1254  
1255  static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
1256  {
1257  	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
1258  	if (!c)
1259  		return NULL;
1260  
1261  	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
1262  	if (!c->data) {
1263  		kfree(c);
1264  		return NULL;
1265  	}
1266  	c->len = nla_len(tb[TCA_ACT_COOKIE]);
1267  
1268  	return c;
1269  }
1270  
1271  static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
1272  {
1273  	struct nla_bitfield32 hw_stats_bf;
1274  
1275  	/* If the user did not pass the attr, they do not care about the
1276  	 * type. Return "any" in that case, which selects all supported
1277  	 * types.
1278  	 */
1279  	if (!hw_stats_attr)
1280  		return TCA_ACT_HW_STATS_ANY;
1281  	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
1282  	return hw_stats_bf.value;
1283  }
1284  
1285  static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
1286  	[TCA_ACT_KIND]		= { .type = NLA_STRING },
1287  	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
1288  	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
1289  				    .len = TC_COOKIE_MAX_SIZE },
1290  	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
1291  	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
1292  							TCA_ACT_FLAGS_SKIP_HW |
1293  							TCA_ACT_FLAGS_SKIP_SW),
1294  	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
1295  };
1296  
1297  void tcf_idr_insert_many(struct tc_action *actions[])
1298  {
1299  	int i;
1300  
1301  	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
1302  		struct tc_action *a = actions[i];
1303  		struct tcf_idrinfo *idrinfo;
1304  
1305  		if (!a)
1306  			continue;
1307  		idrinfo = a->idrinfo;
1308  		mutex_lock(&idrinfo->lock);
1309  		/* Replace the ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
1310  		 * the action was just created; otherwise this is a no-op.
1311  		 */
1312  		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
1313  		mutex_unlock(&idrinfo->lock);
1314  	}
1315  }
1316  
1317  struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
1318  					 bool rtnl_held,
1319  					 struct netlink_ext_ack *extack)
1320  {
1321  	struct nlattr *tb[TCA_ACT_MAX + 1];
1322  	struct tc_action_ops *a_o;
1323  	char act_name[IFNAMSIZ];
1324  	struct nlattr *kind;
1325  	int err;
1326  
1327  	if (!police) {
1328  		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1329  						  tcf_action_policy, extack);
1330  		if (err < 0)
1331  			return ERR_PTR(err);
1332  		err = -EINVAL;
1333  		kind = tb[TCA_ACT_KIND];
1334  		if (!kind) {
1335  			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
1336  			return ERR_PTR(err);
1337  		}
1338  		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
1339  			NL_SET_ERR_MSG(extack, "TC action name too long");
1340  			return ERR_PTR(err);
1341  		}
1342  	} else {
1343  		if (strscpy(act_name, "police", IFNAMSIZ) < 0) {
1344  			NL_SET_ERR_MSG(extack, "TC action name too long");
1345  			return ERR_PTR(-EINVAL);
1346  		}
1347  	}
1348  
1349  	a_o = tc_lookup_action_n(act_name);
1350  	if (a_o == NULL) {
1351  #ifdef CONFIG_MODULES
1352  		if (rtnl_held)
1353  			rtnl_unlock();
1354  		request_module("act_%s", act_name);
1355  		if (rtnl_held)
1356  			rtnl_lock();
1357  
1358  		a_o = tc_lookup_action_n(act_name);
1359  
1360  		/* We dropped the RTNL semaphore in order to
1361  		 * perform the module load.  So, even if we
1362  		 * succeeded in loading the module we have to
1363  		 * tell the caller to replay the request.  We
1364  		 * indicate this using -EAGAIN.
1365  		 */
1366  		if (a_o != NULL) {
1367  			module_put(a_o->owner);
1368  			return ERR_PTR(-EAGAIN);
1369  		}
1370  #endif
1371  		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
1372  		return ERR_PTR(-ENOENT);
1373  	}
1374  
1375  	return a_o;
1376  }
1377  
1378  struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
1379  				    struct nlattr *nla, struct nlattr *est,
1380  				    struct tc_action_ops *a_o, int *init_res,
1381  				    u32 flags, struct netlink_ext_ack *extack)
1382  {
1383  	bool police = flags & TCA_ACT_FLAGS_POLICE;
1384  	struct nla_bitfield32 userflags = { 0, 0 };
1385  	struct tc_cookie *user_cookie = NULL;
1386  	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
1387  	struct nlattr *tb[TCA_ACT_MAX + 1];
1388  	struct tc_action *a;
1389  	int err;
1390  
1391  	/* backward compatibility for policer */
1392  	if (!police) {
1393  		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1394  						  tcf_action_policy, extack);
1395  		if (err < 0)
1396  			return ERR_PTR(err);
1397  		if (tb[TCA_ACT_COOKIE]) {
1398  			user_cookie = nla_memdup_cookie(tb);
1399  			if (!user_cookie) {
1400  				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
1401  				err = -ENOMEM;
1402  				goto err_out;
1403  			}
1404  		}
1405  		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
1406  		if (tb[TCA_ACT_FLAGS]) {
1407  			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
1408  			if (!tc_act_flags_valid(userflags.value)) {
1409  				err = -EINVAL;
1410  				goto err_out;
1411  			}
1412  		}
1413  
1414  		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
1415  				userflags.value | flags, extack);
1416  	} else {
1417  		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
1418  				extack);
1419  	}
1420  	if (err < 0)
1421  		goto err_out;
1422  	*init_res = err;
1423  
1424  	if (!police && tb[TCA_ACT_COOKIE])
1425  		tcf_set_action_cookie(&a->user_cookie, user_cookie);
1426  
1427  	if (!police)
1428  		a->hw_stats = hw_stats;
1429  
1430  	return a;
1431  
1432  err_out:
1433  	if (user_cookie) {
1434  		kfree(user_cookie->data);
1435  		kfree(user_cookie);
1436  	}
1437  	return ERR_PTR(err);
1438  }
1439  
1440  static bool tc_act_bind(u32 flags)
1441  {
1442  	return !!(flags & TCA_ACT_FLAGS_BIND);
1443  }
1444  
1445  /* Returns the number of initialized actions or a negative error. */
1446  
1447  int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
1448  		    struct nlattr *est, struct tc_action *actions[],
1449  		    int init_res[], size_t *attr_size,
1450  		    u32 flags, u32 fl_flags,
1451  		    struct netlink_ext_ack *extack)
1452  {
1453  	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
1454  	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1455  	struct tc_action *act;
1456  	size_t sz = 0;
1457  	int err;
1458  	int i;
1459  
1460  	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1461  					  extack);
1462  	if (err < 0)
1463  		return err;
1464  
1465  	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1466  		struct tc_action_ops *a_o;
1467  
1468  		a_o = tc_action_load_ops(tb[i], flags & TCA_ACT_FLAGS_POLICE,
1469  					 !(flags & TCA_ACT_FLAGS_NO_RTNL),
1470  					 extack);
1471  		if (IS_ERR(a_o)) {
1472  			err = PTR_ERR(a_o);
1473  			goto err_mod;
1474  		}
1475  		ops[i - 1] = a_o;
1476  	}
1477  
1478  	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1479  		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
1480  					&init_res[i - 1], flags, extack);
1481  		if (IS_ERR(act)) {
1482  			err = PTR_ERR(act);
1483  			goto err;
1484  		}
1485  		sz += tcf_action_fill_size(act);
1486  		/* Start from index 0 */
1487  		actions[i - 1] = act;
1488  		if (tc_act_bind(flags)) {
1489  			bool skip_sw = tc_skip_sw(fl_flags);
1490  			bool skip_hw = tc_skip_hw(fl_flags);
1491  
1492  			if (tc_act_bind(act->tcfa_flags)) {
1493  				/* Action is created by classifier and is not
1494  				 * standalone. Check that the user did not set
1495  				 * any action flags different than the
1496  				 * classifier flags, and inherit the flags from
1497  				 * the classifier for the compatibility case
1498  				 * where no flags were specified at all.
1499  				 */
1500  				if ((tc_act_skip_sw(act->tcfa_flags) && !skip_sw) ||
1501  				    (tc_act_skip_hw(act->tcfa_flags) && !skip_hw)) {
1502  					NL_SET_ERR_MSG(extack,
1503  						       "Mismatch between action and filter offload flags");
1504  					err = -EINVAL;
1505  					goto err;
1506  				}
1507  				if (skip_sw)
1508  					act->tcfa_flags |= TCA_ACT_FLAGS_SKIP_SW;
1509  				if (skip_hw)
1510  					act->tcfa_flags |= TCA_ACT_FLAGS_SKIP_HW;
1511  				continue;
1512  			}
1513  
1514  			/* Action is standalone */
1515  			if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
1516  			    skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
1517  				NL_SET_ERR_MSG(extack,
1518  					       "Mismatch between action and filter offload flags");
1519  				err = -EINVAL;
1520  				goto err;
1521  			}
1522  		} else {
1523  			err = tcf_action_offload_add(act, extack);
1524  			if (tc_act_skip_sw(act->tcfa_flags) && err)
1525  				goto err;
1526  		}
1527  	}
1528  
1529  	/* We have to commit them all together, because if any error happened in
1530  	 * between, we could not handle the failure gracefully.
1531  	 */
1532  	tcf_idr_insert_many(actions);
1533  
1534  	*attr_size = tcf_action_full_attrs_size(sz);
1535  	err = i - 1;
1536  	goto err_mod;
1537  
1538  err:
1539  	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
1540  err_mod:
1541  	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
1542  		if (ops[i])
1543  			module_put(ops[i]->owner);
1544  	}
1545  	return err;
1546  }
1547  
1548  void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
1549  			     u64 drops, bool hw)
1550  {
1551  	if (a->cpu_bstats) {
1552  		_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
1553  
1554  		this_cpu_ptr(a->cpu_qstats)->drops += drops;
1555  
1556  		if (hw)
1557  			_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
1558  				       bytes, packets);
1559  		return;
1560  	}
1561  
1562  	_bstats_update(&a->tcfa_bstats, bytes, packets);
1563  	a->tcfa_qstats.drops += drops;
1564  	if (hw)
1565  		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
1566  }
1567  EXPORT_SYMBOL(tcf_action_update_stats);
1568  
1569  int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
1570  			  int compat_mode)
1571  {
1572  	int err = 0;
1573  	struct gnet_dump d;
1574  
1575  	if (p == NULL)
1576  		goto errout;
1577  
1578  	/* compat_mode being true specifies a call that is supposed
1579  	 * to add additional backward compatibility statistic TLVs.
1580  	 */
1581  	if (compat_mode) {
1582  		if (p->type == TCA_OLD_COMPAT)
1583  			err = gnet_stats_start_copy_compat(skb, 0,
1584  							   TCA_STATS,
1585  							   TCA_XSTATS,
1586  							   &p->tcfa_lock, &d,
1587  							   TCA_PAD);
1588  		else
1589  			return 0;
1590  	} else
1591  		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
1592  					    &p->tcfa_lock, &d, TCA_ACT_PAD);
1593  
1594  	if (err < 0)
1595  		goto errout;
1596  
1597  	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
1598  				  &p->tcfa_bstats, false) < 0 ||
1599  	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
1600  				     &p->tcfa_bstats_hw, false) < 0 ||
1601  	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
1602  	    gnet_stats_copy_queue(&d, p->cpu_qstats,
1603  				  &p->tcfa_qstats,
1604  				  p->tcfa_qstats.qlen) < 0)
1605  		goto errout;
1606  
1607  	if (gnet_stats_finish_copy(&d) < 0)
1608  		goto errout;
1609  
1610  	return 0;
1611  
1612  errout:
1613  	return -1;
1614  }
1615  
1616  static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
1617  			u32 portid, u32 seq, u16 flags, int event, int bind,
1618  			int ref, struct netlink_ext_ack *extack)
1619  {
1620  	struct tcamsg *t;
1621  	struct nlmsghdr *nlh;
1622  	unsigned char *b = skb_tail_pointer(skb);
1623  	struct nlattr *nest;
1624  
1625  	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
1626  	if (!nlh)
1627  		goto out_nlmsg_trim;
1628  	t = nlmsg_data(nlh);
1629  	t->tca_family = AF_UNSPEC;
1630  	t->tca__pad1 = 0;
1631  	t->tca__pad2 = 0;
1632  
1633  	if (extack && extack->_msg &&
1634  	    nla_put_string(skb, TCA_ROOT_EXT_WARN_MSG, extack->_msg))
1635  		goto out_nlmsg_trim;
1636  
1637  	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1638  	if (!nest)
1639  		goto out_nlmsg_trim;
1640  
1641  	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
1642  		goto out_nlmsg_trim;
1643  
1644  	nla_nest_end(skb, nest);
1645  
1646  	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1647  
1648  	return skb->len;
1649  
1650  out_nlmsg_trim:
1651  	nlmsg_trim(skb, b);
1652  	return -1;
1653  }
1654  
1655  static int
1656  tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
1657  	       struct tc_action *actions[], int event,
1658  	       struct netlink_ext_ack *extack)
1659  {
1660  	struct sk_buff *skb;
1661  
1662  	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1663  	if (!skb)
1664  		return -ENOBUFS;
1665  	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
1666  			 0, 1, NULL) <= 0) {
1667  		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
1668  		kfree_skb(skb);
1669  		return -EINVAL;
1670  	}
1671  
1672  	return rtnl_unicast(skb, net, portid);
1673  }
1674  
1675  static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
1676  					  struct nlmsghdr *n, u32 portid,
1677  					  struct netlink_ext_ack *extack)
1678  {
1679  	struct nlattr *tb[TCA_ACT_MAX + 1];
1680  	const struct tc_action_ops *ops;
1681  	struct tc_action *a;
1682  	int index;
1683  	int err;
1684  
1685  	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1686  					  tcf_action_policy, extack);
1687  	if (err < 0)
1688  		goto err_out;
1689  
1690  	err = -EINVAL;
1691  	if (tb[TCA_ACT_INDEX] == NULL ||
1692  	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
1693  		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
1694  		goto err_out;
1695  	}
1696  	index = nla_get_u32(tb[TCA_ACT_INDEX]);
1697  
1698  	err = -EINVAL;
1699  	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
1700  	if (!ops) { /* could happen in batch of actions */
1701  		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
1702  		goto err_out;
1703  	}
1704  	err = -ENOENT;
1705  	if (__tcf_idr_search(net, ops, &a, index) == 0) {
1706  		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
1707  		goto err_mod;
1708  	}
1709  
1710  	module_put(ops->owner);
1711  	return a;
1712  
1713  err_mod:
1714  	module_put(ops->owner);
1715  err_out:
1716  	return ERR_PTR(err);
1717  }
1718  
1719  static int tca_action_flush(struct net *net, struct nlattr *nla,
1720  			    struct nlmsghdr *n, u32 portid,
1721  			    struct netlink_ext_ack *extack)
1722  {
1723  	struct sk_buff *skb;
1724  	unsigned char *b;
1725  	struct nlmsghdr *nlh;
1726  	struct tcamsg *t;
1727  	struct netlink_callback dcb;
1728  	struct nlattr *nest;
1729  	struct nlattr *tb[TCA_ACT_MAX + 1];
1730  	const struct tc_action_ops *ops;
1731  	struct nlattr *kind;
1732  	int err = -ENOMEM;
1733  
1734  	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1735  	if (!skb)
1736  		return err;
1737  
1738  	b = skb_tail_pointer(skb);
1739  
1740  	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
1741  					  tcf_action_policy, extack);
1742  	if (err < 0)
1743  		goto err_out;
1744  
1745  	err = -EINVAL;
1746  	kind = tb[TCA_ACT_KIND];
1747  	ops = tc_lookup_action(kind);
1748  	if (!ops) { /* someone is trying to flush an unknown action */
1749  		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
1750  		goto err_out;
1751  	}
1752  
1753  	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
1754  			sizeof(*t), 0);
1755  	if (!nlh) {
1756  		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
1757  		goto out_module_put;
1758  	}
1759  	t = nlmsg_data(nlh);
1760  	t->tca_family = AF_UNSPEC;
1761  	t->tca__pad1 = 0;
1762  	t->tca__pad2 = 0;
1763  
1764  	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
1765  	if (!nest) {
1766  		NL_SET_ERR_MSG(extack, "Failed to add nested netlink attributes");
1767  		goto out_module_put;
1768  	}
1769  
1770  	err = __tcf_generic_walker(net, skb, &dcb, RTM_DELACTION, ops, extack);
1771  	if (err <= 0) {
1772  		nla_nest_cancel(skb, nest);
1773  		goto out_module_put;
1774  	}
1775  
1776  	nla_nest_end(skb, nest);
1777  
1778  	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1779  	nlh->nlmsg_flags |= NLM_F_ROOT;
1780  	module_put(ops->owner);
1781  	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1782  			     n->nlmsg_flags & NLM_F_ECHO);
1783  	if (err < 0)
1784  		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");
1785  
1786  	return err;
1787  
1788  out_module_put:
1789  	module_put(ops->owner);
1790  err_out:
1791  	kfree_skb(skb);
1792  	return err;
1793  }
1794  
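      /* Delete up to TCA_ACT_MAX_PRIO actions.  Each slot is cleared and
       * the caller's reference dropped; unless that was the last
       * reference (a concurrent delete won), the action is then removed
       * from its IDR by index.
       */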
1795  static int tcf_action_delete(struct net *net, struct tc_action *actions[])
1796  {
1797  	int i;
1798  
1799  	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1800  		struct tc_action *a = actions[i];
1801  		const struct tc_action_ops *ops = a->ops;
1802  		/* Actions can be deleted concurrently, so save the idrinfo and
1803  		 * index needed to delete the action after our reference is released.
1804  		 */
1805  		struct tcf_idrinfo *idrinfo = a->idrinfo;
1806  		u32 act_index = a->tcfa_index;
1807  
1808  		actions[i] = NULL;
1809  		if (tcf_action_put(a)) {
1810  			/* last reference, action was deleted concurrently */
1811  			module_put(ops->owner);
1812  	} else {
1813  			int ret;
1814  
1815  			/* now do the delete */
1816  			ret = tcf_idr_delete_index(idrinfo, act_index);
1817  			if (ret < 0)
1818  				return ret;
1819  		}
1820  	}
1821  	return 0;
1822  }
1823  
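      /* Called while a driver's offload callback is being unregistered:
       * build an RTM_DELACTION notification for @action, release it, and
       * broadcast the message only if the release actually deleted the
       * action (ACT_P_DELETED).
       */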
1824  static int
1825  tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
1826  {
1827  	size_t attr_size = tcf_action_fill_size(action);
1828  	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
1829  		[0] = action,
1830  	};
1831  	const struct tc_action_ops *ops = action->ops;
1832  	struct sk_buff *skb;
1833  	int ret;
1834  
1835  	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1836  			GFP_KERNEL);
1837  	if (!skb)
1838  		return -ENOBUFS;
1839  
1840  	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1, NULL) <= 0) {
1841  		kfree_skb(skb);
1842  		return -EINVAL;
1843  	}
1844  
1845  	ret = tcf_idr_release_unsafe(action);
1846  	if (ret == ACT_P_DELETED) {
1847  		module_put(ops->owner);
1848  		ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0);
1849  	} else {
1850  		kfree_skb(skb);
1851  	}
1852  
1853  	return ret;
1854  }
1855  
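      /* Replay offload state for every action in every network namespace
       * when an indirect block callback comes or goes.  On @add each
       * eligible action is offered to the new callback; on removal the
       * callback's hardware count is dropped and skip_sw actions that are
       * no longer in hardware at all are deleted and notified.
       */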
1856  int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
1857  			    void *cb_priv, bool add)
1858  {
1859  	struct tc_act_pernet_id *id_ptr;
1860  	struct tcf_idrinfo *idrinfo;
1861  	struct tc_action_net *tn;
1862  	struct tc_action *p;
1863  	unsigned int act_id;
1864  	unsigned long tmp;
1865  	unsigned long id;
1866  	struct idr *idr;
1867  	struct net *net;
1868  	int ret;
1869  
1870  	if (!cb)
1871  		return -EINVAL;
1872  
1873  	down_read(&net_rwsem);
1874  	mutex_lock(&act_id_mutex);
1875  
1876  	for_each_net(net) {
1877  		list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
1878  			act_id = id_ptr->id;
1879  			tn = net_generic(net, act_id);
1880  			if (!tn)
1881  				continue;
1882  			idrinfo = tn->idrinfo;
1883  			if (!idrinfo)
1884  				continue;
1885  
1886  			mutex_lock(&idrinfo->lock);
1887  			idr = &idrinfo->action_idr;
1888  			idr_for_each_entry_ul(idr, p, tmp, id) {
1889  				if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
1890  					continue;
1891  				if (add) {
1892  					tcf_action_offload_add_ex(p, NULL, cb,
1893  								  cb_priv);
1894  					continue;
1895  				}
1896  
1897  				/* cb is being unregistered: update the hw count */
1898  				ret = tcf_action_offload_del_ex(p, cb, cb_priv);
1899  				if (ret < 0)
1900  					continue;
1901  				if (tc_act_skip_sw(p->tcfa_flags) &&
1902  				    !tc_act_in_hw(p))
1903  					tcf_reoffload_del_notify(net, p);
1904  			}
1905  			mutex_unlock(&idrinfo->lock);
1906  		}
1907  	}
1908  	mutex_unlock(&act_id_mutex);
1909  	up_read(&net_rwsem);
1910  
1911  	return 0;
1912  }
1913  
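      /* Build the RTM_DELACTION notification before performing the
       * delete, so a dump failure leaves the actions untouched; only
       * after tcf_action_delete() succeeds is the message sent to
       * RTNLGRP_TC listeners.
       */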
1914  static int
1915  tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1916  	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1917  {
1918  	int ret;
1919  	struct sk_buff *skb;
1920  
1921  	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
1922  			GFP_KERNEL);
1923  	if (!skb)
1924  		return -ENOBUFS;
1925  
1926  	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
1927  			 0, 2, extack) <= 0) {
1928  		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
1929  		kfree_skb(skb);
1930  		return -EINVAL;
1931  	}
1932  
1933  	/* now do the delete */
1934  	ret = tcf_action_delete(net, actions);
1935  	if (ret < 0) {
1936  		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
1937  		kfree_skb(skb);
1938  		return ret;
1939  	}
1940  
1941  	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1942  			     n->nlmsg_flags & NLM_F_ECHO);
1943  	return ret;
1944  }
1945  
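      /* Shared handler for RTM_GETACTION and RTM_DELACTION.  A delete
       * with NLM_F_ROOT set is treated as a flush of a whole action kind;
       * otherwise each action referenced in @nla is looked up first, and
       * the references are dropped on the way out (a successful delete
       * consumes them in tcf_del_notify()).
       */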
1946  static int
1947  tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1948  	      u32 portid, int event, struct netlink_ext_ack *extack)
1949  {
1950  	int i, ret;
1951  	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1952  	struct tc_action *act;
1953  	size_t attr_size = 0;
1954  	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1955  
1956  	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
1957  					  extack);
1958  	if (ret < 0)
1959  		return ret;
1960  
1961  	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
1962  		if (tb[1])
1963  			return tca_action_flush(net, tb[1], n, portid, extack);
1964  
1965  		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
1966  		return -EINVAL;
1967  	}
1968  
1969  	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
1970  		act = tcf_action_get_1(net, tb[i], n, portid, extack);
1971  		if (IS_ERR(act)) {
1972  			ret = PTR_ERR(act);
1973  			goto err;
1974  		}
1975  		attr_size += tcf_action_fill_size(act);
1976  		actions[i - 1] = act;
1977  	}
1978  
1979  	attr_size = tcf_action_full_attrs_size(attr_size);
1980  
1981  	if (event == RTM_GETACTION)
1982  		ret = tcf_get_notify(net, portid, n, actions, event, extack);
1983  	else { /* delete */
1984  		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
1985  		if (ret)
1986  			goto err;
1987  		return 0;
1988  	}
1989  err:
1990  	tcf_action_put_many(actions);
1991  	return ret;
1992  }
1993  
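      /* Broadcast RTM_NEWACTION for freshly installed actions on
       * RTNLGRP_TC, echoing to the requester when NLM_F_ECHO is set.
       */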
1994  static int
1995  tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1996  	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1997  {
1998  	struct sk_buff *skb;
1999  
2000  	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
2001  			GFP_KERNEL);
2002  	if (!skb)
2003  		return -ENOBUFS;
2004  
2005  	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
2006  			 RTM_NEWACTION, 0, 0, extack) <= 0) {
2007  		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
2008  		kfree_skb(skb);
2009  		return -EINVAL;
2010  	}
2011  
2012  	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2013  			      n->nlmsg_flags & NLM_F_ECHO);
2014  }
2015  
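      /* Handle RTM_NEWACTION.  tcf_action_init() can return -EAGAIN,
       * typically after an action module had to be loaded, so the call is
       * retried a bounded number of times.  After the notification,
       * references are dropped only for pre-existing actions: entries
       * that were just created keep the reference that pins them.
       */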
2016  static int tcf_action_add(struct net *net, struct nlattr *nla,
2017  			  struct nlmsghdr *n, u32 portid, u32 flags,
2018  			  struct netlink_ext_ack *extack)
2019  {
2020  	size_t attr_size = 0;
2021  	int loop, ret, i;
2022  	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
2023  	int init_res[TCA_ACT_MAX_PRIO] = {};
2024  
2025  	for (loop = 0; loop < 10; loop++) {
2026  		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
2027  				      &attr_size, flags, 0, extack);
2028  		if (ret != -EAGAIN)
2029  			break;
2030  	}
2031  
2032  	if (ret < 0)
2033  		return ret;
2034  	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
2035  
2036  	/* only put existing actions */
2037  	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
2038  		if (init_res[i] == ACT_P_CREATED)
2039  			actions[i] = NULL;
2040  	tcf_action_put_many(actions);
2041  
2042  	return ret;
2043  }
2044  
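      /* Root-level attributes of an action message: dump-control flags
       * and the "changed in the last N msecs" filter.
       */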
2045  static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
2046  	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
2047  						 TCA_ACT_FLAG_TERSE_DUMP),
2048  	[TCA_ROOT_TIME_DELTA]      = { .type = NLA_U32 },
2049  };
2050  
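      /* Top-level doit() for the three action message types.  Anything
       * other than RTM_GETACTION requires CAP_NET_ADMIN.
       */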
2051  static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
2052  			 struct netlink_ext_ack *extack)
2053  {
2054  	struct net *net = sock_net(skb->sk);
2055  	struct nlattr *tca[TCA_ROOT_MAX + 1];
2056  	u32 portid = NETLINK_CB(skb).portid;
2057  	u32 flags = 0;
2058  	int ret = 0;
2059  
2060  	if ((n->nlmsg_type != RTM_GETACTION) &&
2061  	    !netlink_capable(skb, CAP_NET_ADMIN))
2062  		return -EPERM;
2063  
2064  	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
2065  				     TCA_ROOT_MAX, NULL, extack);
2066  	if (ret < 0)
2067  		return ret;
2068  
2069  	if (tca[TCA_ACT_TAB] == NULL) {
2070  		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
2071  		return -EINVAL;
2072  	}
2073  
2074  	/* n->nlmsg_flags & NLM_F_CREATE */
2075  	switch (n->nlmsg_type) {
2076  	case RTM_NEWACTION:
2077  		/* We assume that any flags other than NLM_F_REPLACE mean
2078  		 * "create only if it doesn't already exist".  NLM_F_CREATE |
2079  		 * NLM_F_EXCL implies exactly that, but to avoid ambiguity
2080  		 * (e.g. when flags is zero) we simply treat everything else
2081  		 * as create.
2082  		 */
2083  		if (n->nlmsg_flags & NLM_F_REPLACE)
2084  			flags = TCA_ACT_FLAGS_REPLACE;
2085  		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
2086  				     extack);
2087  		break;
2088  	case RTM_DELACTION:
2089  		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2090  				    portid, RTM_DELACTION, extack);
2091  		break;
2092  	case RTM_GETACTION:
2093  		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2094  				    portid, RTM_GETACTION, extack);
2095  		break;
2096  	default:
2097  		BUG();
2098  	}
2099  
2100  	return ret;
2101  }
2102  
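      /* Dig TCA_ACT_KIND out of the first action in a dump request so the
       * dump handler knows which ops table to walk.
       */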
2103  static struct nlattr *find_dump_kind(struct nlattr **nla)
2104  {
2105  	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
2106  	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
2107  	struct nlattr *kind;
2108  
2109  	tb1 = nla[TCA_ACT_TAB];
2110  	if (tb1 == NULL)
2111  		return NULL;
2112  
2113  	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
2114  		return NULL;
2115  
2116  	if (tb[1] == NULL)
2117  		return NULL;
2118  	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
2119  		return NULL;
2120  	kind = tb2[TCA_ACT_KIND];
2121  
2122  	return kind;
2123  }
2124  
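      /* Dump all actions of a single kind.  Dump flags and the time-delta
       * filter are stashed in cb->args for the per-kind walker, and the
       * number of actions dumped is reported back in TCA_ROOT_COUNT.
       */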
2125  static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
2126  {
2127  	struct net *net = sock_net(skb->sk);
2128  	struct nlmsghdr *nlh;
2129  	unsigned char *b = skb_tail_pointer(skb);
2130  	struct nlattr *nest;
2131  	struct tc_action_ops *a_o;
2132  	int ret = 0;
2133  	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
2134  	struct nlattr *tb[TCA_ROOT_MAX + 1];
2135  	struct nlattr *count_attr = NULL;
2136  	unsigned long jiffy_since = 0;
2137  	struct nlattr *kind = NULL;
2138  	struct nla_bitfield32 bf;
2139  	u32 msecs_since = 0;
2140  	u32 act_count = 0;
2141  
2142  	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
2143  				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
2144  	if (ret < 0)
2145  		return ret;
2146  
2147  	kind = find_dump_kind(tb);
2148  	if (kind == NULL) {
2149  		pr_info("tc_dump_action: bad action kind\n");
2150  		return 0;
2151  	}
2152  
2153  	a_o = tc_lookup_action(kind);
2154  	if (a_o == NULL)
2155  		return 0;
2156  
2157  	cb->args[2] = 0;
2158  	if (tb[TCA_ROOT_FLAGS]) {
2159  		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
2160  		cb->args[2] = bf.value;
2161  	}
2162  
2163  	if (tb[TCA_ROOT_TIME_DELTA]) {
2164  		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
2165  	}
2166  
2167  	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2168  			cb->nlh->nlmsg_type, sizeof(*t), 0);
2169  	if (!nlh)
2170  		goto out_module_put;
2171  
2172  	if (msecs_since)
2173  		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);
2174  
2175  	t = nlmsg_data(nlh);
2176  	t->tca_family = AF_UNSPEC;
2177  	t->tca__pad1 = 0;
2178  	t->tca__pad2 = 0;
2179  	cb->args[3] = jiffy_since;
2180  	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
2181  	if (!count_attr)
2182  		goto out_module_put;
2183  
2184  	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
2185  	if (nest == NULL)
2186  		goto out_module_put;
2187  
2188  	ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL);
2189  	if (ret < 0)
2190  		goto out_module_put;
2191  
2192  	if (ret > 0) {
2193  		nla_nest_end(skb, nest);
2194  		ret = skb->len;
2195  		act_count = cb->args[1];
2196  		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
2197  		cb->args[1] = 0;
2198  	} else
2199  		nlmsg_trim(skb, b);
2200  
2201  	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2202  	if (NETLINK_CB(cb->skb).portid && ret)
2203  		nlh->nlmsg_flags |= NLM_F_MULTI;
2204  	module_put(a_o->owner);
2205  	return skb->len;
2206  
2207  out_module_put:
2208  	module_put(a_o->owner);
2209  	nlmsg_trim(skb, b);
2210  	return skb->len;
2211  }
2212  
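      /* Register the rtnetlink handlers for actions at boot; dumping is
       * only wired up for RTM_GETACTION.
       */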
2213  static int __init tc_action_init(void)
2214  {
2215  	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
2216  	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
2217  	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
2218  		      0);
2219  
2220  	return 0;
2221  }
2222  
2223  subsys_initcall(tc_action_init);
2224