Lines Matching +full:array +full:- +full:nest

1 // SPDX-License-Identifier: GPL-2.0-or-later
47 const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain); in tcf_action_goto_chain_exec()
49 res->goto_tp = rcu_dereference_bh(chain->filter_chain); in tcf_action_goto_chain_exec()
56 kfree(cookie->data); in tcf_free_cookie_rcu()
67 call_rcu(&old->rcu, tcf_free_cookie_rcu); in tcf_set_action_cookie()
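The fragments above appear to come from the TC action core of the Linux kernel (the tcf_* helpers in what looks like net/sched/act_api.c). Lines 56-67 show the user-cookie handling: the old cookie pointer is swapped out under RCU and its memory freed only after a grace period via call_rcu(). A minimal sketch of that replace-then-defer-free pattern, using hypothetical demo_* names rather than the kernel's own cookie structure:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_cookie {
        struct rcu_head rcu;
        u8 *data;
        u32 len;
};

static void demo_cookie_free_rcu(struct rcu_head *p)
{
        struct demo_cookie *cookie = container_of(p, struct demo_cookie, rcu);

        kfree(cookie->data);
        kfree(cookie);
}

/* Publish @new_cookie and defer freeing of the previous one to RCU. */
static void demo_set_cookie(struct demo_cookie __rcu **slot,
                            struct demo_cookie *new_cookie)
{
        struct demo_cookie *old;

        /* '1' as the lockdep condition mirrors line 110 of the listing. */
        old = rcu_replace_pointer(*slot, new_cookie, 1);
        if (old)
                call_rcu(&old->rcu, demo_cookie_free_rcu);
}

Readers of such an RCU-protected pointer (as in tcf_action_goto_chain_exec() above) dereference it with rcu_dereference_bh()/rcu_dereference() inside the appropriate read-side section.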
74 int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL; in tcf_action_check_ctrlact()
78 ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0; in tcf_action_check_ctrlact()
89 ret = -EINVAL; in tcf_action_check_ctrlact()
94 *newchain = tcf_chain_get_by_act(tp->chain->block, chain_index); in tcf_action_check_ctrlact()
96 ret = -ENOMEM; in tcf_action_check_ctrlact()
109 a->tcfa_action = action; in tcf_action_set_ctrlact()
110 goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1); in tcf_action_set_ctrlact()
122 struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1); in free_tcf()
124 free_percpu(p->cpu_bstats); in free_tcf()
125 free_percpu(p->cpu_bstats_hw); in free_tcf()
126 free_percpu(p->cpu_qstats); in free_tcf()
128 tcf_set_action_cookie(&p->user_cookie, NULL); in free_tcf()
138 act->in_hw_count = hw_count; in offload_action_hw_count_set()
144 act->in_hw_count += hw_count; in offload_action_hw_count_inc()
150 act->in_hw_count = act->in_hw_count > hw_count ? in offload_action_hw_count_dec()
151 act->in_hw_count - hw_count : 0; in offload_action_hw_count_dec()
187 fl_action->extack = extack; in offload_action_init()
188 fl_action->command = cmd; in offload_action_init()
189 fl_action->index = act->tcfa_index; in offload_action_init()
190 fl_action->cookie = (unsigned long)act; in offload_action_init()
192 if (act->ops->offload_act_setup) { in offload_action_init()
193 spin_lock_bh(&act->tcfa_lock); in offload_action_init()
194 err = act->ops->offload_act_setup(act, fl_action, NULL, in offload_action_init()
196 spin_unlock_bh(&act->tcfa_lock); in offload_action_init()
200 return -EOPNOTSUPP; in offload_action_init()
251 bool skip_sw = tc_act_skip_sw(action->tcfa_flags); in tcf_action_offload_add_ex()
259 if (tc_act_skip_hw(action->tcfa_flags)) in tcf_action_offload_add_ex()
265 return -ENOMEM; in tcf_action_offload_add_ex()
271 err = tc_setup_action(&fl_action->action, actions, 0, extack); in tcf_action_offload_add_ex()
284 err = -EINVAL; in tcf_action_offload_add_ex()
286 tc_cleanup_offload_action(&fl_action->action); in tcf_action_offload_add_ex()
319 action->used_hw_stats = fl_act.stats.used_hw_stats; in tcf_action_update_hw_stats()
320 action->used_hw_stats_valid = true; in tcf_action_update_hw_stats()
322 return -EOPNOTSUPP; in tcf_action_update_hw_stats()
348 if (!cb && action->in_hw_count != in_hw_count) in tcf_action_offload_del_ex()
349 return -EINVAL; in tcf_action_offload_del_ex()
366 if (p->ops->cleanup) in tcf_action_cleanup()
367 p->ops->cleanup(p); in tcf_action_cleanup()
369 gen_kill_estimator(&p->tcfa_rate_est); in tcf_action_cleanup()
375 struct tcf_idrinfo *idrinfo = p->idrinfo; in __tcf_action_put()
377 if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) { in __tcf_action_put()
379 atomic_dec(&p->tcfa_bindcnt); in __tcf_action_put()
380 idr_remove(&idrinfo->action_idr, p->tcfa_index); in __tcf_action_put()
381 mutex_unlock(&idrinfo->lock); in __tcf_action_put()
388 atomic_dec(&p->tcfa_bindcnt); in __tcf_action_put()
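In __tcf_action_put() (lines 375-388 above) the final reference drop and the IDR removal are fused: refcount_dec_and_mutex_lock() only takes idrinfo->lock when the count actually reaches zero, so idr_remove() always runs under the lock while non-final puts stay lock-free. A sketch of that last-put idiom with made-up names, assuming a similar lock-plus-IDR layout:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_table {
        struct mutex lock;
        struct idr idr;
};

struct demo_obj {
        refcount_t refcnt;
        u32 index;
};

/* Returns true when this was the last reference and the object is gone. */
static bool demo_obj_put(struct demo_table *tbl, struct demo_obj *obj)
{
        /* The mutex is acquired only on the transition to zero. */
        if (refcount_dec_and_mutex_lock(&obj->refcnt, &tbl->lock)) {
                idr_remove(&tbl->idr, obj->index);
                mutex_unlock(&tbl->lock);
                kfree(obj);
                return true;
        }
        return false;
}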
410 if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0) in __tcf_idr_release()
411 return -EPERM; in __tcf_idr_release()
422 const struct tc_action_ops *ops = a->ops; in tcf_idr_release()
427 module_put(ops->owner); in tcf_idr_release()
438 user_cookie = rcu_dereference(act->user_cookie); in tcf_action_shared_attrs_size()
441 cookie_len = nla_total_size(user_cookie->len); in tcf_action_shared_attrs_size()
472 if (act->ops->get_fill_size) in tcf_action_fill_size()
473 return act->ops->get_fill_size(act) + sz; in tcf_action_fill_size()
483 if (nla_put_string(skb, TCA_ACT_KIND, a->ops->kind)) in tcf_action_dump_terse()
487 if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index)) in tcf_action_dump_terse()
491 cookie = rcu_dereference(a->user_cookie); in tcf_action_dump_terse()
493 if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) { in tcf_action_dump_terse()
504 return -1; in tcf_action_dump_terse()
510 int err = 0, index = -1, s_i = 0, n_i = 0; in tcf_dump_walker()
511 u32 act_flags = cb->args[2]; in tcf_dump_walker()
512 unsigned long jiffy_since = cb->args[3]; in tcf_dump_walker()
513 struct nlattr *nest; in tcf_dump_walker() local
514 struct idr *idr = &idrinfo->action_idr; in tcf_dump_walker()
519 mutex_lock(&idrinfo->lock); in tcf_dump_walker()
521 s_i = cb->args[0]; in tcf_dump_walker()
532 (unsigned long)p->tcfa_tm.lastuse)) in tcf_dump_walker()
537 nest = nla_nest_start_noflag(skb, n_i); in tcf_dump_walker()
538 if (!nest) { in tcf_dump_walker()
539 index--; in tcf_dump_walker()
546 index--; in tcf_dump_walker()
547 nlmsg_trim(skb, nest); in tcf_dump_walker()
550 nla_nest_end(skb, nest); in tcf_dump_walker()
558 cb->args[0] = index + 1; in tcf_dump_walker()
560 mutex_unlock(&idrinfo->lock); in tcf_dump_walker()
563 cb->args[1] = n_i; in tcf_dump_walker()
568 nla_nest_cancel(skb, nest); in tcf_dump_walker()
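The tcf_dump_walker() fragments (lines 510-568) walk the per-netns IDR under idrinfo->lock and emit each action as a nested netlink attribute, trimming or cancelling the nest when the skb runs out of room. The bare nla_nest_start_noflag() / nla_nest_end() / nla_nest_cancel() skeleton, separated from the walker, looks roughly like this (the attribute type used here is purely illustrative):

#include <linux/errno.h>
#include <net/netlink.h>

/* Emit one object as a nested attribute; roll the nest back on overflow. */
static int demo_dump_one(struct sk_buff *skb, int attrtype, u32 index)
{
        struct nlattr *nest;

        nest = nla_nest_start_noflag(skb, attrtype);
        if (!nest)
                return -EMSGSIZE;

        if (nla_put_u32(skb, 1 /* hypothetical ..._INDEX attribute */, index))
                goto cancel;

        nla_nest_end(skb, nest);
        return 0;

cancel:
        nla_nest_cancel(skb, nest);
        return -EMSGSIZE;
}

nla_nest_cancel() trims the skb back to the start of the nest, which is why the walker above can simply decrement its index and stop without leaving a half-written attribute behind.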
574 if (atomic_read(&p->tcfa_bindcnt) > 0) in tcf_idr_release_unsafe()
575 return -EPERM; in tcf_idr_release_unsafe()
577 if (refcount_dec_and_test(&p->tcfa_refcnt)) { in tcf_idr_release_unsafe()
578 idr_remove(&p->idrinfo->action_idr, p->tcfa_index); in tcf_idr_release_unsafe()
590 struct nlattr *nest; in tcf_del_walker() local
592 int ret = -EINVAL; in tcf_del_walker()
593 struct idr *idr = &idrinfo->action_idr; in tcf_del_walker()
598 nest = nla_nest_start_noflag(skb, 0); in tcf_del_walker()
599 if (nest == NULL) in tcf_del_walker()
601 if (nla_put_string(skb, TCA_ACT_KIND, ops->kind)) in tcf_del_walker()
605 mutex_lock(&idrinfo->lock); in tcf_del_walker()
611 module_put(ops->owner); in tcf_del_walker()
616 mutex_unlock(&idrinfo->lock); in tcf_del_walker()
627 nla_nest_end(skb, nest); in tcf_del_walker()
631 nla_nest_cancel(skb, nest); in tcf_del_walker()
640 struct tcf_idrinfo *idrinfo = tn->idrinfo; in tcf_generic_walker()
649 return -EINVAL; in tcf_generic_walker()
656 struct tcf_idrinfo *idrinfo = tn->idrinfo; in tcf_idr_search()
659 mutex_lock(&idrinfo->lock); in tcf_idr_search()
660 p = idr_find(&idrinfo->action_idr, index); in tcf_idr_search()
664 refcount_inc(&p->tcfa_refcnt); in tcf_idr_search()
665 mutex_unlock(&idrinfo->lock); in tcf_idr_search()
680 struct tc_action_net *tn = net_generic(net, ops->net_id); in __tcf_generic_walker()
682 if (unlikely(ops->walk)) in __tcf_generic_walker()
683 return ops->walk(net, skb, cb, type, ops, extack); in __tcf_generic_walker()
692 struct tc_action_net *tn = net_generic(net, ops->net_id); in __tcf_idr_search()
694 if (unlikely(ops->lookup)) in __tcf_idr_search()
695 return ops->lookup(net, a, index); in __tcf_idr_search()
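__tcf_generic_walker() and __tcf_idr_search() (lines 680-695) resolve per-network-namespace state with net_generic(net, ops->net_id) before falling back to the generic IDR helpers. A sketch of how such per-netns state is typically declared and registered, with hypothetical demo_* names (the real code keeps a struct tc_action_net per action kind):

#include <linux/idr.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/* Hypothetical per-netns state, looked up via net_generic(). */
struct demo_pernet {
        struct idr actions;
};

static unsigned int demo_net_id;

static int __net_init demo_net_init(struct net *net)
{
        struct demo_pernet *pn = net_generic(net, demo_net_id);

        idr_init(&pn->actions);
        return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
        struct demo_pernet *pn = net_generic(net, demo_net_id);

        idr_destroy(&pn->actions);
}

static struct pernet_operations demo_net_ops = {
        .init = demo_net_init,
        .exit = demo_net_exit,
        .id   = &demo_net_id,
        .size = sizeof(struct demo_pernet),
};

/* register_pernet_subsys(&demo_net_ops) would wire this up at init time. */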
705 mutex_lock(&idrinfo->lock); in tcf_idr_delete_index()
706 p = idr_find(&idrinfo->action_idr, index); in tcf_idr_delete_index()
708 mutex_unlock(&idrinfo->lock); in tcf_idr_delete_index()
709 return -ENOENT; in tcf_idr_delete_index()
712 if (!atomic_read(&p->tcfa_bindcnt)) { in tcf_idr_delete_index()
713 if (refcount_dec_and_test(&p->tcfa_refcnt)) { in tcf_idr_delete_index()
714 struct module *owner = p->ops->owner; in tcf_idr_delete_index()
716 WARN_ON(p != idr_remove(&idrinfo->action_idr, in tcf_idr_delete_index()
717 p->tcfa_index)); in tcf_idr_delete_index()
718 mutex_unlock(&idrinfo->lock); in tcf_idr_delete_index()
726 ret = -EPERM; in tcf_idr_delete_index()
729 mutex_unlock(&idrinfo->lock); in tcf_idr_delete_index()
737 struct tc_action *p = kzalloc(ops->size, GFP_KERNEL); in tcf_idr_create()
738 struct tcf_idrinfo *idrinfo = tn->idrinfo; in tcf_idr_create()
739 int err = -ENOMEM; in tcf_idr_create()
742 return -ENOMEM; in tcf_idr_create()
743 refcount_set(&p->tcfa_refcnt, 1); in tcf_idr_create()
745 atomic_set(&p->tcfa_bindcnt, 1); in tcf_idr_create()
748 p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync); in tcf_idr_create()
749 if (!p->cpu_bstats) in tcf_idr_create()
751 p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync); in tcf_idr_create()
752 if (!p->cpu_bstats_hw) in tcf_idr_create()
754 p->cpu_qstats = alloc_percpu(struct gnet_stats_queue); in tcf_idr_create()
755 if (!p->cpu_qstats) in tcf_idr_create()
758 gnet_stats_basic_sync_init(&p->tcfa_bstats); in tcf_idr_create()
759 gnet_stats_basic_sync_init(&p->tcfa_bstats_hw); in tcf_idr_create()
760 spin_lock_init(&p->tcfa_lock); in tcf_idr_create()
761 p->tcfa_index = index; in tcf_idr_create()
762 p->tcfa_tm.install = jiffies; in tcf_idr_create()
763 p->tcfa_tm.lastuse = jiffies; in tcf_idr_create()
764 p->tcfa_tm.firstuse = 0; in tcf_idr_create()
765 p->tcfa_flags = flags; in tcf_idr_create()
767 err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats, in tcf_idr_create()
768 &p->tcfa_rate_est, in tcf_idr_create()
769 &p->tcfa_lock, false, est); in tcf_idr_create()
774 p->idrinfo = idrinfo; in tcf_idr_create()
775 __module_get(ops->owner); in tcf_idr_create()
776 p->ops = ops; in tcf_idr_create()
780 free_percpu(p->cpu_qstats); in tcf_idr_create()
782 free_percpu(p->cpu_bstats_hw); in tcf_idr_create()
784 free_percpu(p->cpu_bstats); in tcf_idr_create()
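tcf_idr_create() (lines 737-784) optionally allocates three per-CPU counter sets and, on any failure, unwinds the ones already allocated in reverse order. The allocate-and-unwind shape in isolation, with the action bookkeeping stripped out:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/gen_stats.h>

struct demo_stats {
        struct gnet_stats_basic_sync __percpu *bstats;
        struct gnet_stats_basic_sync __percpu *bstats_hw;
        struct gnet_stats_queue __percpu *qstats;
};

static int demo_alloc_pcpu_stats(struct demo_stats *s)
{
        s->bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
        if (!s->bstats)
                return -ENOMEM;
        s->bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
        if (!s->bstats_hw)
                goto err_hw;
        s->qstats = alloc_percpu(struct gnet_stats_queue);
        if (!s->qstats)
                goto err_qstats;
        return 0;

err_qstats:
        free_percpu(s->bstats_hw);
err_hw:
        free_percpu(s->bstats);
        return -ENOMEM;
}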
806 struct tcf_idrinfo *idrinfo = tn->idrinfo; in tcf_idr_cleanup()
808 mutex_lock(&idrinfo->lock); in tcf_idr_cleanup()
809 /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */ in tcf_idr_cleanup()
810 WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index))); in tcf_idr_cleanup()
811 mutex_unlock(&idrinfo->lock); in tcf_idr_cleanup()
820 * May return -EAGAIN for binding actions in case of a parallel add/delete on
827 struct tcf_idrinfo *idrinfo = tn->idrinfo; in tcf_idr_check_alloc()
834 p = idr_find(&idrinfo->action_idr, *index); in tcf_idr_check_alloc()
841 return -EAGAIN; in tcf_idr_check_alloc()
851 if (!refcount_inc_not_zero(&p->tcfa_refcnt)) { in tcf_idr_check_alloc()
854 return -EAGAIN; in tcf_idr_check_alloc()
858 atomic_inc(&p->tcfa_bindcnt); in tcf_idr_check_alloc()
873 mutex_lock(&idrinfo->lock); in tcf_idr_check_alloc()
874 ret = idr_alloc_u32(&idrinfo->action_idr, ERR_PTR(-EBUSY), index, max, in tcf_idr_check_alloc()
876 mutex_unlock(&idrinfo->lock); in tcf_idr_check_alloc()
881 if (ret == -ENOSPC && *index == max) in tcf_idr_check_alloc()
882 ret = -EAGAIN; in tcf_idr_check_alloc()
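tcf_idr_check_alloc() (lines 827-882) reserves the requested index up front by storing the sentinel ERR_PTR(-EBUSY) in the IDR. Concurrent lookups can then tell "reserved but not yet initialised" apart from a live action, and the placeholder is later overwritten with the real pointer via idr_replace(), as in the tcf_idr_insert_many() fragment at line 1312 further down. A simplified sketch of that reserve/publish dance, with hypothetical names and without the bind/refcount handling:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

/* Reserve @*index, or pick a free one when *index == 0. */
static int demo_reserve_index(struct idr *idr, struct mutex *lock, u32 *index)
{
        u32 max = *index ? *index : UINT_MAX;
        int ret;

        if (!*index)
                *index = 1;     /* auto-allocation starts at 1 */

        mutex_lock(lock);
        ret = idr_alloc_u32(idr, ERR_PTR(-EBUSY), index, max, GFP_KERNEL);
        mutex_unlock(lock);
        return ret;
}

/* Publish the fully initialised object over the -EBUSY placeholder. */
static void demo_publish(struct idr *idr, struct mutex *lock,
                         void *obj, u32 index)
{
        mutex_lock(lock);
        idr_replace(idr, obj, index);
        mutex_unlock(lock);
}

This is also why tcf_idr_cleanup() above (line 810) expects idr_remove() to return an IS_ERR() pointer: an aborted create only ever leaves the placeholder behind.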
891 struct idr *idr = &idrinfo->action_idr; in tcf_idrinfo_destroy()
900 module_put(ops->owner); in tcf_idrinfo_destroy()
904 idr_destroy(&idrinfo->action_idr); in tcf_idrinfo_destroy()
929 if (id_ptr->id == id) { in tcf_pernet_add_id_list()
930 ret = -EEXIST; in tcf_pernet_add_id_list()
937 ret = -ENOMEM; in tcf_pernet_add_id_list()
940 id_ptr->id = id; in tcf_pernet_add_id_list()
942 list_add_tail(&id_ptr->list, &act_pernet_id_list); in tcf_pernet_add_id_list()
955 if (id_ptr->id == id) { in tcf_pernet_del_id_list()
956 list_del(&id_ptr->list); in tcf_pernet_del_id_list()
970 if (!act->act || !act->dump || !act->init) in tcf_register_action()
971 return -EINVAL; in tcf_register_action()
981 if (ops->id) { in tcf_register_action()
982 ret = tcf_pernet_add_id_list(*ops->id); in tcf_register_action()
989 if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) { in tcf_register_action()
990 ret = -EEXIST; in tcf_register_action()
994 list_add_tail(&act->head, &act_base); in tcf_register_action()
1001 if (ops->id) in tcf_register_action()
1002 tcf_pernet_del_id_list(*ops->id); in tcf_register_action()
1013 int err = -ENOENT; in tcf_unregister_action()
1018 list_del(&act->head); in tcf_unregister_action()
1026 if (ops->id) in tcf_unregister_action()
1027 tcf_pernet_del_id_list(*ops->id); in tcf_unregister_action()
1041 if (strcmp(kind, a->kind) == 0) { in tc_lookup_action_n()
1042 if (try_module_get(a->owner)) in tc_lookup_action_n()
1060 if (nla_strcmp(kind, a->kind) == 0) { in tc_lookup_action()
1061 if (try_module_get(a->owner)) in tc_lookup_action()
1090 jmp_prgcnt -= 1; in tcf_action_exec()
1094 if (tc_act_skip_sw(a->tcfa_flags)) in tcf_action_exec()
1101 if (--repeat_ttl != 0) in tcf_action_exec()
1113 jmp_ttl -= 1; in tcf_action_exec()
1120 if (unlikely(!rcu_access_pointer(a->goto_chain))) { in tcf_action_exec()
1144 ops = a->ops; in tcf_action_destroy()
1147 module_put(ops->owner); in tcf_action_destroy()
1159 /* Put all actions in this array, skip those NULL's. */
1170 ops = a->ops; in tcf_action_put_many()
1172 module_put(ops->owner); in tcf_action_put_many()
1179 return a->ops->dump(skb, a, bind, ref); in tcf_action_dump_old()
1185 int err = -EINVAL; in tcf_action_dump_1()
1187 struct nlattr *nest; in tcf_action_dump_1() local
1193 if (a->hw_stats != TCA_ACT_HW_STATS_ANY && in tcf_action_dump_1()
1195 a->hw_stats, TCA_ACT_HW_STATS_ANY)) in tcf_action_dump_1()
1198 if (a->used_hw_stats_valid && in tcf_action_dump_1()
1200 a->used_hw_stats, TCA_ACT_HW_STATS_ANY)) in tcf_action_dump_1()
1203 flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK; in tcf_action_dump_1()
1209 if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count)) in tcf_action_dump_1()
1212 nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS); in tcf_action_dump_1()
1213 if (nest == NULL) in tcf_action_dump_1()
1217 nla_nest_end(skb, nest); in tcf_action_dump_1()
1223 return -1; in tcf_action_dump_1()
1231 int err = -EINVAL, i; in tcf_action_dump()
1232 struct nlattr *nest; in tcf_action_dump() local
1236 nest = nla_nest_start_noflag(skb, i + 1); in tcf_action_dump()
1237 if (nest == NULL) in tcf_action_dump()
1243 nla_nest_end(skb, nest); in tcf_action_dump()
1249 err = -EINVAL; in tcf_action_dump()
1251 nla_nest_cancel(skb, nest); in tcf_action_dump()
1261 c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL); in nla_memdup_cookie()
1262 if (!c->data) { in nla_memdup_cookie()
1266 c->len = nla_len(tb[TCA_ACT_COOKIE]); in nla_memdup_cookie()
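nla_memdup_cookie() (lines 1261-1266) simply duplicates the TCA_ACT_COOKIE payload with nla_memdup() and records its length. The same copy-an-attribute idiom in isolation, with a hypothetical container struct:

#include <linux/errno.h>
#include <linux/slab.h>
#include <net/netlink.h>

struct demo_blob {
        u8 *data;
        u32 len;
};

/* Copy an attribute's payload into a heap buffer; the caller frees data. */
static int demo_copy_attr(struct demo_blob *blob, const struct nlattr *attr)
{
        blob->data = nla_memdup(attr, GFP_KERNEL);
        if (!blob->data)
                return -ENOMEM;
        blob->len = nla_len(attr);
        return 0;
}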
1307 idrinfo = a->idrinfo; in tcf_idr_insert_many()
1308 mutex_lock(&idrinfo->lock); in tcf_idr_insert_many()
1309 /* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if in tcf_idr_insert_many()
1312 idr_replace(&idrinfo->action_idr, a, a->tcfa_index); in tcf_idr_insert_many()
1313 mutex_unlock(&idrinfo->lock); in tcf_idr_insert_many()
1332 err = -EINVAL; in tc_action_load_ops()
1345 return ERR_PTR(-EINVAL); in tc_action_load_ops()
1364 * indicate this using -EAGAIN. in tc_action_load_ops()
1367 module_put(a_o->owner); in tc_action_load_ops()
1368 return ERR_PTR(-EAGAIN); in tc_action_load_ops()
1372 return ERR_PTR(-ENOENT); in tc_action_load_ops()
1401 err = -ENOMEM; in tcf_action_init_1()
1409 err = -EINVAL; in tcf_action_init_1()
1414 err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp, in tcf_action_init_1()
1417 err = a_o->init(net, nla, est, &a, tp, userflags.value | flags, in tcf_action_init_1()
1425 tcf_set_action_cookie(&a->user_cookie, user_cookie); in tcf_action_init_1()
1428 a->hw_stats = hw_stats; in tcf_action_init_1()
1434 kfree(user_cookie->data); in tcf_action_init_1()
1475 ops[i - 1] = a_o; in tcf_action_init()
1479 act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1], in tcf_action_init()
1480 &init_res[i - 1], flags, extack); in tcf_action_init()
1487 actions[i - 1] = act; in tcf_action_init()
1492 if (tc_act_bind(act->tcfa_flags)) { in tcf_action_init()
1500 if ((tc_act_skip_sw(act->tcfa_flags) && !skip_sw) || in tcf_action_init()
1501 (tc_act_skip_hw(act->tcfa_flags) && !skip_hw)) { in tcf_action_init()
1504 err = -EINVAL; in tcf_action_init()
1508 act->tcfa_flags |= TCA_ACT_FLAGS_SKIP_SW; in tcf_action_init()
1510 act->tcfa_flags |= TCA_ACT_FLAGS_SKIP_HW; in tcf_action_init()
1515 if (skip_sw != tc_act_skip_sw(act->tcfa_flags) || in tcf_action_init()
1516 skip_hw != tc_act_skip_hw(act->tcfa_flags)) { in tcf_action_init()
1519 err = -EINVAL; in tcf_action_init()
1524 if (tc_act_skip_sw(act->tcfa_flags) && err) in tcf_action_init()
1535 err = i - 1; in tcf_action_init()
1543 module_put(ops[i]->owner); in tcf_action_init()
1551 if (a->cpu_bstats) { in tcf_action_update_stats()
1552 _bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); in tcf_action_update_stats()
1554 this_cpu_ptr(a->cpu_qstats)->drops += drops; in tcf_action_update_stats()
1557 _bstats_update(this_cpu_ptr(a->cpu_bstats_hw), in tcf_action_update_stats()
1562 _bstats_update(&a->tcfa_bstats, bytes, packets); in tcf_action_update_stats()
1563 a->tcfa_qstats.drops += drops; in tcf_action_update_stats()
1565 _bstats_update(&a->tcfa_bstats_hw, bytes, packets); in tcf_action_update_stats()
1582 if (p->type == TCA_OLD_COMPAT) in tcf_action_copy_stats()
1586 &p->tcfa_lock, &d, in tcf_action_copy_stats()
1592 &p->tcfa_lock, &d, TCA_ACT_PAD); in tcf_action_copy_stats()
1597 if (gnet_stats_copy_basic(&d, p->cpu_bstats, in tcf_action_copy_stats()
1598 &p->tcfa_bstats, false) < 0 || in tcf_action_copy_stats()
1599 gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw, in tcf_action_copy_stats()
1600 &p->tcfa_bstats_hw, false) < 0 || in tcf_action_copy_stats()
1601 gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 || in tcf_action_copy_stats()
1602 gnet_stats_copy_queue(&d, p->cpu_qstats, in tcf_action_copy_stats()
1603 &p->tcfa_qstats, in tcf_action_copy_stats()
1604 p->tcfa_qstats.qlen) < 0) in tcf_action_copy_stats()
1613 return -1; in tcf_action_copy_stats()
1623 struct nlattr *nest; in tca_get_fill() local
1629 t->tca_family = AF_UNSPEC; in tca_get_fill()
1630 t->tca__pad1 = 0; in tca_get_fill()
1631 t->tca__pad2 = 0; in tca_get_fill()
1633 if (extack && extack->_msg && in tca_get_fill()
1634 nla_put_string(skb, TCA_ROOT_EXT_WARN_MSG, extack->_msg)) in tca_get_fill()
1637 nest = nla_nest_start_noflag(skb, TCA_ACT_TAB); in tca_get_fill()
1638 if (!nest) in tca_get_fill()
1644 nla_nest_end(skb, nest); in tca_get_fill()
1646 nlh->nlmsg_len = skb_tail_pointer(skb) - b; in tca_get_fill()
1648 return skb->len; in tca_get_fill()
1652 return -1; in tca_get_fill()
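tca_get_fill() (lines 1623-1652) follows the standard netlink fill pattern: nlmsg_put() a struct tcamsg header, optionally attach the extack warning string, open a TCA_ACT_TAB nest, dump the actions into it, close the nest and fix up nlmsg_len from the tail pointer. A stripped-down sketch of that shape, closing the message with nlmsg_end() (which performs the same length fix-up the listing does by hand):

#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/socket.h>
#include <net/netlink.h>

/* Build a minimal actions message with an empty TCA_ACT_TAB nest. */
static int demo_fill_msg(struct sk_buff *skb, u32 portid, u32 seq, int type)
{
        struct nlmsghdr *nlh;
        struct nlattr *nest;
        struct tcamsg *t;

        nlh = nlmsg_put(skb, portid, seq, type, sizeof(*t), 0);
        if (!nlh)
                return -EMSGSIZE;

        t = nlmsg_data(nlh);
        t->tca_family = AF_UNSPEC;
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;

        nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
        if (!nest)
                goto cancel;
        /* ... per-action nested attributes would be emitted here ... */
        nla_nest_end(skb, nest);

        nlmsg_end(skb, nlh);
        return skb->len;

cancel:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}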
1664 return -ENOBUFS; in tcf_get_notify()
1665 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, in tcf_get_notify()
1669 return -EINVAL; in tcf_get_notify()
1690 err = -EINVAL; in tcf_action_get_1()
1698 err = -EINVAL; in tcf_action_get_1()
1704 err = -ENOENT; in tcf_action_get_1()
1710 module_put(ops->owner); in tcf_action_get_1()
1714 module_put(ops->owner); in tcf_action_get_1()
1728 struct nlattr *nest; in tca_action_flush() local
1732 int err = -ENOMEM; in tca_action_flush()
1745 err = -EINVAL; in tca_action_flush()
1753 nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, in tca_action_flush()
1760 t->tca_family = AF_UNSPEC; in tca_action_flush()
1761 t->tca__pad1 = 0; in tca_action_flush()
1762 t->tca__pad2 = 0; in tca_action_flush()
1764 nest = nla_nest_start_noflag(skb, TCA_ACT_TAB); in tca_action_flush()
1765 if (!nest) { in tca_action_flush()
1772 nla_nest_cancel(skb, nest); in tca_action_flush()
1776 nla_nest_end(skb, nest); in tca_action_flush()
1778 nlh->nlmsg_len = skb_tail_pointer(skb) - b; in tca_action_flush()
1779 nlh->nlmsg_flags |= NLM_F_ROOT; in tca_action_flush()
1780 module_put(ops->owner); in tca_action_flush()
1782 n->nlmsg_flags & NLM_F_ECHO); in tca_action_flush()
1789 module_put(ops->owner); in tca_action_flush()
1801 const struct tc_action_ops *ops = a->ops; in tcf_action_delete()
1805 struct tcf_idrinfo *idrinfo = a->idrinfo; in tcf_action_delete()
1806 u32 act_index = a->tcfa_index; in tcf_action_delete()
1811 module_put(ops->owner); in tcf_action_delete()
1831 const struct tc_action_ops *ops = action->ops; in tcf_reoffload_del_notify()
1838 return -ENOBUFS; in tcf_reoffload_del_notify()
1842 return -EINVAL; in tcf_reoffload_del_notify()
1847 module_put(ops->owner); in tcf_reoffload_del_notify()
1871 return -EINVAL; in tcf_action_reoffload_cb()
1878 act_id = id_ptr->id; in tcf_action_reoffload_cb()
1882 idrinfo = tn->idrinfo; in tcf_action_reoffload_cb()
1886 mutex_lock(&idrinfo->lock); in tcf_action_reoffload_cb()
1887 idr = &idrinfo->action_idr; in tcf_action_reoffload_cb()
1889 if (IS_ERR(p) || tc_act_bind(p->tcfa_flags)) in tcf_action_reoffload_cb()
1901 if (tc_act_skip_sw(p->tcfa_flags) && in tcf_action_reoffload_cb()
1905 mutex_unlock(&idrinfo->lock); in tcf_action_reoffload_cb()
1924 return -ENOBUFS; in tcf_del_notify()
1926 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION, in tcf_del_notify()
1930 return -EINVAL; in tcf_del_notify()
1942 n->nlmsg_flags & NLM_F_ECHO); in tcf_del_notify()
1961 if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) { in tca_action_gd()
1966 return -EINVAL; in tca_action_gd()
1976 actions[i - 1] = act; in tca_action_gd()
2003 return -ENOBUFS; in tcf_add_notify()
2005 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags, in tcf_add_notify()
2009 return -EINVAL; in tcf_add_notify()
2013 n->nlmsg_flags & NLM_F_ECHO); in tcf_add_notify()
2028 if (ret != -EAGAIN) in tcf_action_add()
2054 struct net *net = sock_net(skb->sk); in tc_ctl_action()
2060 if ((n->nlmsg_type != RTM_GETACTION) && in tc_ctl_action()
2062 return -EPERM; in tc_ctl_action()
2071 return -EINVAL; in tc_ctl_action()
2074 /* n->nlmsg_flags & NLM_F_CREATE */ in tc_ctl_action()
2075 switch (n->nlmsg_type) { in tc_ctl_action()
2083 if (n->nlmsg_flags & NLM_F_REPLACE) in tc_ctl_action()
2127 struct net *net = sock_net(skb->sk); in tc_dump_action()
2130 struct nlattr *nest; in tc_dump_action() local
2133 struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh); in tc_dump_action()
2142 ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb, in tc_dump_action()
2143 TCA_ROOT_MAX, tcaa_policy, cb->extack); in tc_dump_action()
2157 cb->args[2] = 0; in tc_dump_action()
2160 cb->args[2] = bf.value; in tc_dump_action()
2167 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, in tc_dump_action()
2168 cb->nlh->nlmsg_type, sizeof(*t), 0); in tc_dump_action()
2173 jiffy_since = jiffies - msecs_to_jiffies(msecs_since); in tc_dump_action()
2176 t->tca_family = AF_UNSPEC; in tc_dump_action()
2177 t->tca__pad1 = 0; in tc_dump_action()
2178 t->tca__pad2 = 0; in tc_dump_action()
2179 cb->args[3] = jiffy_since; in tc_dump_action()
2184 nest = nla_nest_start_noflag(skb, TCA_ACT_TAB); in tc_dump_action()
2185 if (nest == NULL) in tc_dump_action()
2193 nla_nest_end(skb, nest); in tc_dump_action()
2194 ret = skb->len; in tc_dump_action()
2195 act_count = cb->args[1]; in tc_dump_action()
2197 cb->args[1] = 0; in tc_dump_action()
2201 nlh->nlmsg_len = skb_tail_pointer(skb) - b; in tc_dump_action()
2202 if (NETLINK_CB(cb->skb).portid && ret) in tc_dump_action()
2203 nlh->nlmsg_flags |= NLM_F_MULTI; in tc_dump_action()
2204 module_put(a_o->owner); in tc_dump_action()
2205 return skb->len; in tc_dump_action()
2208 module_put(a_o->owner); in tc_dump_action()
2210 return skb->len; in tc_dump_action()