Lines matching full:block in net/sched/cls_api.c

181 struct tcf_block *block = chain->block; in tcf_proto_signal_destroying() local
183 mutex_lock(&block->proto_destroy_lock); in tcf_proto_signal_destroying()
184 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node, in tcf_proto_signal_destroying()
186 mutex_unlock(&block->proto_destroy_lock); in tcf_proto_signal_destroying()
205 hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter, in tcf_proto_exists_destroying()
220 struct tcf_block *block = chain->block; in tcf_proto_signal_destroyed() local
222 mutex_lock(&block->proto_destroy_lock); in tcf_proto_signal_destroyed()
225 mutex_unlock(&block->proto_destroy_lock); in tcf_proto_signal_destroyed()
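
The excerpt above shows the destroy-signalling handshake: writers add and remove tcf_proto entries in block->proto_destroy_ht under proto_destroy_lock, while tcf_proto_exists_destroying() walks the table with the RCU iterator and no lock. Below is a minimal userspace model of that publish pattern; it is an assumption-laden stand-in (C11 atomics approximating the RCU publish, a plain list instead of a hashtable, no deferred reclamation), not the kernel code.

/* Writers serialize on a mutex; readers walk the list locklessly,
 * relying on release/acquire ordering for the publish step.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct destroy_node {
	unsigned long key;                 /* models the tcf_proto identity */
	struct destroy_node *_Atomic next;
};

static struct destroy_node *_Atomic destroy_list;
static pthread_mutex_t destroy_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writer side: models tcf_proto_signal_destroying(). */
static void signal_destroying(struct destroy_node *n)
{
	pthread_mutex_lock(&destroy_lock);
	atomic_store_explicit(&n->next,
			      atomic_load_explicit(&destroy_list,
						   memory_order_relaxed),
			      memory_order_relaxed);
	/* Release store publishes the fully initialized node. */
	atomic_store_explicit(&destroy_list, n, memory_order_release);
	pthread_mutex_unlock(&destroy_lock);
}

/* Reader side: models tcf_proto_exists_destroying(); no lock taken. */
static bool exists_destroying(unsigned long key)
{
	struct destroy_node *n =
		atomic_load_explicit(&destroy_list, memory_order_acquire);

	for (; n; n = atomic_load_explicit(&n->next, memory_order_acquire))
		if (n->key == key)
			return true;
	return false;
}

int main(void)
{
	static struct destroy_node n = { .key = 42 };

	signal_destroying(&n);
	return exists_destroying(42) ? 0 : 1;
}
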
460 #define ASSERT_BLOCK_LOCKED(block) \ argument
461 lockdep_assert_held(&(block)->lock)
469 static struct tcf_chain *tcf_chain_create(struct tcf_block *block, in tcf_chain_create() argument
474 ASSERT_BLOCK_LOCKED(block); in tcf_chain_create()
479 list_add_tail_rcu(&chain->list, &block->chain_list); in tcf_chain_create()
481 chain->block = block; in tcf_chain_create()
485 block->chain0.chain = chain; in tcf_chain_create()
500 struct tcf_block *block = chain->block; in tcf_chain0_head_change() local
505 mutex_lock(&block->lock); in tcf_chain0_head_change()
506 list_for_each_entry(item, &block->chain0.filter_chain_list, list) in tcf_chain0_head_change()
508 mutex_unlock(&block->lock); in tcf_chain0_head_change()
511 /* Returns true if block can be safely freed. */
515 struct tcf_block *block = chain->block; in tcf_chain_detach() local
517 ASSERT_BLOCK_LOCKED(block); in tcf_chain_detach()
521 block->chain0.chain = NULL; in tcf_chain_detach()
523 if (list_empty(&block->chain_list) && in tcf_chain_detach()
524 refcount_read(&block->refcnt) == 0) in tcf_chain_detach()
530 static void tcf_block_destroy(struct tcf_block *block) in tcf_block_destroy() argument
532 mutex_destroy(&block->lock); in tcf_block_destroy()
533 mutex_destroy(&block->proto_destroy_lock); in tcf_block_destroy()
534 kfree_rcu(block, rcu); in tcf_block_destroy()
539 struct tcf_block *block = chain->block; in tcf_chain_destroy() local
544 tcf_block_destroy(block); in tcf_chain_destroy()
549 ASSERT_BLOCK_LOCKED(chain->block); in tcf_chain_hold()
556 ASSERT_BLOCK_LOCKED(chain->block); in tcf_chain_held_by_acts_only()
564 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block, in tcf_chain_lookup() argument
569 ASSERT_BLOCK_LOCKED(block); in tcf_chain_lookup()
571 list_for_each_entry(chain, &block->chain_list, list) { in tcf_chain_lookup()
579 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block, in tcf_chain_lookup_rcu() argument
584 list_for_each_entry_rcu(chain, &block->chain_list, list) { in tcf_chain_lookup_rcu()
596 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, in __tcf_chain_get() argument
603 mutex_lock(&block->lock); in __tcf_chain_get()
604 chain = tcf_chain_lookup(block, chain_index); in __tcf_chain_get()
610 chain = tcf_chain_create(block, chain_index); in __tcf_chain_get()
618 mutex_unlock(&block->lock); in __tcf_chain_get()
632 mutex_unlock(&block->lock); in __tcf_chain_get()
636 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, in tcf_chain_get() argument
639 return __tcf_chain_get(block, chain_index, create, false); in tcf_chain_get()
642 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index) in tcf_chain_get_by_act() argument
644 return __tcf_chain_get(block, chain_index, true, true); in tcf_chain_get_by_act()
652 struct tcf_block *block, struct sk_buff *oskb,
658 struct tcf_block *block = chain->block; in __tcf_chain_put() local
664 mutex_lock(&block->lock); in __tcf_chain_put()
667 mutex_unlock(&block->lock); in __tcf_chain_put()
676 /* tc_chain_notify_delete can't be called while holding block lock. in __tcf_chain_put()
677 * However, when block is unlocked chain can be changed concurrently, so in __tcf_chain_put()
688 chain->index, block, NULL, 0, 0, in __tcf_chain_put()
696 mutex_unlock(&block->lock); in __tcf_chain_put()
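
The comment quoted at lines 676-677 captures the ordering constraint in __tcf_chain_put(): the netlink notification may sleep and must not run under block->lock, so the values it needs are snapshotted while the lock is held and the notification is sent only after the unlock. A hedged sketch of that shape (the struct layout and the printf notification are invented for illustration):

#include <pthread.h>
#include <stdio.h>

struct chain {
	pthread_mutex_t *block_lock;
	unsigned int index;
	int refcnt;
};

static void notify_delete(unsigned int index)
{
	/* May sleep / take other locks; must not run under block_lock. */
	printf("RTM_DELCHAIN for chain %u\n", index);
}

static void chain_put(struct chain *c)
{
	unsigned int index;
	int is_last;

	pthread_mutex_lock(c->block_lock);
	is_last = (--c->refcnt == 0);
	index = c->index;		/* snapshot under the lock */
	pthread_mutex_unlock(c->block_lock);

	if (is_last)
		notify_delete(index);	/* safe: lock already dropped */
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct chain c = { .block_lock = &lock, .index = 7, .refcnt = 1 };

	chain_put(&c);
	return 0;
}
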
744 static int tcf_block_setup(struct tcf_block *block,
757 bo->block = flow_block; in tcf_block_offload_init()
765 static void tcf_block_unbind(struct tcf_block *block,
770 struct tcf_block *block = block_cb->indr.data; in tc_block_indr_cleanup() local
778 &block->flow_block, tcf_block_shared(block), in tc_block_indr_cleanup()
781 down_write(&block->cb_lock); in tc_block_indr_cleanup()
784 tcf_block_unbind(block, &bo); in tc_block_indr_cleanup()
785 up_write(&block->cb_lock); in tc_block_indr_cleanup()
789 static bool tcf_block_offload_in_use(struct tcf_block *block) in tcf_block_offload_in_use() argument
791 return atomic_read(&block->offloadcnt); in tcf_block_offload_in_use()
794 static int tcf_block_offload_cmd(struct tcf_block *block, in tcf_block_offload_cmd() argument
803 &block->flow_block, tcf_block_shared(block), in tcf_block_offload_cmd()
816 return tcf_block_setup(block, &bo); in tcf_block_offload_cmd()
819 flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo, in tcf_block_offload_cmd()
821 tcf_block_setup(block, &bo); in tcf_block_offload_cmd()
826 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, in tcf_block_offload_bind() argument
833 down_write(&block->cb_lock); in tcf_block_offload_bind()
835 /* If tc offload feature is disabled and the block we try to bind in tcf_block_offload_bind()
840 tcf_block_offload_in_use(block)) { in tcf_block_offload_bind()
841 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled"); in tcf_block_offload_bind()
846 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack); in tcf_block_offload_bind()
852 up_write(&block->cb_lock); in tcf_block_offload_bind()
856 if (tcf_block_offload_in_use(block)) in tcf_block_offload_bind()
860 block->nooffloaddevcnt++; in tcf_block_offload_bind()
862 up_write(&block->cb_lock); in tcf_block_offload_bind()
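
The bind path above degrades gracefully: a driver that rejects FLOW_BLOCK_BIND does not fail the bind unless the block already has filters in hardware; the device is instead counted in nooffloaddevcnt. A simplified model, with driver_bind() as a hypothetical stand-in for the driver hook:

#include <errno.h>
#include <stdatomic.h>

struct block_model {
	atomic_int offloadcnt;		/* filters currently in hardware */
	unsigned int nooffloaddevcnt;	/* bound devs without offload support */
};

/* Hypothetical driver hook standing in for the real setup callback. */
static int driver_bind(int supports_offload)
{
	return supports_offload ? 0 : -EOPNOTSUPP;
}

static int offload_bind(struct block_model *b, int dev_supports_offload)
{
	int err = driver_bind(dev_supports_offload);

	if (err != -EOPNOTSUPP)
		return err;		/* bound, or a real error */

	/* No-offload device: only acceptable while nothing is offloaded. */
	if (atomic_load(&b->offloadcnt))
		return -EOPNOTSUPP;
	b->nooffloaddevcnt++;
	return 0;
}

int main(void)
{
	struct block_model b = { 0 };

	return offload_bind(&b, 0);	/* succeeds, bumps nooffloaddevcnt */
}
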
866 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, in tcf_block_offload_unbind() argument
872 down_write(&block->cb_lock); in tcf_block_offload_unbind()
873 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL); in tcf_block_offload_unbind()
876 up_write(&block->cb_lock); in tcf_block_offload_unbind()
880 WARN_ON(block->nooffloaddevcnt-- == 0); in tcf_block_offload_unbind()
881 up_write(&block->cb_lock); in tcf_block_offload_unbind()
885 tcf_chain0_head_change_cb_add(struct tcf_block *block, in tcf_chain0_head_change_cb_add() argument
900 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_add()
901 chain0 = block->chain0.chain; in tcf_chain0_head_change_cb_add()
905 list_add(&item->list, &block->chain0.filter_chain_list); in tcf_chain0_head_change_cb_add()
906 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_add()
917 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_add()
918 list_add(&item->list, &block->chain0.filter_chain_list); in tcf_chain0_head_change_cb_add()
919 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_add()
929 tcf_chain0_head_change_cb_del(struct tcf_block *block, in tcf_chain0_head_change_cb_del() argument
934 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_del()
935 list_for_each_entry(item, &block->chain0.filter_chain_list, list) { in tcf_chain0_head_change_cb_del()
939 if (block->chain0.chain) in tcf_chain0_head_change_cb_del()
942 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_del()
948 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_del()
959 static int tcf_block_insert(struct tcf_block *block, struct net *net, in tcf_block_insert() argument
967 err = idr_alloc_u32(&tn->idr, block, &block->index, block->index, in tcf_block_insert()
975 static void tcf_block_remove(struct tcf_block *block, struct net *net) in tcf_block_remove() argument
980 idr_remove(&tn->idr, block->index); in tcf_block_remove()
988 struct tcf_block *block; in tcf_block_create() local
990 block = kzalloc(sizeof(*block), GFP_KERNEL); in tcf_block_create()
991 if (!block) { in tcf_block_create()
992 NL_SET_ERR_MSG(extack, "Memory allocation for block failed"); in tcf_block_create()
995 mutex_init(&block->lock); in tcf_block_create()
996 mutex_init(&block->proto_destroy_lock); in tcf_block_create()
997 init_rwsem(&block->cb_lock); in tcf_block_create()
998 flow_block_init(&block->flow_block); in tcf_block_create()
999 INIT_LIST_HEAD(&block->chain_list); in tcf_block_create()
1000 INIT_LIST_HEAD(&block->owner_list); in tcf_block_create()
1001 INIT_LIST_HEAD(&block->chain0.filter_chain_list); in tcf_block_create()
1003 refcount_set(&block->refcnt, 1); in tcf_block_create()
1004 block->net = net; in tcf_block_create()
1005 block->index = block_index; in tcf_block_create()
1008 if (!tcf_block_shared(block)) in tcf_block_create()
1009 block->q = q; in tcf_block_create()
1010 return block; in tcf_block_create()
1022 struct tcf_block *block; in tcf_block_refcnt_get() local
1025 block = tcf_block_lookup(net, block_index); in tcf_block_refcnt_get()
1026 if (block && !refcount_inc_not_zero(&block->refcnt)) in tcf_block_refcnt_get()
1027 block = NULL; in tcf_block_refcnt_get()
1030 return block; in tcf_block_refcnt_get()
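
tcf_block_refcnt_get() uses the inc-not-zero idiom: an object found in the index may already be mid-teardown with a zero refcount, in which case the lookup must treat it as absent. A self-contained model using a C11 compare-and-swap (refcount_inc_not_zero_model() is an invented name for the sketch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct obj { atomic_uint refcnt; };

static bool refcount_inc_not_zero_model(atomic_uint *r)
{
	unsigned int old = atomic_load(r);

	while (old != 0)
		if (atomic_compare_exchange_weak(r, &old, old + 1))
			return true;	/* reference taken */
	return false;			/* object is dying: treat as absent */
}

static struct obj *lookup_get(struct obj *candidate)
{
	if (candidate && !refcount_inc_not_zero_model(&candidate->refcnt))
		candidate = NULL;
	return candidate;
}

int main(void)
{
	struct obj alive = { .refcnt = 1 }, dying = { .refcnt = 0 };

	return (lookup_get(&alive) && !lookup_get(&dying)) ? 0 : 1;
}
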
1034 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) in __tcf_get_next_chain() argument
1036 mutex_lock(&block->lock); in __tcf_get_next_chain()
1038 chain = list_is_last(&chain->list, &block->chain_list) ? in __tcf_get_next_chain()
1041 chain = list_first_entry_or_null(&block->chain_list, in __tcf_get_next_chain()
1046 chain = list_is_last(&chain->list, &block->chain_list) ? in __tcf_get_next_chain()
1051 mutex_unlock(&block->lock); in __tcf_get_next_chain()
1057 * block. It properly obtains block->lock and takes reference to chain before
1066 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) in tcf_get_next_chain() argument
1068 struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain); in tcf_get_next_chain()
1130 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held) in tcf_block_flush_all_chains() argument
1134 /* Last reference to block. At this point chains cannot be added or in tcf_block_flush_all_chains()
1137 for (chain = tcf_get_next_chain(block, NULL); in tcf_block_flush_all_chains()
1139 chain = tcf_get_next_chain(block, chain)) { in tcf_block_flush_all_chains()
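
tcf_block_flush_all_chains() leans on the tcf_get_next_chain() iterator described at line 1057: each step holds block->lock just long enough to reference the successor and drop the previous element, so the loop body itself can sleep or take other locks. A sketch of the idiom under a pthread mutex (simplified in that the reference drop happens under the same lock here):

#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *next;
	int refcnt;			/* protected by list_lock here */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static struct node *get_next(struct node *prev)
{
	struct node *n;

	pthread_mutex_lock(&list_lock);
	n = prev ? prev->next : head;
	if (n)
		n->refcnt++;		/* keep it alive across the unlock */
	if (prev)
		prev->refcnt--;		/* drop the previous step's ref */
	pthread_mutex_unlock(&list_lock);
	return n;
}

int main(void)
{
	struct node b = { .next = NULL }, a = { .next = &b };
	struct node *it;

	head = &a;
	for (it = get_next(NULL); it; it = get_next(it))
		;			/* loop body may sleep here */
	return 0;
}
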
1249 struct tcf_block *block; in __tcf_block_find() local
1252 block = tcf_block_refcnt_get(net, block_index); in __tcf_block_find()
1253 if (!block) { in __tcf_block_find()
1254 NL_SET_ERR_MSG(extack, "Block of given index was not found"); in __tcf_block_find()
1260 block = cops->tcf_block(q, cl, extack); in __tcf_block_find()
1261 if (!block) in __tcf_block_find()
1264 if (tcf_block_shared(block)) { in __tcf_block_find()
1265 …NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the … in __tcf_block_find()
1269 /* Always take reference to block in order to support execution in __tcf_block_find()
1271 * must release block when it is finished using it. 'if' block in __tcf_block_find()
1272 * of this conditional obtain reference to block by calling in __tcf_block_find()
1275 refcount_inc(&block->refcnt); in __tcf_block_find()
1278 return block; in __tcf_block_find()
1281 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, in __tcf_block_put() argument
1284 if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) { in __tcf_block_put()
1285 /* Flushing/putting all chains will cause the block to be in __tcf_block_put()
1287 * is empty, block has to be manually deallocated. After block in __tcf_block_put()
1289 * increment it or add new chains to block. in __tcf_block_put()
1291 bool free_block = list_empty(&block->chain_list); in __tcf_block_put()
1293 mutex_unlock(&block->lock); in __tcf_block_put()
1294 if (tcf_block_shared(block)) in __tcf_block_put()
1295 tcf_block_remove(block, block->net); in __tcf_block_put()
1298 tcf_block_offload_unbind(block, q, ei); in __tcf_block_put()
1301 tcf_block_destroy(block); in __tcf_block_put()
1303 tcf_block_flush_all_chains(block, rtnl_held); in __tcf_block_put()
1305 tcf_block_offload_unbind(block, q, ei); in __tcf_block_put()
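
__tcf_block_put() pivots on refcount_dec_and_mutex_lock(): whoever drops the last reference returns with block->lock already held, so teardown cannot race with a concurrent lookup bumping the count back up. The model below locks unconditionally for brevity, whereas the real helper avoids the mutex while the count stays above one; that is a deliberate simplification:

#include <pthread.h>
#include <stdbool.h>

static bool dec_and_mutex_lock(unsigned int *refcnt, pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	if (--*refcnt == 0)
		return true;	/* caller owns the lock and tears down */
	pthread_mutex_unlock(lock);
	return false;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	unsigned int refcnt = 1;

	if (dec_and_mutex_lock(&refcnt, &lock)) {
		/* last reference: flush chains, free the block */
		pthread_mutex_unlock(&lock);
	}
	return 0;
}
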
1309 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held) in tcf_block_refcnt_put() argument
1311 __tcf_block_put(block, NULL, NULL, rtnl_held); in tcf_block_refcnt_put()
1314 /* Find tcf block.
1323 struct tcf_block *block; in tcf_block_find() local
1336 block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack); in tcf_block_find()
1337 if (IS_ERR(block)) { in tcf_block_find()
1338 err = PTR_ERR(block); in tcf_block_find()
1342 return block; in tcf_block_find()
1352 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, in tcf_block_release() argument
1355 if (!IS_ERR_OR_NULL(block)) in tcf_block_release()
1356 tcf_block_refcnt_put(block, rtnl_held); in tcf_block_release()
1373 tcf_block_owner_netif_keep_dst(struct tcf_block *block, in tcf_block_owner_netif_keep_dst() argument
1377 if (block->keep_dst && in tcf_block_owner_netif_keep_dst()
1383 void tcf_block_netif_keep_dst(struct tcf_block *block) in tcf_block_netif_keep_dst() argument
1387 block->keep_dst = true; in tcf_block_netif_keep_dst()
1388 list_for_each_entry(item, &block->owner_list, list) in tcf_block_netif_keep_dst()
1389 tcf_block_owner_netif_keep_dst(block, item->q, in tcf_block_netif_keep_dst()
1394 static int tcf_block_owner_add(struct tcf_block *block, in tcf_block_owner_add() argument
1405 list_add(&item->list, &block->owner_list); in tcf_block_owner_add()
1409 static void tcf_block_owner_del(struct tcf_block *block, in tcf_block_owner_del() argument
1415 list_for_each_entry(item, &block->owner_list, list) { in tcf_block_owner_del()
1430 struct tcf_block *block = NULL; in tcf_block_get_ext() local
1434 /* block_index not 0 means the shared block is requested */ in tcf_block_get_ext()
1435 block = tcf_block_refcnt_get(net, ei->block_index); in tcf_block_get_ext()
1437 if (!block) { in tcf_block_get_ext()
1438 block = tcf_block_create(net, q, ei->block_index, extack); in tcf_block_get_ext()
1439 if (IS_ERR(block)) in tcf_block_get_ext()
1440 return PTR_ERR(block); in tcf_block_get_ext()
1441 if (tcf_block_shared(block)) { in tcf_block_get_ext()
1442 err = tcf_block_insert(block, net, extack); in tcf_block_get_ext()
1448 err = tcf_block_owner_add(block, q, ei->binder_type); in tcf_block_get_ext()
1452 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); in tcf_block_get_ext()
1454 err = tcf_chain0_head_change_cb_add(block, ei, extack); in tcf_block_get_ext()
1458 err = tcf_block_offload_bind(block, q, ei, extack); in tcf_block_get_ext()
1462 *p_block = block; in tcf_block_get_ext()
1466 tcf_chain0_head_change_cb_del(block, ei); in tcf_block_get_ext()
1468 tcf_block_owner_del(block, q, ei->binder_type); in tcf_block_get_ext()
1471 tcf_block_refcnt_put(block, true); in tcf_block_get_ext()
1500 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, in tcf_block_put_ext() argument
1503 if (!block) in tcf_block_put_ext()
1505 tcf_chain0_head_change_cb_del(block, ei); in tcf_block_put_ext()
1506 tcf_block_owner_del(block, q, ei->binder_type); in tcf_block_put_ext()
1508 __tcf_block_put(block, q, ei, true); in tcf_block_put_ext()
1512 void tcf_block_put(struct tcf_block *block) in tcf_block_put() argument
1516 if (!block) in tcf_block_put()
1518 tcf_block_put_ext(block, block->q, &ei); in tcf_block_put()
1524 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, in tcf_block_playback_offloads() argument
1532 lockdep_assert_held(&block->cb_lock); in tcf_block_playback_offloads()
1534 for (chain = __tcf_get_next_chain(block, NULL); in tcf_block_playback_offloads()
1537 chain = __tcf_get_next_chain(block, chain), in tcf_block_playback_offloads()
1567 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, in tcf_block_playback_offloads()
1572 static int tcf_block_bind(struct tcf_block *block, in tcf_block_bind() argument
1578 lockdep_assert_held(&block->cb_lock); in tcf_block_bind()
1581 err = tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_bind()
1583 tcf_block_offload_in_use(block), in tcf_block_bind()
1588 block->lockeddevcnt++; in tcf_block_bind()
1592 list_splice(&bo->cb_list, &block->flow_block.cb_list); in tcf_block_bind()
1601 tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_bind()
1603 tcf_block_offload_in_use(block), in tcf_block_bind()
1606 block->lockeddevcnt--; in tcf_block_bind()
1614 static void tcf_block_unbind(struct tcf_block *block, in tcf_block_unbind() argument
1619 lockdep_assert_held(&block->cb_lock); in tcf_block_unbind()
1622 tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_unbind()
1624 tcf_block_offload_in_use(block), in tcf_block_unbind()
1629 block->lockeddevcnt--; in tcf_block_unbind()
1633 static int tcf_block_setup(struct tcf_block *block, in tcf_block_setup() argument
1640 err = tcf_block_bind(block, bo); in tcf_block_setup()
1644 tcf_block_unbind(block, bo); in tcf_block_setup()
1729 tp->chain->block->index, in __tcf_classify()
1741 const struct tcf_block *block, in tcf_classify() argument
1758 if (block) { in tcf_classify()
1776 fchain = tcf_chain_lookup_rcu(block, chain); in tcf_classify()
1971 struct tcf_proto *tp, struct tcf_block *block, in tcf_fill_node() argument
1993 tcm->tcm_block_index = block->index; in tcf_fill_node()
2033 struct tcf_block *block, struct Qdisc *q, in tfilter_notify() argument
2045 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, in tfilter_notify()
2062 struct tcf_block *block, struct Qdisc *q, in tfilter_del_notify() argument
2074 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, in tfilter_del_notify()
2100 struct tcf_block *block, struct Qdisc *q, in tfilter_notify_chain() argument
2109 tfilter_notify(net, oskb, n, tp, block, q, parent, NULL, in tfilter_notify_chain()
2139 struct tcf_block *block; in tc_new_tfilter() local
2163 block = NULL; in tc_new_tfilter()
2194 * block is shared (no qdisc found), qdisc is not unlocked, classifier in tc_new_tfilter()
2208 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_new_tfilter()
2210 if (IS_ERR(block)) { in tc_new_tfilter()
2211 err = PTR_ERR(block); in tc_new_tfilter()
2214 block->classid = parent; in tc_new_tfilter()
2222 chain = tcf_chain_get(block, chain_index, true); in tc_new_tfilter()
2320 tfilter_notify(net, skb, n, tp, block, q, parent, fh, in tc_new_tfilter()
2338 tcf_block_release(q, block, rtnl_held); in tc_new_tfilter()
2372 struct tcf_block *block = NULL; in tc_del_tfilter() local
2405 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc in tc_del_tfilter()
2420 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_del_tfilter()
2422 if (IS_ERR(block)) { in tc_del_tfilter()
2423 err = PTR_ERR(block); in tc_del_tfilter()
2433 chain = tcf_chain_get(block, chain_index, false); in tc_del_tfilter()
2448 tfilter_notify_chain(net, skb, block, q, parent, n, in tc_del_tfilter()
2472 tfilter_notify(net, skb, n, tp, block, q, parent, fh, in tc_del_tfilter()
2487 err = tfilter_del_notify(net, skb, n, tp, block, in tc_del_tfilter()
2503 tcf_block_release(q, block, rtnl_held); in tc_del_tfilter()
2529 struct tcf_block *block = NULL; in tc_get_tfilter() local
2562 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not in tc_get_tfilter()
2576 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_get_tfilter()
2578 if (IS_ERR(block)) { in tc_get_tfilter()
2579 err = PTR_ERR(block); in tc_get_tfilter()
2589 chain = tcf_chain_get(block, chain_index, false); in tc_get_tfilter()
2616 err = tfilter_notify(net, skb, n, tp, block, q, parent, in tc_get_tfilter()
2629 tcf_block_release(q, block, rtnl_held); in tc_get_tfilter()
2641 struct tcf_block *block; member
2652 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, in tcf_node_dump()
2663 struct tcf_block *block = chain->block; in tcf_chain_dump() local
2686 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, in tcf_chain_dump()
2698 arg.block = block; in tcf_chain_dump()
2730 struct tcf_block *block; in tc_dump_tfilter() local
2754 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); in tc_dump_tfilter()
2755 if (!block) in tc_dump_tfilter()
2757 /* If we work with block index, q is NULL and parent value in tc_dump_tfilter()
2790 block = cops->tcf_block(q, cl, NULL); in tc_dump_tfilter()
2791 if (!block) in tc_dump_tfilter()
2793 parent = block->classid; in tc_dump_tfilter()
2794 if (tcf_block_shared(block)) in tc_dump_tfilter()
2801 for (chain = __tcf_get_next_chain(block, NULL); in tc_dump_tfilter()
2804 chain = __tcf_get_next_chain(block, chain), in tc_dump_tfilter()
2818 tcf_block_refcnt_put(block, true); in tc_dump_tfilter()
2831 struct tcf_block *block, in tc_chain_fill_node() argument
2852 if (block->q) { in tc_chain_fill_node()
2853 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; in tc_chain_fill_node()
2854 tcm->tcm_parent = block->q->handle; in tc_chain_fill_node()
2857 tcm->tcm_block_index = block->index; in tc_chain_fill_node()
2889 struct tcf_block *block = chain->block; in tc_chain_notify() local
2890 struct net *net = block->net; in tc_chain_notify()
2899 chain->index, net, skb, block, portid, in tc_chain_notify()
2916 struct tcf_block *block, struct sk_buff *oskb, in tc_chain_notify_delete() argument
2920 struct net *net = block->net; in tc_chain_notify_delete()
2928 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) { in tc_chain_notify_delete()
2999 struct tcf_block *block; in tc_ctl_chain() local
3014 block = tcf_block_find(net, &q, &parent, &cl, in tc_ctl_chain()
3016 if (IS_ERR(block)) in tc_ctl_chain()
3017 return PTR_ERR(block); in tc_ctl_chain()
3026 mutex_lock(&block->lock); in tc_ctl_chain()
3027 chain = tcf_chain_lookup(block, chain_index); in tc_ctl_chain()
3046 chain = tcf_chain_create(block, chain_index); in tc_ctl_chain()
3063 /* Modifying chain requires holding parent block lock. In case in tc_ctl_chain()
3071 mutex_unlock(&block->lock); in tc_ctl_chain()
3085 tfilter_notify_chain(net, skb, block, q, parent, n, in tc_ctl_chain()
3109 tcf_block_release(q, block, true); in tc_ctl_chain()
3116 mutex_unlock(&block->lock); in tc_ctl_chain()
3126 struct tcf_block *block; in tc_dump_chain() local
3142 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); in tc_dump_chain()
3143 if (!block) in tc_dump_chain()
3171 block = cops->tcf_block(q, cl, NULL); in tc_dump_chain()
3172 if (!block) in tc_dump_chain()
3174 if (tcf_block_shared(block)) in tc_dump_chain()
3181 mutex_lock(&block->lock); in tc_dump_chain()
3182 list_for_each_entry(chain, &block->chain_list, list) { in tc_dump_chain()
3193 chain->index, net, skb, block, in tc_dump_chain()
3201 mutex_unlock(&block->lock); in tc_dump_chain()
3204 tcf_block_refcnt_put(block, true); in tc_dump_chain()
3432 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) in tcf_block_offload_inc() argument
3437 atomic_inc(&block->offloadcnt); in tcf_block_offload_inc()
3440 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) in tcf_block_offload_dec() argument
3445 atomic_dec(&block->offloadcnt); in tcf_block_offload_dec()
3448 static void tc_cls_offload_cnt_update(struct tcf_block *block, in tc_cls_offload_cnt_update() argument
3452 lockdep_assert_held(&block->cb_lock); in tc_cls_offload_cnt_update()
3457 tcf_block_offload_inc(block, flags); in tc_cls_offload_cnt_update()
3462 tcf_block_offload_dec(block, flags); in tc_cls_offload_cnt_update()
3468 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, in tc_cls_offload_cnt_reset() argument
3471 lockdep_assert_held(&block->cb_lock); in tc_cls_offload_cnt_reset()
3474 tcf_block_offload_dec(block, flags); in tc_cls_offload_cnt_reset()
3480 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, in __tc_setup_cb_call() argument
3487 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { in __tc_setup_cb_call()
3499 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, in tc_setup_cb_call() argument
3502 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_call()
3508 down_read(&block->cb_lock); in tc_setup_cb_call()
3509 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_call()
3510 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_call()
3513 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_call()
3514 up_read(&block->cb_lock); in tc_setup_cb_call()
3519 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_call()
3521 up_read(&block->cb_lock); in tc_setup_cb_call()
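
The retry in tc_setup_cb_call() enforces lock order: rtnl is taken before cb_lock during block bind, so when a locked device is discovered only after cb_lock is read-acquired, the code drops cb_lock and restarts with rtnl held rather than acquiring rtnl in the wrong order. The same pattern repeats in tc_setup_cb_add/replace/destroy below. A pthread model of the dance:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t cb_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int lockeddevcnt;

static void setup_cb_call(void)
{
	bool take_rtnl = atomic_load(&lockeddevcnt) != 0;
	bool rtnl_held = false;

retry:
	if (take_rtnl && !rtnl_held) {
		pthread_mutex_lock(&rtnl);
		rtnl_held = true;
	}
	pthread_rwlock_rdlock(&cb_lock);
	/* A locked dev appeared after we sampled lockeddevcnt: restart
	 * with rtnl; never acquire it while holding cb_lock.
	 */
	if (!rtnl_held && atomic_load(&lockeddevcnt)) {
		pthread_rwlock_unlock(&cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* ... invoke the flow block callbacks here ... */

	pthread_rwlock_unlock(&cb_lock);
	if (rtnl_held)
		pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	setup_cb_call();
	return 0;
}
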
3529 * successfully offloaded, increment block offloads counter. On failure,
3534 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_add() argument
3538 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_add()
3544 down_read(&block->cb_lock); in tc_setup_cb_add()
3545 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_add()
3546 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_add()
3549 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_add()
3550 up_read(&block->cb_lock); in tc_setup_cb_add()
3555 /* Make sure all netdevs sharing this block are offload-capable. */ in tc_setup_cb_add()
3556 if (block->nooffloaddevcnt && err_stop) { in tc_setup_cb_add()
3561 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_add()
3568 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, in tc_setup_cb_add()
3571 up_read(&block->cb_lock); in tc_setup_cb_add()
3579 * successfully offloaded, increment block offload counter. On failure,
3584 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_replace() argument
3590 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_replace()
3596 down_read(&block->cb_lock); in tc_setup_cb_replace()
3597 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_replace()
3598 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_replace()
3601 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_replace()
3602 up_read(&block->cb_lock); in tc_setup_cb_replace()
3607 /* Make sure all netdevs sharing this block are offload-capable. */ in tc_setup_cb_replace()
3608 if (block->nooffloaddevcnt && err_stop) { in tc_setup_cb_replace()
3613 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); in tc_setup_cb_replace()
3617 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_replace()
3624 tc_cls_offload_cnt_update(block, tp, new_in_hw_count, in tc_setup_cb_replace()
3627 up_read(&block->cb_lock); in tc_setup_cb_replace()
3634 /* Destroy filter and decrement block offload counter, if filter was previously
3638 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_destroy() argument
3642 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_destroy()
3648 down_read(&block->cb_lock); in tc_setup_cb_destroy()
3649 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_destroy()
3650 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_destroy()
3653 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_destroy()
3654 up_read(&block->cb_lock); in tc_setup_cb_destroy()
3659 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_destroy()
3661 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); in tc_setup_cb_destroy()
3665 up_read(&block->cb_lock); in tc_setup_cb_destroy()
3672 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_reoffload() argument
3683 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, in tc_setup_cb_reoffload()
3842 NL_SET_ERR_MSG(extack, "Block number may not be zero"); in tcf_qevent_parse_block_index()
3869 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack); in tcf_qevent_init()
3876 tcf_block_put_ext(qe->block, sch, &qe->info); in tcf_qevent_destroy()
3893 /* Bounce newly-configured block or change in block. */ in tcf_qevent_validate_change()