Lines Matching refs:tb

71 struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); in inet_bind_bucket_create() local
73 if (tb) { in inet_bind_bucket_create()
74 write_pnet(&tb->ib_net, net); in inet_bind_bucket_create()
75 tb->l3mdev = l3mdev; in inet_bind_bucket_create()
76 tb->port = snum; in inet_bind_bucket_create()
77 tb->fastreuse = 0; in inet_bind_bucket_create()
78 tb->fastreuseport = 0; in inet_bind_bucket_create()
79 INIT_HLIST_HEAD(&tb->owners); in inet_bind_bucket_create()
80 hlist_add_head(&tb->node, &head->chain); in inet_bind_bucket_create()
82 return tb; in inet_bind_bucket_create()
88 void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb) in inet_bind_bucket_destroy() argument
90 if (hlist_empty(&tb->owners)) { in inet_bind_bucket_destroy()
91 __hlist_del(&tb->node); in inet_bind_bucket_destroy()
92 kmem_cache_free(cachep, tb); in inet_bind_bucket_destroy()
96 bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net, in inet_bind_bucket_match() argument
99 return net_eq(ib_net(tb), net) && tb->port == port && in inet_bind_bucket_match()
100 tb->l3mdev == l3mdev; in inet_bind_bucket_match()
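
Taken together, these matches cover the life cycle of the port-keyed bucket: inet_bind_bucket_create() allocates from the kmem_cache, records the (net, l3mdev, port) key, clears the fastreuse hints and chains the bucket onto the hash head; inet_bind_bucket_destroy() frees it only while no socket remains on tb->owners; inet_bind_bucket_match() is the key comparison used by lookups. A minimal userspace sketch of that life cycle, with the kernel types (possible_net_t, kmem_cache, hlist) replaced by plain C and field types guessed only from the accesses above, so it is illustrative rather than the kernel definition:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Reduced model of the fields touched in the listing above. */
struct bind_bucket {
	int net;                   /* stand-in for write_pnet()/net_eq() */
	int l3mdev;
	unsigned short port;
	signed char fastreuse;     /* signed: the connect path stores -1 */
	signed char fastreuseport;
	int owners;                /* stand-in for the hlist of owning sockets */
};

/* Key comparison, as in inet_bind_bucket_match(). */
static bool bucket_match(const struct bind_bucket *tb,
			 int net, unsigned short port, int l3mdev)
{
	return tb->net == net && tb->port == port && tb->l3mdev == l3mdev;
}

/* Allocation + init, as in inet_bind_bucket_create() (chaining omitted). */
static struct bind_bucket *bucket_create(int net, unsigned short port, int l3mdev)
{
	struct bind_bucket *tb = calloc(1, sizeof(*tb));

	if (tb) {
		tb->net = net;
		tb->l3mdev = l3mdev;
		tb->port = port;
	}
	return tb;
}

/* Release rule of inet_bind_bucket_destroy(): free only when unowned. */
static void bucket_destroy(struct bind_bucket *tb)
{
	if (tb->owners == 0)
		free(tb);
}

int main(void)
{
	struct bind_bucket *tb = bucket_create(1, 8080, 0);

	printf("match: %d\n", tb && bucket_match(tb, 1, 8080, 0));
	if (tb)
		bucket_destroy(tb);
	return 0;
}
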
103 static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb, in inet_bind2_bucket_init() argument
109 write_pnet(&tb->ib_net, net); in inet_bind2_bucket_init()
110 tb->l3mdev = l3mdev; in inet_bind2_bucket_init()
111 tb->port = port; in inet_bind2_bucket_init()
113 tb->family = sk->sk_family; in inet_bind2_bucket_init()
115 tb->v6_rcv_saddr = sk->sk_v6_rcv_saddr; in inet_bind2_bucket_init()
118 tb->rcv_saddr = sk->sk_rcv_saddr; in inet_bind2_bucket_init()
119 INIT_HLIST_HEAD(&tb->owners); in inet_bind2_bucket_init()
120 INIT_HLIST_HEAD(&tb->deathrow); in inet_bind2_bucket_init()
121 hlist_add_head(&tb->node, &head->chain); in inet_bind2_bucket_init()
131 struct inet_bind2_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC); in inet_bind2_bucket_create() local
133 if (tb) in inet_bind2_bucket_create()
134 inet_bind2_bucket_init(tb, net, head, port, l3mdev, sk); in inet_bind2_bucket_create()
136 return tb; in inet_bind2_bucket_create()
140 void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb) in inet_bind2_bucket_destroy() argument
142 if (hlist_empty(&tb->owners) && hlist_empty(&tb->deathrow)) { in inet_bind2_bucket_destroy()
143 __hlist_del(&tb->node); in inet_bind2_bucket_destroy()
144 kmem_cache_free(cachep, tb); in inet_bind2_bucket_destroy()
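
The inet_bind2_bucket variant records more than the port-only bucket: the owning socket's family and its bound address (v6_rcv_saddr for AF_INET6, rcv_saddr otherwise), plus a second list, deathrow. inet_bind2_bucket_destroy() therefore frees the bucket only when both owners and deathrow are empty, since timewait sockets on deathrow still reference it. A reduced model of that release rule, with the two hlist heads collapsed into counters and a hypothetical helper name:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Reduced model: the two hlist heads become reference counters. */
struct bind2_bucket {
	int owners;    /* live sockets bound to this (address, port) */
	int deathrow;  /* timewait sockets still holding the bucket */
};

/* Mirrors inet_bind2_bucket_destroy(): free only once both lists drain. */
static bool bind2_try_destroy(struct bind2_bucket **tbp)
{
	struct bind2_bucket *tb = *tbp;

	if (tb->owners == 0 && tb->deathrow == 0) {
		free(tb);
		*tbp = NULL;
		return true;
	}
	return false;
}

int main(void)
{
	struct bind2_bucket *tb = calloc(1, sizeof(*tb));

	tb->deathrow = 1;                               /* a timewait socket remains */
	printf("freed: %d\n", bind2_try_destroy(&tb));  /* 0: still referenced */
	tb->deathrow = 0;
	printf("freed: %d\n", bind2_try_destroy(&tb));  /* 1: bucket released  */
	return 0;
}
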
168 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, in inet_bind_hash() argument
172 sk_add_bind_node(sk, &tb->owners); in inet_bind_hash()
173 inet_csk(sk)->icsk_bind_hash = tb; in inet_bind_hash()
186 struct inet_bind_bucket *tb; in __inet_put_port() local
194 tb = inet_csk(sk)->icsk_bind_hash; in __inet_put_port()
198 inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); in __inet_put_port()
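
inet_bind_hash() links the socket onto tb->owners and caches the bucket pointer in icsk_bind_hash; __inet_put_port() later reads that pointer back and hands the bucket to inet_bind_bucket_destroy(), which frees it only once the owners list has emptied. A reduced model of that pairing, with the owners hlist collapsed to a counter and hypothetical names, not the kernel structures:

#include <stdio.h>
#include <stdlib.h>

struct bucket { int owners; };
struct sock   { struct bucket *bind_hash; };   /* models icsk_bind_hash */

/* inet_bind_hash(): record the bucket on the socket and take ownership. */
static void bind_hash(struct sock *sk, struct bucket *tb)
{
	tb->owners++;
	sk->bind_hash = tb;
}

/* __inet_put_port(): drop ownership; free the bucket once unused. */
static void put_port(struct sock *sk)
{
	struct bucket *tb = sk->bind_hash;

	sk->bind_hash = NULL;
	if (--tb->owners == 0)
		free(tb);
}

int main(void)
{
	struct bucket *tb = calloc(1, sizeof(*tb));
	struct sock sk = { 0 };

	bind_hash(&sk, tb);
	put_port(&sk);        /* bucket freed here: it had a single owner */
	printf("done\n");
	return 0;
}
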
230 struct inet_bind_bucket *tb; in __inet_inherit_port() local
239 tb = inet_csk(sk)->icsk_bind_hash; in __inet_inherit_port()
241 if (unlikely(!tb || !tb2)) { in __inet_inherit_port()
246 if (tb->port != port) { in __inet_inherit_port()
254 inet_bind_bucket_for_each(tb, &head->chain) { in __inet_inherit_port()
255 if (inet_bind_bucket_match(tb, net, port, l3mdev)) in __inet_inherit_port()
258 if (!tb) { in __inet_inherit_port()
259 tb = inet_bind_bucket_create(table->bind_bucket_cachep, in __inet_inherit_port()
261 if (!tb) { in __inet_inherit_port()
285 inet_csk_update_fastreuse(tb, child); in __inet_inherit_port()
286 inet_bind_hash(child, tb, tb2, port); in __inet_inherit_port()
294 inet_bind_bucket_destroy(table->bind_bucket_cachep, tb); in __inet_inherit_port()
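
__inet_inherit_port() normally hands the listener's own bucket to the child (the common case, where tb->port equals the child's port); when the child hashes under a different port it walks that chain with inet_bind_bucket_for_each(), reuses a bucket accepted by inet_bind_bucket_match(), and creates one with inet_bind_bucket_create() otherwise, before inet_csk_update_fastreuse() and inet_bind_hash() attach the child; the error exit releases the bucket again through inet_bind_bucket_destroy(). A sketch of that find-or-create walk over a simplified single-linked chain, with hypothetical names:

#include <stdlib.h>

struct bucket {
	int net, l3mdev;
	unsigned short port;
	struct bucket *next;
};

/* Walk the chain as inet_bind_bucket_for_each() does; create on a miss,
 * mirroring the tb->port != port branch of __inet_inherit_port(). */
static struct bucket *find_or_create(struct bucket **chain, int net,
				     unsigned short port, int l3mdev)
{
	struct bucket *tb;

	for (tb = *chain; tb; tb = tb->next)
		if (tb->net == net && tb->port == port && tb->l3mdev == l3mdev)
			return tb;

	tb = calloc(1, sizeof(*tb));
	if (tb) {
		tb->net = net;
		tb->port = port;
		tb->l3mdev = l3mdev;
		tb->next = *chain;
		*chain = tb;
	}
	return tb;            /* NULL on allocation failure, as in the kernel path */
}

int main(void)
{
	struct bucket *chain = NULL;
	struct bucket *a = find_or_create(&chain, 1, 80, 0);
	struct bucket *b = find_or_create(&chain, 1, 80, 0);

	return a == b ? 0 : 1;   /* second lookup reuses the existing bucket */
}
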
713 struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; in inet_reuseport_add_sock() local
723 inet_csk(sk2)->icsk_bind_hash == tb && in inet_reuseport_add_sock()
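
These two matches show that reuseport grouping is confined to a single bind bucket: the candidate sk2 is only considered when its cached icsk_bind_hash points at the same tb as the socket being added (other conditions in the function are not shown here because they do not reference tb). A tiny model of that filter, with hypothetical names:

#include <stdio.h>

/* Reduced model: two listeners may only share a reuseport group when they
 * hang off the same bind bucket, i.e. the same (net, port, l3mdev) key. */
struct sock { const void *bind_hash; };

static int can_join_group(const struct sock *sk, const struct sock *sk2,
			  const void *tb)
{
	return sk2 != sk && sk2->bind_hash == tb;
}

int main(void)
{
	int bucket_a, bucket_b;
	struct sock sk  = { &bucket_a };
	struct sock sk2 = { &bucket_a };
	struct sock sk3 = { &bucket_b };

	printf("%d %d\n", can_join_group(&sk, &sk2, sk.bind_hash),
			  can_join_group(&sk, &sk3, sk.bind_hash));
	return 0;
}
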
820 static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb, in inet_bind2_bucket_match() argument
824 if (!net_eq(ib2_net(tb), net) || tb->port != port || in inet_bind2_bucket_match()
825 tb->l3mdev != l3mdev) in inet_bind2_bucket_match()
828 return inet_bind2_bucket_addr_match(tb, sk); in inet_bind2_bucket_match()
831 bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net, in inet_bind2_bucket_match_addr_any() argument
834 if (!net_eq(ib2_net(tb), net) || tb->port != port || in inet_bind2_bucket_match_addr_any()
835 tb->l3mdev != l3mdev) in inet_bind2_bucket_match_addr_any()
839 if (sk->sk_family != tb->family) { in inet_bind2_bucket_match_addr_any()
841 return ipv6_addr_any(&tb->v6_rcv_saddr) || in inet_bind2_bucket_match_addr_any()
842 ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr); in inet_bind2_bucket_match_addr_any()
848 return ipv6_addr_any(&tb->v6_rcv_saddr); in inet_bind2_bucket_match_addr_any()
850 return tb->rcv_saddr == 0; in inet_bind2_bucket_match_addr_any()
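
inet_bind2_bucket_match() requires the same (net, port, l3mdev) key plus an exact address match, while inet_bind2_bucket_match_addr_any() accepts a bucket whose recorded address is the wildcard. The family check in the middle handles the mixed case: an AF_INET6 bucket compared against an IPv4 socket matches only when it holds the IPv6 any-address or a v4-mapped any-address. A sketch of that wildcard logic with the in6_addr tests reduced to booleans; the key checks are omitted and the branch for an IPv6 socket against an IPv4 bucket is filled in from context (the listing only shows the lines that mention tb):

#include <stdbool.h>
#include <stdio.h>
#include <sys/socket.h>

struct bucket2 {
	int family;             /* AF_INET or AF_INET6 */
	bool v6_any;            /* models ipv6_addr_any(&tb->v6_rcv_saddr) */
	bool v6_v4mapped_any;   /* models ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr) */
	unsigned int rcv_saddr; /* IPv4 address, 0 == INADDR_ANY */
};

/* Mirrors the tail of inet_bind2_bucket_match_addr_any(). */
static bool addr_any_match(const struct bucket2 *tb, int sk_family)
{
	if (sk_family != tb->family) {
		if (sk_family == AF_INET)
			return tb->v6_any || tb->v6_v4mapped_any;
		return false;   /* assumed: IPv6 socket vs. IPv4 bucket never matches */
	}
	if (tb->family == AF_INET6)
		return tb->v6_any;
	return tb->rcv_saddr == 0;
}

int main(void)
{
	struct bucket2 tb = { .family = AF_INET6, .v6_any = true };

	printf("%d\n", addr_any_match(&tb, AF_INET));   /* 1: wildcard matches */
	return 0;
}
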
1011 struct inet_bind_bucket *tb; in __inet_hash_connect() local
1058 inet_bind_bucket_for_each(tb, &head->chain) { in __inet_hash_connect()
1059 if (inet_bind_bucket_match(tb, net, port, l3mdev)) { in __inet_hash_connect()
1060 if (tb->fastreuse >= 0 || in __inet_hash_connect()
1061 tb->fastreuseport >= 0) in __inet_hash_connect()
1063 WARN_ON(hlist_empty(&tb->owners)); in __inet_hash_connect()
1071 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, in __inet_hash_connect()
1073 if (!tb) { in __inet_hash_connect()
1078 tb->fastreuse = -1; in __inet_hash_connect()
1079 tb->fastreuseport = -1; in __inet_hash_connect()
1116 inet_bind_hash(sk, tb, tb2, port); in __inet_hash_connect()
1153 inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb); in __inet_hash_connect()
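
In the connect-time port search, a chain hit whose bucket has fastreuse >= 0 or fastreuseport >= 0 belongs to an explicitly bound socket, so that port is skipped; buckets created on this path are marked with fastreuse = fastreuseport = -1 so later searches can tell connect-created buckets apart from bind-created ones, and the error path at the end releases the bucket with inet_bind_bucket_destroy(). A sketch of that sentinel convention (the signed char type is an assumption drawn from the -1 stores above):

#include <stdbool.h>
#include <stdio.h>

struct bucket {
	signed char fastreuse;      /* >= 0: set up by an explicit bind() */
	signed char fastreuseport;  /* both -1: created by connect autobind */
};

/* Port-search rule seen in __inet_hash_connect(): skip ports whose bucket
 * was populated by bind(); only connect-created buckets may be shared. */
static bool port_usable_for_connect(const struct bucket *tb)
{
	return tb->fastreuse < 0 && tb->fastreuseport < 0;
}

int main(void)
{
	struct bucket bound = { .fastreuse = 0,  .fastreuseport = 0  };
	struct bucket autob = { .fastreuse = -1, .fastreuseport = -1 };

	printf("bound: %d, autobind: %d\n",
	       port_usable_for_connect(&bound),
	       port_usable_for_connect(&autob));
	return 0;
}
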