1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Generic INET6 transport hashtables 8 * 9 * Authors: Lotsa people, from code originally in tcp, generalised here 10 * by Arnaldo Carvalho de Melo <acme@mandriva.com> 11 */ 12 13 #include <linux/module.h> 14 #include <linux/random.h> 15 16 #include <net/addrconf.h> 17 #include <net/inet_connection_sock.h> 18 #include <net/inet_hashtables.h> 19 #include <net/inet6_hashtables.h> 20 #include <net/secure_seq.h> 21 #include <net/ip.h> 22 #include <net/sock_reuseport.h> 23 24 u32 inet6_ehashfn(const struct net *net, 25 const struct in6_addr *laddr, const u16 lport, 26 const struct in6_addr *faddr, const __be16 fport) 27 { 28 static u32 inet6_ehash_secret __read_mostly; 29 static u32 ipv6_hash_secret __read_mostly; 30 31 u32 lhash, fhash; 32 33 net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret)); 34 net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); 35 36 lhash = (__force u32)laddr->s6_addr32[3]; 37 fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret); 38 39 return __inet6_ehashfn(lhash, lport, fhash, fport, 40 inet6_ehash_secret + net_hash_mix(net)); 41 } 42 EXPORT_SYMBOL_GPL(inet6_ehashfn); 43 44 /* 45 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so 46 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM 47 * 48 * The sockhash lock must be held as a reader here. 
 */
struct sock *__inet6_lookup_established(struct net *net,
					struct inet_hashinfo *hashinfo,
					const struct in6_addr *saddr,
					const __be16 sport,
					const struct in6_addr *daddr,
					const u16 hnum,
					const int dif, const int sdif)
{
	struct sock *sk;
	const struct hlist_nulls_node *node;
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	/* Lockless walk of the bucket's nulls chain.  NOTE(review): the
	 * _rcu iteration implies callers hold rcu_read_lock(); the
	 * "sockhash lock" wording above appears to predate that — confirm.
	 */
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
			continue;
		/* Refcount already dropped to zero: socket is being freed
		 * and must not be returned.
		 */
		if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
			goto out;

		/* The socket may have been reused for a different flow
		 * between the match above and taking the reference;
		 * re-validate the keys and restart on a stale hit.
		 */
		if (unlikely(!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))) {
			sock_gen_put(sk);
			goto begin;
		}
		goto found;
	}
	/* Ending on a nulls marker belonging to a different slot means
	 * the entry we were on moved to another chain mid-walk: restart
	 * from this bucket's head.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	/* Non-NULL result carries a reference taken above. */
	return sk;
}
EXPORT_SYMBOL(__inet6_lookup_established);

/* Score @sk as a listener candidate for (daddr, hnum) arriving on
 * dif/sdif: -1 when it cannot accept the packet, otherwise 2 for a
 * device-bound socket and 1 for an unbound one (a CPU-locality bonus
 * point is added just below).
 */
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum,
				const struct in6_addr *daddr,
				const int dif, const int sdif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
	    sk->sk_family == PF_INET6) {
		if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
			return -1;

		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
			return -1;

		/* Prefer sockets bound to a specific device. */
		score = sk->sk_bound_dev_if ?
			2 : 1;
		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
			score++;
	}
	return score;
}

INDIRECT_CALLABLE_DECLARE(inet6_ehashfn_t udp6_ehashfn);

/* When @sk belongs to a SO_REUSEPORT group, select a socket from that
 * group using the flow hash computed by @ehashfn (indirect call is
 * devirtualized to udp6_ehashfn/inet6_ehashfn where possible).
 * Returns the selected socket, or NULL when no reuseport selection
 * applies and the caller should stick with @sk.
 */
struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
				    struct sk_buff *skb, int doff,
				    const struct in6_addr *saddr,
				    __be16 sport,
				    const struct in6_addr *daddr,
				    unsigned short hnum,
				    inet6_ehashfn_t *ehashfn)
{
	struct sock *reuse_sk = NULL;
	u32 phash;

	if (sk->sk_reuseport) {
		phash = INDIRECT_CALL_INET(ehashfn, udp6_ehashfn, inet6_ehashfn,
					   net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
	}
	return reuse_sk;
}
EXPORT_SYMBOL_GPL(inet6_lookup_reuseport);

/* called with rcu_read_lock() */
static struct sock *inet6_lhash2_lookup(struct net *net,
		struct inet_listen_hashbucket *ilb2,
		struct sk_buff *skb, int doff,
		const struct in6_addr *saddr,
		const __be16 sport, const struct in6_addr *daddr,
		const unsigned short hnum, const int dif, const int sdif)
{
	struct sock *sk, *result = NULL;
	struct hlist_nulls_node *node;
	int score, hiscore = 0;

	/* Keep the best-scoring listener; a reuseport selection, when it
	 * fires, short-circuits the scan entirely.
	 */
	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			result = inet6_lookup_reuseport(net, sk, skb, doff,
							saddr, sport, daddr, hnum, inet6_ehashfn);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}

/* Let an attached BPF sk_lookup program pick (or veto) the listener
 * for this packet before the regular hash lookup runs.
 */
static inline struct sock *inet6_lookup_run_bpf(struct net *net,
						struct inet_hashinfo *hashinfo,
						struct sk_buff *skb, int doff,
						const struct in6_addr *saddr,
						const __be16 sport,
						const struct in6_addr *daddr,
						const u16 hnum, const int dif)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (hashinfo != net->ipv4.tcp_death_row.hashinfo)
		return NULL; /* only TCP is
			     supported */

	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport,
					    daddr, hnum, dif, &sk);
	/* BPF either decided authoritatively (no_reuseport), failed, or
	 * declined (NULL): in all those cases return what it gave us.
	 */
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = inet6_lookup_reuseport(net, sk, skb, doff,
					  saddr, sport, daddr, hnum, inet6_ehashfn);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

/* Find a listening socket for (daddr, hnum).  Precedence: BPF sk_lookup
 * redirect first, then the bucket for the exact destination address,
 * finally the wildcard (in6addr_any) bucket.  Returns NULL when nothing
 * matches (a BPF error result is also mapped to NULL).
 */
struct sock *inet6_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		struct sk_buff *skb, int doff,
		const struct in6_addr *saddr,
		const __be16 sport, const struct in6_addr *daddr,
		const unsigned short hnum, const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		result = inet6_lookup_run_bpf(net, hashinfo, skb, doff,
					      saddr, sport, daddr, hnum, dif);
		if (result)
			goto done;
	}

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet6_lhash2_lookup(net, ilb2, skb, doff,
				     saddr, sport, daddr, hnum,
				     dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with in6addr_any */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet6_lhash2_lookup(net, ilb2, skb, doff,
				     saddr, sport, &in6addr_any, hnum,
				     dif, sdif);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(inet6_lookup_listener);

/* Full established-then-listener lookup.  On success the caller owns a
 * reference on the returned socket (taken below when the underlying
 * lookup returned an unreferenced one).
 */
struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
			  struct sk_buff *skb, int doff,
			  const struct in6_addr *saddr, const __be16 sport,
			  const struct in6_addr *daddr, const __be16 dport,
			  const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
			    ntohs(dport), dif, 0, &refcounted);
	if
	    (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		/* The lockless lookup returned an unreferenced socket that
		 * died before we could grab it: report no match.
		 */
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(inet6_lookup);

/* Check that @sk's 4-tuple (with local port @lport) is not already in
 * use by an established socket, and if unique, hash @sk into the ehash
 * table under the bucket lock.  A TIME_WAIT occupant of the same tuple
 * may be recycled when twsk_unique() allows it; the displaced
 * timewait socket is returned via @twp (or dropped here when @twp is
 * NULL).  Returns 0 on success, -EADDRNOTAVAIL when the tuple is taken.
 */
static int __inet6_check_established(struct inet_timewait_death_row *death_row,
				     struct sock *sk, const __u16 lport,
				     struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	/* Note the swap: for an outgoing connection, our local address
	 * is the "destination" key of the established-table entry.
	 */
	const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr;
	const struct in6_addr *saddr = &sk->sk_v6_daddr;
	const int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	const int sdif = l3mdev_master_ifindex_by_index(net, dif);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr,
						inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(inet6_match(net, sk2, saddr, daddr, ports,
				       dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				/* Tuple held only by TIME_WAIT: recycle it
				 * if the timewait rules permit.
				 */
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * in hash table socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		/* Unhash the recycled timewait socket while still holding
		 * the bucket lock, so the tuple is never visible twice.
		 */
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		/* Caller takes ownership of the displaced timewait sock. */
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead...
		 */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

/* Starting point for the ephemeral-port search of an outgoing
 * connection: a keyed hash of the connection's addresses and
 * destination port.
 */
static u64 inet6_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32,
					  sk->sk_v6_daddr.s6_addr32,
					  inet->inet_dport);
}

/* Bind-and-hash an outgoing IPv6 connection.  When no local port is
 * set yet, seed the ephemeral-port walk with a per-flow offset; tuple
 * uniqueness is enforced by __inet6_check_established.
 */
int inet6_hash_connect(struct inet_timewait_death_row *death_row,
		       struct sock *sk)
{
	u64 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet6_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet6_check_established);
}
EXPORT_SYMBOL_GPL(inet6_hash_connect);

/* Hash an IPv6 socket into the generic tables; TCP_CLOSE sockets are
 * never hashed (see the comment above __inet6_lookup_established).
 */
int inet6_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE)
		err = __inet_hash(sk, NULL);

	return err;
}
EXPORT_SYMBOL_GPL(inet6_hash);