// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET6 transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp, generalised here
 *		by Arnaldo Carvalho de Melo <acme@mandriva.com>
 */

#include <linux/module.h>
#include <linux/random.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/sock_reuseport.h>

/*
 * Keyed hash over the IPv6 4-tuple used to pick an established-hash
 * (ehash) bucket.  The two secrets are lazily initialized once per boot;
 * net_hash_mix() is folded in so that distinct network namespaces hash
 * the same 4-tuple to different buckets.
 */
u32 inet6_ehashfn(const struct net *net,
		  const struct in6_addr *laddr, const u16 lport,
		  const struct in6_addr *faddr, const __be16 fport)
{
	static u32 inet6_ehash_secret __read_mostly;
	static u32 ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
	net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));

	/*
	 * Local side: only the last 32 bits of the address feed the hash.
	 * Foreign side: full keyed jhash of the address, so a remote peer
	 * cannot steer connections into a chosen bucket.
	 */
	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       inet6_ehash_secret + net_hash_mix(net));
}

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
 *
 * The sockhash lock must be held as a reader here.
 */
struct sock *__inet6_lookup_established(struct net *net,
					struct inet_hashinfo *hashinfo,
					const struct in6_addr *saddr,
					const __be16 sport,
					const struct in6_addr *daddr,
					const u16 hnum,
					const int dif, const int sdif)
{
	struct sock *sk;
	const struct hlist_nulls_node *node;
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];


begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
			continue;
		/* A zero refcount means the socket is being freed; it may
		 * only be skipped, never revived.
		 */
		if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
			goto out;

		/* Re-check the match after taking the reference: with
		 * RCU-typesafe memory the object may have been recycled
		 * for a different connection between the first match and
		 * the refcount bump.  If so, drop it and restart.
		 */
		if (unlikely(!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))) {
			sock_gen_put(sk);
			goto begin;
		}
		goto found;
	}
	/* The walk ended on a nulls marker.  If it is not the marker of
	 * the slot we started in, a concurrent writer moved entries to
	 * another chain under us — restart the scan.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL(__inet6_lookup_established);

/*
 * Rank a listening socket as a candidate for (daddr, hnum) delivery.
 * Returns -1 when the socket does not match at all; otherwise 2 for a
 * device-bound socket, 1 for an unbound one, with a +1 bonus when the
 * socket's preferred CPU is the current one.
 */
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum,
				const struct in6_addr *daddr,
				const int dif, const int sdif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
	    sk->sk_family == PF_INET6) {
		if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
			return -1;

		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
			return -1;

		/* Device-bound listeners beat wildcard-device ones. */
		score = sk->sk_bound_dev_if ? 2 : 1;
		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
			score++;
	}
	return score;
}

/*
 * If @sk has SO_REUSEPORT enabled, let the reuseport group pick the
 * target socket for this packet (hashed on the 4-tuple, or via an
 * attached BPF program).  Returns NULL when @sk is not in a reuseport
 * group, i.e. the caller should stick with @sk itself.
 */
struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
				    struct sk_buff *skb, int doff,
				    const struct in6_addr *saddr,
				    __be16 sport,
				    const struct in6_addr *daddr,
				    unsigned short hnum)
{
	struct sock *reuse_sk = NULL;
	u32 phash;

	if (sk->sk_reuseport) {
		phash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
	}
	return reuse_sk;
}
EXPORT_SYMBOL_GPL(inet6_lookup_reuseport);

/* called with rcu_read_lock() */
static struct sock *inet6_lhash2_lookup(struct net *net,
		struct inet_listen_hashbucket *ilb2,
		struct sk_buff *skb, int doff,
		const struct in6_addr *saddr,
		const __be16 sport, const struct in6_addr *daddr,
		const unsigned short hnum, const int dif, const int sdif)
{
	struct sock *sk, *result = NULL;
	struct hlist_nulls_node *node;
	int score, hiscore = 0;

	/* Walk one lhash2 bucket keeping the best-scoring listener seen
	 * so far.  The first socket that improves the score and belongs
	 * to a reuseport group short-circuits the scan: the group has
	 * already balanced across its members.
	 */
	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			result = inet6_lookup_reuseport(net, sk, skb, doff,
							saddr, sport, daddr, hnum);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}

/*
 * Give a BPF sk_lookup program first shot at steering this packet.
 * Returns NULL when BPF declined (caller falls back to the normal
 * lookup), an ERR_PTR to drop, or the selected socket — possibly
 * redirected once more through its reuseport group unless the program
 * asked for no reuseport handling.
 */
static inline struct sock *inet6_lookup_run_bpf(struct net *net,
						struct inet_hashinfo *hashinfo,
						struct sk_buff *skb, int doff,
						const struct in6_addr *saddr,
						const __be16 sport,
						const struct in6_addr *daddr,
						const u16 hnum, const int dif)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (hashinfo != net->ipv4.tcp_death_row.hashinfo)
		return NULL; /* only TCP is supported */

	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = inet6_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

/*
 * Find the best listening socket for an incoming SYN: BPF sk_lookup
 * redirect first, then the lhash2 bucket for the exact destination
 * address, then the bucket for the in6addr_any wildcard.  An ERR_PTR
 * from the BPF path (drop verdict) is mapped to NULL here.
 */
struct sock *inet6_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		struct sk_buff *skb, int doff,
		const struct in6_addr *saddr,
		const __be16 sport, const struct in6_addr *daddr,
		const unsigned short hnum, const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		result = inet6_lookup_run_bpf(net, hashinfo, skb, doff,
					      saddr, sport, daddr, hnum, dif);
		if (result)
			goto done;
	}

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet6_lhash2_lookup(net, ilb2, skb, doff,
				     saddr, sport, daddr, hnum,
				     dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with in6addr_any */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet6_lhash2_lookup(net, ilb2, skb, doff,
				     saddr, sport, &in6addr_any, hnum,
				     dif, sdif);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(inet6_lookup_listener);

/*
 * Full (established + listener) lookup that always hands back a
 * referenced socket or NULL.  __inet6_lookup() may return an
 * unreferenced listener under RCU; take the reference here and drop
 * the result if the socket died in the meantime.
 */
struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
			  struct sk_buff *skb, int doff,
			  const struct in6_addr *saddr, const __be16 sport,
			  const struct in6_addr *daddr, const __be16 dport,
			  const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
			    ntohs(dport), dif, 0, &refcounted);
	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(inet6_lookup);

/*
 * Connect-time uniqueness check for the ehash: verify that no live
 * socket already owns @sk's 4-tuple with local port @lport, and if the
 * only conflict is a recyclable TIME_WAIT entry, displace it and hash
 * @sk in its place under the bucket spinlock.  Returns 0 on success
 * (with *@twp set to the displaced timewait socket, if @twp is
 * non-NULL), -EADDRNOTAVAIL when the tuple is taken.
 */
static int __inet6_check_established(struct inet_timewait_death_row *death_row,
				     struct sock *sk, const __u16 lport,
				     struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	/* Note the reversal: the hash/compare below use the packet-side
	 * naming, so our local (rcv_saddr) address is "daddr" and the
	 * peer (sk_v6_daddr) is "saddr".
	 */
	const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr;
	const struct in6_addr *saddr = &sk->sk_v6_daddr;
	const int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	const int sdif = l3mdev_master_ifindex_by_index(net, dif);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr,
						inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(inet6_match(net, sk2, saddr, daddr, ports,
				       dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				/* A unique timewait conflict may be
				 * recycled; stop scanning and take its
				 * place below.
				 */
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * in hash table socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		/* Caller takes ownership of the displaced timewait sock. */
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

/*
 * Per-connection starting offset for ephemeral port selection, derived
 * from the 3-tuple so repeated connects to the same peer walk different
 * port sequences than connects elsewhere.
 */
static u64 inet6_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32,
					  sk->sk_v6_daddr.s6_addr32,
					  inet->inet_dport);
}

/*
 * Bind-and-hash an IPv6 connect(): picks an ephemeral port when none is
 * bound yet (inet_num == 0) and inserts the socket via the generic
 * __inet_hash_connect() with our uniqueness callback.
 */
int inet6_hash_connect(struct inet_timewait_death_row *death_row,
		       struct sock *sk)
{
	u64 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet6_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet6_check_established);
}
EXPORT_SYMBOL_GPL(inet6_hash_connect);

/*
 * Protocol .hash callback: closed sockets stay out of the tables
 * entirely (see the comment above __inet6_lookup_established).
 */
int inet6_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE)
		err = __inet_hash(sk, NULL);

	return err;
}
EXPORT_SYMBOL_GPL(inet6_hash);