// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET6 transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp, generalised here
 *		by Arnaldo Carvalho de Melo <acme@mandriva.com>
 */

#include <linux/module.h>
#include <linux/random.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/sock_reuseport.h>

extern struct inet_hashinfo tcp_hashinfo;

u32 inet6_ehashfn(const struct net *net,
		  const struct in6_addr *laddr, const u16 lport,
		  const struct in6_addr *faddr, const __be16 fport)
{
	static u32 inet6_ehash_secret __read_mostly;
	static u32 ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
	net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       inet6_ehash_secret + net_hash_mix(net));
}

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
 *
 * The sockhash lock must be held as a reader here.
 */
struct sock *__inet6_lookup_established(struct net *net,
					struct inet_hashinfo *hashinfo,
					const struct in6_addr *saddr,
					const __be16 sport,
					const struct in6_addr *daddr,
					const u16 hnum,
					const int dif, const int sdif)
{
	struct sock *sk;
	const struct hlist_nulls_node *node;
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif, sdif))
			continue;
		if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
			goto out;

		if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif, sdif))) {
			sock_gen_put(sk);
			goto begin;
		}
		goto found;
	}
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL(__inet6_lookup_established);
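/* Score a bound/listening socket against a packet's destination address
 * and interface: -1 means "does not match"; otherwise a device-bound
 * socket (2) outranks an unbound one (1), with a bonus point when the
 * socket's incoming CPU matches the CPU doing the lookup.
 */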
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum,
				const struct in6_addr *daddr,
				const int dif, const int sdif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
	    sk->sk_family == PF_INET6) {
		if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
			return -1;

		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
			return -1;

		score = sk->sk_bound_dev_if ? 2 : 1;
		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
			score++;
	}
	return score;
}

static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
					    struct sk_buff *skb, int doff,
					    const struct in6_addr *saddr,
					    __be16 sport,
					    const struct in6_addr *daddr,
					    unsigned short hnum)
{
	struct sock *reuse_sk = NULL;
	u32 phash;

	if (sk->sk_reuseport) {
		phash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
	}
	return reuse_sk;
}

/* called with rcu_read_lock() */
static struct sock *inet6_lhash2_lookup(struct net *net,
		struct inet_listen_hashbucket *ilb2,
		struct sk_buff *skb, int doff,
		const struct in6_addr *saddr,
		const __be16 sport, const struct in6_addr *daddr,
		const unsigned short hnum, const int dif, const int sdif)
{
	struct inet_connection_sock *icsk;
	struct sock *sk, *result = NULL;
	int score, hiscore = 0;

	inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
		sk = (struct sock *)icsk;
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			result = lookup_reuseport(net, sk, skb, doff,
						  saddr, sport, daddr, hnum);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}

static inline struct sock *inet6_lookup_run_bpf(struct net *net,
						struct inet_hashinfo *hashinfo,
						struct sk_buff *skb, int doff,
						const struct in6_addr *saddr,
						const __be16 sport,
						const struct in6_addr *daddr,
						const u16 hnum, const int dif)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (hashinfo != &tcp_hashinfo)
		return NULL; /* only TCP is supported */

	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

struct sock *inet6_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		struct sk_buff *skb, int doff,
		const struct in6_addr *saddr,
		const __be16 sport, const struct in6_addr *daddr,
		const unsigned short hnum, const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		result = inet6_lookup_run_bpf(net, hashinfo, skb, doff,
					      saddr, sport, daddr, hnum, dif);
		if (result)
			goto done;
	}

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet6_lhash2_lookup(net, ilb2, skb, doff,
				     saddr, sport, daddr, hnum,
				     dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with in6addr_any */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet6_lhash2_lookup(net, ilb2, skb, doff,
				     saddr, sport, &in6addr_any, hnum,
				     dif, sdif);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(inet6_lookup_listener);

struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
			  struct sk_buff *skb, int doff,
			  const struct in6_addr *saddr, const __be16 sport,
			  const struct in6_addr *daddr, const __be16 dport,
			  const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
			    ntohs(dport), dif, 0, &refcounted);
	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(inet6_lookup);
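/* Connect-time uniqueness check: under the ehash bucket lock, verify that
 * no established socket already uses the same 4-tuple in this netns.  A
 * matching TIME_WAIT socket may be recycled via twsk_unique(); on success
 * the connecting socket is hashed into the bucket with the chosen port.
 */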
static int __inet6_check_established(struct inet_timewait_death_row *death_row,
				     struct sock *sk, const __u16 lport,
				     struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr;
	const struct in6_addr *saddr = &sk->sk_v6_daddr;
	const int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	const int sdif = l3mdev_master_ifindex_by_index(net, dif);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr,
						inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports,
				       dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * in hash table socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

static u32 inet6_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32,
					  sk->sk_v6_daddr.s6_addr32,
					  inet->inet_dport);
}

int inet6_hash_connect(struct inet_timewait_death_row *death_row,
		       struct sock *sk)
{
	u32 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet6_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet6_check_established);
}
EXPORT_SYMBOL_GPL(inet6_hash_connect);

int inet6_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		err = __inet_hash(sk, NULL);
		local_bh_enable();
	}

	return err;
}
EXPORT_SYMBOL_GPL(inet6_hash);