/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET6 connection oriented protocols.
 *
 * Authors:	See the TCPv6 sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
#include <net/inet6_connection_sock.h>

int inet6_csk_bind_conflict(const struct sock *sk,
			    const struct inet_bind_bucket *tb, bool relax)
{
	const struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/* We must walk the whole port owner list in this case. -DaveM */
	/*
	 * See comment in inet_csk_bind_conflict about sock lookup
	 * vs net namespaces issues.
	 */
	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			     sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			     (sk2->sk_state != TCP_TIME_WAIT &&
			      !uid_eq(uid,
				      sock_i_uid((struct sock *)sk2))))) {
				if (ipv6_rcv_saddr_equal(sk, sk2))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN &&
			    ipv6_rcv_saddr_equal(sk, sk2))
				break;
		}
	}

	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);

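/*
 * Route the reply to a request sock: build a TCP flowi6 from @req
 * (remote/local addresses, ports, incoming interface and mark) and look
 * up its dst.  Returns NULL if no route could be found.
 */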
struct dst_entry *inet6_csk_route_req(struct sock *sk,
				      struct flowi6 *fl6,
				      const struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = IPPROTO_TCP;
	fl6->daddr = ireq->ir_v6_rmt_addr;
	final_p = fl6_update_dst(fl6, np->opt, &final);
	fl6->saddr = ireq->ir_v6_loc_addr;
	fl6->flowi6_oif = ireq->ir_iif;
	fl6->flowi6_mark = ireq->ir_mark;
	fl6->fl6_dport = ireq->ir_rmt_port;
	fl6->fl6_sport = htons(ireq->ir_num);
	security_req_classify_flow(req, flowi6_to_flowi(fl6));

	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
	if (IS_ERR(dst))
		return NULL;

	return dst;
}

/*
 * request_sock (formerly open request) hash tables.
 */
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
			   const u32 rnd, const u32 synq_hsize)
{
	u32 c;

	c = jhash_3words((__force u32)raddr->s6_addr32[0],
			 (__force u32)raddr->s6_addr32[1],
			 (__force u32)raddr->s6_addr32[2],
			 rnd);

	c = jhash_2words((__force u32)raddr->s6_addr32[3],
			 (__force u32)rport,
			 c);

	return c & (synq_hsize - 1);
}

struct request_sock *inet6_csk_search_req(struct sock *sk,
					  const __be16 rport,
					  const struct in6_addr *raddr,
					  const struct in6_addr *laddr,
					  const int iif)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req;
	u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
				   lopt->nr_table_entries);

	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->ir_rmt_port == rport &&
		    req->rsk_ops->family == AF_INET6 &&
		    ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
		    ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
		    (!ireq->ir_iif || ireq->ir_iif == iif)) {
			atomic_inc(&req->rsk_refcnt);
			WARN_ON(req->sk != NULL);
			break;
		}
	}
	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);

	return req;
}
EXPORT_SYMBOL_GPL(inet6_csk_search_req);

void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
				    struct request_sock *req,
				    const unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
				      inet_rsk(req)->ir_rmt_port,
				      lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);

void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;

	sin6->sin6_family = AF_INET6;
	sin6->sin6_addr = sk->sk_v6_daddr;
	sin6->sin6_port = inet_sk(sk)->inet_dport;
	/* We do not store received flowlabel for TCP */
	sin6->sin6_flowinfo = 0;
	sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
						  sk->sk_bound_dev_if);
}
EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);

static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
			   const struct in6_addr *daddr,
			   const struct in6_addr *saddr)
{
	__ip6_dst_store(sk, dst, daddr, saddr);
}

static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
	return __sk_dst_check(sk, cookie);
}

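/*
 * Return a route for the connected socket: reuse the dst cached on the
 * socket while np->dst_cookie is still valid, otherwise do a fresh
 * ip6_dst_lookup_flow() and cache the result.  @fl6 is filled in either
 * way so callers (inet6_csk_xmit, inet6_csk_update_pmtu) can reuse the
 * flow.
 */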
static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
						struct flowi6 *fl6)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = sk->sk_protocol;
	fl6->daddr = sk->sk_v6_daddr;
	fl6->saddr = np->saddr;
	fl6->flowlabel = np->flow_label;
	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
	fl6->flowi6_oif = sk->sk_bound_dev_if;
	fl6->flowi6_mark = sk->sk_mark;
	fl6->fl6_sport = inet->inet_sport;
	fl6->fl6_dport = inet->inet_dport;
	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));

	final_p = fl6_update_dst(fl6, np->opt, &final);

	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
	if (!dst) {
		dst = ip6_dst_lookup_flow(sk, fl6, final_p);

		if (!IS_ERR(dst))
			__inet6_csk_dst_store(sk, dst, NULL, NULL);
	}
	return dst;
}

int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 fl6;
	struct dst_entry *dst;
	int res;

	dst = inet6_csk_route_socket(sk, &fl6);
	if (IS_ERR(dst)) {
		sk->sk_err_soft = -PTR_ERR(dst);
		sk->sk_route_caps = 0;
		kfree_skb(skb);
		return PTR_ERR(dst);
	}

	rcu_read_lock();
	skb_dst_set_noref(skb, dst);

	/* Restore final destination back after routing done */
	fl6.daddr = sk->sk_v6_daddr;

	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);

struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct flowi6 fl6;
	struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);

	if (IS_ERR(dst))
		return NULL;
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = inet6_csk_route_socket(sk, &fl6);
	return IS_ERR(dst) ? NULL : dst;
}
EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
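
/*
 * Usage sketch (illustrative): a connection oriented IPv6 protocol
 * typically plugs these helpers into its inet_connection_sock_af_ops,
 * roughly as TCPv6's ipv6_specific does:
 *
 *	static const struct inet_connection_sock_af_ops example_ipv6_af_ops = {
 *		.queue_xmit	= inet6_csk_xmit,
 *		.addr2sockaddr	= inet6_csk_addr2sockaddr,
 *		.sockaddr_len	= sizeof(struct sockaddr_in6),
 *		.bind_conflict	= inet6_csk_bind_conflict,
 *		...
 *	};
 *
 * "example_ipv6_af_ops" is a made-up name; see net/ipv6/tcp_ipv6.c and
 * net/dccp/ipv6.c for the real ops tables.
 */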