/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET6 connection oriented protocols.
 *
 * Authors:	See the TCPv6 sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
#include <net/inet6_connection_sock.h>

int inet6_csk_bind_conflict(const struct sock *sk,
			    const struct inet_bind_bucket *tb, bool relax)
{
	const struct sock *sk2;
	const struct hlist_node *node;

	/* We must walk the whole port owner list in this case. -DaveM */
	/*
	 * See comment in inet_csk_bind_conflict about sock lookup
	 * vs net namespaces issues.
	 */
	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
		    (!sk->sk_reuse || !sk2->sk_reuse ||
		     sk2->sk_state == TCP_LISTEN) &&
		    ipv6_rcv_saddr_equal(sk, sk2))
			break;
	}

	return node != NULL;
}

EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);

struct dst_entry *inet6_csk_route_req(struct sock *sk,
				      struct flowi6 *fl6,
				      const struct request_sock *req)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = IPPROTO_TCP;
	fl6->daddr = treq->rmt_addr;
	final_p = fl6_update_dst(fl6, np->opt, &final);
	fl6->saddr = treq->loc_addr;
	fl6->flowi6_oif = treq->iif;
	fl6->flowi6_mark = sk->sk_mark;
	fl6->fl6_dport = inet_rsk(req)->rmt_port;
	fl6->fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(fl6));

	dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
	if (IS_ERR(dst))
		return NULL;

	return dst;
}

/*
 * request_sock (formerly open request) hash tables.
 */
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
			   const u32 rnd, const u32 synq_hsize)
{
	u32 c;

	c = jhash_3words((__force u32)raddr->s6_addr32[0],
			 (__force u32)raddr->s6_addr32[1],
			 (__force u32)raddr->s6_addr32[2],
			 rnd);

	c = jhash_2words((__force u32)raddr->s6_addr32[3],
			 (__force u32)rport,
			 c);

	return c & (synq_hsize - 1);
}

struct request_sock *inet6_csk_search_req(const struct sock *sk,
					  struct request_sock ***prevp,
					  const __be16 rport,
					  const struct in6_addr *raddr,
					  const struct in6_addr *laddr,
					  const int iif)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet6_synq_hash(raddr, rport,
						     lopt->hash_rnd,
						     lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet6_request_sock *treq = inet6_rsk(req);

		if (inet_rsk(req)->rmt_port == rport &&
		    req->rsk_ops->family == AF_INET6 &&
		    ipv6_addr_equal(&treq->rmt_addr, raddr) &&
		    ipv6_addr_equal(&treq->loc_addr, laddr) &&
		    (!treq->iif || treq->iif == iif)) {
			WARN_ON(req->sk != NULL);
			*prevp = prev;
			return req;
		}
	}

	return NULL;
}

EXPORT_SYMBOL_GPL(inet6_csk_search_req);

void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
				    struct request_sock *req,
				    const unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
				      inet_rsk(req)->rmt_port,
				      lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);

void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;

	sin6->sin6_family = AF_INET6;
	sin6->sin6_addr = np->daddr;
	sin6->sin6_port = inet_sk(sk)->inet_dport;
	/* We do not store received flowlabel for TCP */
	sin6->sin6_flowinfo = 0;
	sin6->sin6_scope_id = 0;
	if (sk->sk_bound_dev_if &&
	    ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
		sin6->sin6_scope_id = sk->sk_bound_dev_if;
}

EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);

static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
			   const struct in6_addr *daddr,
			   const struct in6_addr *saddr)
{
	__ip6_dst_store(sk, dst, daddr, saddr);

#ifdef CONFIG_XFRM
	{
		struct rt6_info *rt = (struct rt6_info *)dst;
		rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
	}
#endif
}

static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst;

	dst = __sk_dst_check(sk, cookie);

#ifdef CONFIG_XFRM
	if (dst) {
		struct rt6_info *rt = (struct rt6_info *)dst;
		if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
			__sk_dst_reset(sk);
			dst = NULL;
		}
	}
#endif

	return dst;
}

static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
						struct flowi6 *fl6)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = sk->sk_protocol;
	fl6->daddr = np->daddr;
	fl6->saddr = np->saddr;
	fl6->flowlabel = np->flow_label;
	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
	fl6->flowi6_oif = sk->sk_bound_dev_if;
	fl6->flowi6_mark = sk->sk_mark;
	fl6->fl6_sport = inet->inet_sport;
	fl6->fl6_dport = inet->inet_dport;
	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));

	final_p = fl6_update_dst(fl6, np->opt, &final);

	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
	if (!dst) {
		dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);

		if (!IS_ERR(dst))
			__inet6_csk_dst_store(sk, dst, NULL, NULL);
	}
	return dst;
}

int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
{
	struct sock *sk = skb->sk;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 fl6;
	struct dst_entry *dst;
	int res;

	dst = inet6_csk_route_socket(sk, &fl6);
	if (IS_ERR(dst)) {
		sk->sk_err_soft = -PTR_ERR(dst);
		sk->sk_route_caps = 0;
		kfree_skb(skb);
		return PTR_ERR(dst);
	}

	rcu_read_lock();
	skb_dst_set_noref(skb, dst);

	/* Restore final destination back after routing done */
	fl6.daddr = np->daddr;

	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);

struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct flowi6 fl6;
	struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);

	if (IS_ERR(dst))
		return NULL;
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	return inet6_csk_route_socket(sk, &fl6);
}
EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);