/*
 * INET        An implementation of the TCP/IP protocol suite for the LINUX
 *             operating system.  INET is implemented using the BSD Socket
 *             interface as the means of communication with the user level.
 *
 *             Support for INET6 connection oriented protocols.
 *
 * Authors:    See the TCPv6 sources
 *
 *             This program is free software; you can redistribute it and/or
 *             modify it under the terms of the GNU General Public License
 *             as published by the Free Software Foundation; either version
 *             2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
#include <net/inet6_connection_sock.h>

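/*
 * Check whether binding sk to its requested port conflicts with another
 * socket already bound to that port (bucket tb).  Sharing is only allowed
 * when the sockets are bound to different devices, or when both set
 * SO_REUSEADDR and the existing owner is not in TCP_LISTEN, or when their
 * IPv6 source addresses do not overlap (ipv6_rcv_saddr_equal()).
 */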
int inet6_csk_bind_conflict(const struct sock *sk,
			    const struct inet_bind_bucket *tb)
{
	const struct sock *sk2;
	const struct hlist_node *node;

	/* We must walk the whole port owner list in this case. -DaveM */
	/*
	 * See comment in inet_csk_bind_conflict about sock lookup
	 * vs net namespaces issues.
	 */
	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
		    (!sk->sk_reuse || !sk2->sk_reuse ||
		     sk2->sk_state == TCP_LISTEN) &&
		     ipv6_rcv_saddr_equal(sk, sk2))
			break;
	}

	return node != NULL;
}
EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);

/*
 * request_sock (formerly open request) hash tables.
 */
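/*
 * Hash the remote address/port of an incoming SYN into the listener's
 * syn_table.  All four 32-bit words of the IPv6 address and the remote
 * port are folded through the jhash mixing step, salted with the
 * per-listener hash_rnd; the index is taken by masking, so the table
 * size must be a power of two.
 */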
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
			   const u32 rnd, const u16 synq_hsize)
{
	u32 a = (__force u32)raddr->s6_addr32[0];
	u32 b = (__force u32)raddr->s6_addr32[1];
	u32 c = (__force u32)raddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)raddr->s6_addr32[3];
	b += (__force u32)rport;
	__jhash_mix(a, b, c);

	return c & (synq_hsize - 1);
}

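/*
 * Find a pending connection request on listener sk matching the remote
 * port, remote and local addresses and, when the request recorded one,
 * the incoming interface.  On success *prevp points to the slot holding
 * the request so the caller can unlink it from its hash chain.
 */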
struct request_sock *inet6_csk_search_req(const struct sock *sk,
					  struct request_sock ***prevp,
					  const __be16 rport,
					  const struct in6_addr *raddr,
					  const struct in6_addr *laddr,
					  const int iif)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet6_synq_hash(raddr, rport,
						     lopt->hash_rnd,
						     lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet6_request_sock *treq = inet6_rsk(req);

		if (inet_rsk(req)->rmt_port == rport &&
		    req->rsk_ops->family == AF_INET6 &&
		    ipv6_addr_equal(&treq->rmt_addr, raddr) &&
		    ipv6_addr_equal(&treq->loc_addr, laddr) &&
		    (!treq->iif || treq->iif == iif)) {
			WARN_ON(req->sk != NULL);
			*prevp = prev;
			return req;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(inet6_csk_search_req);

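/*
 * Add a freshly allocated request_sock to the listener's syn_table,
 * using the same hash as inet6_csk_search_req(), and account for it in
 * the accept queue via inet_csk_reqsk_queue_added().
 */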
void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
				    struct request_sock *req,
				    const unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
				      inet_rsk(req)->rmt_port,
				      lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);

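/*
 * Report the peer's address and port in a sockaddr_in6.  The received
 * flow label is not stored for TCP, so sin6_flowinfo is always zero;
 * sin6_scope_id is only filled in for link-local peers on a device-bound
 * socket.
 */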
void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)uaddr;

	sin6->sin6_family = AF_INET6;
	ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
	sin6->sin6_port = inet_sk(sk)->dport;
	/* We do not store received flowlabel for TCP */
	sin6->sin6_flowinfo = 0;
	sin6->sin6_scope_id = 0;
	if (sk->sk_bound_dev_if &&
	    ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
		sin6->sin6_scope_id = sk->sk_bound_dev_if;
}
EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);

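/*
 * The socket's dst cache.  Under CONFIG_XFRM the current flow cache
 * generation is recorded in the cached route so that
 * __inet6_csk_dst_check() can drop the entry once an XFRM policy change
 * has bumped flow_cache_genid, forcing a fresh route/policy lookup.
 */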
static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
			   struct in6_addr *daddr, struct in6_addr *saddr)
{
	__ip6_dst_store(sk, dst, daddr, saddr);

#ifdef CONFIG_XFRM
	{
		struct rt6_info *rt = (struct rt6_info *)dst;
		rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
	}
#endif
}

static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst;

	dst = __sk_dst_check(sk, cookie);

#ifdef CONFIG_XFRM
	if (dst) {
		struct rt6_info *rt = (struct rt6_info *)dst;
		if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
			sk->sk_dst_cache = NULL;
			dst_release(dst);
			dst = NULL;
		}
	}
#endif

	return dst;
}

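/*
 * Transmit skb on a connected IPv6 socket.  The flow is built from the
 * socket's addresses, ports and flow label; if a type 0 routing header
 * is configured, its first hop temporarily replaces the destination for
 * the route lookup and the real destination is restored before
 * ip6_xmit().  The route comes from the socket's dst cache when still
 * valid, otherwise a new lookup (including xfrm_lookup()) is performed
 * and the result is cached on the socket.
 */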
int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi fl;
	struct dst_entry *dst;
	struct in6_addr *final_p = NULL, final;

	memset(&fl, 0, sizeof(fl));
	fl.proto = sk->sk_protocol;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, &np->saddr);
	fl.fl6_flowlabel = np->flow_label;
	IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_sport = inet->sport;
	fl.fl_ip_dport = inet->dport;
	security_sk_classify_flow(sk, &fl);

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	dst = __inet6_csk_dst_check(sk, np->dst_cookie);

	if (dst == NULL) {
		int err = ip6_dst_lookup(sk, &dst, &fl);

		if (err) {
			sk->sk_err_soft = -err;
			kfree_skb(skb);
			return err;
		}

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0);
		if (err < 0) {
			sk->sk_route_caps = 0;
			kfree_skb(skb);
			return err;
		}

		__inet6_csk_dst_store(sk, dst, NULL, NULL);
	}

	skb_dst_set(skb, dst_clone(dst));

	/* Restore final destination back after routing done */
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);

	return ip6_xmit(sk, skb, &fl, np->opt, 0);
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);