/*
 * INET        An implementation of the TCP/IP protocol suite for the LINUX
 *             operating system.  INET is implemented using the BSD Socket
 *             interface as the means of communication with the user level.
 *
 *             Support for INET6 connection-oriented protocols.
 *
 * Authors:    See the TCPv6 sources
 *
 *             This program is free software; you can redistribute it and/or
 *             modify it under the terms of the GNU General Public License
 *             as published by the Free Software Foundation; either version
 *             2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
#include <net/inet6_connection_sock.h>

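/*
 * Return non-zero when another socket in this bind bucket conflicts with
 * @sk: the two sockets are bound to the same device (or one of them to
 * none), at least one of them does not allow address reuse (or the other
 * one is still listening), and their IPv6 receive addresses overlap.
 */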
int inet6_csk_bind_conflict(const struct sock *sk,
			    const struct inet_bind_bucket *tb)
{
	const struct sock *sk2;
	const struct hlist_node *node;

	/* We must walk the whole port owner list in this case. -DaveM */
	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
		    (!sk->sk_reuse || !sk2->sk_reuse ||
		     sk2->sk_state == TCP_LISTEN) &&
		    ipv6_rcv_saddr_equal(sk, sk2))
			break;
	}

	return node != NULL;
}

EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);

/*
 * request_sock (formerly open request) hash tables.
 */
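/*
 * Hash the peer address and port into a bucket of the listener's SYN
 * queue; the result is masked with (synq_hsize - 1), so the table size
 * is expected to be a power of two.
 */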
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
			   const u32 rnd, const u16 synq_hsize)
{
	u32 a = (__force u32)raddr->s6_addr32[0];
	u32 b = (__force u32)raddr->s6_addr32[1];
	u32 c = (__force u32)raddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)raddr->s6_addr32[3];
	b += (__force u32)rport;
	__jhash_mix(a, b, c);

	return c & (synq_hsize - 1);
}

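/*
 * Find a pending connection request on listener @sk matching the peer's
 * address and port, our local address and, if one was recorded, the
 * inbound interface.  On success *prevp points to the link that chains
 * the request into its hash bucket, so the caller can unlink it without
 * walking the chain again.
 */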
struct request_sock *inet6_csk_search_req(const struct sock *sk,
					  struct request_sock ***prevp,
					  const __be16 rport,
					  const struct in6_addr *raddr,
					  const struct in6_addr *laddr,
					  const int iif)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet6_synq_hash(raddr, rport,
						     lopt->hash_rnd,
						     lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet6_request_sock *treq = inet6_rsk(req);

		if (inet_rsk(req)->rmt_port == rport &&
		    req->rsk_ops->family == AF_INET6 &&
		    ipv6_addr_equal(&treq->rmt_addr, raddr) &&
		    ipv6_addr_equal(&treq->loc_addr, laddr) &&
		    (!treq->iif || treq->iif == iif)) {
			BUG_TRAP(req->sk == NULL);
			*prevp = prev;
			return req;
		}
	}

	return NULL;
}

EXPORT_SYMBOL_GPL(inet6_csk_search_req);

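/*
 * Hash a new connection request into the listener's SYN table and update
 * the accept-queue accounting; @timeout is handed on to the request
 * queue code as the expiry interval for the pending request.
 */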
void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
				    struct request_sock *req,
				    const unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
				      inet_rsk(req)->rmt_port,
				      lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);

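/*
 * Fill a sockaddr_in6 with the peer address and port of this connected
 * socket.  The received flow label is not reported, and the scope id is
 * only set for link-local peers on a device-bound socket.
 */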
void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;

	sin6->sin6_family = AF_INET6;
	ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
	sin6->sin6_port = inet_sk(sk)->dport;
	/* We do not store received flowlabel for TCP */
	sin6->sin6_flowinfo = 0;
	sin6->sin6_scope_id = 0;
	if (sk->sk_bound_dev_if &&
	    ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
		sin6->sin6_scope_id = sk->sk_bound_dev_if;
}

EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);

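/*
 * Cache the route in the socket.  Under CONFIG_XFRM the current flow
 * cache generation is recorded so that __inet6_csk_dst_check() can spot
 * a stale entry later.
 */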
static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
			   struct in6_addr *daddr, struct in6_addr *saddr)
{
	__ip6_dst_store(sk, dst, daddr, saddr);

#ifdef CONFIG_XFRM
	{
		struct rt6_info *rt = (struct rt6_info *)dst;
		rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
	}
#endif
}

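/*
 * Revalidate the socket's cached route: __sk_dst_check() rejects entries
 * with a stale cookie, and under CONFIG_XFRM a route saved under an
 * older flow cache generation is dropped so the caller performs a fresh
 * lookup.
 */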
static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst;

	dst = __sk_dst_check(sk, cookie);

#ifdef CONFIG_XFRM
	if (dst) {
		struct rt6_info *rt = (struct rt6_info *)dst;
		if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
			sk->sk_dst_cache = NULL;
			dst_release(dst);
			dst = NULL;
		}
	}
#endif

	return dst;
}

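/*
 * Transmit @skb on a connected socket: build the flow from the socket's
 * addresses and ports, reuse the cached route if it is still valid,
 * otherwise do a fresh ip6_dst_lookup()/xfrm_lookup() (routing via the
 * first hop of any routing header) and cache the result, then hand the
 * packet to ip6_xmit() with the final destination restored.
 */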
int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi fl;
	struct dst_entry *dst;
	struct in6_addr *final_p = NULL, final;

	memset(&fl, 0, sizeof(fl));
	fl.proto = sk->sk_protocol;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, &np->saddr);
	fl.fl6_flowlabel = np->flow_label;
	IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_sport = inet->sport;
	fl.fl_ip_dport = inet->dport;
	security_sk_classify_flow(sk, &fl);

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	dst = __inet6_csk_dst_check(sk, np->dst_cookie);

	if (dst == NULL) {
		int err = ip6_dst_lookup(sk, &dst, &fl);

		if (err) {
			sk->sk_err_soft = -err;
			kfree_skb(skb);
			return err;
		}

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
			sk->sk_route_caps = 0;
			kfree_skb(skb);
			return err;
		}

		__inet6_csk_dst_store(sk, dst, NULL, NULL);
	}

	skb->dst = dst_clone(dst);

	/* Restore the final destination after routing is done */
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);

	return ip6_xmit(sk, skb, &fl, np->opt, 0);
}

EXPORT_SYMBOL_GPL(inet6_csk_xmit);