xref: /openbmc/linux/net/ipv6/udp.c (revision b868a02e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	UDP over IPv6
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *
9  *	Based on linux/ipv4/udp.c
10  *
11  *	Fixes:
12  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
13  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
14  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
15  *					a single port at the same time.
16  *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
17  *      YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
18  */
19 
20 #include <linux/bpf-cgroup.h>
21 #include <linux/errno.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/sockios.h>
25 #include <linux/net.h>
26 #include <linux/in6.h>
27 #include <linux/netdevice.h>
28 #include <linux/if_arp.h>
29 #include <linux/ipv6.h>
30 #include <linux/icmpv6.h>
31 #include <linux/init.h>
32 #include <linux/module.h>
33 #include <linux/skbuff.h>
34 #include <linux/slab.h>
35 #include <linux/uaccess.h>
36 #include <linux/indirect_call_wrapper.h>
37 
38 #include <net/addrconf.h>
39 #include <net/ndisc.h>
40 #include <net/protocol.h>
41 #include <net/transp_v6.h>
42 #include <net/ip6_route.h>
43 #include <net/raw.h>
44 #include <net/seg6.h>
45 #include <net/tcp_states.h>
46 #include <net/ip6_checksum.h>
47 #include <net/ip6_tunnel.h>
48 #include <net/xfrm.h>
49 #include <net/inet_hashtables.h>
50 #include <net/inet6_hashtables.h>
51 #include <net/busy_poll.h>
52 #include <net/sock_reuseport.h>
53 
54 #include <linux/proc_fs.h>
55 #include <linux/seq_file.h>
56 #include <trace/events/skb.h>
57 #include "udp_impl.h"
58 
59 static void udpv6_destruct_sock(struct sock *sk)
60 {
61 	udp_destruct_common(sk);
62 	inet6_sock_destruct(sk);
63 }
64 
65 int udpv6_init_sock(struct sock *sk)
66 {
67 	skb_queue_head_init(&udp_sk(sk)->reader_queue);
68 	sk->sk_destruct = udpv6_destruct_sock;
69 	return 0;
70 }
71 
72 static u32 udp6_ehashfn(const struct net *net,
73 			const struct in6_addr *laddr,
74 			const u16 lport,
75 			const struct in6_addr *faddr,
76 			const __be16 fport)
77 {
78 	static u32 udp6_ehash_secret __read_mostly;
79 	static u32 udp_ipv6_hash_secret __read_mostly;
80 
81 	u32 lhash, fhash;
82 
83 	net_get_random_once(&udp6_ehash_secret,
84 			    sizeof(udp6_ehash_secret));
85 	net_get_random_once(&udp_ipv6_hash_secret,
86 			    sizeof(udp_ipv6_hash_secret));
87 
88 	lhash = (__force u32)laddr->s6_addr32[3];
89 	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
90 
91 	return __inet6_ehashfn(lhash, lport, fhash, fport,
92 			       udp_ipv6_hash_secret + net_hash_mix(net));
93 }
94 
95 int udp_v6_get_port(struct sock *sk, unsigned short snum)
96 {
97 	unsigned int hash2_nulladdr =
98 		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
99 	unsigned int hash2_partial =
100 		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
101 
102 	/* precompute partial secondary hash */
103 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
104 	return udp_lib_get_port(sk, snum, hash2_nulladdr);
105 }
106 
107 void udp_v6_rehash(struct sock *sk)
108 {
109 	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
110 					  &sk->sk_v6_rcv_saddr,
111 					  inet_sk(sk)->inet_num);
112 
113 	udp_lib_rehash(sk, new_hash);
114 }
115 
116 static int compute_score(struct sock *sk, struct net *net,
117 			 const struct in6_addr *saddr, __be16 sport,
118 			 const struct in6_addr *daddr, unsigned short hnum,
119 			 int dif, int sdif)
120 {
121 	int bound_dev_if, score;
122 	struct inet_sock *inet;
123 	bool dev_match;
124 
125 	if (!net_eq(sock_net(sk), net) ||
126 	    udp_sk(sk)->udp_port_hash != hnum ||
127 	    sk->sk_family != PF_INET6)
128 		return -1;
129 
130 	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
131 		return -1;
132 
133 	score = 0;
134 	inet = inet_sk(sk);
135 
136 	if (inet->inet_dport) {
137 		if (inet->inet_dport != sport)
138 			return -1;
139 		score++;
140 	}
141 
142 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
143 		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
144 			return -1;
145 		score++;
146 	}
147 
148 	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
149 	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
150 	if (!dev_match)
151 		return -1;
152 	if (bound_dev_if)
153 		score++;
154 
155 	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
156 		score++;
157 
158 	return score;
159 }
160 
161 static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
162 				     struct sk_buff *skb,
163 				     const struct in6_addr *saddr,
164 				     __be16 sport,
165 				     const struct in6_addr *daddr,
166 				     unsigned int hnum)
167 {
168 	struct sock *reuse_sk = NULL;
169 	u32 hash;
170 
171 	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
172 		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
173 		reuse_sk = reuseport_select_sock(sk, hash, skb,
174 						 sizeof(struct udphdr));
175 	}
176 	return reuse_sk;
177 }
178 
179 /* called with rcu_read_lock() */
180 static struct sock *udp6_lib_lookup2(struct net *net,
181 		const struct in6_addr *saddr, __be16 sport,
182 		const struct in6_addr *daddr, unsigned int hnum,
183 		int dif, int sdif, struct udp_hslot *hslot2,
184 		struct sk_buff *skb)
185 {
186 	struct sock *sk, *result;
187 	int score, badness;
188 
189 	result = NULL;
190 	badness = -1;
191 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
192 		score = compute_score(sk, net, saddr, sport,
193 				      daddr, hnum, dif, sdif);
194 		if (score > badness) {
195 			result = lookup_reuseport(net, sk, skb,
196 						  saddr, sport, daddr, hnum);
197 			/* Fall back to scoring if group has connections */
198 			if (result && !reuseport_has_conns(sk, false))
199 				return result;
200 
201 			result = result ? : sk;
202 			badness = score;
203 		}
204 	}
205 	return result;
206 }
207 
208 static inline struct sock *udp6_lookup_run_bpf(struct net *net,
209 					       struct udp_table *udptable,
210 					       struct sk_buff *skb,
211 					       const struct in6_addr *saddr,
212 					       __be16 sport,
213 					       const struct in6_addr *daddr,
214 					       u16 hnum, const int dif)
215 {
216 	struct sock *sk, *reuse_sk;
217 	bool no_reuseport;
218 
219 	if (udptable != &udp_table)
220 		return NULL; /* only UDP is supported */
221 
222 	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
223 					    daddr, hnum, dif, &sk);
224 	if (no_reuseport || IS_ERR_OR_NULL(sk))
225 		return sk;
226 
227 	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
228 	if (reuse_sk)
229 		sk = reuse_sk;
230 	return sk;
231 }
232 
233 /* rcu_read_lock() must be held */
234 struct sock *__udp6_lib_lookup(struct net *net,
235 			       const struct in6_addr *saddr, __be16 sport,
236 			       const struct in6_addr *daddr, __be16 dport,
237 			       int dif, int sdif, struct udp_table *udptable,
238 			       struct sk_buff *skb)
239 {
240 	unsigned short hnum = ntohs(dport);
241 	unsigned int hash2, slot2;
242 	struct udp_hslot *hslot2;
243 	struct sock *result, *sk;
244 
245 	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
246 	slot2 = hash2 & udptable->mask;
247 	hslot2 = &udptable->hash2[slot2];
248 
249 	/* Lookup connected or non-wildcard sockets */
250 	result = udp6_lib_lookup2(net, saddr, sport,
251 				  daddr, hnum, dif, sdif,
252 				  hslot2, skb);
253 	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
254 		goto done;
255 
256 	/* Lookup redirect from BPF */
257 	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
258 		sk = udp6_lookup_run_bpf(net, udptable, skb,
259 					 saddr, sport, daddr, hnum, dif);
260 		if (sk) {
261 			result = sk;
262 			goto done;
263 		}
264 	}
265 
266 	/* Got non-wildcard socket or error on first lookup */
267 	if (result)
268 		goto done;
269 
270 	/* Lookup wildcard sockets */
271 	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
272 	slot2 = hash2 & udptable->mask;
273 	hslot2 = &udptable->hash2[slot2];
274 
275 	result = udp6_lib_lookup2(net, saddr, sport,
276 				  &in6addr_any, hnum, dif, sdif,
277 				  hslot2, skb);
278 done:
279 	if (IS_ERR(result))
280 		return NULL;
281 	return result;
282 }
283 EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
284 
285 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
286 					  __be16 sport, __be16 dport,
287 					  struct udp_table *udptable)
288 {
289 	const struct ipv6hdr *iph = ipv6_hdr(skb);
290 
291 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
292 				 &iph->daddr, dport, inet6_iif(skb),
293 				 inet6_sdif(skb), udptable, skb);
294 }
295 
296 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
297 				 __be16 sport, __be16 dport)
298 {
299 	const struct ipv6hdr *iph = ipv6_hdr(skb);
300 
301 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
302 				 &iph->daddr, dport, inet6_iif(skb),
303 				 inet6_sdif(skb), &udp_table, NULL);
304 }
305 
306 /* Must be called under rcu_read_lock().
307  * Does increment socket refcount.
308  */
309 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
310 struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
311 			     const struct in6_addr *daddr, __be16 dport, int dif)
312 {
313 	struct sock *sk;
314 
315 	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
316 				dif, 0, &udp_table, NULL);
317 	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
318 		sk = NULL;
319 	return sk;
320 }
321 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
322 #endif
323 
324 /* do not use the scratch area len for jumbograms: their length exceeds the
325  * scratch area space; note that the IP6CB flags are still in the first
326  * cacheline, so checking for jumbograms is cheap
327  */
328 static int udp6_skb_len(struct sk_buff *skb)
329 {
330 	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
331 }
332 
333 /*
334  *	This should be easy: if there is something there we
335  *	return it, otherwise we block.
336  */
337 
338 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
339 		  int flags, int *addr_len)
340 {
341 	struct ipv6_pinfo *np = inet6_sk(sk);
342 	struct inet_sock *inet = inet_sk(sk);
343 	struct sk_buff *skb;
344 	unsigned int ulen, copied;
345 	int off, err, peeking = flags & MSG_PEEK;
346 	int is_udplite = IS_UDPLITE(sk);
347 	struct udp_mib __percpu *mib;
348 	bool checksum_valid = false;
349 	int is_udp4;
350 
351 	if (flags & MSG_ERRQUEUE)
352 		return ipv6_recv_error(sk, msg, len, addr_len);
353 
354 	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
355 		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
356 
357 try_again:
358 	off = sk_peek_offset(sk, flags);
359 	skb = __skb_recv_udp(sk, flags, &off, &err);
360 	if (!skb)
361 		return err;
362 
363 	ulen = udp6_skb_len(skb);
364 	copied = len;
365 	if (copied > ulen - off)
366 		copied = ulen - off;
367 	else if (copied < ulen)
368 		msg->msg_flags |= MSG_TRUNC;
369 
370 	is_udp4 = (skb->protocol == htons(ETH_P_IP));
371 	mib = __UDPX_MIB(sk, is_udp4);
372 
373 	/*
374 	 * If checksum is needed at all, try to do it while copying the
375 	 * data.  If the data is truncated, or if we only want a partial
376 	 * coverage checksum (UDP-Lite), do it before the copy.
377 	 */
378 
379 	if (copied < ulen || peeking ||
380 	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
381 		checksum_valid = udp_skb_csum_unnecessary(skb) ||
382 				!__udp_lib_checksum_complete(skb);
383 		if (!checksum_valid)
384 			goto csum_copy_err;
385 	}
386 
387 	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
388 		if (udp_skb_is_linear(skb))
389 			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
390 		else
391 			err = skb_copy_datagram_msg(skb, off, msg, copied);
392 	} else {
393 		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
394 		if (err == -EINVAL)
395 			goto csum_copy_err;
396 	}
397 	if (unlikely(err)) {
398 		if (!peeking) {
399 			atomic_inc(&sk->sk_drops);
400 			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
401 		}
402 		kfree_skb(skb);
403 		return err;
404 	}
405 	if (!peeking)
406 		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
407 
408 	sock_recv_cmsgs(msg, sk, skb);
409 
410 	/* Copy the address. */
411 	if (msg->msg_name) {
412 		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
413 		sin6->sin6_family = AF_INET6;
414 		sin6->sin6_port = udp_hdr(skb)->source;
415 		sin6->sin6_flowinfo = 0;
416 
417 		if (is_udp4) {
418 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
419 					       &sin6->sin6_addr);
420 			sin6->sin6_scope_id = 0;
421 		} else {
422 			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
423 			sin6->sin6_scope_id =
424 				ipv6_iface_scope_id(&sin6->sin6_addr,
425 						    inet6_iif(skb));
426 		}
427 		*addr_len = sizeof(*sin6);
428 
429 		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
430 						      (struct sockaddr *)sin6);
431 	}
432 
433 	if (udp_sk(sk)->gro_enabled)
434 		udp_cmsg_recv(msg, sk, skb);
435 
436 	if (np->rxopt.all)
437 		ip6_datagram_recv_common_ctl(sk, msg, skb);
438 
439 	if (is_udp4) {
440 		if (inet->cmsg_flags)
441 			ip_cmsg_recv_offset(msg, sk, skb,
442 					    sizeof(struct udphdr), off);
443 	} else {
444 		if (np->rxopt.all)
445 			ip6_datagram_recv_specific_ctl(sk, msg, skb);
446 	}
447 
448 	err = copied;
449 	if (flags & MSG_TRUNC)
450 		err = ulen;
451 
452 	skb_consume_udp(sk, skb, peeking ? -err : err);
453 	return err;
454 
455 csum_copy_err:
456 	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
457 				 udp_skb_destructor)) {
458 		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
459 		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
460 	}
461 	kfree_skb(skb);
462 
463 	/* starting over for a new packet, but check if we need to yield */
464 	cond_resched();
465 	msg->msg_flags &= ~MSG_TRUNC;
466 	goto try_again;
467 }
468 
469 DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
470 void udpv6_encap_enable(void)
471 {
472 	static_branch_inc(&udpv6_encap_needed_key);
473 }
474 EXPORT_SYMBOL(udpv6_encap_enable);
475 
476 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go
477  * through error handlers in encapsulations looking for a match.
478  */
479 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
480 				      struct inet6_skb_parm *opt,
481 				      u8 type, u8 code, int offset, __be32 info)
482 {
483 	int i;
484 
485 	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
486 		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
487 			       u8 type, u8 code, int offset, __be32 info);
488 		const struct ip6_tnl_encap_ops *encap;
489 
490 		encap = rcu_dereference(ip6tun_encaps[i]);
491 		if (!encap)
492 			continue;
493 		handler = encap->err_handler;
494 		if (handler && !handler(skb, opt, type, code, offset, info))
495 			return 0;
496 	}
497 
498 	return -ENOENT;
499 }
500 
501 /* Try to match ICMP errors to UDP tunnels by looking up a socket without
502  * reversing source and destination port: this will match tunnels that force the
503  * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
504  * lwtunnels might actually break this assumption by being configured with
505  * different destination ports on endpoints, in this case we won't be able to
506  * trace ICMP messages back to them.
507  *
508  * If this doesn't match any socket, probe tunnels with arbitrary destination
509  * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
510  * we've sent packets to won't necessarily match the local destination port.
511  *
512  * Then ask the tunnel implementation to match the error against a valid
513  * association.
514  *
515  * Return an error if we can't find a match, the socket if we need further
516  * processing, zero otherwise.
517  */
518 static struct sock *__udp6_lib_err_encap(struct net *net,
519 					 const struct ipv6hdr *hdr, int offset,
520 					 struct udphdr *uh,
521 					 struct udp_table *udptable,
522 					 struct sock *sk,
523 					 struct sk_buff *skb,
524 					 struct inet6_skb_parm *opt,
525 					 u8 type, u8 code, __be32 info)
526 {
527 	int (*lookup)(struct sock *sk, struct sk_buff *skb);
528 	int network_offset, transport_offset;
529 	struct udp_sock *up;
530 
531 	network_offset = skb_network_offset(skb);
532 	transport_offset = skb_transport_offset(skb);
533 
534 	/* Network header needs to point to the outer IPv6 header inside ICMP */
535 	skb_reset_network_header(skb);
536 
537 	/* Transport header needs to point to the UDP header */
538 	skb_set_transport_header(skb, offset);
539 
540 	if (sk) {
541 		up = udp_sk(sk);
542 
543 		lookup = READ_ONCE(up->encap_err_lookup);
544 		if (lookup && lookup(sk, skb))
545 			sk = NULL;
546 
547 		goto out;
548 	}
549 
550 	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
551 			       &hdr->saddr, uh->dest,
552 			       inet6_iif(skb), 0, udptable, skb);
553 	if (sk) {
554 		up = udp_sk(sk);
555 
556 		lookup = READ_ONCE(up->encap_err_lookup);
557 		if (!lookup || lookup(sk, skb))
558 			sk = NULL;
559 	}
560 
561 out:
562 	if (!sk) {
563 		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
564 							offset, info));
565 	}
566 
567 	skb_set_transport_header(skb, transport_offset);
568 	skb_set_network_header(skb, network_offset);
569 
570 	return sk;
571 }
572 
573 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
574 		   u8 type, u8 code, int offset, __be32 info,
575 		   struct udp_table *udptable)
576 {
577 	struct ipv6_pinfo *np;
578 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
579 	const struct in6_addr *saddr = &hdr->saddr;
580 	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
581 	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
582 	bool tunnel = false;
583 	struct sock *sk;
584 	int harderr;
585 	int err;
586 	struct net *net = dev_net(skb->dev);
587 
588 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
589 			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
590 
591 	if (!sk || udp_sk(sk)->encap_type) {
592 		/* No socket for error: try tunnels before discarding */
593 		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
594 			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
595 						  udptable, sk, skb,
596 						  opt, type, code, info);
597 			if (!sk)
598 				return 0;
599 		} else
600 			sk = ERR_PTR(-ENOENT);
601 
602 		if (IS_ERR(sk)) {
603 			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
604 					  ICMP6_MIB_INERRORS);
605 			return PTR_ERR(sk);
606 		}
607 
608 		tunnel = true;
609 	}
610 
611 	harderr = icmpv6_err_convert(type, code, &err);
612 	np = inet6_sk(sk);
613 
614 	if (type == ICMPV6_PKT_TOOBIG) {
615 		if (!ip6_sk_accept_pmtu(sk))
616 			goto out;
617 		ip6_sk_update_pmtu(skb, sk, info);
618 		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
619 			harderr = 1;
620 	}
621 	if (type == NDISC_REDIRECT) {
622 		if (tunnel) {
623 			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
624 				     sk->sk_mark, sk->sk_uid);
625 		} else {
626 			ip6_sk_redirect(skb, sk);
627 		}
628 		goto out;
629 	}
630 
631 	/* Tunnels don't have an application socket: don't pass errors back */
632 	if (tunnel) {
633 		if (udp_sk(sk)->encap_err_rcv)
634 			udp_sk(sk)->encap_err_rcv(sk, skb, offset);
635 		goto out;
636 	}
637 
638 	if (!np->recverr) {
639 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
640 			goto out;
641 	} else {
642 		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
643 	}
644 
645 	sk->sk_err = err;
646 	sk_error_report(sk);
647 out:
648 	return 0;
649 }
650 
651 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
652 {
653 	int rc;
654 
655 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
656 		sock_rps_save_rxhash(sk, skb);
657 		sk_mark_napi_id(sk, skb);
658 		sk_incoming_cpu_update(sk);
659 	} else {
660 		sk_mark_napi_id_once(sk, skb);
661 	}
662 
663 	rc = __udp_enqueue_schedule_skb(sk, skb);
664 	if (rc < 0) {
665 		int is_udplite = IS_UDPLITE(sk);
666 		enum skb_drop_reason drop_reason;
667 
668 		/* Note that an ENOMEM error is charged twice */
669 		if (rc == -ENOMEM) {
670 			UDP6_INC_STATS(sock_net(sk),
671 					 UDP_MIB_RCVBUFERRORS, is_udplite);
672 			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
673 		} else {
674 			UDP6_INC_STATS(sock_net(sk),
675 				       UDP_MIB_MEMERRORS, is_udplite);
676 			drop_reason = SKB_DROP_REASON_PROTO_MEM;
677 		}
678 		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
679 		kfree_skb_reason(skb, drop_reason);
680 		return -1;
681 	}
682 
683 	return 0;
684 }
685 
686 static __inline__ int udpv6_err(struct sk_buff *skb,
687 				struct inet6_skb_parm *opt, u8 type,
688 				u8 code, int offset, __be32 info)
689 {
690 	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
691 }
692 
693 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
694 {
695 	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
696 	struct udp_sock *up = udp_sk(sk);
697 	int is_udplite = IS_UDPLITE(sk);
698 
699 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
700 		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
701 		goto drop;
702 	}
703 
704 	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
705 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
706 
707 		/*
708 		 * This is an encapsulation socket so pass the skb to
709 		 * the socket's udp_encap_rcv() hook. Otherwise, just
710 		 * fall through and pass this up the UDP socket.
711 		 * up->encap_rcv() returns the following value:
712 		 * =0 if skb was successfully passed to the encap
713 		 *    handler or was discarded by it.
714 		 * >0 if skb should be passed on to UDP.
715 		 * <0 if skb should be resubmitted as proto -N
716 		 */
717 
718 		/* if we're overly short, let UDP handle it */
719 		encap_rcv = READ_ONCE(up->encap_rcv);
720 		if (encap_rcv) {
721 			int ret;
722 
723 			/* Verify checksum before giving to encap */
724 			if (udp_lib_checksum_complete(skb))
725 				goto csum_error;
726 
727 			ret = encap_rcv(sk, skb);
728 			if (ret <= 0) {
729 				__UDP6_INC_STATS(sock_net(sk),
730 						 UDP_MIB_INDATAGRAMS,
731 						 is_udplite);
732 				return -ret;
733 			}
734 		}
735 
736 		/* FALLTHROUGH -- it's a UDP Packet */
737 	}
738 
739 	/*
740 	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
741 	 */
742 	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
743 
744 		if (up->pcrlen == 0) {          /* full coverage was set  */
745 			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
746 					    UDP_SKB_CB(skb)->cscov, skb->len);
747 			goto drop;
748 		}
749 		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
750 			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
751 					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
752 			goto drop;
753 		}
754 	}
755 
756 	prefetch(&sk->sk_rmem_alloc);
757 	if (rcu_access_pointer(sk->sk_filter) &&
758 	    udp_lib_checksum_complete(skb))
759 		goto csum_error;
760 
761 	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
762 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
763 		goto drop;
764 	}
765 
766 	udp_csum_pull_header(skb);
767 
768 	skb_dst_drop(skb);
769 
770 	return __udpv6_queue_rcv_skb(sk, skb);
771 
772 csum_error:
773 	drop_reason = SKB_DROP_REASON_UDP_CSUM;
774 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
775 drop:
776 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
777 	atomic_inc(&sk->sk_drops);
778 	kfree_skb_reason(skb, drop_reason);
779 	return -1;
780 }
781 
782 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
783 {
784 	struct sk_buff *next, *segs;
785 	int ret;
786 
787 	if (likely(!udp_unexpected_gso(sk, skb)))
788 		return udpv6_queue_rcv_one_skb(sk, skb);
789 
790 	__skb_push(skb, -skb_mac_offset(skb));
791 	segs = udp_rcv_segment(sk, skb, false);
792 	skb_list_walk_safe(segs, skb, next) {
793 		__skb_pull(skb, skb_transport_offset(skb));
794 
795 		udp_post_segment_fix_csum(skb);
796 		ret = udpv6_queue_rcv_one_skb(sk, skb);
797 		if (ret > 0)
798 			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
799 						 true);
800 	}
801 	return 0;
802 }
803 
804 static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
805 				   __be16 loc_port, const struct in6_addr *loc_addr,
806 				   __be16 rmt_port, const struct in6_addr *rmt_addr,
807 				   int dif, int sdif, unsigned short hnum)
808 {
809 	struct inet_sock *inet = inet_sk(sk);
810 
811 	if (!net_eq(sock_net(sk), net))
812 		return false;
813 
814 	if (udp_sk(sk)->udp_port_hash != hnum ||
815 	    sk->sk_family != PF_INET6 ||
816 	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
817 	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
818 		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
819 	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
820 	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
821 		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
822 		return false;
823 	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
824 		return false;
825 	return true;
826 }
827 
828 static void udp6_csum_zero_error(struct sk_buff *skb)
829 {
830 	/* RFC 2460 section 8.1 says that we SHOULD log
831 	 * this error. Well, it is reasonable.
832 	 */
833 	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
834 			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
835 			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
836 }
837 
838 /*
839  * Note: called only from the BH handler context,
840  * so we don't need to lock the hashes.
841  */
842 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
843 		const struct in6_addr *saddr, const struct in6_addr *daddr,
844 		struct udp_table *udptable, int proto)
845 {
846 	struct sock *sk, *first = NULL;
847 	const struct udphdr *uh = udp_hdr(skb);
848 	unsigned short hnum = ntohs(uh->dest);
849 	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
850 	unsigned int offset = offsetof(typeof(*sk), sk_node);
851 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
852 	int dif = inet6_iif(skb);
853 	int sdif = inet6_sdif(skb);
854 	struct hlist_node *node;
855 	struct sk_buff *nskb;
856 
857 	if (use_hash2) {
858 		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
859 			    udptable->mask;
860 		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
861 start_lookup:
862 		hslot = &udptable->hash2[hash2];
863 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
864 	}
865 
866 	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
867 		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
868 					    uh->source, saddr, dif, sdif,
869 					    hnum))
870 			continue;
871		/* If the checksum is zero and zero-checksum reception is
872		 * not enabled for the socket (no_check6_rx), skip it.
873 		 */
874 		if (!uh->check && !udp_sk(sk)->no_check6_rx)
875 			continue;
876 		if (!first) {
877 			first = sk;
878 			continue;
879 		}
880 		nskb = skb_clone(skb, GFP_ATOMIC);
881 		if (unlikely(!nskb)) {
882 			atomic_inc(&sk->sk_drops);
883 			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
884 					 IS_UDPLITE(sk));
885 			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
886 					 IS_UDPLITE(sk));
887 			continue;
888 		}
889 
890 		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
891 			consume_skb(nskb);
892 	}
893 
894 	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
895 	if (use_hash2 && hash2 != hash2_any) {
896 		hash2 = hash2_any;
897 		goto start_lookup;
898 	}
899 
900 	if (first) {
901 		if (udpv6_queue_rcv_skb(first, skb) > 0)
902 			consume_skb(skb);
903 	} else {
904 		kfree_skb(skb);
905 		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
906 				 proto == IPPROTO_UDPLITE);
907 	}
908 	return 0;
909 }
910 
911 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
912 {
913 	if (udp_sk_rx_dst_set(sk, dst)) {
914 		const struct rt6_info *rt = (const struct rt6_info *)dst;
915 
916 		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
917 	}
918 }
919 
920 /* wrapper for udp_queue_rcv_skb taking care of csum conversion and
921  * return code conversion for ip layer consumption
922  */
923 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
924 				struct udphdr *uh)
925 {
926 	int ret;
927 
928 	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
929 		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
930 
931 	ret = udpv6_queue_rcv_skb(sk, skb);
932 
933 	/* a return value > 0 means to resubmit the input */
934 	if (ret > 0)
935 		return ret;
936 	return 0;
937 }
938 
939 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
940 		   int proto)
941 {
942 	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
943 	const struct in6_addr *saddr, *daddr;
944 	struct net *net = dev_net(skb->dev);
945 	struct udphdr *uh;
946 	struct sock *sk;
947 	bool refcounted;
948 	u32 ulen = 0;
949 
950 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
951 		goto discard;
952 
953 	saddr = &ipv6_hdr(skb)->saddr;
954 	daddr = &ipv6_hdr(skb)->daddr;
955 	uh = udp_hdr(skb);
956 
957 	ulen = ntohs(uh->len);
958 	if (ulen > skb->len)
959 		goto short_packet;
960 
961 	if (proto == IPPROTO_UDP) {
962 		/* UDP validates ulen. */
963 
964 		/* Check for jumbo payload */
965 		if (ulen == 0)
966 			ulen = skb->len;
967 
968 		if (ulen < sizeof(*uh))
969 			goto short_packet;
970 
971 		if (ulen < skb->len) {
972 			if (pskb_trim_rcsum(skb, ulen))
973 				goto short_packet;
974 			saddr = &ipv6_hdr(skb)->saddr;
975 			daddr = &ipv6_hdr(skb)->daddr;
976 			uh = udp_hdr(skb);
977 		}
978 	}
979 
980 	if (udp6_csum_init(skb, uh, proto))
981 		goto csum_error;
982 
983 	/* Check if the socket is already available, e.g. due to early demux */
984 	sk = skb_steal_sock(skb, &refcounted);
985 	if (sk) {
986 		struct dst_entry *dst = skb_dst(skb);
987 		int ret;
988 
989 		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
990 			udp6_sk_rx_dst_set(sk, dst);
991 
992 		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
993 			if (refcounted)
994 				sock_put(sk);
995 			goto report_csum_error;
996 		}
997 
998 		ret = udp6_unicast_rcv_skb(sk, skb, uh);
999 		if (refcounted)
1000 			sock_put(sk);
1001 		return ret;
1002 	}
1003 
1004 	/*
1005 	 *	Multicast receive code
1006 	 */
1007 	if (ipv6_addr_is_multicast(daddr))
1008 		return __udp6_lib_mcast_deliver(net, skb,
1009 				saddr, daddr, udptable, proto);
1010 
1011 	/* Unicast */
1012 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1013 	if (sk) {
1014 		if (!uh->check && !udp_sk(sk)->no_check6_rx)
1015 			goto report_csum_error;
1016 		return udp6_unicast_rcv_skb(sk, skb, uh);
1017 	}
1018 
1019 	reason = SKB_DROP_REASON_NO_SOCKET;
1020 
1021 	if (!uh->check)
1022 		goto report_csum_error;
1023 
1024 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1025 		goto discard;
1026 
1027 	if (udp_lib_checksum_complete(skb))
1028 		goto csum_error;
1029 
1030 	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1031 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1032 
1033 	kfree_skb_reason(skb, reason);
1034 	return 0;
1035 
1036 short_packet:
1037 	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1038 		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1039 	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1040 			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
1041 			    saddr, ntohs(uh->source),
1042 			    ulen, skb->len,
1043 			    daddr, ntohs(uh->dest));
1044 	goto discard;
1045 
1046 report_csum_error:
1047 	udp6_csum_zero_error(skb);
1048 csum_error:
1049 	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1050 		reason = SKB_DROP_REASON_UDP_CSUM;
1051 	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1052 discard:
1053 	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1054 	kfree_skb_reason(skb, reason);
1055 	return 0;
1056 }
1057 
1058 
1059 static struct sock *__udp6_lib_demux_lookup(struct net *net,
1060 			__be16 loc_port, const struct in6_addr *loc_addr,
1061 			__be16 rmt_port, const struct in6_addr *rmt_addr,
1062 			int dif, int sdif)
1063 {
1064 	unsigned short hnum = ntohs(loc_port);
1065 	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1066 	unsigned int slot2 = hash2 & udp_table.mask;
1067 	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
1068 	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
1069 	struct sock *sk;
1070 
1071 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1072 		if (sk->sk_state == TCP_ESTABLISHED &&
1073 		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
1074 			return sk;
1075 		/* Only check first socket in chain */
1076 		break;
1077 	}
1078 	return NULL;
1079 }
1080 
1081 void udp_v6_early_demux(struct sk_buff *skb)
1082 {
1083 	struct net *net = dev_net(skb->dev);
1084 	const struct udphdr *uh;
1085 	struct sock *sk;
1086 	struct dst_entry *dst;
1087 	int dif = skb->dev->ifindex;
1088 	int sdif = inet6_sdif(skb);
1089 
1090 	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1091 	    sizeof(struct udphdr)))
1092 		return;
1093 
1094 	uh = udp_hdr(skb);
1095 
1096 	if (skb->pkt_type == PACKET_HOST)
1097 		sk = __udp6_lib_demux_lookup(net, uh->dest,
1098 					     &ipv6_hdr(skb)->daddr,
1099 					     uh->source, &ipv6_hdr(skb)->saddr,
1100 					     dif, sdif);
1101 	else
1102 		return;
1103 
1104 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
1105 		return;
1106 
1107 	skb->sk = sk;
1108 	skb->destructor = sock_efree;
1109 	dst = rcu_dereference(sk->sk_rx_dst);
1110 
1111 	if (dst)
1112 		dst = dst_check(dst, sk->sk_rx_dst_cookie);
1113 	if (dst) {
1114 		/* set noref for now.
1115 		 * any place which wants to hold dst has to call
1116 		 * dst_hold_safe()
1117 		 */
1118 		skb_dst_set_noref(skb, dst);
1119 	}
1120 }
1121 
1122 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1123 {
1124 	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
1125 }
1126 
1127 /*
1128  * Throw away all pending data and cancel the corking. Socket is locked.
1129  */
1130 static void udp_v6_flush_pending_frames(struct sock *sk)
1131 {
1132 	struct udp_sock *up = udp_sk(sk);
1133 
1134 	if (up->pending == AF_INET)
1135 		udp_flush_pending_frames(sk);
1136 	else if (up->pending) {
1137 		up->len = 0;
1138 		up->pending = 0;
1139 		ip6_flush_pending_frames(sk);
1140 	}
1141 }
1142 
1143 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1144 			     int addr_len)
1145 {
1146 	if (addr_len < offsetofend(struct sockaddr, sa_family))
1147 		return -EINVAL;
1148 	/* The following checks are replicated from __ip6_datagram_connect()
1149	 * and intended to prevent the BPF program called below from accessing
1150	 * bytes that are out of the bounds specified by the user in addr_len.
1151 	 */
1152 	if (uaddr->sa_family == AF_INET) {
1153 		if (ipv6_only_sock(sk))
1154 			return -EAFNOSUPPORT;
1155 		return udp_pre_connect(sk, uaddr, addr_len);
1156 	}
1157 
1158 	if (addr_len < SIN6_LEN_RFC2133)
1159 		return -EINVAL;
1160 
1161 	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
1162 }
1163 
1164 /**
1165  *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
1166  *	@sk:	socket we are sending on
1167  *	@skb:	sk_buff containing the filled-in UDP header
1168  *		(checksum field must be zeroed out)
1169  *	@saddr: source address
1170  *	@daddr: destination address
1171  *	@len:	length of packet
1172  */
1173 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1174 				 const struct in6_addr *saddr,
1175 				 const struct in6_addr *daddr, int len)
1176 {
1177 	unsigned int offset;
1178 	struct udphdr *uh = udp_hdr(skb);
1179 	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1180 	__wsum csum = 0;
1181 
1182 	if (!frags) {
1183 		/* Only one fragment on the socket.  */
1184 		skb->csum_start = skb_transport_header(skb) - skb->head;
1185 		skb->csum_offset = offsetof(struct udphdr, check);
1186 		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1187 	} else {
1188 		/*
1189		 * HW checksum won't work since there are two or more
1190		 * fragments on the socket, so all sk_buff csums must be
1191		 * combined in software below
1192 		 */
1193 		offset = skb_transport_offset(skb);
1194 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1195 		csum = skb->csum;
1196 
1197 		skb->ip_summed = CHECKSUM_NONE;
1198 
1199 		do {
1200 			csum = csum_add(csum, frags->csum);
1201 		} while ((frags = frags->next));
1202 
1203 		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1204 					    csum);
1205 		if (uh->check == 0)
1206 			uh->check = CSUM_MANGLED_0;
1207 	}
1208 }
1209 
1210 /*
1211  *	Sending
1212  */
1213 
1214 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1215 			   struct inet_cork *cork)
1216 {
1217 	struct sock *sk = skb->sk;
1218 	struct udphdr *uh;
1219 	int err = 0;
1220 	int is_udplite = IS_UDPLITE(sk);
1221 	__wsum csum = 0;
1222 	int offset = skb_transport_offset(skb);
1223 	int len = skb->len - offset;
1224 	int datalen = len - sizeof(*uh);
1225 
1226 	/*
1227 	 * Create a UDP header
1228 	 */
1229 	uh = udp_hdr(skb);
1230 	uh->source = fl6->fl6_sport;
1231 	uh->dest = fl6->fl6_dport;
1232 	uh->len = htons(len);
1233 	uh->check = 0;
1234 
1235 	if (cork->gso_size) {
1236 		const int hlen = skb_network_header_len(skb) +
1237 				 sizeof(struct udphdr);
1238 
1239 		if (hlen + cork->gso_size > cork->fragsize) {
1240 			kfree_skb(skb);
1241 			return -EINVAL;
1242 		}
1243 		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1244 			kfree_skb(skb);
1245 			return -EINVAL;
1246 		}
1247 		if (udp_sk(sk)->no_check6_tx) {
1248 			kfree_skb(skb);
1249 			return -EINVAL;
1250 		}
1251 		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1252 		    dst_xfrm(skb_dst(skb))) {
1253 			kfree_skb(skb);
1254 			return -EIO;
1255 		}
1256 
1257 		if (datalen > cork->gso_size) {
1258 			skb_shinfo(skb)->gso_size = cork->gso_size;
1259 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1260 			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1261 								 cork->gso_size);
1262 		}
1263 		goto csum_partial;
1264 	}
1265 
1266 	if (is_udplite)
1267 		csum = udplite_csum(skb);
1268 	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
1269 		skb->ip_summed = CHECKSUM_NONE;
1270 		goto send;
1271 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1272 csum_partial:
1273 		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1274 		goto send;
1275 	} else
1276 		csum = udp_csum(skb);
1277 
1278 	/* add protocol-dependent pseudo-header */
1279 	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1280 				    len, fl6->flowi6_proto, csum);
1281 	if (uh->check == 0)
1282 		uh->check = CSUM_MANGLED_0;
1283 
1284 send:
1285 	err = ip6_send_skb(skb);
1286 	if (err) {
1287 		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
1288 			UDP6_INC_STATS(sock_net(sk),
1289 				       UDP_MIB_SNDBUFERRORS, is_udplite);
1290 			err = 0;
1291 		}
1292 	} else {
1293 		UDP6_INC_STATS(sock_net(sk),
1294 			       UDP_MIB_OUTDATAGRAMS, is_udplite);
1295 	}
1296 	return err;
1297 }
1298 
1299 static int udp_v6_push_pending_frames(struct sock *sk)
1300 {
1301 	struct sk_buff *skb;
1302 	struct udp_sock  *up = udp_sk(sk);
1303 	int err = 0;
1304 
1305 	if (up->pending == AF_INET)
1306 		return udp_push_pending_frames(sk);
1307 
1308 	skb = ip6_finish_skb(sk);
1309 	if (!skb)
1310 		goto out;
1311 
1312 	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
1313 			      &inet_sk(sk)->cork.base);
1314 out:
1315 	up->len = 0;
1316 	up->pending = 0;
1317 	return err;
1318 }
1319 
1320 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1321 {
1322 	struct ipv6_txoptions opt_space;
1323 	struct udp_sock *up = udp_sk(sk);
1324 	struct inet_sock *inet = inet_sk(sk);
1325 	struct ipv6_pinfo *np = inet6_sk(sk);
1326 	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1327 	struct in6_addr *daddr, *final_p, final;
1328 	struct ipv6_txoptions *opt = NULL;
1329 	struct ipv6_txoptions *opt_to_free = NULL;
1330 	struct ip6_flowlabel *flowlabel = NULL;
1331 	struct inet_cork_full cork;
1332 	struct flowi6 *fl6 = &cork.fl.u.ip6;
1333 	struct dst_entry *dst;
1334 	struct ipcm6_cookie ipc6;
1335 	int addr_len = msg->msg_namelen;
1336 	bool connected = false;
1337 	int ulen = len;
1338 	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
1339 	int err;
1340 	int is_udplite = IS_UDPLITE(sk);
1341 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1342 
1343 	ipcm6_init(&ipc6);
1344 	ipc6.gso_size = READ_ONCE(up->gso_size);
1345 	ipc6.sockc.tsflags = sk->sk_tsflags;
1346 	ipc6.sockc.mark = sk->sk_mark;
1347 
1348 	/* destination address check */
1349 	if (sin6) {
1350 		if (addr_len < offsetof(struct sockaddr, sa_data))
1351 			return -EINVAL;
1352 
1353 		switch (sin6->sin6_family) {
1354 		case AF_INET6:
1355 			if (addr_len < SIN6_LEN_RFC2133)
1356 				return -EINVAL;
1357 			daddr = &sin6->sin6_addr;
1358 			if (ipv6_addr_any(daddr) &&
1359 			    ipv6_addr_v4mapped(&np->saddr))
1360 				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1361 						       daddr);
1362 			break;
1363 		case AF_INET:
1364 			goto do_udp_sendmsg;
1365 		case AF_UNSPEC:
1366 			msg->msg_name = sin6 = NULL;
1367 			msg->msg_namelen = addr_len = 0;
1368 			daddr = NULL;
1369 			break;
1370 		default:
1371 			return -EINVAL;
1372 		}
1373 	} else if (!up->pending) {
1374 		if (sk->sk_state != TCP_ESTABLISHED)
1375 			return -EDESTADDRREQ;
1376 		daddr = &sk->sk_v6_daddr;
1377 	} else
1378 		daddr = NULL;
1379 
1380 	if (daddr) {
1381 		if (ipv6_addr_v4mapped(daddr)) {
1382 			struct sockaddr_in sin;
1383 			sin.sin_family = AF_INET;
1384 			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1385 			sin.sin_addr.s_addr = daddr->s6_addr32[3];
1386 			msg->msg_name = &sin;
1387 			msg->msg_namelen = sizeof(sin);
1388 do_udp_sendmsg:
1389 			if (ipv6_only_sock(sk))
1390 				return -ENETUNREACH;
1391 			return udp_sendmsg(sk, msg, len);
1392 		}
1393 	}
1394 
1395	/* Rough check on arithmetic overflow;
1396	   a better check is made in ip6_append_data().
1397 	   */
1398 	if (len > INT_MAX - sizeof(struct udphdr))
1399 		return -EMSGSIZE;
1400 
1401 	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1402 	if (up->pending) {
1403 		if (up->pending == AF_INET)
1404 			return udp_sendmsg(sk, msg, len);
1405 		/*
1406 		 * There are pending frames.
1407 		 * The socket lock must be held while it's corked.
1408 		 */
1409 		lock_sock(sk);
1410 		if (likely(up->pending)) {
1411 			if (unlikely(up->pending != AF_INET6)) {
1412 				release_sock(sk);
1413 				return -EAFNOSUPPORT;
1414 			}
1415 			dst = NULL;
1416 			goto do_append_data;
1417 		}
1418 		release_sock(sk);
1419 	}
1420 	ulen += sizeof(struct udphdr);
1421 
1422 	memset(fl6, 0, sizeof(*fl6));
1423 
1424 	if (sin6) {
1425 		if (sin6->sin6_port == 0)
1426 			return -EINVAL;
1427 
1428 		fl6->fl6_dport = sin6->sin6_port;
1429 		daddr = &sin6->sin6_addr;
1430 
1431 		if (np->sndflow) {
1432 			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1433 			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
1434 				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1435 				if (IS_ERR(flowlabel))
1436 					return -EINVAL;
1437 			}
1438 		}
1439 
1440 		/*
1441 		 * Otherwise it will be difficult to maintain
1442 		 * sk->sk_dst_cache.
1443 		 */
1444 		if (sk->sk_state == TCP_ESTABLISHED &&
1445 		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1446 			daddr = &sk->sk_v6_daddr;
1447 
1448 		if (addr_len >= sizeof(struct sockaddr_in6) &&
1449 		    sin6->sin6_scope_id &&
1450 		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1451 			fl6->flowi6_oif = sin6->sin6_scope_id;
1452 	} else {
1453 		if (sk->sk_state != TCP_ESTABLISHED)
1454 			return -EDESTADDRREQ;
1455 
1456 		fl6->fl6_dport = inet->inet_dport;
1457 		daddr = &sk->sk_v6_daddr;
1458 		fl6->flowlabel = np->flow_label;
1459 		connected = true;
1460 	}
1461 
1462 	if (!fl6->flowi6_oif)
1463 		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
1464 
1465 	if (!fl6->flowi6_oif)
1466 		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1467 
1468 	fl6->flowi6_uid = sk->sk_uid;
1469 
1470 	if (msg->msg_controllen) {
1471 		opt = &opt_space;
1472 		memset(opt, 0, sizeof(struct ipv6_txoptions));
1473 		opt->tot_len = sizeof(*opt);
1474 		ipc6.opt = opt;
1475 
1476 		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1477 		if (err > 0)
1478 			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
1479 						    &ipc6);
1480 		if (err < 0) {
1481 			fl6_sock_release(flowlabel);
1482 			return err;
1483 		}
1484 		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1485 			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1486 			if (IS_ERR(flowlabel))
1487 				return -EINVAL;
1488 		}
1489 		if (!(opt->opt_nflen|opt->opt_flen))
1490 			opt = NULL;
1491 		connected = false;
1492 	}
1493 	if (!opt) {
1494 		opt = txopt_get(np);
1495 		opt_to_free = opt;
1496 	}
1497 	if (flowlabel)
1498 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
1499 	opt = ipv6_fixup_options(&opt_space, opt);
1500 	ipc6.opt = opt;
1501 
1502 	fl6->flowi6_proto = sk->sk_protocol;
1503 	fl6->flowi6_mark = ipc6.sockc.mark;
1504 	fl6->daddr = *daddr;
1505 	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
1506 		fl6->saddr = np->saddr;
1507 	fl6->fl6_sport = inet->inet_sport;
1508 
1509 	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1510 		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1511 					   (struct sockaddr *)sin6,
1512 					   &fl6->saddr);
1513 		if (err)
1514 			goto out_no_dst;
1515 		if (sin6) {
1516 			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1517 				/* BPF program rewrote IPv6-only by IPv4-mapped
1518 				 * IPv6. It's currently unsupported.
1519 				 */
1520 				err = -ENOTSUPP;
1521 				goto out_no_dst;
1522 			}
1523 			if (sin6->sin6_port == 0) {
1524 				/* BPF program set invalid port. Reject it. */
1525 				err = -EINVAL;
1526 				goto out_no_dst;
1527 			}
1528 			fl6->fl6_dport = sin6->sin6_port;
1529 			fl6->daddr = sin6->sin6_addr;
1530 		}
1531 	}
1532 
1533 	if (ipv6_addr_any(&fl6->daddr))
1534 		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1535 
1536 	final_p = fl6_update_dst(fl6, opt, &final);
1537 	if (final_p)
1538 		connected = false;
1539 
1540 	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
1541 		fl6->flowi6_oif = np->mcast_oif;
1542 		connected = false;
1543 	} else if (!fl6->flowi6_oif)
1544 		fl6->flowi6_oif = np->ucast_oif;
1545 
1546 	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
1547 
1548 	if (ipc6.tclass < 0)
1549 		ipc6.tclass = np->tclass;
1550 
1551 	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
1552 
1553 	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
1554 	if (IS_ERR(dst)) {
1555 		err = PTR_ERR(dst);
1556 		dst = NULL;
1557 		goto out;
1558 	}
1559 
1560 	if (ipc6.hlimit < 0)
1561 		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);
1562 
1563 	if (msg->msg_flags&MSG_CONFIRM)
1564 		goto do_confirm;
1565 back_from_confirm:
1566 
1567 	/* Lockless fast path for the non-corking case */
1568 	if (!corkreq) {
1569 		struct sk_buff *skb;
1570 
1571 		skb = ip6_make_skb(sk, getfrag, msg, ulen,
1572 				   sizeof(struct udphdr), &ipc6,
1573 				   (struct rt6_info *)dst,
1574 				   msg->msg_flags, &cork);
1575 		err = PTR_ERR(skb);
1576 		if (!IS_ERR_OR_NULL(skb))
1577 			err = udp_v6_send_skb(skb, fl6, &cork.base);
1578 		/* ip6_make_skb steals dst reference */
1579 		goto out_no_dst;
1580 	}
1581 
1582 	lock_sock(sk);
1583 	if (unlikely(up->pending)) {
1584 		/* The socket is already corked while preparing it. */
1585 		/* ... which is an evident application bug. --ANK */
1586 		release_sock(sk);
1587 
1588 		net_dbg_ratelimited("udp cork app bug 2\n");
1589 		err = -EINVAL;
1590 		goto out;
1591 	}
1592 
1593 	up->pending = AF_INET6;
1594 
1595 do_append_data:
1596 	if (ipc6.dontfrag < 0)
1597 		ipc6.dontfrag = np->dontfrag;
1598 	up->len += ulen;
1599 	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1600 			      &ipc6, fl6, (struct rt6_info *)dst,
1601 			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1602 	if (err)
1603 		udp_v6_flush_pending_frames(sk);
1604 	else if (!corkreq)
1605 		err = udp_v6_push_pending_frames(sk);
1606 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1607 		up->pending = 0;
1608 
1609 	if (err > 0)
1610 		err = np->recverr ? net_xmit_errno(err) : 0;
1611 	release_sock(sk);
1612 
1613 out:
1614 	dst_release(dst);
1615 out_no_dst:
1616 	fl6_sock_release(flowlabel);
1617 	txopt_put(opt_to_free);
1618 	if (!err)
1619 		return len;
1620 	/*
1621 	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1622 	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1623 	 * we don't have a good statistic (IpOutDiscards but it can be too many
1624 	 * things).  We could add another new stat but at least for now that
1625 	 * seems like overkill.
1626 	 */
1627 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1628 		UDP6_INC_STATS(sock_net(sk),
1629 			       UDP_MIB_SNDBUFERRORS, is_udplite);
1630 	}
1631 	return err;
1632 
1633 do_confirm:
1634 	if (msg->msg_flags & MSG_PROBE)
1635 		dst_confirm_neigh(dst, &fl6->daddr);
1636 	if (!(msg->msg_flags&MSG_PROBE) || len)
1637 		goto back_from_confirm;
1638 	err = 0;
1639 	goto out;
1640 }
1641 
1642 void udpv6_destroy_sock(struct sock *sk)
1643 {
1644 	struct udp_sock *up = udp_sk(sk);
1645 	lock_sock(sk);
1646 
1647 	/* protects from races with udp_abort() */
1648 	sock_set_flag(sk, SOCK_DEAD);
1649 	udp_v6_flush_pending_frames(sk);
1650 	release_sock(sk);
1651 
1652 	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1653 		if (up->encap_type) {
1654 			void (*encap_destroy)(struct sock *sk);
1655 			encap_destroy = READ_ONCE(up->encap_destroy);
1656 			if (encap_destroy)
1657 				encap_destroy(sk);
1658 		}
1659 		if (up->encap_enabled) {
1660 			static_branch_dec(&udpv6_encap_needed_key);
1661 			udp_encap_disable();
1662 		}
1663 	}
1664 
1665 	inet6_destroy_sock(sk);
1666 }
1667 
1668 /*
1669  *	Socket option code for UDP
1670  */
1671 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1672 		     unsigned int optlen)
1673 {
1674 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1675 		return udp_lib_setsockopt(sk, level, optname,
1676 					  optval, optlen,
1677 					  udp_v6_push_pending_frames);
1678 	return ipv6_setsockopt(sk, level, optname, optval, optlen);
1679 }
1680 
1681 int udpv6_getsockopt(struct sock *sk, int level, int optname,
1682 		     char __user *optval, int __user *optlen)
1683 {
1684 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1685 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1686 	return ipv6_getsockopt(sk, level, optname, optval, optlen);
1687 }
1688 
1689 static const struct inet6_protocol udpv6_protocol = {
1690 	.handler	=	udpv6_rcv,
1691 	.err_handler	=	udpv6_err,
1692 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1693 };
1694 
1695 /* ------------------------------------------------------------------------ */
1696 #ifdef CONFIG_PROC_FS
1697 int udp6_seq_show(struct seq_file *seq, void *v)
1698 {
1699 	if (v == SEQ_START_TOKEN) {
1700 		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1701 	} else {
1702 		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1703 		struct inet_sock *inet = inet_sk(v);
1704 		__u16 srcp = ntohs(inet->inet_sport);
1705 		__u16 destp = ntohs(inet->inet_dport);
1706 		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1707 					  udp_rqueue_get(v), bucket);
1708 	}
1709 	return 0;
1710 }
1711 
1712 const struct seq_operations udp6_seq_ops = {
1713 	.start		= udp_seq_start,
1714 	.next		= udp_seq_next,
1715 	.stop		= udp_seq_stop,
1716 	.show		= udp6_seq_show,
1717 };
1718 EXPORT_SYMBOL(udp6_seq_ops);
1719 
1720 static struct udp_seq_afinfo udp6_seq_afinfo = {
1721 	.family		= AF_INET6,
1722 	.udp_table	= &udp_table,
1723 };
1724 
1725 int __net_init udp6_proc_init(struct net *net)
1726 {
1727 	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1728 			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1729 		return -ENOMEM;
1730 	return 0;
1731 }
1732 
1733 void udp6_proc_exit(struct net *net)
1734 {
1735 	remove_proc_entry("udp6", net->proc_net);
1736 }
1737 #endif /* CONFIG_PROC_FS */
1738 
1739 /* ------------------------------------------------------------------------ */
1740 
1741 struct proto udpv6_prot = {
1742 	.name			= "UDPv6",
1743 	.owner			= THIS_MODULE,
1744 	.close			= udp_lib_close,
1745 	.pre_connect		= udpv6_pre_connect,
1746 	.connect		= ip6_datagram_connect,
1747 	.disconnect		= udp_disconnect,
1748 	.ioctl			= udp_ioctl,
1749 	.init			= udpv6_init_sock,
1750 	.destroy		= udpv6_destroy_sock,
1751 	.setsockopt		= udpv6_setsockopt,
1752 	.getsockopt		= udpv6_getsockopt,
1753 	.sendmsg		= udpv6_sendmsg,
1754 	.recvmsg		= udpv6_recvmsg,
1755 	.release_cb		= ip6_datagram_release_cb,
1756 	.hash			= udp_lib_hash,
1757 	.unhash			= udp_lib_unhash,
1758 	.rehash			= udp_v6_rehash,
1759 	.get_port		= udp_v6_get_port,
1760 	.put_port		= udp_lib_unhash,
1761 #ifdef CONFIG_BPF_SYSCALL
1762 	.psock_update_sk_prot	= udp_bpf_update_proto,
1763 #endif
1764 
1765 	.memory_allocated	= &udp_memory_allocated,
1766 	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,
1767 
1768 	.sysctl_mem		= sysctl_udp_mem,
1769 	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1770 	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1771 	.obj_size		= sizeof(struct udp6_sock),
1772 	.h.udp_table		= &udp_table,
1773 	.diag_destroy		= udp_abort,
1774 };
1775 
1776 static struct inet_protosw udpv6_protosw = {
1777 	.type =      SOCK_DGRAM,
1778 	.protocol =  IPPROTO_UDP,
1779 	.prot =      &udpv6_prot,
1780 	.ops =       &inet6_dgram_ops,
1781 	.flags =     INET_PROTOSW_PERMANENT,
1782 };
1783 
1784 int __init udpv6_init(void)
1785 {
1786 	int ret;
1787 
1788 	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1789 	if (ret)
1790 		goto out;
1791 
1792 	ret = inet6_register_protosw(&udpv6_protosw);
1793 	if (ret)
1794 		goto out_udpv6_protocol;
1795 out:
1796 	return ret;
1797 
1798 out_udpv6_protocol:
1799 	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1800 	goto out;
1801 }
1802 
1803 void udpv6_exit(void)
1804 {
1805 	inet6_unregister_protosw(&udpv6_protosw);
1806 	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1807 }
1808