xref: /openbmc/linux/net/ipv6/udp.c (revision e72e8bf1)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	UDP over IPv6
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *
9  *	Based on linux/ipv4/udp.c
10  *
11  *	Fixes:
12  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
13  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
14  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
15  *					a single port at the same time.
16  *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
17  *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
18  */
19 
20 #include <linux/errno.h>
21 #include <linux/types.h>
22 #include <linux/socket.h>
23 #include <linux/sockios.h>
24 #include <linux/net.h>
25 #include <linux/in6.h>
26 #include <linux/netdevice.h>
27 #include <linux/if_arp.h>
28 #include <linux/ipv6.h>
29 #include <linux/icmpv6.h>
30 #include <linux/init.h>
31 #include <linux/module.h>
32 #include <linux/skbuff.h>
33 #include <linux/slab.h>
34 #include <linux/uaccess.h>
35 #include <linux/indirect_call_wrapper.h>
36 
37 #include <net/addrconf.h>
38 #include <net/ndisc.h>
39 #include <net/protocol.h>
40 #include <net/transp_v6.h>
41 #include <net/ip6_route.h>
42 #include <net/raw.h>
43 #include <net/tcp_states.h>
44 #include <net/ip6_checksum.h>
45 #include <net/ip6_tunnel.h>
46 #include <net/xfrm.h>
47 #include <net/inet_hashtables.h>
48 #include <net/inet6_hashtables.h>
49 #include <net/busy_poll.h>
50 #include <net/sock_reuseport.h>
51 
52 #include <linux/proc_fs.h>
53 #include <linux/seq_file.h>
54 #include <trace/events/skb.h>
55 #include "udp_impl.h"
56 
57 static u32 udp6_ehashfn(const struct net *net,
58 			const struct in6_addr *laddr,
59 			const u16 lport,
60 			const struct in6_addr *faddr,
61 			const __be16 fport)
62 {
63 	static u32 udp6_ehash_secret __read_mostly;
64 	static u32 udp_ipv6_hash_secret __read_mostly;
65 
66 	u32 lhash, fhash;
67 
68 	net_get_random_once(&udp6_ehash_secret,
69 			    sizeof(udp6_ehash_secret));
70 	net_get_random_once(&udp_ipv6_hash_secret,
71 			    sizeof(udp_ipv6_hash_secret));
72 
73 	lhash = (__force u32)laddr->s6_addr32[3];
74 	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
75 
76 	return __inet6_ehashfn(lhash, lport, fhash, fport,
77 			       udp6_ehash_secret + net_hash_mix(net));
78 }
79 
80 int udp_v6_get_port(struct sock *sk, unsigned short snum)
81 {
82 	unsigned int hash2_nulladdr =
83 		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
84 	unsigned int hash2_partial =
85 		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
86 
87 	/* precompute partial secondary hash */
88 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
89 	return udp_lib_get_port(sk, snum, hash2_nulladdr);
90 }
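
/* A minimal sketch of the secondary-hash computation used above, written
 * as a standalone helper (illustrative only, not an existing kernel
 * function; it only assumes ipv6_portaddr_hash() and the global udp_table):
 *
 *	static unsigned int udp6_slot2(struct net *net,
 *				       const struct in6_addr *addr, u16 port)
 *	{
 *		return ipv6_portaddr_hash(net, addr, port) & udp_table.mask;
 *	}
 *
 * Both the wildcard hash (in6addr_any, snum) and the partial hash (bound
 * address, port 0) are precomputed so that udp_lib_get_port() can check
 * bind conflicts on either hash2 chain without rehashing per candidate.
 */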
91 
92 void udp_v6_rehash(struct sock *sk)
93 {
94 	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
95 					  &sk->sk_v6_rcv_saddr,
96 					  inet_sk(sk)->inet_num);
97 
98 	udp_lib_rehash(sk, new_hash);
99 }
100 
101 static int compute_score(struct sock *sk, struct net *net,
102 			 const struct in6_addr *saddr, __be16 sport,
103 			 const struct in6_addr *daddr, unsigned short hnum,
104 			 int dif, int sdif)
105 {
106 	int score;
107 	struct inet_sock *inet;
108 	bool dev_match;
109 
110 	if (!net_eq(sock_net(sk), net) ||
111 	    udp_sk(sk)->udp_port_hash != hnum ||
112 	    sk->sk_family != PF_INET6)
113 		return -1;
114 
115 	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
116 		return -1;
117 
118 	score = 0;
119 	inet = inet_sk(sk);
120 
121 	if (inet->inet_dport) {
122 		if (inet->inet_dport != sport)
123 			return -1;
124 		score++;
125 	}
126 
127 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
128 		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
129 			return -1;
130 		score++;
131 	}
132 
133 	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
134 	if (!dev_match)
135 		return -1;
136 	score++;
137 
138 	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
139 		score++;
140 
141 	return score;
142 }
143 
144 /* called with rcu_read_lock() */
145 static struct sock *udp6_lib_lookup2(struct net *net,
146 		const struct in6_addr *saddr, __be16 sport,
147 		const struct in6_addr *daddr, unsigned int hnum,
148 		int dif, int sdif, struct udp_hslot *hslot2,
149 		struct sk_buff *skb)
150 {
151 	struct sock *sk, *result;
152 	int score, badness;
153 	u32 hash = 0;
154 
155 	result = NULL;
156 	badness = -1;
157 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
158 		score = compute_score(sk, net, saddr, sport,
159 				      daddr, hnum, dif, sdif);
160 		if (score > badness) {
161 			if (sk->sk_reuseport &&
162 			    sk->sk_state != TCP_ESTABLISHED) {
163 				hash = udp6_ehashfn(net, daddr, hnum,
164 						    saddr, sport);
165 
166 				result = reuseport_select_sock(sk, hash, skb,
167 							sizeof(struct udphdr));
168 				if (result && !reuseport_has_conns(sk, false))
169 					return result;
170 			}
171 			result = sk;
172 			badness = score;
173 		}
174 	}
175 	return result;
176 }
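
/* Userspace counterpart of the reuseport branch above (a hedged sketch,
 * error handling omitted): N sockets bound this way share one hslot2
 * chain, and reuseport_select_sock() spreads flows across them using the
 * udp6_ehashfn() four-tuple hash:
 *
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port = htons(53) };
 *	int s = socket(AF_INET6, SOCK_DGRAM, 0);
 *	int one = 1;
 *
 *	setsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(s, (struct sockaddr *)&a, sizeof(a));
 *	(repeat in each worker process; the kernel spreads flows across them)
 */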
177 
178 /* rcu_read_lock() must be held */
179 struct sock *__udp6_lib_lookup(struct net *net,
180 			       const struct in6_addr *saddr, __be16 sport,
181 			       const struct in6_addr *daddr, __be16 dport,
182 			       int dif, int sdif, struct udp_table *udptable,
183 			       struct sk_buff *skb)
184 {
185 	unsigned short hnum = ntohs(dport);
186 	unsigned int hash2, slot2;
187 	struct udp_hslot *hslot2;
188 	struct sock *result;
189 
190 	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
191 	slot2 = hash2 & udptable->mask;
192 	hslot2 = &udptable->hash2[slot2];
193 
194 	result = udp6_lib_lookup2(net, saddr, sport,
195 				  daddr, hnum, dif, sdif,
196 				  hslot2, skb);
197 	if (!result) {
198 		hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
199 		slot2 = hash2 & udptable->mask;
200 
201 		hslot2 = &udptable->hash2[slot2];
202 
203 		result = udp6_lib_lookup2(net, saddr, sport,
204 					  &in6addr_any, hnum, dif, sdif,
205 					  hslot2, skb);
206 	}
207 	if (IS_ERR(result))
208 		return NULL;
209 	return result;
210 }
211 EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
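
/* Lookup-order note: the specific-address chain (daddr, hnum) is probed
 * before the wildcard chain (in6addr_any, hnum), so a socket bound to a
 * specific address always beats a socket bound to :: on the same port;
 * compute_score() then ranks candidates within a chain.
 */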
212 
213 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
214 					  __be16 sport, __be16 dport,
215 					  struct udp_table *udptable)
216 {
217 	const struct ipv6hdr *iph = ipv6_hdr(skb);
218 
219 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
220 				 &iph->daddr, dport, inet6_iif(skb),
221 				 inet6_sdif(skb), udptable, skb);
222 }
223 
224 struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
225 				 __be16 sport, __be16 dport)
226 {
227 	const struct ipv6hdr *iph = ipv6_hdr(skb);
228 
229 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
230 				 &iph->daddr, dport, inet6_iif(skb),
231 				 inet6_sdif(skb), &udp_table, NULL);
232 }
233 EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
234 
235 /* Must be called under rcu_read_lock().
236  * Does increment socket refcount.
237  */
238 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
239 struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
240 			     const struct in6_addr *daddr, __be16 dport, int dif)
241 {
242 	struct sock *sk;
243 
244 	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
245 				dif, 0, &udp_table, NULL);
246 	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
247 		sk = NULL;
248 	return sk;
249 }
250 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
251 #endif
252 
253 /* Do not use the scratch area len for jumbograms: their length exceeds the
254  * scratch area space; note that the IP6CB flags are still in the first
255  * cacheline, so checking for jumbograms is cheap
256  */
257 static int udp6_skb_len(struct sk_buff *skb)
258 {
259 	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
260 }
261 
262 /*
263  *	This should be easy: if there is something there, we
264  *	return it; otherwise we block.
265  */
266 
267 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
268 		  int noblock, int flags, int *addr_len)
269 {
270 	struct ipv6_pinfo *np = inet6_sk(sk);
271 	struct inet_sock *inet = inet_sk(sk);
272 	struct sk_buff *skb;
273 	unsigned int ulen, copied;
274 	int off, err, peeking = flags & MSG_PEEK;
275 	int is_udplite = IS_UDPLITE(sk);
276 	struct udp_mib __percpu *mib;
277 	bool checksum_valid = false;
278 	int is_udp4;
279 
280 	if (flags & MSG_ERRQUEUE)
281 		return ipv6_recv_error(sk, msg, len, addr_len);
282 
283 	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
284 		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
285 
286 try_again:
287 	off = sk_peek_offset(sk, flags);
288 	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
289 	if (!skb)
290 		return err;
291 
292 	ulen = udp6_skb_len(skb);
293 	copied = len;
294 	if (copied > ulen - off)
295 		copied = ulen - off;
296 	else if (copied < ulen)
297 		msg->msg_flags |= MSG_TRUNC;
298 
299 	is_udp4 = (skb->protocol == htons(ETH_P_IP));
300 	mib = __UDPX_MIB(sk, is_udp4);
301 
302 	/*
303 	 * If checksum is needed at all, try to do it while copying the
304 	 * data.  If the data is truncated, or if we only want a partial
305 	 * coverage checksum (UDP-Lite), do it before the copy.
306 	 */
307 
308 	if (copied < ulen || peeking ||
309 	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
310 		checksum_valid = udp_skb_csum_unnecessary(skb) ||
311 				!__udp_lib_checksum_complete(skb);
312 		if (!checksum_valid)
313 			goto csum_copy_err;
314 	}
315 
316 	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
317 		if (udp_skb_is_linear(skb))
318 			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
319 		else
320 			err = skb_copy_datagram_msg(skb, off, msg, copied);
321 	} else {
322 		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
323 		if (err == -EINVAL)
324 			goto csum_copy_err;
325 	}
326 	if (unlikely(err)) {
327 		if (!peeking) {
328 			atomic_inc(&sk->sk_drops);
329 			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
330 		}
331 		kfree_skb(skb);
332 		return err;
333 	}
334 	if (!peeking)
335 		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
336 
337 	sock_recv_ts_and_drops(msg, sk, skb);
338 
339 	/* Copy the address. */
340 	if (msg->msg_name) {
341 		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
342 		sin6->sin6_family = AF_INET6;
343 		sin6->sin6_port = udp_hdr(skb)->source;
344 		sin6->sin6_flowinfo = 0;
345 
346 		if (is_udp4) {
347 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
348 					       &sin6->sin6_addr);
349 			sin6->sin6_scope_id = 0;
350 		} else {
351 			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
352 			sin6->sin6_scope_id =
353 				ipv6_iface_scope_id(&sin6->sin6_addr,
354 						    inet6_iif(skb));
355 		}
356 		*addr_len = sizeof(*sin6);
357 
358 		if (cgroup_bpf_enabled)
359 			BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
360 						(struct sockaddr *)sin6);
361 	}
362 
363 	if (udp_sk(sk)->gro_enabled)
364 		udp_cmsg_recv(msg, sk, skb);
365 
366 	if (np->rxopt.all)
367 		ip6_datagram_recv_common_ctl(sk, msg, skb);
368 
369 	if (is_udp4) {
370 		if (inet->cmsg_flags)
371 			ip_cmsg_recv_offset(msg, sk, skb,
372 					    sizeof(struct udphdr), off);
373 	} else {
374 		if (np->rxopt.all)
375 			ip6_datagram_recv_specific_ctl(sk, msg, skb);
376 	}
377 
378 	err = copied;
379 	if (flags & MSG_TRUNC)
380 		err = ulen;
381 
382 	skb_consume_udp(sk, skb, peeking ? -err : err);
383 	return err;
384 
385 csum_copy_err:
386 	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
387 				 udp_skb_destructor)) {
388 		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
389 		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
390 	}
391 	kfree_skb(skb);
392 
393 	/* starting over for a new packet, but check if we need to yield */
394 	cond_resched();
395 	msg->msg_flags &= ~MSG_TRUNC;
396 	goto try_again;
397 }
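
/* Receiving with peer address and ancillary data from userspace (a hedged
 * sketch, error handling omitted); for link-local peers the kernel fills
 * in sin6_scope_id as noted in the header changelog:
 *
 *	struct sockaddr_in6 peer;
 *	char buf[2048], cbuf[256];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = {
 *		.msg_name	= &peer,
 *		.msg_namelen	= sizeof(peer),
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= cbuf,
 *		.msg_controllen	= sizeof(cbuf),
 *	};
 *	ssize_t n = recvmsg(s, &mh, 0);
 */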
398 
399 DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
400 void udpv6_encap_enable(void)
401 {
402 	static_branch_inc(&udpv6_encap_needed_key);
403 }
404 EXPORT_SYMBOL(udpv6_encap_enable);
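
/* How a tunnel driver typically ends up flipping this key (a hedged
 * sketch using the generic udp_tunnel helpers; "my_encap_rcv" is a
 * hypothetical callback):
 *
 *	struct udp_tunnel_sock_cfg cfg = {
 *		.encap_type	= 1,
 *		.encap_rcv	= my_encap_rcv,
 *	};
 *
 *	setup_udp_tunnel_sock(net, sock, &cfg);
 *
 * setup_udp_tunnel_sock() stores the callbacks in the udp_sock and
 * enables encapsulation, so the static branch in the receive path below
 * starts handing matching skbs to encap_rcv().
 */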
405 
406 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go
407  * through error handlers in encapsulations looking for a match.
408  */
409 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
410 				      struct inet6_skb_parm *opt,
411 				      u8 type, u8 code, int offset, __be32 info)
412 {
413 	int i;
414 
415 	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
416 		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
417 			       u8 type, u8 code, int offset, __be32 info);
418 		const struct ip6_tnl_encap_ops *encap;
419 
420 		encap = rcu_dereference(ip6tun_encaps[i]);
421 		if (!encap)
422 			continue;
423 		handler = encap->err_handler;
424 		if (handler && !handler(skb, opt, type, code, offset, info))
425 			return 0;
426 	}
427 
428 	return -ENOENT;
429 }
430 
431 /* Try to match ICMP errors to UDP tunnels by looking up a socket without
432  * reversing source and destination port: this will match tunnels that force the
433  * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
434  * lwtunnels might actually break this assumption by being configured with
435  * different destination ports on endpoints; in this case we won't be able to
436  * trace ICMP messages back to them.
437  *
438  * If this doesn't match any socket, probe tunnels with arbitrary destination
439  * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
440  * we've sent packets to won't necessarily match the local destination port.
441  *
442  * Then ask the tunnel implementation to match the error against a valid
443  * association.
444  *
445  * Return an error if we can't find a match, the socket if we need further
446  * processing, zero otherwise.
447  */
448 static struct sock *__udp6_lib_err_encap(struct net *net,
449 					 const struct ipv6hdr *hdr, int offset,
450 					 struct udphdr *uh,
451 					 struct udp_table *udptable,
452 					 struct sk_buff *skb,
453 					 struct inet6_skb_parm *opt,
454 					 u8 type, u8 code, __be32 info)
455 {
456 	int network_offset, transport_offset;
457 	struct sock *sk;
458 
459 	network_offset = skb_network_offset(skb);
460 	transport_offset = skb_transport_offset(skb);
461 
462 	/* Network header needs to point to the outer IPv6 header inside ICMP */
463 	skb_reset_network_header(skb);
464 
465 	/* Transport header needs to point to the UDP header */
466 	skb_set_transport_header(skb, offset);
467 
468 	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
469 			       &hdr->saddr, uh->dest,
470 			       inet6_iif(skb), 0, udptable, skb);
471 	if (sk) {
472 		int (*lookup)(struct sock *sk, struct sk_buff *skb);
473 		struct udp_sock *up = udp_sk(sk);
474 
475 		lookup = READ_ONCE(up->encap_err_lookup);
476 		if (!lookup || lookup(sk, skb))
477 			sk = NULL;
478 	}
479 
480 	if (!sk) {
481 		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
482 							offset, info));
483 	}
484 
485 	skb_set_transport_header(skb, transport_offset);
486 	skb_set_network_header(skb, network_offset);
487 
488 	return sk;
489 }
490 
491 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
492 		   u8 type, u8 code, int offset, __be32 info,
493 		   struct udp_table *udptable)
494 {
495 	struct ipv6_pinfo *np;
496 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
497 	const struct in6_addr *saddr = &hdr->saddr;
498 	const struct in6_addr *daddr = &hdr->daddr;
499 	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
500 	bool tunnel = false;
501 	struct sock *sk;
502 	int harderr;
503 	int err;
504 	struct net *net = dev_net(skb->dev);
505 
506 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
507 			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
508 	if (!sk) {
509 		/* No socket for error: try tunnels before discarding */
510 		sk = ERR_PTR(-ENOENT);
511 		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
512 			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
513 						  udptable, skb,
514 						  opt, type, code, info);
515 			if (!sk)
516 				return 0;
517 		}
518 
519 		if (IS_ERR(sk)) {
520 			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
521 					  ICMP6_MIB_INERRORS);
522 			return PTR_ERR(sk);
523 		}
524 
525 		tunnel = true;
526 	}
527 
528 	harderr = icmpv6_err_convert(type, code, &err);
529 	np = inet6_sk(sk);
530 
531 	if (type == ICMPV6_PKT_TOOBIG) {
532 		if (!ip6_sk_accept_pmtu(sk))
533 			goto out;
534 		ip6_sk_update_pmtu(skb, sk, info);
535 		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
536 			harderr = 1;
537 	}
538 	if (type == NDISC_REDIRECT) {
539 		if (tunnel) {
540 			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
541 				     sk->sk_mark, sk->sk_uid);
542 		} else {
543 			ip6_sk_redirect(skb, sk);
544 		}
545 		goto out;
546 	}
547 
548 	/* Tunnels don't have an application socket: don't pass errors back */
549 	if (tunnel)
550 		goto out;
551 
552 	if (!np->recverr) {
553 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
554 			goto out;
555 	} else {
556 		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
557 	}
558 
559 	sk->sk_err = err;
560 	sk->sk_error_report(sk);
561 out:
562 	return 0;
563 }
564 
565 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
566 {
567 	int rc;
568 
569 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
570 		sock_rps_save_rxhash(sk, skb);
571 		sk_mark_napi_id(sk, skb);
572 		sk_incoming_cpu_update(sk);
573 	} else {
574 		sk_mark_napi_id_once(sk, skb);
575 	}
576 
577 	rc = __udp_enqueue_schedule_skb(sk, skb);
578 	if (rc < 0) {
579 		int is_udplite = IS_UDPLITE(sk);
580 
581 		/* Note that an ENOMEM error is charged twice */
582 		if (rc == -ENOMEM)
583 			UDP6_INC_STATS(sock_net(sk),
584 					 UDP_MIB_RCVBUFERRORS, is_udplite);
585 		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
586 		kfree_skb(skb);
587 		return -1;
588 	}
589 
590 	return 0;
591 }
592 
593 static __inline__ int udpv6_err(struct sk_buff *skb,
594 				struct inet6_skb_parm *opt, u8 type,
595 				u8 code, int offset, __be32 info)
596 {
597 	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
598 }
599 
600 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
601 {
602 	struct udp_sock *up = udp_sk(sk);
603 	int is_udplite = IS_UDPLITE(sk);
604 
605 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
606 		goto drop;
607 
608 	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
609 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
610 
611 		/*
612 		 * This is an encapsulation socket so pass the skb to
613 		 * the socket's udp_encap_rcv() hook. Otherwise, just
614 		 * fall through and pass this up the UDP socket.
615 		 * up->encap_rcv() returns one of the following values:
616 		 * =0 if skb was successfully passed to the encap
617 		 *    handler or was discarded by it.
618 		 * >0 if skb should be passed on to UDP.
619 		 * <0 if skb should be resubmitted as proto -N
620 		 */
621 
622 		/* if we're overly short, let UDP handle it */
623 		encap_rcv = READ_ONCE(up->encap_rcv);
624 		if (encap_rcv) {
625 			int ret;
626 
627 			/* Verify checksum before giving to encap */
628 			if (udp_lib_checksum_complete(skb))
629 				goto csum_error;
630 
631 			ret = encap_rcv(sk, skb);
632 			if (ret <= 0) {
633 				__UDP_INC_STATS(sock_net(sk),
634 						UDP_MIB_INDATAGRAMS,
635 						is_udplite);
636 				return -ret;
637 			}
638 		}
639 
640 		/* FALLTHROUGH -- it's a UDP packet */
641 	}
642 
643 	/*
644 	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
645 	 */
646 	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
647 
648 		if (up->pcrlen == 0) {          /* full coverage was set  */
649 			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
650 					    UDP_SKB_CB(skb)->cscov, skb->len);
651 			goto drop;
652 		}
653 		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
654 			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
655 					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
656 			goto drop;
657 		}
658 	}
659 
660 	prefetch(&sk->sk_rmem_alloc);
661 	if (rcu_access_pointer(sk->sk_filter) &&
662 	    udp_lib_checksum_complete(skb))
663 		goto csum_error;
664 
665 	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
666 		goto drop;
667 
668 	udp_csum_pull_header(skb);
669 
670 	skb_dst_drop(skb);
671 
672 	return __udpv6_queue_rcv_skb(sk, skb);
673 
674 csum_error:
675 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
676 drop:
677 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
678 	atomic_inc(&sk->sk_drops);
679 	kfree_skb(skb);
680 	return -1;
681 }
682 
683 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
684 {
685 	struct sk_buff *next, *segs;
686 	int ret;
687 
688 	if (likely(!udp_unexpected_gso(sk, skb)))
689 		return udpv6_queue_rcv_one_skb(sk, skb);
690 
691 	__skb_push(skb, -skb_mac_offset(skb));
692 	segs = udp_rcv_segment(sk, skb, false);
693 	skb_list_walk_safe(segs, skb, next) {
694 		__skb_pull(skb, skb_transport_offset(skb));
695 
696 		ret = udpv6_queue_rcv_one_skb(sk, skb);
697 		if (ret > 0)
698 			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
699 						 true);
700 	}
701 	return 0;
702 }
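
/* Segmentation note: udp_rcv_segment() splits a GRO-coalesced or GSO skb
 * back into individual datagrams here, so each segment passes through
 * udpv6_queue_rcv_one_skb() exactly as if it had arrived on the wire by
 * itself; a positive return resubmits the segment as another protocol
 * (the encap_rcv() contract documented above).
 */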
703 
704 static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
705 				   __be16 loc_port, const struct in6_addr *loc_addr,
706 				   __be16 rmt_port, const struct in6_addr *rmt_addr,
707 				   int dif, int sdif, unsigned short hnum)
708 {
709 	struct inet_sock *inet = inet_sk(sk);
710 
711 	if (!net_eq(sock_net(sk), net))
712 		return false;
713 
714 	if (udp_sk(sk)->udp_port_hash != hnum ||
715 	    sk->sk_family != PF_INET6 ||
716 	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
717 	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
718 		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
719 	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
720 	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
721 		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
722 		return false;
723 	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
724 		return false;
725 	return true;
726 }
727 
728 static void udp6_csum_zero_error(struct sk_buff *skb)
729 {
730 	/* RFC 2460 section 8.1 says that we SHOULD log
731 	 * this error. Well, it is reasonable.
732 	 */
733 	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
734 			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
735 			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
736 }
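
/* Opting in to zero UDP checksums, e.g. for RFC 6936-style tunnels, is
 * done per socket from userspace (a hedged sketch; SOL_UDP and
 * UDP_NO_CHECK6_RX come from <linux/udp.h>):
 *
 *	int one = 1;
 *
 *	setsockopt(s, SOL_UDP, UDP_NO_CHECK6_RX, &one, sizeof(one));
 *
 * Without this, the receive paths below refuse zero-checksum datagrams
 * (see the !uh->check tests) and report them via udp6_csum_zero_error().
 */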
737 
738 /*
739  * Note: called only from the BH handler context,
740  * so we don't need to lock the hashes.
741  */
742 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
743 		const struct in6_addr *saddr, const struct in6_addr *daddr,
744 		struct udp_table *udptable, int proto)
745 {
746 	struct sock *sk, *first = NULL;
747 	const struct udphdr *uh = udp_hdr(skb);
748 	unsigned short hnum = ntohs(uh->dest);
749 	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
750 	unsigned int offset = offsetof(typeof(*sk), sk_node);
751 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
752 	int dif = inet6_iif(skb);
753 	int sdif = inet6_sdif(skb);
754 	struct hlist_node *node;
755 	struct sk_buff *nskb;
756 
757 	if (use_hash2) {
758 		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
759 			    udptable->mask;
760 		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
761 start_lookup:
762 		hslot = &udptable->hash2[hash2];
763 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
764 	}
765 
766 	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
767 		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
768 					    uh->source, saddr, dif, sdif,
769 					    hnum))
770 			continue;
771 		/* If the checksum is zero and zero-checksum receive
772 		 * (no_check6_rx) is not enabled on the socket, skip it.
773 		 */
774 		if (!uh->check && !udp_sk(sk)->no_check6_rx)
775 			continue;
776 		if (!first) {
777 			first = sk;
778 			continue;
779 		}
780 		nskb = skb_clone(skb, GFP_ATOMIC);
781 		if (unlikely(!nskb)) {
782 			atomic_inc(&sk->sk_drops);
783 			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
784 					 IS_UDPLITE(sk));
785 			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
786 					 IS_UDPLITE(sk));
787 			continue;
788 		}
789 
790 		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
791 			consume_skb(nskb);
792 	}
793 
794 	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
795 	if (use_hash2 && hash2 != hash2_any) {
796 		hash2 = hash2_any;
797 		goto start_lookup;
798 	}
799 
800 	if (first) {
801 		if (udpv6_queue_rcv_skb(first, skb) > 0)
802 			consume_skb(skb);
803 	} else {
804 		kfree_skb(skb);
805 		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
806 				 proto == IPPROTO_UDPLITE);
807 	}
808 	return 0;
809 }
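
/* Delivery note: every match except the first receives a clone of the
 * skb; the original is queued to the first matching socket at the end,
 * which avoids any clone in the common single-listener case.
 */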
810 
811 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
812 {
813 	if (udp_sk_rx_dst_set(sk, dst)) {
814 		const struct rt6_info *rt = (const struct rt6_info *)dst;
815 
816 		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
817 	}
818 }
819 
820 /* wrapper for udp_queue_rcv_skb taking care of csum conversion and
821  * return code conversion for IP layer consumption
822  */
823 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
824 				struct udphdr *uh)
825 {
826 	int ret;
827 
828 	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
829 		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
830 
831 	ret = udpv6_queue_rcv_skb(sk, skb);
832 
833 	/* a return value > 0 means to resubmit the input */
834 	if (ret > 0)
835 		return ret;
836 	return 0;
837 }
838 
839 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
840 		   int proto)
841 {
842 	const struct in6_addr *saddr, *daddr;
843 	struct net *net = dev_net(skb->dev);
844 	struct udphdr *uh;
845 	struct sock *sk;
846 	bool refcounted;
847 	u32 ulen = 0;
848 
849 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
850 		goto discard;
851 
852 	saddr = &ipv6_hdr(skb)->saddr;
853 	daddr = &ipv6_hdr(skb)->daddr;
854 	uh = udp_hdr(skb);
855 
856 	ulen = ntohs(uh->len);
857 	if (ulen > skb->len)
858 		goto short_packet;
859 
860 	if (proto == IPPROTO_UDP) {
861 		/* UDP validates ulen. */
862 
863 		/* Check for jumbo payload */
864 		if (ulen == 0)
865 			ulen = skb->len;
866 
867 		if (ulen < sizeof(*uh))
868 			goto short_packet;
869 
870 		if (ulen < skb->len) {
871 			if (pskb_trim_rcsum(skb, ulen))
872 				goto short_packet;
873 			saddr = &ipv6_hdr(skb)->saddr;
874 			daddr = &ipv6_hdr(skb)->daddr;
875 			uh = udp_hdr(skb);
876 		}
877 	}
878 
879 	if (udp6_csum_init(skb, uh, proto))
880 		goto csum_error;
881 
882 	/* Check if the socket is already available, e.g. due to early demux */
883 	sk = skb_steal_sock(skb, &refcounted);
884 	if (sk) {
885 		struct dst_entry *dst = skb_dst(skb);
886 		int ret;
887 
888 		if (unlikely(sk->sk_rx_dst != dst))
889 			udp6_sk_rx_dst_set(sk, dst);
890 
891 		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
892 			if (refcounted)
893 				sock_put(sk);
894 			goto report_csum_error;
895 		}
896 
897 		ret = udp6_unicast_rcv_skb(sk, skb, uh);
898 		if (refcounted)
899 			sock_put(sk);
900 		return ret;
901 	}
902 
903 	/*
904 	 *	Multicast receive code
905 	 */
906 	if (ipv6_addr_is_multicast(daddr))
907 		return __udp6_lib_mcast_deliver(net, skb,
908 				saddr, daddr, udptable, proto);
909 
910 	/* Unicast */
911 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
912 	if (sk) {
913 		if (!uh->check && !udp_sk(sk)->no_check6_rx)
914 			goto report_csum_error;
915 		return udp6_unicast_rcv_skb(sk, skb, uh);
916 	}
917 
918 	if (!uh->check)
919 		goto report_csum_error;
920 
921 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
922 		goto discard;
923 
924 	if (udp_lib_checksum_complete(skb))
925 		goto csum_error;
926 
927 	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
928 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
929 
930 	kfree_skb(skb);
931 	return 0;
932 
933 short_packet:
934 	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
935 			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
936 			    saddr, ntohs(uh->source),
937 			    ulen, skb->len,
938 			    daddr, ntohs(uh->dest));
939 	goto discard;
940 
941 report_csum_error:
942 	udp6_csum_zero_error(skb);
943 csum_error:
944 	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
945 discard:
946 	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
947 	kfree_skb(skb);
948 	return 0;
949 }
950 
951 
952 static struct sock *__udp6_lib_demux_lookup(struct net *net,
953 			__be16 loc_port, const struct in6_addr *loc_addr,
954 			__be16 rmt_port, const struct in6_addr *rmt_addr,
955 			int dif, int sdif)
956 {
957 	unsigned short hnum = ntohs(loc_port);
958 	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
959 	unsigned int slot2 = hash2 & udp_table.mask;
960 	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
961 	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
962 	struct sock *sk;
963 
964 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
965 		if (sk->sk_state == TCP_ESTABLISHED &&
966 		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
967 			return sk;
968 		/* Only check first socket in chain */
969 		break;
970 	}
971 	return NULL;
972 }
973 
974 INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
975 {
976 	struct net *net = dev_net(skb->dev);
977 	const struct udphdr *uh;
978 	struct sock *sk;
979 	struct dst_entry *dst;
980 	int dif = skb->dev->ifindex;
981 	int sdif = inet6_sdif(skb);
982 
983 	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
984 	    sizeof(struct udphdr)))
985 		return;
986 
987 	uh = udp_hdr(skb);
988 
989 	if (skb->pkt_type == PACKET_HOST)
990 		sk = __udp6_lib_demux_lookup(net, uh->dest,
991 					     &ipv6_hdr(skb)->daddr,
992 					     uh->source, &ipv6_hdr(skb)->saddr,
993 					     dif, sdif);
994 	else
995 		return;
996 
997 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
998 		return;
999 
1000 	skb->sk = sk;
1001 	skb->destructor = sock_efree;
1002 	dst = READ_ONCE(sk->sk_rx_dst);
1003 
1004 	if (dst)
1005 		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1006 	if (dst) {
1007 		/* Set noref for now.
1008 		 * Any place that wants to hold the dst has to call
1009 		 * dst_hold_safe().
1010 		 */
1011 		skb_dst_set_noref(skb, dst);
1012 	}
1013 }
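
/* Early-demux note: only connected (TCP_ESTABLISHED) UDP sockets are
 * matched here, and only the first socket in the hslot2 chain is
 * examined; on a hit, the packet skips the second socket lookup and can
 * reuse the socket's cached rx_dst instead of a full route lookup.
 */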
1014 
1015 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1016 {
1017 	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
1018 }
1019 
1020 /*
1021  * Throw away all pending data and cancel the corking. Socket is locked.
1022  */
1023 static void udp_v6_flush_pending_frames(struct sock *sk)
1024 {
1025 	struct udp_sock *up = udp_sk(sk);
1026 
1027 	if (up->pending == AF_INET)
1028 		udp_flush_pending_frames(sk);
1029 	else if (up->pending) {
1030 		up->len = 0;
1031 		up->pending = 0;
1032 		ip6_flush_pending_frames(sk);
1033 	}
1034 }
1035 
1036 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1037 			     int addr_len)
1038 {
1039 	if (addr_len < offsetofend(struct sockaddr, sa_family))
1040 		return -EINVAL;
1041 	/* The following checks are replicated from __ip6_datagram_connect()
1042 	 * and intended to prevent the BPF program called below from accessing
1043 	 * bytes beyond the bound specified by the user in addr_len.
1044 	 */
1045 	if (uaddr->sa_family == AF_INET) {
1046 		if (__ipv6_only_sock(sk))
1047 			return -EAFNOSUPPORT;
1048 		return udp_pre_connect(sk, uaddr, addr_len);
1049 	}
1050 
1051 	if (addr_len < SIN6_LEN_RFC2133)
1052 		return -EINVAL;
1053 
1054 	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
1055 }
1056 
1057 /**
1058  *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
1059  *	@sk:	socket we are sending on
1060  *	@skb:	sk_buff containing the filled-in UDP header
1061  *		(checksum field must be zeroed out)
 *	@saddr:	source address
 *	@daddr:	destination address
 *	@len:	length of the UDP datagram, header included
1062  */
1063 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1064 				 const struct in6_addr *saddr,
1065 				 const struct in6_addr *daddr, int len)
1066 {
1067 	unsigned int offset;
1068 	struct udphdr *uh = udp_hdr(skb);
1069 	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1070 	__wsum csum = 0;
1071 
1072 	if (!frags) {
1073 		/* Only one fragment on the socket.  */
1074 		skb->csum_start = skb_transport_header(skb) - skb->head;
1075 		skb->csum_offset = offsetof(struct udphdr, check);
1076 		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1077 	} else {
1078 		/*
1079 		 * HW checksumming won't work here: there are two or more
1080 		 * fragments on the socket, so the csums of all sk_buffs
1081 		 * must be combined in software
1082 		 */
1083 		offset = skb_transport_offset(skb);
1084 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1085 		csum = skb->csum;
1086 
1087 		skb->ip_summed = CHECKSUM_NONE;
1088 
1089 		do {
1090 			csum = csum_add(csum, frags->csum);
1091 		} while ((frags = frags->next));
1092 
1093 		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1094 					    csum);
1095 		if (uh->check == 0)
1096 			uh->check = CSUM_MANGLED_0;
1097 	}
1098 }
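
/* Checksum note: csum_ipv6_magic() folds in the pseudo-header (source and
 * destination addresses, upper-layer length, IPPROTO_UDP) per RFC 2460
 * section 8.1. In the multi-fragment software path a computed checksum of
 * zero must go on the wire as all-ones (CSUM_MANGLED_0), since a zero
 * checksum field means "no checksum was generated".
 */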
1099 
1100 /*
1101  *	Sending
1102  */
1103 
1104 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1105 			   struct inet_cork *cork)
1106 {
1107 	struct sock *sk = skb->sk;
1108 	struct udphdr *uh;
1109 	int err = 0;
1110 	int is_udplite = IS_UDPLITE(sk);
1111 	__wsum csum = 0;
1112 	int offset = skb_transport_offset(skb);
1113 	int len = skb->len - offset;
1114 	int datalen = len - sizeof(*uh);
1115 
1116 	/*
1117 	 * Create a UDP header
1118 	 */
1119 	uh = udp_hdr(skb);
1120 	uh->source = fl6->fl6_sport;
1121 	uh->dest = fl6->fl6_dport;
1122 	uh->len = htons(len);
1123 	uh->check = 0;
1124 
1125 	if (cork->gso_size) {
1126 		const int hlen = skb_network_header_len(skb) +
1127 				 sizeof(struct udphdr);
1128 
1129 		if (hlen + cork->gso_size > cork->fragsize) {
1130 			kfree_skb(skb);
1131 			return -EINVAL;
1132 		}
1133 		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
1134 			kfree_skb(skb);
1135 			return -EINVAL;
1136 		}
1137 		if (udp_sk(sk)->no_check6_tx) {
1138 			kfree_skb(skb);
1139 			return -EINVAL;
1140 		}
1141 		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1142 		    dst_xfrm(skb_dst(skb))) {
1143 			kfree_skb(skb);
1144 			return -EIO;
1145 		}
1146 
1147 		if (datalen > cork->gso_size) {
1148 			skb_shinfo(skb)->gso_size = cork->gso_size;
1149 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1150 			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1151 								 cork->gso_size);
1152 		}
1153 		goto csum_partial;
1154 	}
1155 
1156 	if (is_udplite)
1157 		csum = udplite_csum(skb);
1158 	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
1159 		skb->ip_summed = CHECKSUM_NONE;
1160 		goto send;
1161 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1162 csum_partial:
1163 		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1164 		goto send;
1165 	} else
1166 		csum = udp_csum(skb);
1167 
1168 	/* add protocol-dependent pseudo-header */
1169 	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1170 				    len, fl6->flowi6_proto, csum);
1171 	if (uh->check == 0)
1172 		uh->check = CSUM_MANGLED_0;
1173 
1174 send:
1175 	err = ip6_send_skb(skb);
1176 	if (err) {
1177 		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
1178 			UDP6_INC_STATS(sock_net(sk),
1179 				       UDP_MIB_SNDBUFERRORS, is_udplite);
1180 			err = 0;
1181 		}
1182 	} else {
1183 		UDP6_INC_STATS(sock_net(sk),
1184 			       UDP_MIB_OUTDATAGRAMS, is_udplite);
1185 	}
1186 	return err;
1187 }
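
/* The cork->gso_size checks above implement UDP_SEGMENT: one large send
 * is cut into datagrams of at most gso_size payload bytes by GSO. A
 * hedged userspace sketch (the 4000-byte send below yields four wire
 * datagrams, 3 x 1200 + 400):
 *
 *	int gso = 1200;
 *
 *	setsockopt(s, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *	send(s, buf, 4000, 0);
 */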
1188 
1189 static int udp_v6_push_pending_frames(struct sock *sk)
1190 {
1191 	struct sk_buff *skb;
1192 	struct udp_sock  *up = udp_sk(sk);
1193 	struct flowi6 fl6;
1194 	int err = 0;
1195 
1196 	if (up->pending == AF_INET)
1197 		return udp_push_pending_frames(sk);
1198 
1199 	/* ip6_finish_skb will release the cork, so make a copy of
1200 	 * fl6 here.
1201 	 */
1202 	fl6 = inet_sk(sk)->cork.fl.u.ip6;
1203 
1204 	skb = ip6_finish_skb(sk);
1205 	if (!skb)
1206 		goto out;
1207 
1208 	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);
1209 
1210 out:
1211 	up->len = 0;
1212 	up->pending = 0;
1213 	return err;
1214 }
1215 
1216 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1217 {
1218 	struct ipv6_txoptions opt_space;
1219 	struct udp_sock *up = udp_sk(sk);
1220 	struct inet_sock *inet = inet_sk(sk);
1221 	struct ipv6_pinfo *np = inet6_sk(sk);
1222 	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1223 	struct in6_addr *daddr, *final_p, final;
1224 	struct ipv6_txoptions *opt = NULL;
1225 	struct ipv6_txoptions *opt_to_free = NULL;
1226 	struct ip6_flowlabel *flowlabel = NULL;
1227 	struct flowi6 fl6;
1228 	struct dst_entry *dst;
1229 	struct ipcm6_cookie ipc6;
1230 	int addr_len = msg->msg_namelen;
1231 	bool connected = false;
1232 	int ulen = len;
1233 	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
1234 	int err;
1235 	int is_udplite = IS_UDPLITE(sk);
1236 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1237 
1238 	ipcm6_init(&ipc6);
1239 	ipc6.gso_size = up->gso_size;
1240 	ipc6.sockc.tsflags = sk->sk_tsflags;
1241 	ipc6.sockc.mark = sk->sk_mark;
1242 
1243 	/* destination address check */
1244 	if (sin6) {
1245 		if (addr_len < offsetof(struct sockaddr, sa_data))
1246 			return -EINVAL;
1247 
1248 		switch (sin6->sin6_family) {
1249 		case AF_INET6:
1250 			if (addr_len < SIN6_LEN_RFC2133)
1251 				return -EINVAL;
1252 			daddr = &sin6->sin6_addr;
1253 			if (ipv6_addr_any(daddr) &&
1254 			    ipv6_addr_v4mapped(&np->saddr))
1255 				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1256 						       daddr);
1257 			break;
1258 		case AF_INET:
1259 			goto do_udp_sendmsg;
1260 		case AF_UNSPEC:
1261 			msg->msg_name = sin6 = NULL;
1262 			msg->msg_namelen = addr_len = 0;
1263 			daddr = NULL;
1264 			break;
1265 		default:
1266 			return -EINVAL;
1267 		}
1268 	} else if (!up->pending) {
1269 		if (sk->sk_state != TCP_ESTABLISHED)
1270 			return -EDESTADDRREQ;
1271 		daddr = &sk->sk_v6_daddr;
1272 	} else
1273 		daddr = NULL;
1274 
1275 	if (daddr) {
1276 		if (ipv6_addr_v4mapped(daddr)) {
1277 			struct sockaddr_in sin;
1278 			sin.sin_family = AF_INET;
1279 			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1280 			sin.sin_addr.s_addr = daddr->s6_addr32[3];
1281 			msg->msg_name = &sin;
1282 			msg->msg_namelen = sizeof(sin);
1283 do_udp_sendmsg:
1284 			if (__ipv6_only_sock(sk))
1285 				return -ENETUNREACH;
1286 			return udp_sendmsg(sk, msg, len);
1287 		}
1288 	}
1289 
1290 	if (up->pending == AF_INET)
1291 		return udp_sendmsg(sk, msg, len);
1292 
1293 	/* Rough check on arithmetic overflow;
1294 	   a more precise check is made in ip6_append_data().
1295 	   */
1296 	if (len > INT_MAX - sizeof(struct udphdr))
1297 		return -EMSGSIZE;
1298 
1299 	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1300 	if (up->pending) {
1301 		/*
1302 		 * There are pending frames.
1303 		 * The socket lock must be held while it's corked.
1304 		 */
1305 		lock_sock(sk);
1306 		if (likely(up->pending)) {
1307 			if (unlikely(up->pending != AF_INET6)) {
1308 				release_sock(sk);
1309 				return -EAFNOSUPPORT;
1310 			}
1311 			dst = NULL;
1312 			goto do_append_data;
1313 		}
1314 		release_sock(sk);
1315 	}
1316 	ulen += sizeof(struct udphdr);
1317 
1318 	memset(&fl6, 0, sizeof(fl6));
1319 
1320 	if (sin6) {
1321 		if (sin6->sin6_port == 0)
1322 			return -EINVAL;
1323 
1324 		fl6.fl6_dport = sin6->sin6_port;
1325 		daddr = &sin6->sin6_addr;
1326 
1327 		if (np->sndflow) {
1328 			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1329 			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1330 				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1331 				if (IS_ERR(flowlabel))
1332 					return -EINVAL;
1333 			}
1334 		}
1335 
1336 		/*
1337 		 * Otherwise it will be difficult to maintain
1338 		 * sk->sk_dst_cache.
1339 		 */
1340 		if (sk->sk_state == TCP_ESTABLISHED &&
1341 		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1342 			daddr = &sk->sk_v6_daddr;
1343 
1344 		if (addr_len >= sizeof(struct sockaddr_in6) &&
1345 		    sin6->sin6_scope_id &&
1346 		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1347 			fl6.flowi6_oif = sin6->sin6_scope_id;
1348 	} else {
1349 		if (sk->sk_state != TCP_ESTABLISHED)
1350 			return -EDESTADDRREQ;
1351 
1352 		fl6.fl6_dport = inet->inet_dport;
1353 		daddr = &sk->sk_v6_daddr;
1354 		fl6.flowlabel = np->flow_label;
1355 		connected = true;
1356 	}
1357 
1358 	if (!fl6.flowi6_oif)
1359 		fl6.flowi6_oif = sk->sk_bound_dev_if;
1360 
1361 	if (!fl6.flowi6_oif)
1362 		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1363 
1364 	fl6.flowi6_mark = ipc6.sockc.mark;
1365 	fl6.flowi6_uid = sk->sk_uid;
1366 
1367 	if (msg->msg_controllen) {
1368 		opt = &opt_space;
1369 		memset(opt, 0, sizeof(struct ipv6_txoptions));
1370 		opt->tot_len = sizeof(*opt);
1371 		ipc6.opt = opt;
1372 
1373 		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1374 		if (err > 0)
1375 			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
1376 						    &ipc6);
1377 		if (err < 0) {
1378 			fl6_sock_release(flowlabel);
1379 			return err;
1380 		}
1381 		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1382 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1383 			if (IS_ERR(flowlabel))
1384 				return -EINVAL;
1385 		}
1386 		if (!(opt->opt_nflen|opt->opt_flen))
1387 			opt = NULL;
1388 		connected = false;
1389 	}
1390 	if (!opt) {
1391 		opt = txopt_get(np);
1392 		opt_to_free = opt;
1393 	}
1394 	if (flowlabel)
1395 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
1396 	opt = ipv6_fixup_options(&opt_space, opt);
1397 	ipc6.opt = opt;
1398 
1399 	fl6.flowi6_proto = sk->sk_protocol;
1400 	fl6.daddr = *daddr;
1401 	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
1402 		fl6.saddr = np->saddr;
1403 	fl6.fl6_sport = inet->inet_sport;
1404 
1405 	if (cgroup_bpf_enabled && !connected) {
1406 		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1407 					   (struct sockaddr *)sin6, &fl6.saddr);
1408 		if (err)
1409 			goto out_no_dst;
1410 		if (sin6) {
1411 			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1412 				/* BPF program rewrote the IPv6-only address to
1413 				 * an IPv4-mapped one; currently unsupported.
1414 				 */
1415 				err = -ENOTSUPP;
1416 				goto out_no_dst;
1417 			}
1418 			if (sin6->sin6_port == 0) {
1419 				/* BPF program set invalid port. Reject it. */
1420 				err = -EINVAL;
1421 				goto out_no_dst;
1422 			}
1423 			fl6.fl6_dport = sin6->sin6_port;
1424 			fl6.daddr = sin6->sin6_addr;
1425 		}
1426 	}
1427 
1428 	if (ipv6_addr_any(&fl6.daddr))
1429 		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1430 
1431 	final_p = fl6_update_dst(&fl6, opt, &final);
1432 	if (final_p)
1433 		connected = false;
1434 
1435 	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
1436 		fl6.flowi6_oif = np->mcast_oif;
1437 		connected = false;
1438 	} else if (!fl6.flowi6_oif)
1439 		fl6.flowi6_oif = np->ucast_oif;
1440 
1441 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1442 
1443 	if (ipc6.tclass < 0)
1444 		ipc6.tclass = np->tclass;
1445 
1446 	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
1447 
1448 	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
1449 	if (IS_ERR(dst)) {
1450 		err = PTR_ERR(dst);
1451 		dst = NULL;
1452 		goto out;
1453 	}
1454 
1455 	if (ipc6.hlimit < 0)
1456 		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
1457 
1458 	if (msg->msg_flags&MSG_CONFIRM)
1459 		goto do_confirm;
1460 back_from_confirm:
1461 
1462 	/* Lockless fast path for the non-corking case */
1463 	if (!corkreq) {
1464 		struct inet_cork_full cork;
1465 		struct sk_buff *skb;
1466 
1467 		skb = ip6_make_skb(sk, getfrag, msg, ulen,
1468 				   sizeof(struct udphdr), &ipc6,
1469 				   &fl6, (struct rt6_info *)dst,
1470 				   msg->msg_flags, &cork);
1471 		err = PTR_ERR(skb);
1472 		if (!IS_ERR_OR_NULL(skb))
1473 			err = udp_v6_send_skb(skb, &fl6, &cork.base);
1474 		goto out;
1475 	}
1476 
1477 	lock_sock(sk);
1478 	if (unlikely(up->pending)) {
1479 		/* The socket is already corked while preparing it. */
1480 		/* ... which is an evident application bug. --ANK */
1481 		release_sock(sk);
1482 
1483 		net_dbg_ratelimited("udp cork app bug 2\n");
1484 		err = -EINVAL;
1485 		goto out;
1486 	}
1487 
1488 	up->pending = AF_INET6;
1489 
1490 do_append_data:
1491 	if (ipc6.dontfrag < 0)
1492 		ipc6.dontfrag = np->dontfrag;
1493 	up->len += ulen;
1494 	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1495 			      &ipc6, &fl6, (struct rt6_info *)dst,
1496 			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1497 	if (err)
1498 		udp_v6_flush_pending_frames(sk);
1499 	else if (!corkreq)
1500 		err = udp_v6_push_pending_frames(sk);
1501 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1502 		up->pending = 0;
1503 
1504 	if (err > 0)
1505 		err = np->recverr ? net_xmit_errno(err) : 0;
1506 	release_sock(sk);
1507 
1508 out:
1509 	dst_release(dst);
1510 out_no_dst:
1511 	fl6_sock_release(flowlabel);
1512 	txopt_put(opt_to_free);
1513 	if (!err)
1514 		return len;
1515 	/*
1516 	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1517 	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1518 	 * we don't have a good statistic (IpOutDiscards but it can be too many
1519 	 * things).  We could add another new stat but at least for now that
1520 	 * seems like overkill.
1521 	 */
1522 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1523 		UDP6_INC_STATS(sock_net(sk),
1524 			       UDP_MIB_SNDBUFERRORS, is_udplite);
1525 	}
1526 	return err;
1527 
1528 do_confirm:
1529 	if (msg->msg_flags & MSG_PROBE)
1530 		dst_confirm_neigh(dst, &fl6.daddr);
1531 	if (!(msg->msg_flags&MSG_PROBE) || len)
1532 		goto back_from_confirm;
1533 	err = 0;
1534 	goto out;
1535 }
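
/* Dual-stack note: a v4-mapped destination (::ffff:a.b.c.d) is converted
 * to a struct sockaddr_in above and the message is handed to
 * udp_sendmsg(), so the IPv4 output path carries the packet while the
 * application keeps using its AF_INET6 socket; __ipv6_only_sock() vetoes
 * this with -ENETUNREACH.
 */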
1536 
1537 void udpv6_destroy_sock(struct sock *sk)
1538 {
1539 	struct udp_sock *up = udp_sk(sk);
1540 	lock_sock(sk);
1541 	udp_v6_flush_pending_frames(sk);
1542 	release_sock(sk);
1543 
1544 	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1545 		if (up->encap_type) {
1546 			void (*encap_destroy)(struct sock *sk);
1547 			encap_destroy = READ_ONCE(up->encap_destroy);
1548 			if (encap_destroy)
1549 				encap_destroy(sk);
1550 		}
1551 		if (up->encap_enabled)
1552 			static_branch_dec(&udpv6_encap_needed_key);
1553 	}
1554 
1555 	inet6_destroy_sock(sk);
1556 }
1557 
1558 /*
1559  *	Socket option code for UDP
1560  */
1561 int udpv6_setsockopt(struct sock *sk, int level, int optname,
1562 		     char __user *optval, unsigned int optlen)
1563 {
1564 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1565 		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1566 					  udp_v6_push_pending_frames);
1567 	return ipv6_setsockopt(sk, level, optname, optval, optlen);
1568 }
1569 
1570 #ifdef CONFIG_COMPAT
1571 int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
1572 			    char __user *optval, unsigned int optlen)
1573 {
1574 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1575 		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1576 					  udp_v6_push_pending_frames);
1577 	return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
1578 }
1579 #endif
1580 
1581 int udpv6_getsockopt(struct sock *sk, int level, int optname,
1582 		     char __user *optval, int __user *optlen)
1583 {
1584 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1585 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1586 	return ipv6_getsockopt(sk, level, optname, optval, optlen);
1587 }
1588 
1589 #ifdef CONFIG_COMPAT
1590 int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
1591 			    char __user *optval, int __user *optlen)
1592 {
1593 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1594 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1595 	return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
1596 }
1597 #endif
1598 
1599 /* thinking of making this const? Don't.
1600  * early_demux can change based on sysctl.
1601  */
1602 static struct inet6_protocol udpv6_protocol = {
1603 	.early_demux	=	udp_v6_early_demux,
1604 	.early_demux_handler =  udp_v6_early_demux,
1605 	.handler	=	udpv6_rcv,
1606 	.err_handler	=	udpv6_err,
1607 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1608 };
1609 
1610 /* ------------------------------------------------------------------------ */
1611 #ifdef CONFIG_PROC_FS
1612 int udp6_seq_show(struct seq_file *seq, void *v)
1613 {
1614 	if (v == SEQ_START_TOKEN) {
1615 		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1616 	} else {
1617 		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1618 		struct inet_sock *inet = inet_sk(v);
1619 		__u16 srcp = ntohs(inet->inet_sport);
1620 		__u16 destp = ntohs(inet->inet_dport);
1621 		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1622 					  udp_rqueue_get(v), bucket);
1623 	}
1624 	return 0;
1625 }
1626 
1627 const struct seq_operations udp6_seq_ops = {
1628 	.start		= udp_seq_start,
1629 	.next		= udp_seq_next,
1630 	.stop		= udp_seq_stop,
1631 	.show		= udp6_seq_show,
1632 };
1633 EXPORT_SYMBOL(udp6_seq_ops);
1634 
1635 static struct udp_seq_afinfo udp6_seq_afinfo = {
1636 	.family		= AF_INET6,
1637 	.udp_table	= &udp_table,
1638 };
1639 
1640 int __net_init udp6_proc_init(struct net *net)
1641 {
1642 	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1643 			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1644 		return -ENOMEM;
1645 	return 0;
1646 }
1647 
1648 void udp6_proc_exit(struct net *net)
1649 {
1650 	remove_proc_entry("udp6", net->proc_net);
1651 }
1652 #endif /* CONFIG_PROC_FS */
1653 
1654 /* ------------------------------------------------------------------------ */
1655 
1656 struct proto udpv6_prot = {
1657 	.name			= "UDPv6",
1658 	.owner			= THIS_MODULE,
1659 	.close			= udp_lib_close,
1660 	.pre_connect		= udpv6_pre_connect,
1661 	.connect		= ip6_datagram_connect,
1662 	.disconnect		= udp_disconnect,
1663 	.ioctl			= udp_ioctl,
1664 	.init			= udp_init_sock,
1665 	.destroy		= udpv6_destroy_sock,
1666 	.setsockopt		= udpv6_setsockopt,
1667 	.getsockopt		= udpv6_getsockopt,
1668 	.sendmsg		= udpv6_sendmsg,
1669 	.recvmsg		= udpv6_recvmsg,
1670 	.release_cb		= ip6_datagram_release_cb,
1671 	.hash			= udp_lib_hash,
1672 	.unhash			= udp_lib_unhash,
1673 	.rehash			= udp_v6_rehash,
1674 	.get_port		= udp_v6_get_port,
1675 	.memory_allocated	= &udp_memory_allocated,
1676 	.sysctl_mem		= sysctl_udp_mem,
1677 	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1678 	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1679 	.obj_size		= sizeof(struct udp6_sock),
1680 	.h.udp_table		= &udp_table,
1681 #ifdef CONFIG_COMPAT
1682 	.compat_setsockopt	= compat_udpv6_setsockopt,
1683 	.compat_getsockopt	= compat_udpv6_getsockopt,
1684 #endif
1685 	.diag_destroy		= udp_abort,
1686 };
1687 
1688 static struct inet_protosw udpv6_protosw = {
1689 	.type =      SOCK_DGRAM,
1690 	.protocol =  IPPROTO_UDP,
1691 	.prot =      &udpv6_prot,
1692 	.ops =       &inet6_dgram_ops,
1693 	.flags =     INET_PROTOSW_PERMANENT,
1694 };
1695 
1696 int __init udpv6_init(void)
1697 {
1698 	int ret;
1699 
1700 	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1701 	if (ret)
1702 		goto out;
1703 
1704 	ret = inet6_register_protosw(&udpv6_protosw);
1705 	if (ret)
1706 		goto out_udpv6_protocol;
1707 out:
1708 	return ret;
1709 
1710 out_udpv6_protocol:
1711 	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1712 	goto out;
1713 }
1714 
1715 void udpv6_exit(void)
1716 {
1717 	inet6_unregister_protosw(&udpv6_protosw);
1718 	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1719 }
1720