xref: /openbmc/linux/net/ipv6/udp.c (revision 4bb1eb3c)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
 *      YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

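/* Score a candidate socket for an incoming packet.  A return of -1 means
 * "cannot receive this packet at all"; otherwise one point is added for
 * each attribute the socket is specifically bound or connected to (remote
 * port, remote address, bound device), plus a bonus point when the
 * socket's last-used CPU matches the current one, so the most specific
 * and most cache-friendly socket wins the lookup.
 */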
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}

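/* When the best-scoring socket is part of a SO_REUSEPORT group, pick the
 * final receiver from the group by the 4-tuple hash (or by the group's
 * BPF selection program, if one is attached) inside
 * reuseport_select_sock().  Connected (established) sockets are excluded
 * here: they are matched individually by score instead.
 */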
static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
				     struct sk_buff *skb,
				     const struct in6_addr *saddr,
				     __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum)
{
	struct sock *reuse_sk = NULL;
	u32 hash;

	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, hash, skb,
						 sizeof(struct udphdr));
	}
	return reuse_sk;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			result = lookup_reuseport(net, sk, skb,
						  saddr, sport, daddr, hnum);
			/* Fall back to scoring if group has connections */
			if (result && !reuseport_has_conns(sk, false))
				return result;

			result = result ? : sk;
			badness = score;
		}
	}
	return result;
}

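/* Give a BPF sk_lookup program a chance to pick the receiving socket.
 * This only applies to the global UDP table; the socket it returns (if
 * any) is still run through reuseport selection, so a dispatch into a
 * reuseport group behaves the same as a regular lookup.
 */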
static inline struct sock *udp6_lookup_run_bpf(struct net *net,
					       struct udp_table *udptable,
					       struct sk_buff *skb,
					       const struct in6_addr *saddr,
					       __be16 sport,
					       const struct in6_addr *daddr,
					       u16 hnum)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	if (udptable != &udp_table)
		return NULL; /* only UDP is supported */

	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
					    saddr, sport, daddr, hnum, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

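/* Socket lookup for an incoming packet proceeds in three stages, each
 * cheaper than a full hash walk: first the port+address hash slot for
 * exact and connected matches, then an optional BPF sk_lookup redirect,
 * and finally the port+wildcard-address slot for INADDR_ANY binds.
 */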
/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2;
	struct udp_hslot *hslot2;
	struct sock *result, *sk;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
		sk = udp6_lookup_run_bpf(net, udptable, skb,
					 saddr, sport, daddr, hnum);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	slot2 = hash2 & udptable->mask;
	hslot2 = &udptable->hash2[slot2];

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, NULL);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
				dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy: if there is something there,
 *	we return it; otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		if (cgroup_bpf_enabled)
			BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						(struct sockaddr *)sin6);
	}

	if (udp_sk(sk)->gro_enabled)
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

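/* udpv6_encap_needed_key keeps the tunnel paths in the receive and error
 * handlers zero-cost until the first encapsulation socket is set up: the
 * static branch is patched in by udpv6_encap_enable() and patched out
 * again when the last encap socket goes away (see udpv6_destroy_sock()).
 */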
DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on endpoints, in this case we won't be able to
 * trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int network_offset, transport_offset;
	struct sock *sk;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		int (*lookup)(struct sock *sk, struct sk_buff *skb);
		struct udp_sock *up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

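/* ICMPv6 error handler for UDP: find the socket the offending packet was
 * sent from (falling back to tunnel matching, above), translate the ICMP
 * type/code into an errno, apply PMTU updates and redirects, and queue
 * the error for the application only if it asked for IPV6_RECVERR or is
 * connected and the error is hard.
 */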
int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
	if (!sk) {
		/* No socket for error: try tunnels before discarding */
		sk = ERR_PTR(-ENOENT);
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		}

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     sk->sk_mark, sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel)
		goto out;

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return 0;
}

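/* Final enqueue onto the socket receive queue.  Connected sockets feed
 * the flow hash to RPS and keep sk_incoming_cpu current; unconnected
 * sockets only record the napi id once, since their traffic can arrive
 * from many flows.  An -ENOMEM from the enqueue is deliberately counted
 * in both RCVBUFERRORS and INERRORS.
 */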
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS(sock_net(sk),
					 UDP_MIB_RCVBUFERRORS, is_udplite);
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following values:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

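/* Receive-path entry for one skb that may still be a GSO aggregate: a
 * packet that arrived GROed on a socket that cannot accept it is first
 * resegmented with udp_rcv_segment(), and every resulting segment is
 * queued individually; a positive return from the per-skb handler means
 * "resubmit to this protocol" (used by encapsulation sockets).
 */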
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}

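/* Full-match predicate used on the multicast delivery path: unlike
 * compute_score() there is no ranking, a socket either may receive the
 * packet (port, peer, bound device and local address all compatible, and
 * group membership confirmed by inet6_mc_check()) or it may not.
 */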
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* wrapper for udpv6_queue_rcv_skb taking care of csum conversion and
 * return code conversion for IP layer consumption
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

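/* Main receive routine, shared by UDP and UDP-Lite.  In order: make the
 * UDP header readable, validate the datagram length (trimming a padded
 * skb down to it), initialize the checksum state, then deliver: to an
 * early-demuxed socket if one is already attached to the skb, to the
 * multicast path, or to a unicast lookup, answering with an ICMPv6 port
 * unreachable when nobody wants the packet.
 */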
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb, &refcounted);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}


static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

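/* Early demux runs before routing and can only safely match connected
 * sockets (TCP_ESTABLISHED), looked up with an exact 4-tuple match on
 * PACKET_HOST packets; on success the socket and its cached, validated
 * rx dst are attached to the skb so the regular receive path can skip
 * both the socket lookup and the route lookup.
 */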
INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (__ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket.  */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW checksum won't work as there are two or more
		 * fragments on the socket, so the csums of all the
		 * sk_buffs must be summed together.
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

/*
 *	Sending
 */

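/* Fill in the UDP header on a fully built skb and hand it to the IP
 * layer.  For GSO (UDP_SEGMENT) packets the cork carries the segment
 * size and several sanity checks apply: the segments must fit in the
 * cork's fragment size, the segment count is bounded, and checksum
 * offload must be usable.  Otherwise the checksum is chosen among
 * UDP-Lite, disabled (no_check6_tx), hardware-offloaded, or software.
 */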
static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_sk(sk)->no_check6_tx) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);
		}
		goto csum_partial;
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock  *up = udp_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	/* ip6_finish_skb will release the cork, so make a copy of
	 * fl6 here.
	 */
	fl6 = inet_sk(sk)->cork.fl.u.ip6;

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}

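/* sendmsg() for v6 sockets.  Datagrams to IPv4(-mapped) destinations are
 * bounced to udp_sendmsg(); otherwise the flow (addresses, ports, flow
 * label, oif, mark) is assembled from the socket state, msg_name and
 * cmsgs, a route is looked up, and the common uncorked case takes a
 * lockless ip6_make_skb() fast path, with corked sends falling back to
 * ip6_append_data() under the socket lock.
 */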
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi6 fl6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init(&ipc6);
	ipc6.gso_size = up->gso_size;
	ipc6.sockc.tsflags = sk->sk_tsflags;
	ipc6.sockc.mark = sk->sk_mark;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(sk, msg, len);
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(sk, msg, len);

	/* Rough check on arithmetic overflow,
	   a better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl6, 0, sizeof(fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6.fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6.flowi6_mark = ipc6.sockc.mark;
	fl6.flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0)
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
						    &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = false;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6.flowi6_proto = sk->sk_protocol;
	fl6.daddr = *daddr;
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6, &fl6.saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* BPF program rewrote the IPv6-only address
				 * to an IPv4-mapped one, which is currently
				 * unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6.fl6_dport = sin6->sin6_port;
			fl6.daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6.daddr))
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(&fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
		connected = false;
	} else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct inet_cork_full cork;
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   &fl6, (struct rt6_info *)dst,
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, &fl6, &cork.base);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	lock_sock(sk);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);
			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (up->encap_enabled)
			static_branch_dec(&udpv6_encap_needed_key);
	}

	inet6_destroy_sock(sk);
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol udpv6_protocol = {
	.early_demux	=	udp_v6_early_demux,
	.early_demux_handler =  udp_v6_early_demux,
	.handler	=	udpv6_rcv,
	.err_handler	=	udpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= &udp_table,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type =      SOCK_DGRAM,
	.protocol =  IPPROTO_UDP,
	.prot =      &udpv6_prot,
	.ops =       &inet6_dgram_ops,
	.flags =     INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}