1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * UDP over IPv6
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 *
9 * Based on linux/ipv4/udp.c
10 *
11 * Fixes:
12 * Hideaki YOSHIFUJI : sin6_scope_id support
13 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
14 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
15 * a single port at the same time.
16 * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
17 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
18 */
19
20 #include <linux/bpf-cgroup.h>
21 #include <linux/errno.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/sockios.h>
25 #include <linux/net.h>
26 #include <linux/in6.h>
27 #include <linux/netdevice.h>
28 #include <linux/if_arp.h>
29 #include <linux/ipv6.h>
30 #include <linux/icmpv6.h>
31 #include <linux/init.h>
32 #include <linux/module.h>
33 #include <linux/skbuff.h>
34 #include <linux/slab.h>
35 #include <linux/uaccess.h>
36 #include <linux/indirect_call_wrapper.h>
37
38 #include <net/addrconf.h>
39 #include <net/ndisc.h>
40 #include <net/protocol.h>
41 #include <net/transp_v6.h>
42 #include <net/ip6_route.h>
43 #include <net/raw.h>
44 #include <net/seg6.h>
45 #include <net/tcp_states.h>
46 #include <net/ip6_checksum.h>
47 #include <net/ip6_tunnel.h>
48 #include <trace/events/udp.h>
49 #include <net/xfrm.h>
50 #include <net/inet_hashtables.h>
51 #include <net/inet6_hashtables.h>
52 #include <net/busy_poll.h>
53 #include <net/sock_reuseport.h>
54 #include <net/gro.h>
55
56 #include <linux/proc_fs.h>
57 #include <linux/seq_file.h>
58 #include <trace/events/skb.h>
59 #include "udp_impl.h"
60
61 static void udpv6_destruct_sock(struct sock *sk)
62 {
63 udp_destruct_common(sk);
64 inet6_sock_destruct(sk);
65 }
66
67 int udpv6_init_sock(struct sock *sk)
68 {
69 udp_lib_init_sock(sk);
70 sk->sk_destruct = udpv6_destruct_sock;
71 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
72 return 0;
73 }
74
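/*
 * Flow hash over the {laddr, lport, faddr, fport} 4-tuple, seeded with
 * random secrets initialized on first use; used below when picking a
 * socket out of a reuseport group or handing the packet to a BPF
 * sk_lookup program.
 */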
75 INDIRECT_CALLABLE_SCOPE
76 u32 udp6_ehashfn(const struct net *net,
77 const struct in6_addr *laddr,
78 const u16 lport,
79 const struct in6_addr *faddr,
80 const __be16 fport)
81 {
82 static u32 udp6_ehash_secret __read_mostly;
83 static u32 udp_ipv6_hash_secret __read_mostly;
84
85 u32 lhash, fhash;
86
87 net_get_random_once(&udp6_ehash_secret,
88 sizeof(udp6_ehash_secret));
89 net_get_random_once(&udp_ipv6_hash_secret,
90 sizeof(udp_ipv6_hash_secret));
91
92 lhash = (__force u32)laddr->s6_addr32[3];
93 fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
94
95 return __inet6_ehashfn(lhash, lport, fhash, fport,
96 udp6_ehash_secret + net_hash_mix(net));
97 }
98
99 int udp_v6_get_port(struct sock *sk, unsigned short snum)
100 {
101 unsigned int hash2_nulladdr =
102 ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
103 unsigned int hash2_partial =
104 ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
105
106 /* precompute partial secondary hash */
107 udp_sk(sk)->udp_portaddr_hash = hash2_partial;
108 return udp_lib_get_port(sk, snum, hash2_nulladdr);
109 }
110
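/*
 * Called when the socket's local address or port changes: recompute the
 * {address, port} hash and let udp_lib_rehash() move the socket to the
 * matching secondary chain.
 */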
111 void udp_v6_rehash(struct sock *sk)
112 {
113 u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
114 &sk->sk_v6_rcv_saddr,
115 inet_sk(sk)->inet_num);
116
117 udp_lib_rehash(sk, new_hash);
118 }
119
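/*
 * Score a candidate socket for an incoming packet: a mismatch on netns,
 * port, family or bound local address disqualifies it (-1); otherwise one
 * point is added for each matching connected remote port, connected remote
 * address, bound device and receiving CPU.
 */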
120 static int compute_score(struct sock *sk, struct net *net,
121 const struct in6_addr *saddr, __be16 sport,
122 const struct in6_addr *daddr, unsigned short hnum,
123 int dif, int sdif)
124 {
125 int bound_dev_if, score;
126 struct inet_sock *inet;
127 bool dev_match;
128
129 if (!net_eq(sock_net(sk), net) ||
130 udp_sk(sk)->udp_port_hash != hnum ||
131 sk->sk_family != PF_INET6)
132 return -1;
133
134 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
135 return -1;
136
137 score = 0;
138 inet = inet_sk(sk);
139
140 if (inet->inet_dport) {
141 if (inet->inet_dport != sport)
142 return -1;
143 score++;
144 }
145
146 if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
147 if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
148 return -1;
149 score++;
150 }
151
152 bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
153 dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
154 if (!dev_match)
155 return -1;
156 if (bound_dev_if)
157 score++;
158
159 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
160 score++;
161
162 return score;
163 }
164
165 /* called with rcu_read_lock() */
166 static struct sock *udp6_lib_lookup2(struct net *net,
167 const struct in6_addr *saddr, __be16 sport,
168 const struct in6_addr *daddr, unsigned int hnum,
169 int dif, int sdif, struct udp_hslot *hslot2,
170 struct sk_buff *skb)
171 {
172 struct sock *sk, *result;
173 int score, badness;
174 bool need_rescore;
175
176 result = NULL;
177 badness = -1;
178 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
179 need_rescore = false;
180 rescore:
181 score = compute_score(need_rescore ? result : sk, net, saddr,
182 sport, daddr, hnum, dif, sdif);
183 if (score > badness) {
184 badness = score;
185
186 if (need_rescore)
187 continue;
188
189 if (sk->sk_state == TCP_ESTABLISHED) {
190 result = sk;
191 continue;
192 }
193
194 result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
195 saddr, sport, daddr, hnum, udp6_ehashfn);
196 if (!result) {
197 result = sk;
198 continue;
199 }
200
201 /* Fall back to scoring if group has connections */
202 if (!reuseport_has_conns(sk))
203 return result;
204
205 /* Reuseport logic returned an error, keep original score. */
206 if (IS_ERR(result))
207 continue;
208
209 /* compute_score is too long of a function to be
210 * inlined, and calling it again here yields
211 * measurable overhead for some
212 * workloads. Work around it by jumping
213 * backwards to rescore 'result'.
214 */
215 need_rescore = true;
216 goto rescore;
217 }
218 }
219 return result;
220 }
221
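/* Two-pass lookup: first the hash2 slot for the exact destination address,
 * then, unless a connected or otherwise non-wildcard match was found, an
 * optional BPF sk_lookup redirect and finally the wildcard (in6addr_any)
 * slot.
 */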
222 /* rcu_read_lock() must be held */
223 struct sock *__udp6_lib_lookup(struct net *net,
224 const struct in6_addr *saddr, __be16 sport,
225 const struct in6_addr *daddr, __be16 dport,
226 int dif, int sdif, struct udp_table *udptable,
227 struct sk_buff *skb)
228 {
229 unsigned short hnum = ntohs(dport);
230 unsigned int hash2, slot2;
231 struct udp_hslot *hslot2;
232 struct sock *result, *sk;
233
234 hash2 = ipv6_portaddr_hash(net, daddr, hnum);
235 slot2 = hash2 & udptable->mask;
236 hslot2 = &udptable->hash2[slot2];
237
238 /* Lookup connected or non-wildcard sockets */
239 result = udp6_lib_lookup2(net, saddr, sport,
240 daddr, hnum, dif, sdif,
241 hslot2, skb);
242 if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
243 goto done;
244
245 /* Lookup redirect from BPF */
246 if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
247 udptable == net->ipv4.udp_table) {
248 sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
249 saddr, sport, daddr, hnum, dif,
250 udp6_ehashfn);
251 if (sk) {
252 result = sk;
253 goto done;
254 }
255 }
256
257 /* Got non-wildcard socket or error on first lookup */
258 if (result)
259 goto done;
260
261 /* Lookup wildcard sockets */
262 hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
263 slot2 = hash2 & udptable->mask;
264 hslot2 = &udptable->hash2[slot2];
265
266 result = udp6_lib_lookup2(net, saddr, sport,
267 &in6addr_any, hnum, dif, sdif,
268 hslot2, skb);
269 done:
270 if (IS_ERR(result))
271 return NULL;
272 return result;
273 }
274 EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
275
276 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
277 __be16 sport, __be16 dport,
278 struct udp_table *udptable)
279 {
280 const struct ipv6hdr *iph = ipv6_hdr(skb);
281
282 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
283 &iph->daddr, dport, inet6_iif(skb),
284 inet6_sdif(skb), udptable, skb);
285 }
286
287 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
288 __be16 sport, __be16 dport)
289 {
290 const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
291 const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
292 struct net *net = dev_net(skb->dev);
293 int iif, sdif;
294
295 inet6_get_iif_sdif(skb, &iif, &sdif);
296
297 return __udp6_lib_lookup(net, &iph->saddr, sport,
298 &iph->daddr, dport, iif,
299 sdif, net->ipv4.udp_table, NULL);
300 }
301
302 /* Must be called under rcu_read_lock().
303 * Does increment socket refcount.
304 */
305 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
306 struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
307 const struct in6_addr *daddr, __be16 dport, int dif)
308 {
309 struct sock *sk;
310
311 sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
312 dif, 0, net->ipv4.udp_table, NULL);
313 if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
314 sk = NULL;
315 return sk;
316 }
317 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
318 #endif
319
320 /* do not use the scratch area len for jumbograms: their length exceeds the
321 * scratch area space; note that the IP6CB flags are still in the first
322 * cacheline, so checking for jumbograms is cheap
323 */
324 static int udp6_skb_len(struct sk_buff *skb)
325 {
326 return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
327 }
328
329 /*
330 * This should be easy, if there is something there we
331 * return it, otherwise we block.
332 */
333
334 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
335 int flags, int *addr_len)
336 {
337 struct ipv6_pinfo *np = inet6_sk(sk);
338 struct inet_sock *inet = inet_sk(sk);
339 struct sk_buff *skb;
340 unsigned int ulen, copied;
341 int off, err, peeking = flags & MSG_PEEK;
342 int is_udplite = IS_UDPLITE(sk);
343 struct udp_mib __percpu *mib;
344 bool checksum_valid = false;
345 int is_udp4;
346
347 if (flags & MSG_ERRQUEUE)
348 return ipv6_recv_error(sk, msg, len, addr_len);
349
350 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
351 return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
352
353 try_again:
354 off = sk_peek_offset(sk, flags);
355 skb = __skb_recv_udp(sk, flags, &off, &err);
356 if (!skb)
357 return err;
358
359 ulen = udp6_skb_len(skb);
360 copied = len;
361 if (copied > ulen - off)
362 copied = ulen - off;
363 else if (copied < ulen)
364 msg->msg_flags |= MSG_TRUNC;
365
366 is_udp4 = (skb->protocol == htons(ETH_P_IP));
367 mib = __UDPX_MIB(sk, is_udp4);
368
369 /*
370 * If checksum is needed at all, try to do it while copying the
371 * data. If the data is truncated, or if we only want a partial
372 * coverage checksum (UDP-Lite), do it before the copy.
373 */
374
375 if (copied < ulen || peeking ||
376 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
377 checksum_valid = udp_skb_csum_unnecessary(skb) ||
378 !__udp_lib_checksum_complete(skb);
379 if (!checksum_valid)
380 goto csum_copy_err;
381 }
382
383 if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
384 if (udp_skb_is_linear(skb))
385 err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
386 else
387 err = skb_copy_datagram_msg(skb, off, msg, copied);
388 } else {
389 err = skb_copy_and_csum_datagram_msg(skb, off, msg);
390 if (err == -EINVAL)
391 goto csum_copy_err;
392 }
393 if (unlikely(err)) {
394 if (!peeking) {
395 atomic_inc(&sk->sk_drops);
396 SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
397 }
398 kfree_skb(skb);
399 return err;
400 }
401 if (!peeking)
402 SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
403
404 sock_recv_cmsgs(msg, sk, skb);
405
406 /* Copy the address. */
407 if (msg->msg_name) {
408 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
409 sin6->sin6_family = AF_INET6;
410 sin6->sin6_port = udp_hdr(skb)->source;
411 sin6->sin6_flowinfo = 0;
412
413 if (is_udp4) {
414 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
415 &sin6->sin6_addr);
416 sin6->sin6_scope_id = 0;
417 } else {
418 sin6->sin6_addr = ipv6_hdr(skb)->saddr;
419 sin6->sin6_scope_id =
420 ipv6_iface_scope_id(&sin6->sin6_addr,
421 inet6_iif(skb));
422 }
423 *addr_len = sizeof(*sin6);
424
425 BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
426 (struct sockaddr *)sin6,
427 addr_len);
428 }
429
430 if (udp_test_bit(GRO_ENABLED, sk))
431 udp_cmsg_recv(msg, sk, skb);
432
433 if (np->rxopt.all)
434 ip6_datagram_recv_common_ctl(sk, msg, skb);
435
436 if (is_udp4) {
437 if (inet_cmsg_flags(inet))
438 ip_cmsg_recv_offset(msg, sk, skb,
439 sizeof(struct udphdr), off);
440 } else {
441 if (np->rxopt.all)
442 ip6_datagram_recv_specific_ctl(sk, msg, skb);
443 }
444
445 err = copied;
446 if (flags & MSG_TRUNC)
447 err = ulen;
448
449 skb_consume_udp(sk, skb, peeking ? -err : err);
450 return err;
451
452 csum_copy_err:
453 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
454 udp_skb_destructor)) {
455 SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
456 SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
457 }
458 kfree_skb(skb);
459
460 /* starting over for a new packet, but check if we need to yield */
461 cond_resched();
462 msg->msg_flags &= ~MSG_TRUNC;
463 goto try_again;
464 }
465
466 DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
467 void udpv6_encap_enable(void)
468 {
469 static_branch_inc(&udpv6_encap_needed_key);
470 }
471 EXPORT_SYMBOL(udpv6_encap_enable);
472
473 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go
474 * through error handlers in encapsulations looking for a match.
475 */
476 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
477 struct inet6_skb_parm *opt,
478 u8 type, u8 code, int offset, __be32 info)
479 {
480 int i;
481
482 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
483 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
484 u8 type, u8 code, int offset, __be32 info);
485 const struct ip6_tnl_encap_ops *encap;
486
487 encap = rcu_dereference(ip6tun_encaps[i]);
488 if (!encap)
489 continue;
490 handler = encap->err_handler;
491 if (handler && !handler(skb, opt, type, code, offset, info))
492 return 0;
493 }
494
495 return -ENOENT;
496 }
497
498 /* Try to match ICMP errors to UDP tunnels by looking up a socket without
499 * reversing source and destination port: this will match tunnels that force the
500 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
501 * lwtunnels might actually break this assumption by being configured with
502 * different destination ports on endpoints, in this case we won't be able to
503 * trace ICMP messages back to them.
504 *
505 * If this doesn't match any socket, probe tunnels with arbitrary destination
506 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
507 * we've sent packets to won't necessarily match the local destination port.
508 *
509 * Then ask the tunnel implementation to match the error against a valid
510 * association.
511 *
512 * Return an error if we can't find a match, the socket if we need further
513 * processing, zero otherwise.
514 */
515 static struct sock *__udp6_lib_err_encap(struct net *net,
516 const struct ipv6hdr *hdr, int offset,
517 struct udphdr *uh,
518 struct udp_table *udptable,
519 struct sock *sk,
520 struct sk_buff *skb,
521 struct inet6_skb_parm *opt,
522 u8 type, u8 code, __be32 info)
523 {
524 int (*lookup)(struct sock *sk, struct sk_buff *skb);
525 int network_offset, transport_offset;
526 struct udp_sock *up;
527
528 network_offset = skb_network_offset(skb);
529 transport_offset = skb_transport_offset(skb);
530
531 /* Network header needs to point to the outer IPv6 header inside ICMP */
532 skb_reset_network_header(skb);
533
534 /* Transport header needs to point to the UDP header */
535 skb_set_transport_header(skb, offset);
536
537 if (sk) {
538 up = udp_sk(sk);
539
540 lookup = READ_ONCE(up->encap_err_lookup);
541 if (lookup && lookup(sk, skb))
542 sk = NULL;
543
544 goto out;
545 }
546
547 sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
548 &hdr->saddr, uh->dest,
549 inet6_iif(skb), 0, udptable, skb);
550 if (sk) {
551 up = udp_sk(sk);
552
553 lookup = READ_ONCE(up->encap_err_lookup);
554 if (!lookup || lookup(sk, skb))
555 sk = NULL;
556 }
557
558 out:
559 if (!sk) {
560 sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
561 offset, info));
562 }
563
564 skb_set_transport_header(skb, transport_offset);
565 skb_set_network_header(skb, network_offset);
566
567 return sk;
568 }
569
570 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
571 u8 type, u8 code, int offset, __be32 info,
572 struct udp_table *udptable)
573 {
574 struct ipv6_pinfo *np;
575 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
576 const struct in6_addr *saddr = &hdr->saddr;
577 const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
578 struct udphdr *uh = (struct udphdr *)(skb->data+offset);
579 bool tunnel = false;
580 struct sock *sk;
581 int harderr;
582 int err;
583 struct net *net = dev_net(skb->dev);
584
585 sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
586 inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
587
588 if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
589 /* No socket for error: try tunnels before discarding */
590 if (static_branch_unlikely(&udpv6_encap_needed_key)) {
591 sk = __udp6_lib_err_encap(net, hdr, offset, uh,
592 udptable, sk, skb,
593 opt, type, code, info);
594 if (!sk)
595 return 0;
596 } else
597 sk = ERR_PTR(-ENOENT);
598
599 if (IS_ERR(sk)) {
600 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
601 ICMP6_MIB_INERRORS);
602 return PTR_ERR(sk);
603 }
604
605 tunnel = true;
606 }
607
608 harderr = icmpv6_err_convert(type, code, &err);
609 np = inet6_sk(sk);
610
611 if (type == ICMPV6_PKT_TOOBIG) {
612 if (!ip6_sk_accept_pmtu(sk))
613 goto out;
614 ip6_sk_update_pmtu(skb, sk, info);
615 if (np->pmtudisc != IPV6_PMTUDISC_DONT)
616 harderr = 1;
617 }
618 if (type == NDISC_REDIRECT) {
619 if (tunnel) {
620 ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
621 READ_ONCE(sk->sk_mark), sk->sk_uid);
622 } else {
623 ip6_sk_redirect(skb, sk);
624 }
625 goto out;
626 }
627
628 /* Tunnels don't have an application socket: don't pass errors back */
629 if (tunnel) {
630 if (udp_sk(sk)->encap_err_rcv)
631 udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
632 ntohl(info), (u8 *)(uh+1));
633 goto out;
634 }
635
636 if (!np->recverr) {
637 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
638 goto out;
639 } else {
640 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
641 }
642
643 sk->sk_err = err;
644 sk_error_report(sk);
645 out:
646 return 0;
647 }
648
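/* Charge the skb to the socket receive queue. Connected sockets also record
 * the rx hash, NAPI id and incoming CPU; on failure the matching
 * RCVBUFERRORS/MEMERRORS and INERRORS counters are bumped and the skb is
 * freed.
 */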
649 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
650 {
651 int rc;
652
653 if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
654 sock_rps_save_rxhash(sk, skb);
655 sk_mark_napi_id(sk, skb);
656 sk_incoming_cpu_update(sk);
657 } else {
658 sk_mark_napi_id_once(sk, skb);
659 }
660
661 rc = __udp_enqueue_schedule_skb(sk, skb);
662 if (rc < 0) {
663 int is_udplite = IS_UDPLITE(sk);
664 enum skb_drop_reason drop_reason;
665
666 /* Note that an ENOMEM error is charged twice */
667 if (rc == -ENOMEM) {
668 UDP6_INC_STATS(sock_net(sk),
669 UDP_MIB_RCVBUFERRORS, is_udplite);
670 drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
671 } else {
672 UDP6_INC_STATS(sock_net(sk),
673 UDP_MIB_MEMERRORS, is_udplite);
674 drop_reason = SKB_DROP_REASON_PROTO_MEM;
675 }
676 UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
677 kfree_skb_reason(skb, drop_reason);
678 trace_udp_fail_queue_rcv_skb(rc, sk);
679 return -1;
680 }
681
682 return 0;
683 }
684
685 static __inline__ int udpv6_err(struct sk_buff *skb,
686 struct inet6_skb_parm *opt, u8 type,
687 u8 code, int offset, __be32 info)
688 {
689 return __udp6_lib_err(skb, opt, type, code, offset, info,
690 dev_net(skb->dev)->ipv4.udp_table);
691 }
692
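/* Per-skb receive path: XFRM policy check, optional tunnel encap_rcv hook,
 * UDP-Lite coverage checks and checksum validation before the skb is queued
 * to the socket.
 */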
693 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
694 {
695 enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
696 struct udp_sock *up = udp_sk(sk);
697 int is_udplite = IS_UDPLITE(sk);
698
699 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
700 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
701 goto drop;
702 }
703 nf_reset_ct(skb);
704
705 if (static_branch_unlikely(&udpv6_encap_needed_key) &&
706 READ_ONCE(up->encap_type)) {
707 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
708
709 /*
710 * This is an encapsulation socket so pass the skb to
711 * the socket's udp_encap_rcv() hook. Otherwise, just
712 * fall through and pass this up the UDP socket.
713 * up->encap_rcv() returns the following value:
714 * =0 if skb was successfully passed to the encap
715 * handler or was discarded by it.
716 * >0 if skb should be passed on to UDP.
717 * <0 if skb should be resubmitted as proto -N
718 */
719
720 /* if we're overly short, let UDP handle it */
721 encap_rcv = READ_ONCE(up->encap_rcv);
722 if (encap_rcv) {
723 int ret;
724
725 /* Verify checksum before giving to encap */
726 if (udp_lib_checksum_complete(skb))
727 goto csum_error;
728
729 ret = encap_rcv(sk, skb);
730 if (ret <= 0) {
731 __UDP6_INC_STATS(sock_net(sk),
732 UDP_MIB_INDATAGRAMS,
733 is_udplite);
734 return -ret;
735 }
736 }
737
738 /* FALLTHROUGH -- it's a UDP Packet */
739 }
740
741 /*
742 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
743 */
744 if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
745 u16 pcrlen = READ_ONCE(up->pcrlen);
746
747 if (pcrlen == 0) { /* full coverage was set */
748 net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
749 UDP_SKB_CB(skb)->cscov, skb->len);
750 goto drop;
751 }
752 if (UDP_SKB_CB(skb)->cscov < pcrlen) {
753 net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
754 UDP_SKB_CB(skb)->cscov, pcrlen);
755 goto drop;
756 }
757 }
758
759 prefetch(&sk->sk_rmem_alloc);
760 if (rcu_access_pointer(sk->sk_filter) &&
761 udp_lib_checksum_complete(skb))
762 goto csum_error;
763
764 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
765 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
766 goto drop;
767 }
768
769 udp_csum_pull_header(skb);
770
771 skb_dst_drop(skb);
772
773 return __udpv6_queue_rcv_skb(sk, skb);
774
775 csum_error:
776 drop_reason = SKB_DROP_REASON_UDP_CSUM;
777 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
778 drop:
779 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
780 atomic_inc(&sk->sk_drops);
781 kfree_skb_reason(skb, drop_reason);
782 return -1;
783 }
784
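/* GSO-aware wrapper: packets the socket did not expect as GSO are segmented
 * first and each resulting segment is queued individually.
 */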
785 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
786 {
787 struct sk_buff *next, *segs;
788 int ret;
789
790 if (likely(!udp_unexpected_gso(sk, skb)))
791 return udpv6_queue_rcv_one_skb(sk, skb);
792
793 __skb_push(skb, -skb_mac_offset(skb));
794 segs = udp_rcv_segment(sk, skb, false);
795 skb_list_walk_safe(segs, skb, next) {
796 __skb_pull(skb, skb_transport_offset(skb));
797
798 udp_post_segment_fix_csum(skb);
799 ret = udpv6_queue_rcv_one_skb(sk, skb);
800 if (ret > 0)
801 ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
802 true);
803 }
804 return 0;
805 }
806
807 static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
808 __be16 loc_port, const struct in6_addr *loc_addr,
809 __be16 rmt_port, const struct in6_addr *rmt_addr,
810 int dif, int sdif, unsigned short hnum)
811 {
812 const struct inet_sock *inet = inet_sk(sk);
813
814 if (!net_eq(sock_net(sk), net))
815 return false;
816
817 if (udp_sk(sk)->udp_port_hash != hnum ||
818 sk->sk_family != PF_INET6 ||
819 (inet->inet_dport && inet->inet_dport != rmt_port) ||
820 (!ipv6_addr_any(&sk->sk_v6_daddr) &&
821 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
822 !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
823 (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
824 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
825 return false;
826 if (!inet6_mc_check(sk, loc_addr, rmt_addr))
827 return false;
828 return true;
829 }
830
831 static void udp6_csum_zero_error(struct sk_buff *skb)
832 {
833 /* RFC 2460 section 8.1 says that we SHOULD log
834 * this error. Well, it is reasonable.
835 */
836 net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
837 &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
838 &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
839 }
840
841 /*
842 * Note: called only from the BH handler context,
843 * so we don't need to lock the hashes.
844 */
845 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
846 const struct in6_addr *saddr, const struct in6_addr *daddr,
847 struct udp_table *udptable, int proto)
848 {
849 struct sock *sk, *first = NULL;
850 const struct udphdr *uh = udp_hdr(skb);
851 unsigned short hnum = ntohs(uh->dest);
852 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
853 unsigned int offset = offsetof(typeof(*sk), sk_node);
854 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
855 int dif = inet6_iif(skb);
856 int sdif = inet6_sdif(skb);
857 struct hlist_node *node;
858 struct sk_buff *nskb;
859
860 if (use_hash2) {
861 hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
862 udptable->mask;
863 hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
864 start_lookup:
865 hslot = &udptable->hash2[hash2];
866 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
867 }
868
869 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
870 if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
871 uh->source, saddr, dif, sdif,
872 hnum))
873 continue;
874 /* If zero checksum and no_check is not on for
875 * the socket then skip it.
876 */
877 if (!uh->check && !udp_get_no_check6_rx(sk))
878 continue;
879 if (!first) {
880 first = sk;
881 continue;
882 }
883 nskb = skb_clone(skb, GFP_ATOMIC);
884 if (unlikely(!nskb)) {
885 atomic_inc(&sk->sk_drops);
886 __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
887 IS_UDPLITE(sk));
888 __UDP6_INC_STATS(net, UDP_MIB_INERRORS,
889 IS_UDPLITE(sk));
890 continue;
891 }
892
893 if (udpv6_queue_rcv_skb(sk, nskb) > 0)
894 consume_skb(nskb);
895 }
896
897 /* Also lookup *:port if we are using hash2 and haven't done so yet. */
898 if (use_hash2 && hash2 != hash2_any) {
899 hash2 = hash2_any;
900 goto start_lookup;
901 }
902
903 if (first) {
904 if (udpv6_queue_rcv_skb(first, skb) > 0)
905 consume_skb(skb);
906 } else {
907 kfree_skb(skb);
908 __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
909 proto == IPPROTO_UDPLITE);
910 }
911 return 0;
912 }
913
914 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
915 {
916 if (udp_sk_rx_dst_set(sk, dst)) {
917 const struct rt6_info *rt = (const struct rt6_info *)dst;
918
919 sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
920 }
921 }
922
923 /* wrapper for udp_queue_rcv_skb taking care of csum conversion and
924 * return code conversion for ip layer consumption
925 */
926 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
927 struct udphdr *uh)
928 {
929 int ret;
930
931 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
932 skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
933
934 ret = udpv6_queue_rcv_skb(sk, skb);
935
936 /* a return value > 0 means to resubmit the input */
937 if (ret > 0)
938 return ret;
939 return 0;
940 }
941
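/*
 * Main receive entry point shared by UDP and UDP-Lite: validate length and
 * checksum, try the early-demux/steal path, then dispatch to unicast or
 * multicast delivery; an ICMPv6 port-unreachable is sent when no socket
 * matches.
 */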
942 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
943 int proto)
944 {
945 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
946 const struct in6_addr *saddr, *daddr;
947 struct net *net = dev_net(skb->dev);
948 struct udphdr *uh;
949 struct sock *sk;
950 bool refcounted;
951 u32 ulen = 0;
952
953 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
954 goto discard;
955
956 saddr = &ipv6_hdr(skb)->saddr;
957 daddr = &ipv6_hdr(skb)->daddr;
958 uh = udp_hdr(skb);
959
960 ulen = ntohs(uh->len);
961 if (ulen > skb->len)
962 goto short_packet;
963
964 if (proto == IPPROTO_UDP) {
965 /* UDP validates ulen. */
966
967 /* Check for jumbo payload */
968 if (ulen == 0)
969 ulen = skb->len;
970
971 if (ulen < sizeof(*uh))
972 goto short_packet;
973
974 if (ulen < skb->len) {
975 if (pskb_trim_rcsum(skb, ulen))
976 goto short_packet;
977 saddr = &ipv6_hdr(skb)->saddr;
978 daddr = &ipv6_hdr(skb)->daddr;
979 uh = udp_hdr(skb);
980 }
981 }
982
983 if (udp6_csum_init(skb, uh, proto))
984 goto csum_error;
985
986 /* Check if the socket is already available, e.g. due to early demux */
987 sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
988 &refcounted, udp6_ehashfn);
989 if (IS_ERR(sk))
990 goto no_sk;
991
992 if (sk) {
993 struct dst_entry *dst = skb_dst(skb);
994 int ret;
995
996 if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
997 udp6_sk_rx_dst_set(sk, dst);
998
999 if (!uh->check && !udp_get_no_check6_rx(sk)) {
1000 if (refcounted)
1001 sock_put(sk);
1002 goto report_csum_error;
1003 }
1004
1005 ret = udp6_unicast_rcv_skb(sk, skb, uh);
1006 if (refcounted)
1007 sock_put(sk);
1008 return ret;
1009 }
1010
1011 /*
1012 * Multicast receive code
1013 */
1014 if (ipv6_addr_is_multicast(daddr))
1015 return __udp6_lib_mcast_deliver(net, skb,
1016 saddr, daddr, udptable, proto);
1017
1018 /* Unicast */
1019 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1020 if (sk) {
1021 if (!uh->check && !udp_get_no_check6_rx(sk))
1022 goto report_csum_error;
1023 return udp6_unicast_rcv_skb(sk, skb, uh);
1024 }
1025 no_sk:
1026 reason = SKB_DROP_REASON_NO_SOCKET;
1027
1028 if (!uh->check)
1029 goto report_csum_error;
1030
1031 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1032 goto discard;
1033 nf_reset_ct(skb);
1034
1035 if (udp_lib_checksum_complete(skb))
1036 goto csum_error;
1037
1038 __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1039 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1040
1041 kfree_skb_reason(skb, reason);
1042 return 0;
1043
1044 short_packet:
1045 if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1046 reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1047 net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1048 proto == IPPROTO_UDPLITE ? "-Lite" : "",
1049 saddr, ntohs(uh->source),
1050 ulen, skb->len,
1051 daddr, ntohs(uh->dest));
1052 goto discard;
1053
1054 report_csum_error:
1055 udp6_csum_zero_error(skb);
1056 csum_error:
1057 if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1058 reason = SKB_DROP_REASON_UDP_CSUM;
1059 __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1060 discard:
1061 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1062 kfree_skb_reason(skb, reason);
1063 return 0;
1064 }
1065
1066
1067 static struct sock *__udp6_lib_demux_lookup(struct net *net,
1068 __be16 loc_port, const struct in6_addr *loc_addr,
1069 __be16 rmt_port, const struct in6_addr *rmt_addr,
1070 int dif, int sdif)
1071 {
1072 struct udp_table *udptable = net->ipv4.udp_table;
1073 unsigned short hnum = ntohs(loc_port);
1074 unsigned int hash2, slot2;
1075 struct udp_hslot *hslot2;
1076 __portpair ports;
1077 struct sock *sk;
1078
1079 hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1080 slot2 = hash2 & udptable->mask;
1081 hslot2 = &udptable->hash2[slot2];
1082 ports = INET_COMBINED_PORTS(rmt_port, hnum);
1083
1084 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1085 if (sk->sk_state == TCP_ESTABLISHED &&
1086 inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
1087 return sk;
1088 /* Only check first socket in chain */
1089 break;
1090 }
1091 return NULL;
1092 }
1093
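/*
 * Early demux: for PACKET_HOST datagrams, look up a connected
 * (TCP_ESTABLISHED) socket by exact 4-tuple and attach it, together with
 * its cached rx dst, before the routing decision is taken.
 */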
1094 void udp_v6_early_demux(struct sk_buff *skb)
1095 {
1096 struct net *net = dev_net(skb->dev);
1097 const struct udphdr *uh;
1098 struct sock *sk;
1099 struct dst_entry *dst;
1100 int dif = skb->dev->ifindex;
1101 int sdif = inet6_sdif(skb);
1102
1103 if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1104 sizeof(struct udphdr)))
1105 return;
1106
1107 uh = udp_hdr(skb);
1108
1109 if (skb->pkt_type == PACKET_HOST)
1110 sk = __udp6_lib_demux_lookup(net, uh->dest,
1111 &ipv6_hdr(skb)->daddr,
1112 uh->source, &ipv6_hdr(skb)->saddr,
1113 dif, sdif);
1114 else
1115 return;
1116
1117 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
1118 return;
1119
1120 skb->sk = sk;
1121 skb->destructor = sock_efree;
1122 dst = rcu_dereference(sk->sk_rx_dst);
1123
1124 if (dst)
1125 dst = dst_check(dst, sk->sk_rx_dst_cookie);
1126 if (dst) {
1127 /* set noref for now.
1128 * any place which wants to hold dst has to call
1129 * dst_hold_safe()
1130 */
1131 skb_dst_set_noref(skb, dst);
1132 }
1133 }
1134
1135 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1136 {
1137 return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
1138 }
1139
1140 /*
1141 * Throw away all pending data and cancel the corking. Socket is locked.
1142 */
1143 static void udp_v6_flush_pending_frames(struct sock *sk)
1144 {
1145 struct udp_sock *up = udp_sk(sk);
1146
1147 if (up->pending == AF_INET)
1148 udp_flush_pending_frames(sk);
1149 else if (up->pending) {
1150 up->len = 0;
1151 WRITE_ONCE(up->pending, 0);
1152 ip6_flush_pending_frames(sk);
1153 }
1154 }
1155
1156 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1157 int addr_len)
1158 {
1159 if (addr_len < offsetofend(struct sockaddr, sa_family))
1160 return -EINVAL;
1161 /* The following checks are replicated from __ip6_datagram_connect()
1162 * and intended to prevent the BPF program called below from accessing
1163 * bytes that are outside the bound specified by the user in addr_len.
1164 */
1165 if (uaddr->sa_family == AF_INET) {
1166 if (ipv6_only_sock(sk))
1167 return -EAFNOSUPPORT;
1168 return udp_pre_connect(sk, uaddr, addr_len);
1169 }
1170
1171 if (addr_len < SIN6_LEN_RFC2133)
1172 return -EINVAL;
1173
1174 return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
1175 }
1176
1177 /**
1178 * udp6_hwcsum_outgoing - handle outgoing HW checksumming
1179 * @sk: socket we are sending on
1180 * @skb: sk_buff containing the filled-in UDP header
1181 * (checksum field must be zeroed out)
1182 * @saddr: source address
1183 * @daddr: destination address
1184 * @len: length of packet
1185 */
1186 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1187 const struct in6_addr *saddr,
1188 const struct in6_addr *daddr, int len)
1189 {
1190 unsigned int offset;
1191 struct udphdr *uh = udp_hdr(skb);
1192 struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1193 __wsum csum = 0;
1194
1195 if (!frags) {
1196 /* Only one fragment on the socket. */
1197 skb->csum_start = skb_transport_header(skb) - skb->head;
1198 skb->csum_offset = offsetof(struct udphdr, check);
1199 uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1200 } else {
1201 /*
1202 * HW checksum won't work as there are two or more
1203 * fragments on the socket, so the checksums of all
1204 * sk_buffs must be combined here
1205 */
1206 offset = skb_transport_offset(skb);
1207 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1208 csum = skb->csum;
1209
1210 skb->ip_summed = CHECKSUM_NONE;
1211
1212 do {
1213 csum = csum_add(csum, frags->csum);
1214 } while ((frags = frags->next));
1215
1216 uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1217 csum);
1218 if (uh->check == 0)
1219 uh->check = CSUM_MANGLED_0;
1220 }
1221 }
1222
1223 /*
1224 * Sending
1225 */
1226
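/*
 * Finish one datagram: fill in the UDP header, enforce the GSO
 * (UDP_SEGMENT) constraints, pick a checksum method (UDP-Lite, disabled,
 * hardware partial or software) and hand the skb to ip6_send_skb().
 */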
1227 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1228 struct inet_cork *cork)
1229 {
1230 struct sock *sk = skb->sk;
1231 struct udphdr *uh;
1232 int err = 0;
1233 int is_udplite = IS_UDPLITE(sk);
1234 __wsum csum = 0;
1235 int offset = skb_transport_offset(skb);
1236 int len = skb->len - offset;
1237 int datalen = len - sizeof(*uh);
1238
1239 /*
1240 * Create a UDP header
1241 */
1242 uh = udp_hdr(skb);
1243 uh->source = fl6->fl6_sport;
1244 uh->dest = fl6->fl6_dport;
1245 uh->len = htons(len);
1246 uh->check = 0;
1247
1248 if (cork->gso_size) {
1249 const int hlen = skb_network_header_len(skb) +
1250 sizeof(struct udphdr);
1251
1252 if (hlen + cork->gso_size > cork->fragsize) {
1253 kfree_skb(skb);
1254 return -EINVAL;
1255 }
1256 if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1257 kfree_skb(skb);
1258 return -EINVAL;
1259 }
1260 if (udp_get_no_check6_tx(sk)) {
1261 kfree_skb(skb);
1262 return -EINVAL;
1263 }
1264 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1265 dst_xfrm(skb_dst(skb))) {
1266 kfree_skb(skb);
1267 return -EIO;
1268 }
1269
1270 if (datalen > cork->gso_size) {
1271 skb_shinfo(skb)->gso_size = cork->gso_size;
1272 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1273 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1274 cork->gso_size);
1275 }
1276 goto csum_partial;
1277 }
1278
1279 if (is_udplite)
1280 csum = udplite_csum(skb);
1281 else if (udp_get_no_check6_tx(sk)) { /* UDP csum disabled */
1282 skb->ip_summed = CHECKSUM_NONE;
1283 goto send;
1284 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1285 csum_partial:
1286 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1287 goto send;
1288 } else
1289 csum = udp_csum(skb);
1290
1291 /* add protocol-dependent pseudo-header */
1292 uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1293 len, fl6->flowi6_proto, csum);
1294 if (uh->check == 0)
1295 uh->check = CSUM_MANGLED_0;
1296
1297 send:
1298 err = ip6_send_skb(skb);
1299 if (err) {
1300 if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
1301 UDP6_INC_STATS(sock_net(sk),
1302 UDP_MIB_SNDBUFERRORS, is_udplite);
1303 err = 0;
1304 }
1305 } else {
1306 UDP6_INC_STATS(sock_net(sk),
1307 UDP_MIB_OUTDATAGRAMS, is_udplite);
1308 }
1309 return err;
1310 }
1311
1312 static int udp_v6_push_pending_frames(struct sock *sk)
1313 {
1314 struct sk_buff *skb;
1315 struct udp_sock *up = udp_sk(sk);
1316 int err = 0;
1317
1318 if (up->pending == AF_INET)
1319 return udp_push_pending_frames(sk);
1320
1321 skb = ip6_finish_skb(sk);
1322 if (!skb)
1323 goto out;
1324
1325 err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
1326 &inet_sk(sk)->cork.base);
1327 out:
1328 up->len = 0;
1329 WRITE_ONCE(up->pending, 0);
1330 return err;
1331 }
1332
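/*
 * sendmsg() entry point: IPv4-mapped destinations are handed to
 * udp_sendmsg(), corked sockets append to the pending frame; otherwise the
 * flow, ancillary data and route are set up and the datagram is built
 * either on the lockless ip6_make_skb() fast path or via corked append.
 */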
1333 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1334 {
1335 struct ipv6_txoptions opt_space;
1336 struct udp_sock *up = udp_sk(sk);
1337 struct inet_sock *inet = inet_sk(sk);
1338 struct ipv6_pinfo *np = inet6_sk(sk);
1339 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1340 struct in6_addr *daddr, *final_p, final;
1341 struct ipv6_txoptions *opt = NULL;
1342 struct ipv6_txoptions *opt_to_free = NULL;
1343 struct ip6_flowlabel *flowlabel = NULL;
1344 struct inet_cork_full cork;
1345 struct flowi6 *fl6 = &cork.fl.u.ip6;
1346 struct dst_entry *dst;
1347 struct ipcm6_cookie ipc6;
1348 int addr_len = msg->msg_namelen;
1349 bool connected = false;
1350 int ulen = len;
1351 int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
1352 int err;
1353 int is_udplite = IS_UDPLITE(sk);
1354 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1355
1356 ipcm6_init(&ipc6);
1357 ipc6.gso_size = READ_ONCE(up->gso_size);
1358 ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
1359 ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
1360
1361 /* destination address check */
1362 if (sin6) {
1363 if (addr_len < offsetof(struct sockaddr, sa_data))
1364 return -EINVAL;
1365
1366 switch (sin6->sin6_family) {
1367 case AF_INET6:
1368 if (addr_len < SIN6_LEN_RFC2133)
1369 return -EINVAL;
1370 daddr = &sin6->sin6_addr;
1371 if (ipv6_addr_any(daddr) &&
1372 ipv6_addr_v4mapped(&np->saddr))
1373 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1374 daddr);
1375 break;
1376 case AF_INET:
1377 goto do_udp_sendmsg;
1378 case AF_UNSPEC:
1379 msg->msg_name = sin6 = NULL;
1380 msg->msg_namelen = addr_len = 0;
1381 daddr = NULL;
1382 break;
1383 default:
1384 return -EINVAL;
1385 }
1386 } else if (!READ_ONCE(up->pending)) {
1387 if (sk->sk_state != TCP_ESTABLISHED)
1388 return -EDESTADDRREQ;
1389 daddr = &sk->sk_v6_daddr;
1390 } else
1391 daddr = NULL;
1392
1393 if (daddr) {
1394 if (ipv6_addr_v4mapped(daddr)) {
1395 struct sockaddr_in sin;
1396 sin.sin_family = AF_INET;
1397 sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1398 sin.sin_addr.s_addr = daddr->s6_addr32[3];
1399 msg->msg_name = &sin;
1400 msg->msg_namelen = sizeof(sin);
1401 do_udp_sendmsg:
1402 err = ipv6_only_sock(sk) ?
1403 -ENETUNREACH : udp_sendmsg(sk, msg, len);
1404 msg->msg_name = sin6;
1405 msg->msg_namelen = addr_len;
1406 return err;
1407 }
1408 }
1409
1410 /* Rough check on arithmetic overflow;
1411 a better check is made in ip6_append_data().
1412 */
1413 if (len > INT_MAX - sizeof(struct udphdr))
1414 return -EMSGSIZE;
1415
1416 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
1417 if (READ_ONCE(up->pending)) {
1418 if (READ_ONCE(up->pending) == AF_INET)
1419 return udp_sendmsg(sk, msg, len);
1420 /*
1421 * There are pending frames.
1422 * The socket lock must be held while it's corked.
1423 */
1424 lock_sock(sk);
1425 if (likely(up->pending)) {
1426 if (unlikely(up->pending != AF_INET6)) {
1427 release_sock(sk);
1428 return -EAFNOSUPPORT;
1429 }
1430 dst = NULL;
1431 goto do_append_data;
1432 }
1433 release_sock(sk);
1434 }
1435 ulen += sizeof(struct udphdr);
1436
1437 memset(fl6, 0, sizeof(*fl6));
1438
1439 if (sin6) {
1440 if (sin6->sin6_port == 0)
1441 return -EINVAL;
1442
1443 fl6->fl6_dport = sin6->sin6_port;
1444 daddr = &sin6->sin6_addr;
1445
1446 if (np->sndflow) {
1447 fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1448 if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
1449 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1450 if (IS_ERR(flowlabel))
1451 return -EINVAL;
1452 }
1453 }
1454
1455 /*
1456 * Otherwise it will be difficult to maintain
1457 * sk->sk_dst_cache.
1458 */
1459 if (sk->sk_state == TCP_ESTABLISHED &&
1460 ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1461 daddr = &sk->sk_v6_daddr;
1462
1463 if (addr_len >= sizeof(struct sockaddr_in6) &&
1464 sin6->sin6_scope_id &&
1465 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1466 fl6->flowi6_oif = sin6->sin6_scope_id;
1467 } else {
1468 if (sk->sk_state != TCP_ESTABLISHED)
1469 return -EDESTADDRREQ;
1470
1471 fl6->fl6_dport = inet->inet_dport;
1472 daddr = &sk->sk_v6_daddr;
1473 fl6->flowlabel = np->flow_label;
1474 connected = true;
1475 }
1476
1477 if (!fl6->flowi6_oif)
1478 fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
1479
1480 if (!fl6->flowi6_oif)
1481 fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1482
1483 fl6->flowi6_uid = sk->sk_uid;
1484
1485 if (msg->msg_controllen) {
1486 opt = &opt_space;
1487 memset(opt, 0, sizeof(struct ipv6_txoptions));
1488 opt->tot_len = sizeof(*opt);
1489 ipc6.opt = opt;
1490
1491 err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1492 if (err > 0) {
1493 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
1494 &ipc6);
1495 connected = false;
1496 }
1497 if (err < 0) {
1498 fl6_sock_release(flowlabel);
1499 return err;
1500 }
1501 if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1502 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1503 if (IS_ERR(flowlabel))
1504 return -EINVAL;
1505 }
1506 if (!(opt->opt_nflen|opt->opt_flen))
1507 opt = NULL;
1508 }
1509 if (!opt) {
1510 opt = txopt_get(np);
1511 opt_to_free = opt;
1512 }
1513 if (flowlabel)
1514 opt = fl6_merge_options(&opt_space, flowlabel, opt);
1515 opt = ipv6_fixup_options(&opt_space, opt);
1516 ipc6.opt = opt;
1517
1518 fl6->flowi6_proto = sk->sk_protocol;
1519 fl6->flowi6_mark = ipc6.sockc.mark;
1520 fl6->daddr = *daddr;
1521 if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
1522 fl6->saddr = np->saddr;
1523 fl6->fl6_sport = inet->inet_sport;
1524
1525 if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1526 err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1527 (struct sockaddr *)sin6,
1528 &addr_len,
1529 &fl6->saddr);
1530 if (err)
1531 goto out_no_dst;
1532 if (sin6) {
1533 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1534 /* BPF program rewrote the IPv6 address to an
1535 * IPv4-mapped IPv6 address, which is currently unsupported.
1536 */
1537 err = -ENOTSUPP;
1538 goto out_no_dst;
1539 }
1540 if (sin6->sin6_port == 0) {
1541 /* BPF program set invalid port. Reject it. */
1542 err = -EINVAL;
1543 goto out_no_dst;
1544 }
1545 fl6->fl6_dport = sin6->sin6_port;
1546 fl6->daddr = sin6->sin6_addr;
1547 }
1548 }
1549
1550 if (ipv6_addr_any(&fl6->daddr))
1551 fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1552
1553 final_p = fl6_update_dst(fl6, opt, &final);
1554 if (final_p)
1555 connected = false;
1556
1557 if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
1558 fl6->flowi6_oif = np->mcast_oif;
1559 connected = false;
1560 } else if (!fl6->flowi6_oif)
1561 fl6->flowi6_oif = np->ucast_oif;
1562
1563 security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
1564
1565 if (ipc6.tclass < 0)
1566 ipc6.tclass = np->tclass;
1567
1568 fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
1569
1570 dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
1571 if (IS_ERR(dst)) {
1572 err = PTR_ERR(dst);
1573 dst = NULL;
1574 goto out;
1575 }
1576
1577 if (ipc6.hlimit < 0)
1578 ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);
1579
1580 if (msg->msg_flags&MSG_CONFIRM)
1581 goto do_confirm;
1582 back_from_confirm:
1583
1584 /* Lockless fast path for the non-corking case */
1585 if (!corkreq) {
1586 struct sk_buff *skb;
1587
1588 skb = ip6_make_skb(sk, getfrag, msg, ulen,
1589 sizeof(struct udphdr), &ipc6,
1590 (struct rt6_info *)dst,
1591 msg->msg_flags, &cork);
1592 err = PTR_ERR(skb);
1593 if (!IS_ERR_OR_NULL(skb))
1594 err = udp_v6_send_skb(skb, fl6, &cork.base);
1595 /* ip6_make_skb steals dst reference */
1596 goto out_no_dst;
1597 }
1598
1599 lock_sock(sk);
1600 if (unlikely(up->pending)) {
1601 /* The socket is already corked while preparing it. */
1602 /* ... which is an evident application bug. --ANK */
1603 release_sock(sk);
1604
1605 net_dbg_ratelimited("udp cork app bug 2\n");
1606 err = -EINVAL;
1607 goto out;
1608 }
1609
1610 WRITE_ONCE(up->pending, AF_INET6);
1611
1612 do_append_data:
1613 if (ipc6.dontfrag < 0)
1614 ipc6.dontfrag = np->dontfrag;
1615 up->len += ulen;
1616 err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1617 &ipc6, fl6, (struct rt6_info *)dst,
1618 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1619 if (err)
1620 udp_v6_flush_pending_frames(sk);
1621 else if (!corkreq)
1622 err = udp_v6_push_pending_frames(sk);
1623 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1624 WRITE_ONCE(up->pending, 0);
1625
1626 if (err > 0)
1627 err = np->recverr ? net_xmit_errno(err) : 0;
1628 release_sock(sk);
1629
1630 out:
1631 dst_release(dst);
1632 out_no_dst:
1633 fl6_sock_release(flowlabel);
1634 txopt_put(opt_to_free);
1635 if (!err)
1636 return len;
1637 /*
1638 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
1639 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1640 * we don't have a good statistic (IpOutDiscards but it can be too many
1641 * things). We could add another new stat but at least for now that
1642 * seems like overkill.
1643 */
1644 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1645 UDP6_INC_STATS(sock_net(sk),
1646 UDP_MIB_SNDBUFERRORS, is_udplite);
1647 }
1648 return err;
1649
1650 do_confirm:
1651 if (msg->msg_flags & MSG_PROBE)
1652 dst_confirm_neigh(dst, &fl6->daddr);
1653 if (!(msg->msg_flags&MSG_PROBE) || len)
1654 goto back_from_confirm;
1655 err = 0;
1656 goto out;
1657 }
1658 EXPORT_SYMBOL(udpv6_sendmsg);
1659
1660 static void udpv6_splice_eof(struct socket *sock)
1661 {
1662 struct sock *sk = sock->sk;
1663 struct udp_sock *up = udp_sk(sk);
1664
1665 if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
1666 return;
1667
1668 lock_sock(sk);
1669 if (up->pending && !udp_test_bit(CORK, sk))
1670 udp_v6_push_pending_frames(sk);
1671 release_sock(sk);
1672 }
1673
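/*
 * Socket tear-down: flush any pending corked data under the socket lock,
 * then run the tunnel encap destroy hook and drop the encap static key if
 * it was enabled.
 */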
1674 void udpv6_destroy_sock(struct sock *sk)
1675 {
1676 struct udp_sock *up = udp_sk(sk);
1677 lock_sock(sk);
1678
1679 /* protects from races with udp_abort() */
1680 sock_set_flag(sk, SOCK_DEAD);
1681 udp_v6_flush_pending_frames(sk);
1682 release_sock(sk);
1683
1684 if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1685 if (up->encap_type) {
1686 void (*encap_destroy)(struct sock *sk);
1687 encap_destroy = READ_ONCE(up->encap_destroy);
1688 if (encap_destroy)
1689 encap_destroy(sk);
1690 }
1691 if (udp_test_bit(ENCAP_ENABLED, sk)) {
1692 static_branch_dec(&udpv6_encap_needed_key);
1693 udp_encap_disable();
1694 }
1695 }
1696 }
1697
1698 /*
1699 * Socket option code for UDP
1700 */
1701 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1702 unsigned int optlen)
1703 {
1704 if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
1705 return udp_lib_setsockopt(sk, level, optname,
1706 optval, optlen,
1707 udp_v6_push_pending_frames);
1708 return ipv6_setsockopt(sk, level, optname, optval, optlen);
1709 }
1710
1711 int udpv6_getsockopt(struct sock *sk, int level, int optname,
1712 char __user *optval, int __user *optlen)
1713 {
1714 if (level == SOL_UDP || level == SOL_UDPLITE)
1715 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1716 return ipv6_getsockopt(sk, level, optname, optval, optlen);
1717 }
1718
1719 static const struct inet6_protocol udpv6_protocol = {
1720 .handler = udpv6_rcv,
1721 .err_handler = udpv6_err,
1722 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1723 };
1724
1725 /* ------------------------------------------------------------------------ */
1726 #ifdef CONFIG_PROC_FS
1727 int udp6_seq_show(struct seq_file *seq, void *v)
1728 {
1729 if (v == SEQ_START_TOKEN) {
1730 seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1731 } else {
1732 int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1733 const struct inet_sock *inet = inet_sk((const struct sock *)v);
1734 __u16 srcp = ntohs(inet->inet_sport);
1735 __u16 destp = ntohs(inet->inet_dport);
1736 __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1737 udp_rqueue_get(v), bucket);
1738 }
1739 return 0;
1740 }
1741
1742 const struct seq_operations udp6_seq_ops = {
1743 .start = udp_seq_start,
1744 .next = udp_seq_next,
1745 .stop = udp_seq_stop,
1746 .show = udp6_seq_show,
1747 };
1748 EXPORT_SYMBOL(udp6_seq_ops);
1749
1750 static struct udp_seq_afinfo udp6_seq_afinfo = {
1751 .family = AF_INET6,
1752 .udp_table = NULL,
1753 };
1754
1755 int __net_init udp6_proc_init(struct net *net)
1756 {
1757 if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1758 sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1759 return -ENOMEM;
1760 return 0;
1761 }
1762
1763 void udp6_proc_exit(struct net *net)
1764 {
1765 remove_proc_entry("udp6", net->proc_net);
1766 }
1767 #endif /* CONFIG_PROC_FS */
1768
1769 /* ------------------------------------------------------------------------ */
1770
1771 struct proto udpv6_prot = {
1772 .name = "UDPv6",
1773 .owner = THIS_MODULE,
1774 .close = udp_lib_close,
1775 .pre_connect = udpv6_pre_connect,
1776 .connect = ip6_datagram_connect,
1777 .disconnect = udp_disconnect,
1778 .ioctl = udp_ioctl,
1779 .init = udpv6_init_sock,
1780 .destroy = udpv6_destroy_sock,
1781 .setsockopt = udpv6_setsockopt,
1782 .getsockopt = udpv6_getsockopt,
1783 .sendmsg = udpv6_sendmsg,
1784 .recvmsg = udpv6_recvmsg,
1785 .splice_eof = udpv6_splice_eof,
1786 .release_cb = ip6_datagram_release_cb,
1787 .hash = udp_lib_hash,
1788 .unhash = udp_lib_unhash,
1789 .rehash = udp_v6_rehash,
1790 .get_port = udp_v6_get_port,
1791 .put_port = udp_lib_unhash,
1792 #ifdef CONFIG_BPF_SYSCALL
1793 .psock_update_sk_prot = udp_bpf_update_proto,
1794 #endif
1795
1796 .memory_allocated = &udp_memory_allocated,
1797 .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
1798
1799 .sysctl_mem = sysctl_udp_mem,
1800 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1801 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1802 .obj_size = sizeof(struct udp6_sock),
1803 .ipv6_pinfo_offset = offsetof(struct udp6_sock, inet6),
1804 .h.udp_table = NULL,
1805 .diag_destroy = udp_abort,
1806 };
1807
1808 static struct inet_protosw udpv6_protosw = {
1809 .type = SOCK_DGRAM,
1810 .protocol = IPPROTO_UDP,
1811 .prot = &udpv6_prot,
1812 .ops = &inet6_dgram_ops,
1813 .flags = INET_PROTOSW_PERMANENT,
1814 };
1815
1816 int __init udpv6_init(void)
1817 {
1818 int ret;
1819
1820 ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1821 if (ret)
1822 goto out;
1823
1824 ret = inet6_register_protosw(&udpv6_protosw);
1825 if (ret)
1826 goto out_udpv6_protocol;
1827 out:
1828 return ret;
1829
1830 out_udpv6_protocol:
1831 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1832 goto out;
1833 }
1834
1835 void udpv6_exit(void)
1836 {
1837 inet6_unregister_protosw(&udpv6_protosw);
1838 inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1839 }
1840