/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP to API glue.
 *
 * Authors:	see ip.c
 *
 * Fixes:
 *		Many		:	Split from ip.c , see ip.c for history.
 *		Martin Mares	:	TOS setting fixed.
 *		Alan Cox	:	Fixed a couple of oopses in Martin's
 *					TOS tweaks.
 *		Mike McLagan	:	Routing by source
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/tcp_states.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/netfilter.h>
#include <linux/route.h>
#include <linux/mroute.h>
#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/compat.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/ip_fib.h>

#include <linux/errqueue.h>
#include <asm/uaccess.h>

#define IP_CMSG_PKTINFO		1
#define IP_CMSG_TTL		2
#define IP_CMSG_TOS		4
#define IP_CMSG_RECVOPTS	8
#define IP_CMSG_RETOPTS		16
#define IP_CMSG_PASSSEC		32
#define IP_CMSG_ORIGDSTADDR	64

/*
 *	SOL_IP control messages.
 */

static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct in_pktinfo info = *PKTINFO_SKB_CB(skb);

	info.ipi_addr.s_addr = ip_hdr(skb)->daddr;

	put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}

static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
{
	int ttl = ip_hdr(skb)->ttl;
	put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
}

static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
{
	put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
}

static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
{
	if (IPCB(skb)->opt.optlen == 0)
		return;

	put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
		 ip_hdr(skb) + 1);
}


static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
{
	unsigned char optbuf[sizeof(struct ip_options) + 40];
	struct ip_options *opt = (struct ip_options *)optbuf;

	if (IPCB(skb)->opt.optlen == 0)
		return;

	if (ip_options_echo(opt, skb)) {
		msg->msg_flags |= MSG_CTRUNC;
		return;
	}
	ip_options_undo(opt);

	put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}

static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
	char *secdata;
	u32 seclen, secid;
	int err;

	err = security_socket_getpeersec_dgram(NULL, skb, &secid);
	if (err)
		return;

	err = security_secid_to_secctx(secid, &secdata, &seclen);
	if (err)
		return;

	put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
	security_release_secctx(secdata, seclen);
}

static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
	struct sockaddr_in sin;
	const struct iphdr *iph = ip_hdr(skb);
	__be16 *ports = (__be16 *)skb_transport_header(skb);

	if (skb_transport_offset(skb) + 4 > skb->len)
		return;

	/* All current transport protocols have the port numbers in the
	 * first four bytes of the transport header and this function is
	 * written with this assumption in mind.
	 */

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = iph->daddr;
	sin.sin_port = ports[1];
	memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

	put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}

void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(skb->sk);
	unsigned int flags = inet->cmsg_flags;

	/* Ordered by supposed usage frequency */
	if (flags & 1)
		ip_cmsg_recv_pktinfo(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_ttl(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_tos(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_opts(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_retopts(msg, skb);
	if ((flags >>= 1) == 0)
		return;

	if (flags & 1)
		ip_cmsg_recv_security(msg, skb);

	if ((flags >>= 1) == 0)
		return;
	if (flags & 1)
		ip_cmsg_recv_dstaddr(msg, skb);

}
EXPORT_SYMBOL(ip_cmsg_recv);
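
/*
 * Illustrative only, not part of the original source: a minimal userspace
 * sketch of consuming the ancillary data generated above by ip_cmsg_recv().
 * Assumes a bound UDP socket descriptor `fd`; headers and error handling
 * are omitted for brevity.
 *
 *	int on = 1;
 *	char data[1500], cbuf[256];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == IPPROTO_IP &&
 *		    cmsg->cmsg_type == IP_PKTINFO) {
 *			struct in_pktinfo *pi = (struct in_pktinfo *)CMSG_DATA(cmsg);
 *			... use pi->ipi_ifindex, pi->ipi_addr ...
 *		}
 *	}
 */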

int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
		 bool allow_ipv6)
{
	int err, val;
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
#if defined(CONFIG_IPV6)
		if (allow_ipv6 &&
		    cmsg->cmsg_level == SOL_IPV6 &&
		    cmsg->cmsg_type == IPV6_PKTINFO) {
			struct in6_pktinfo *src_info;

			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
				return -EINVAL;
			src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
				return -EINVAL;
			ipc->oif = src_info->ipi6_ifindex;
			ipc->addr = src_info->ipi6_addr.s6_addr32[3];
			continue;
		}
#endif
		if (cmsg->cmsg_level != SOL_IP)
			continue;
		switch (cmsg->cmsg_type) {
		case IP_RETOPTS:
			err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
			err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
					     err < 40 ? err : 40);
			if (err)
				return err;
			break;
		case IP_PKTINFO:
		{
			struct in_pktinfo *info;
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
				return -EINVAL;
			info = (struct in_pktinfo *)CMSG_DATA(cmsg);
			ipc->oif = info->ipi_ifindex;
			ipc->addr = info->ipi_spec_dst.s_addr;
			break;
		}
		case IP_TTL:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
				return -EINVAL;
			val = *(int *)CMSG_DATA(cmsg);
			if (val < 1 || val > 255)
				return -EINVAL;
			ipc->ttl = val;
			break;
		case IP_TOS:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
				return -EINVAL;
			val = *(int *)CMSG_DATA(cmsg);
			if (val < 0 || val > 255)
				return -EINVAL;
			ipc->tos = val;
			ipc->priority = rt_tos2priority(ipc->tos);
			break;

		default:
			return -EINVAL;
		}
	}
	return 0;
}
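
/*
 * Illustrative only, not part of the original source: a minimal userspace
 * sketch of the sender-side ancillary data parsed by ip_cmsg_send() above
 * (a per-datagram IP_PKTINFO plus IP_TOS).  Assumes a UDP socket `fd` and a
 * filled-in destination `struct sockaddr_in dst`; the ifindex is made up.
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo)) + CMSG_SPACE(sizeof(int))];
 *	char payload[] = "hi";
 *	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) - 1 };
 *	struct msghdr msg = {
 *		.msg_name = &dst, .msg_namelen = sizeof(dst),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	struct in_pktinfo pi = { .ipi_ifindex = 2 };	// hypothetical ifindex
 *	int tos = 0x10;
 *
 *	cmsg->cmsg_level = IPPROTO_IP;
 *	cmsg->cmsg_type = IP_PKTINFO;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(pi));
 *	memcpy(CMSG_DATA(cmsg), &pi, sizeof(pi));
 *	cmsg = CMSG_NXTHDR(&msg, cmsg);
 *	cmsg->cmsg_level = IPPROTO_IP;
 *	cmsg->cmsg_type = IP_TOS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(tos));
 *	memcpy(CMSG_DATA(cmsg), &tos, sizeof(tos));
 *	sendmsg(fd, &msg, 0);
 */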


/* Special input handler for packets caught by router alert option.
   They are selected only by protocol field, and then processed like
   local ones; but only if someone wants them! Otherwise, a router
   not running rsvpd will kill RSVP.

   It is a user-level problem what to do with them.
   I have no idea how they will be masqueraded or NATed (it is a joke, joke :-)),
   but the receiver should be clever enough e.g. to forward mtrace requests,
   sent to a multicast group, to reach the destination designated router.
 */
struct ip_ra_chain __rcu *ip_ra_chain;
static DEFINE_SPINLOCK(ip_ra_lock);


static void ip_ra_destroy_rcu(struct rcu_head *head)
{
	struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);

	sock_put(ra->saved_sk);
	kfree(ra);
}

int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *))
{
	struct ip_ra_chain *ra, *new_ra;
	struct ip_ra_chain __rcu **rap;

	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
		return -EINVAL;

	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;

	spin_lock_bh(&ip_ra_lock);
	for (rap = &ip_ra_chain;
	     (ra = rcu_dereference_protected(*rap,
			lockdep_is_held(&ip_ra_lock))) != NULL;
	     rap = &ra->next) {
		if (ra->sk == sk) {
			if (on) {
				spin_unlock_bh(&ip_ra_lock);
				kfree(new_ra);
				return -EADDRINUSE;
			}
			/* don't let ip_call_ra_chain() use sk again */
			ra->sk = NULL;
			rcu_assign_pointer(*rap, ra->next);
			spin_unlock_bh(&ip_ra_lock);

			if (ra->destructor)
				ra->destructor(sk);
			/*
			 * Delay sock_put(sk) and kfree(ra) until after one RCU
			 * grace period. This guarantees that ip_call_ra_chain()
			 * does not need to mess with socket refcounts.
			 */
			ra->saved_sk = sk;
			call_rcu(&ra->rcu, ip_ra_destroy_rcu);
			return 0;
		}
	}
	if (new_ra == NULL) {
		spin_unlock_bh(&ip_ra_lock);
		return -ENOBUFS;
	}
	new_ra->sk = sk;
	new_ra->destructor = destructor;

	new_ra->next = ra;
	rcu_assign_pointer(*rap, new_ra);
	sock_hold(sk);
	spin_unlock_bh(&ip_ra_lock);

	return 0;
}

void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
		   __be16 port, u32 info, u8 *payload)
{
	struct sock_exterr_skb *serr;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
	serr->ee.ee_type = icmp_hdr(skb)->type;
	serr->ee.ee_code = icmp_hdr(skb)->code;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
			    skb_network_header(skb);
	serr->port = port;

	if (skb_pull(skb, payload - skb->data) != NULL) {
		skb_reset_transport_header(skb);
		if (sock_queue_err_skb(sk, skb) == 0)
			return;
	}
	kfree_skb(skb);
}

void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sock_exterr_skb *serr;
	struct iphdr *iph;
	struct sk_buff *skb;

	if (!inet->recverr)
		return;

	skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb_put(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->daddr = daddr;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = 0;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
	serr->port = port;

	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}

/*
 *	Handle MSG_ERRQUEUE
 */
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct {
		struct sock_extended_err ee;
		struct sockaddr_in	 offender;
	} errhdr;
	int err;
	int copied;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);

	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
						   serr->addr_offset);
		sin->sin_port = serr->port;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}

	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
	sin = &errhdr.offender;
	sin->sin_family = AF_UNSPEC;
	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
		struct inet_sock *inet = inet_sk(sk);

		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		sin->sin_port = 0;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		if (inet->cmsg_flags)
			ip_cmsg_recv(msg, skb);
	}

	put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);

	/* Now we could try to dump offended packet options */

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	skb2 = skb_peek(&sk->sk_error_queue);
	if (skb2 != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
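
/*
 * Illustrative only, not part of the original source: a minimal userspace
 * sketch of draining the error queue that ip_icmp_error()/ip_local_error()
 * fill and ip_recv_error() reads back.  Assumes a UDP socket `fd` with
 * IP_RECVERR enabled; headers and error handling omitted.
 *
 *	int on = 1;
 *	char data[1500], cbuf[512];
 *	struct sockaddr_in from;
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_name = &from, .msg_namelen = sizeof(from),
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *	...
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == IPPROTO_IP &&
 *			    cmsg->cmsg_type == IP_RECVERR) {
 *				struct sock_extended_err *ee = (void *)CMSG_DATA(cmsg);
 *				... use ee->ee_errno, SO_EE_OFFENDER(ee) ...
 *			}
 *		}
 *	}
 */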


/*
 *	Socket option code for IP. This is the end of the line after any
 *	TCP,UDP etc options on an IP socket.
 */

static int do_ip_setsockopt(struct sock *sk, int level,
			    int optname, char __user *optval, unsigned int optlen)
{
	struct inet_sock *inet = inet_sk(sk);
	int val = 0, err;

	switch (optname) {
	case IP_PKTINFO:
	case IP_RECVTTL:
	case IP_RECVOPTS:
	case IP_RECVTOS:
	case IP_RETOPTS:
	case IP_TOS:
	case IP_TTL:
	case IP_HDRINCL:
	case IP_MTU_DISCOVER:
	case IP_RECVERR:
	case IP_ROUTER_ALERT:
	case IP_FREEBIND:
	case IP_PASSSEC:
	case IP_TRANSPARENT:
	case IP_MINTTL:
	case IP_NODEFRAG:
	case IP_UNICAST_IF:
	case IP_MULTICAST_TTL:
	case IP_MULTICAST_ALL:
	case IP_MULTICAST_LOOP:
	case IP_RECVORIGDSTADDR:
		if (optlen >= sizeof(int)) {
			if (get_user(val, (int __user *) optval))
				return -EFAULT;
		} else if (optlen >= sizeof(char)) {
			unsigned char ucval;

			if (get_user(ucval, (unsigned char __user *) optval))
				return -EFAULT;
			val = (int) ucval;
		}
	}

	/* If optlen==0, it is equivalent to val == 0 */

	if (ip_mroute_opt(optname))
		return ip_mroute_setsockopt(sk, optname, optval, optlen);

	err = 0;
	lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		struct ip_options_rcu *old, *opt = NULL;

		if (optlen > 40)
			goto e_inval;
		err = ip_options_get_from_user(sock_net(sk), &opt,
					       optval, optlen);
		if (err)
			break;
		old = rcu_dereference_protected(inet->inet_opt,
						sock_owned_by_user(sk));
		if (inet->is_icsk) {
			struct inet_connection_sock *icsk = inet_csk(sk);
#if IS_ENABLED(CONFIG_IPV6)
			if (sk->sk_family == PF_INET ||
			    (!((1 << sk->sk_state) &
			       (TCPF_LISTEN | TCPF_CLOSE)) &&
			     inet->inet_daddr != LOOPBACK4_IPV6)) {
#endif
				if (old)
					icsk->icsk_ext_hdr_len -= old->opt.optlen;
				if (opt)
					icsk->icsk_ext_hdr_len += opt->opt.optlen;
				icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
#if IS_ENABLED(CONFIG_IPV6)
			}
#endif
		}
		rcu_assign_pointer(inet->inet_opt, opt);
		if (old)
			kfree_rcu(old, rcu);
		break;
	}
	case IP_PKTINFO:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PKTINFO;
		else
			inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
		break;
	case IP_RECVTTL:
		if (val)
			inet->cmsg_flags |= IP_CMSG_TTL;
		else
			inet->cmsg_flags &= ~IP_CMSG_TTL;
		break;
	case IP_RECVTOS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_TOS;
		else
			inet->cmsg_flags &= ~IP_CMSG_TOS;
		break;
	case IP_RECVOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RECVOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
		break;
	case IP_RETOPTS:
		if (val)
			inet->cmsg_flags |= IP_CMSG_RETOPTS;
		else
			inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
		break;
	case IP_PASSSEC:
		if (val)
			inet->cmsg_flags |= IP_CMSG_PASSSEC;
		else
			inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
		break;
	case IP_RECVORIGDSTADDR:
		if (val)
			inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
		else
			inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
		break;
	case IP_TOS:	/* This sets both TOS and Precedence */
		if (sk->sk_type == SOCK_STREAM) {
			val &= ~INET_ECN_MASK;
			val |= inet->tos & INET_ECN_MASK;
		}
		if (inet->tos != val) {
			inet->tos = val;
			sk->sk_priority = rt_tos2priority(val);
			sk_dst_reset(sk);
		}
		break;
	case IP_TTL:
		if (optlen < 1)
			goto e_inval;
		if (val != -1 && (val < 1 || val > 255))
			goto e_inval;
		inet->uc_ttl = val;
		break;
	case IP_HDRINCL:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->hdrincl = val ? 1 : 0;
		break;
	case IP_NODEFRAG:
		if (sk->sk_type != SOCK_RAW) {
			err = -ENOPROTOOPT;
			break;
		}
		inet->nodefrag = val ? 1 : 0;
		break;
	case IP_MTU_DISCOVER:
		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
			goto e_inval;
		inet->pmtudisc = val;
		break;
	case IP_RECVERR:
		inet->recverr = !!val;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		break;
	case IP_MULTICAST_TTL:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (optlen < 1)
			goto e_inval;
		if (val == -1)
			val = 1;
		if (val < 0 || val > 255)
			goto e_inval;
		inet->mc_ttl = val;
		break;
	case IP_MULTICAST_LOOP:
		if (optlen < 1)
			goto e_inval;
		inet->mc_loop = !!val;
		break;
	case IP_UNICAST_IF:
	{
		struct net_device *dev = NULL;
		int ifindex;

		if (optlen != sizeof(int))
			goto e_inval;

		ifindex = (__force int)ntohl((__force __be32)val);
		if (ifindex == 0) {
			inet->uc_index = 0;
			err = 0;
			break;
		}

		dev = dev_get_by_index(sock_net(sk), ifindex);
		err = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		err = -EINVAL;
		if (sk->sk_bound_dev_if)
			break;

		inet->uc_index = ifindex;
		err = 0;
		break;
	}
	case IP_MULTICAST_IF:
	{
		struct ip_mreqn mreq;
		struct net_device *dev = NULL;

		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		/*
		 *	Check the arguments are allowable
		 */

		if (optlen < sizeof(struct in_addr))
			goto e_inval;

		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (optlen >= sizeof(struct ip_mreq)) {
				if (copy_from_user(&mreq, optval,
						   sizeof(struct ip_mreq)))
					break;
			} else if (optlen >= sizeof(struct in_addr)) {
				if (copy_from_user(&mreq.imr_address, optval,
						   sizeof(struct in_addr)))
					break;
			}
		}

		if (!mreq.imr_ifindex) {
			if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
				inet->mc_index = 0;
				inet->mc_addr = 0;
				err = 0;
				break;
			}
			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
			if (dev)
				mreq.imr_ifindex = dev->ifindex;
		} else
			dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);


		err = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		err = -EINVAL;
		if (sk->sk_bound_dev_if &&
		    mreq.imr_ifindex != sk->sk_bound_dev_if)
			break;

		inet->mc_index = mreq.imr_ifindex;
		inet->mc_addr = mreq.imr_address.s_addr;
		err = 0;
		break;
	}

	case IP_ADD_MEMBERSHIP:
	case IP_DROP_MEMBERSHIP:
	{
		struct ip_mreqn mreq;

		err = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		if (optlen < sizeof(struct ip_mreq))
			goto e_inval;
		err = -EFAULT;
		if (optlen >= sizeof(struct ip_mreqn)) {
			if (copy_from_user(&mreq, optval, sizeof(mreq)))
				break;
		} else {
			memset(&mreq, 0, sizeof(mreq));
			if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
				break;
		}

		if (optname == IP_ADD_MEMBERSHIP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ip_mc_leave_group(sk, &mreq);
		break;
	}
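
	/*
	 * Illustrative only, not part of the original source: a minimal
	 * userspace sketch of the membership request parsed above.  Passing
	 * the larger struct ip_mreqn lets the caller pick the interface by
	 * index; the older struct ip_mreq (address only) is still accepted.
	 * Assumes a UDP socket `fd`; the group address and ifindex are made up.
	 *
	 *	struct ip_mreqn mreq = {
	 *		.imr_multiaddr.s_addr	= inet_addr("239.1.2.3"),
	 *		.imr_address.s_addr	= htonl(INADDR_ANY),
	 *		.imr_ifindex		= 2,	// hypothetical ifindex
	 *	};
	 *
	 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
	 */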
	case IP_MSFILTER:
	{
		struct ip_msfilter *msf;

		if (optlen < IP_MSFILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		msf = kmalloc(optlen, GFP_KERNEL);
		if (!msf) {
			err = -ENOBUFS;
			break;
		}
		err = -EFAULT;
		if (copy_from_user(msf, optval, optlen)) {
			kfree(msf);
			break;
		}
		/* numsrc >= (1G-4) overflow in 32 bits */
		if (msf->imsf_numsrc >= 0x3ffffffcU ||
		    msf->imsf_numsrc > sysctl_igmp_max_msf) {
			kfree(msf);
			err = -ENOBUFS;
			break;
		}
		if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
			kfree(msf);
			err = -EINVAL;
			break;
		}
		err = ip_mc_msfilter(sk, msf, 0);
		kfree(msf);
		break;
	}
	case IP_BLOCK_SOURCE:
	case IP_UNBLOCK_SOURCE:
	case IP_ADD_SOURCE_MEMBERSHIP:
	case IP_DROP_SOURCE_MEMBERSHIP:
	{
		struct ip_mreq_source mreqs;
		int omode, add;

		if (optlen != sizeof(struct ip_mreq_source))
			goto e_inval;
		if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
			err = -EFAULT;
			break;
		}
		if (optname == IP_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == IP_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
			struct ip_mreqn mreq;

			mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
			mreq.imr_address.s_addr = mreqs.imr_interface;
			mreq.imr_ifindex = 0;
			err = ip_mc_join_group(sk, &mreq);
			if (err && err != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ip_mc_source(add, omode, sk, &mreqs, 0);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in *psin;
		struct ip_mreqn mreq;

		if (optlen < sizeof(struct group_req))
			goto e_inval;
		err = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(greq)))
			break;
		psin = (struct sockaddr_in *)&greq.gr_group;
		if (psin->sin_family != AF_INET)
			goto e_inval;
		memset(&mreq, 0, sizeof(mreq));
		mreq.imr_multiaddr = psin->sin_addr;
		mreq.imr_ifindex = greq.gr_interface;

		if (optname == MCAST_JOIN_GROUP)
			err = ip_mc_join_group(sk, &mreq);
		else
			err = ip_mc_leave_group(sk, &mreq);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		struct ip_mreq_source mreqs;
		struct sockaddr_in *psin;
		int omode, add;

		if (optlen != sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			err = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET ||
		    greqs.gsr_source.ss_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			break;
		}
		psin = (struct sockaddr_in *)&greqs.gsr_group;
		mreqs.imr_multiaddr = psin->sin_addr.s_addr;
		psin = (struct sockaddr_in *)&greqs.gsr_source;
		mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
		mreqs.imr_interface = 0; /* use index for mc_source */

		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct ip_mreqn mreq;

			psin = (struct sockaddr_in *)&greqs.gsr_group;
			mreq.imr_multiaddr = psin->sin_addr;
			mreq.imr_address.s_addr = 0;
			mreq.imr_ifindex = greqs.gsr_interface;
			err = ip_mc_join_group(sk, &mreq);
			if (err && err != -EADDRINUSE)
				break;
			greqs.gsr_interface = mreq.imr_ifindex;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* MCAST_LEAVE_SOURCE_GROUP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		err = ip_mc_source(add, omode, sk, &mreqs,
				   greqs.gsr_interface);
		break;
	}
	case MCAST_MSFILTER:
	{
		struct sockaddr_in *psin;
		struct ip_msfilter *msf = NULL;
		struct group_filter *gsf = NULL;
		int msize, i, ifindex;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			err = -ENOBUFS;
			break;
		}
		gsf = kmalloc(optlen, GFP_KERNEL);
		if (!gsf) {
			err = -ENOBUFS;
			break;
		}
		err = -EFAULT;
		if (copy_from_user(gsf, optval, optlen))
			goto mc_msf_out;

		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffff ||
		    gsf->gf_numsrc > sysctl_igmp_max_msf) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			err = -EINVAL;
			goto mc_msf_out;
		}
		msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
		msf = kmalloc(msize, GFP_KERNEL);
		if (!msf) {
			err = -ENOBUFS;
			goto mc_msf_out;
		}
		ifindex = gsf->gf_interface;
		psin = (struct sockaddr_in *)&gsf->gf_group;
		if (psin->sin_family != AF_INET) {
			err = -EADDRNOTAVAIL;
			goto mc_msf_out;
		}
		msf->imsf_multiaddr = psin->sin_addr.s_addr;
		msf->imsf_interface = 0;
		msf->imsf_fmode = gsf->gf_fmode;
		msf->imsf_numsrc = gsf->gf_numsrc;
		err = -EADDRNOTAVAIL;
		for (i = 0; i < gsf->gf_numsrc; ++i) {
			psin = (struct sockaddr_in *)&gsf->gf_slist[i];

			if (psin->sin_family != AF_INET)
				goto mc_msf_out;
			msf->imsf_slist[i] = psin->sin_addr.s_addr;
		}
		kfree(gsf);
		gsf = NULL;

		err = ip_mc_msfilter(sk, msf, ifindex);
mc_msf_out:
		kfree(msf);
		kfree(gsf);
		break;
	}
	case IP_MULTICAST_ALL:
		if (optlen < 1)
			goto e_inval;
		if (val != 0 && val != 1)
			goto e_inval;
		inet->mc_all = val;
		break;
	case IP_ROUTER_ALERT:
		err = ip_ra_control(sk, val ? 1 : 0, NULL);
		break;

	case IP_FREEBIND:
		if (optlen < 1)
			goto e_inval;
		inet->freebind = !!val;
		break;

	case IP_IPSEC_POLICY:
	case IP_XFRM_POLICY:
		err = -EPERM;
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			break;
		err = xfrm_user_policy(sk, optname, optval, optlen);
		break;

	case IP_TRANSPARENT:
		if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		if (optlen < 1)
			goto e_inval;
		inet->transparent = !!val;
		break;

	case IP_MINTTL:
		if (optlen < 1)
			goto e_inval;
		if (val < 0 || val > 255)
			goto e_inval;
		inet->min_ttl = val;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return err;

e_inval:
	release_sock(sk);
	return -EINVAL;
}
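
/*
 * Illustrative only, not part of the original source: a minimal userspace
 * sketch of the protocol-independent source-specific join handled above
 * (MCAST_JOIN_SOURCE_GROUP with a struct group_source_req).  The addresses
 * and ifindex are made up; assumes a UDP socket `fd`, error handling omitted.
 *
 *	struct group_source_req gsr = { .gsr_interface = 2 };
 *	struct sockaddr_in *grp = (struct sockaddr_in *)&gsr.gsr_group;
 *	struct sockaddr_in *src = (struct sockaddr_in *)&gsr.gsr_source;
 *
 *	grp->sin_family = AF_INET;
 *	grp->sin_addr.s_addr = inet_addr("232.1.2.3");
 *	src->sin_family = AF_INET;
 *	src->sin_addr.s_addr = inet_addr("192.0.2.1");
 *
 *	setsockopt(fd, IPPROTO_IP, MCAST_JOIN_SOURCE_GROUP, &gsr, sizeof(gsr));
 */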

/**
 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
 * @sk: socket
 * @skb: buffer
 *
 * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
 * destination in skb->cb[] before dst drop.
 * This way, the receiver doesn't make cache line misses to read the rtable.
 */
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
{
	struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
	bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
		       ipv6_sk_rxinfo(sk);

	if (prepare && skb_rtable(skb)) {
		pktinfo->ipi_ifindex = inet_iif(skb);
		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
	} else {
		pktinfo->ipi_ifindex = 0;
		pktinfo->ipi_spec_dst.s_addr = 0;
	}
	skb_dst_drop(skb);
}

int ip_setsockopt(struct sock *sk, int level,
		  int optname, char __user *optval, unsigned int optlen)
{
	int err;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
	    optname != IP_IPSEC_POLICY &&
	    optname != IP_XFRM_POLICY &&
	    !ip_mroute_opt(optname)) {
		lock_sock(sk);
		err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
		release_sock(sk);
	}
#endif
	return err;
}
EXPORT_SYMBOL(ip_setsockopt);

#ifdef CONFIG_COMPAT
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen)
{
	int err;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
		return compat_mc_setsockopt(sk, level, optname, optval, optlen,
					    ip_setsockopt);

	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
	    optname != IP_IPSEC_POLICY &&
	    optname != IP_XFRM_POLICY &&
	    !ip_mroute_opt(optname)) {
		lock_sock(sk);
		err = compat_nf_setsockopt(sk, PF_INET, optname,
					   optval, optlen);
		release_sock(sk);
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_setsockopt);
#endif

/*
 *	Get the options. Note for future reference. The GET of IP options gets
 *	the _received_ ones. The set sets the _sent_ ones.
 */

static int do_ip_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int val;
	int len;

	if (level != SOL_IP)
		return -EOPNOTSUPP;

	if (ip_mroute_opt(optname))
		return ip_mroute_getsockopt(sk, optname, optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		unsigned char optbuf[sizeof(struct ip_options)+40];
		struct ip_options *opt = (struct ip_options *)optbuf;
		struct ip_options_rcu *inet_opt;

		inet_opt = rcu_dereference_protected(inet->inet_opt,
						     sock_owned_by_user(sk));
		opt->optlen = 0;
		if (inet_opt)
			memcpy(optbuf, &inet_opt->opt,
			       sizeof(struct ip_options) +
			       inet_opt->opt.optlen);
		release_sock(sk);

		if (opt->optlen == 0)
			return put_user(0, optlen);

		ip_options_undo(opt);

		len = min_t(unsigned int, len, opt->optlen);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, opt->__data, len))
			return -EFAULT;
		return 0;
	}
	case IP_PKTINFO:
		val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
		break;
	case IP_RECVTTL:
		val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
		break;
	case IP_RECVTOS:
		val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
		break;
	case IP_RECVOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
		break;
	case IP_RETOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
		break;
	case IP_PASSSEC:
		val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
		break;
	case IP_RECVORIGDSTADDR:
		val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
		break;
	case IP_TOS:
		val = inet->tos;
		break;
	case IP_TTL:
		val = (inet->uc_ttl == -1 ?
		       sysctl_ip_default_ttl :
		       inet->uc_ttl);
		break;
	case IP_HDRINCL:
		val = inet->hdrincl;
		break;
	case IP_NODEFRAG:
		val = inet->nodefrag;
		break;
	case IP_MTU_DISCOVER:
		val = inet->pmtudisc;
		break;
	case IP_MTU:
	{
		struct dst_entry *dst;
		val = 0;
		dst = sk_dst_get(sk);
		if (dst) {
			val = dst_mtu(dst);
			dst_release(dst);
		}
		if (!val) {
			release_sock(sk);
			return -ENOTCONN;
		}
		break;
	}
	case IP_RECVERR:
		val = inet->recverr;
		break;
	case IP_MULTICAST_TTL:
		val = inet->mc_ttl;
		break;
	case IP_MULTICAST_LOOP:
		val = inet->mc_loop;
		break;
	case IP_UNICAST_IF:
		val = (__force int)htonl((__u32) inet->uc_index);
		break;
	case IP_MULTICAST_IF:
	{
		struct in_addr addr;
		len = min_t(unsigned int, len, sizeof(struct in_addr));
		addr.s_addr = inet->mc_addr;
		release_sock(sk);

		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &addr, len))
			return -EFAULT;
		return 0;
	}
	case IP_MSFILTER:
	{
		struct ip_msfilter msf;
		int err;

		if (len < IP_MSFILTER_SIZE(0)) {
			release_sock(sk);
			return -EINVAL;
		}
		if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
			release_sock(sk);
			return -EFAULT;
		}
		err = ip_mc_msfget(sk, &msf,
				   (struct ip_msfilter __user *)optval, optlen);
		release_sock(sk);
		return err;
	}
	case MCAST_MSFILTER:
	{
		struct group_filter gsf;
		int err;

		if (len < GROUP_FILTER_SIZE(0)) {
			release_sock(sk);
			return -EINVAL;
		}
		if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
			release_sock(sk);
			return -EFAULT;
		}
		err = ip_mc_gsfget(sk, &gsf,
				   (struct group_filter __user *)optval,
				   optlen);
		release_sock(sk);
		return err;
	}
	case IP_MULTICAST_ALL:
		val = inet->mc_all;
		break;
	case IP_PKTOPTIONS:
	{
		struct msghdr msg;

		release_sock(sk);

		if (sk->sk_type != SOCK_STREAM)
			return -ENOPROTOOPT;

		msg.msg_control = (__force void *) optval;
		msg.msg_controllen = len;
		msg.msg_flags = flags;

		if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
			struct in_pktinfo info;

			info.ipi_addr.s_addr = inet->inet_rcv_saddr;
			info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
			info.ipi_ifindex = inet->mc_index;
			put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
		}
		if (inet->cmsg_flags & IP_CMSG_TTL) {
			int hlim = inet->mc_ttl;
			put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
		}
		if (inet->cmsg_flags & IP_CMSG_TOS) {
			int tos = inet->rcv_tos;
			put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
		}
		len -= msg.msg_controllen;
		return put_user(len, optlen);
	}
	case IP_FREEBIND:
		val = inet->freebind;
		break;
	case IP_TRANSPARENT:
		val = inet->transparent;
		break;
	case IP_MINTTL:
		val = inet->min_ttl;
		break;
	default:
		release_sock(sk);
		return -ENOPROTOOPT;
	}
	release_sock(sk);

	if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
		unsigned char ucval = (unsigned char)val;
		len = 1;
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &ucval, 1))
			return -EFAULT;
	} else {
		len = min_t(unsigned int, sizeof(int), len);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &val, len))
			return -EFAULT;
	}
	return 0;
}
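
/*
 * Illustrative only, not part of the original source: a minimal userspace
 * sketch of reading back one of the options handled above.  IP_MTU only
 * succeeds once the socket has a cached route, e.g. after connect();
 * assumes a connected UDP socket `fd`.
 *
 *	int mtu;
 *	socklen_t len = sizeof(mtu);
 *
 *	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
 *		... mtu now holds the path MTU of the cached route ...
 */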

int ip_getsockopt(struct sock *sk, int level,
		  int optname, char __user *optval, int __user *optlen)
{
	int err;

	err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
	    !ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		lock_sock(sk);
		err = nf_getsockopt(sk, PF_INET, optname, optval,
				    &len);
		release_sock(sk);
		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(ip_getsockopt);

#ifdef CONFIG_COMPAT
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen)
{
	int err;

	if (optname == MCAST_MSFILTER)
		return compat_mc_getsockopt(sk, level, optname, optval, optlen,
					    ip_getsockopt);

	err = do_ip_getsockopt(sk, level, optname, optval, optlen,
			       MSG_CMSG_COMPAT);

#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
	    !ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		lock_sock(sk);
		err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
		release_sock(sk);
		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_getsockopt);
#endif