1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * "Ping" sockets 8 * 9 * Based on ipv4/udp.c code. 10 * 11 * Authors: Vasiliy Kulikov / Openwall (for Linux 2.6), 12 * Pavel Kankovsky (for Linux 2.4.32) 13 * 14 * Pavel gave all rights to bugs to Vasiliy, 15 * none of the bugs are Pavel's now. 16 */ 17 18 #include <linux/uaccess.h> 19 #include <linux/types.h> 20 #include <linux/fcntl.h> 21 #include <linux/socket.h> 22 #include <linux/sockios.h> 23 #include <linux/in.h> 24 #include <linux/errno.h> 25 #include <linux/timer.h> 26 #include <linux/mm.h> 27 #include <linux/inet.h> 28 #include <linux/netdevice.h> 29 #include <net/snmp.h> 30 #include <net/ip.h> 31 #include <net/icmp.h> 32 #include <net/protocol.h> 33 #include <linux/skbuff.h> 34 #include <linux/proc_fs.h> 35 #include <linux/export.h> 36 #include <linux/bpf-cgroup.h> 37 #include <net/sock.h> 38 #include <net/ping.h> 39 #include <net/udp.h> 40 #include <net/route.h> 41 #include <net/inet_common.h> 42 #include <net/checksum.h> 43 44 #if IS_ENABLED(CONFIG_IPV6) 45 #include <linux/in6.h> 46 #include <linux/icmpv6.h> 47 #include <net/addrconf.h> 48 #include <net/ipv6.h> 49 #include <net/transp_v6.h> 50 #endif 51 52 struct ping_table { 53 struct hlist_head hash[PING_HTABLE_SIZE]; 54 spinlock_t lock; 55 }; 56 57 static struct ping_table ping_table; 58 struct pingv6_ops pingv6_ops; 59 EXPORT_SYMBOL_GPL(pingv6_ops); 60 61 static u16 ping_port_rover; 62 63 static inline u32 ping_hashfn(const struct net *net, u32 num, u32 mask) 64 { 65 u32 res = (num + net_hash_mix(net)) & mask; 66 67 pr_debug("hash(%u) = %u\n", num, res); 68 return res; 69 } 70 EXPORT_SYMBOL_GPL(ping_hash); 71 72 static inline struct hlist_head *ping_hashslot(struct ping_table *table, 73 struct net *net, unsigned int num) 74 { 75 return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; 76 } 77 78 int ping_get_port(struct sock *sk, unsigned short ident) 79 { 80 struct inet_sock *isk, *isk2; 81 struct hlist_head *hlist; 82 struct sock *sk2 = NULL; 83 84 isk = inet_sk(sk); 85 spin_lock(&ping_table.lock); 86 if (ident == 0) { 87 u32 i; 88 u16 result = ping_port_rover + 1; 89 90 for (i = 0; i < (1L << 16); i++, result++) { 91 if (!result) 92 result++; /* avoid zero */ 93 hlist = ping_hashslot(&ping_table, sock_net(sk), 94 result); 95 sk_for_each(sk2, hlist) { 96 isk2 = inet_sk(sk2); 97 98 if (isk2->inet_num == result) 99 goto next_port; 100 } 101 102 /* found */ 103 ping_port_rover = ident = result; 104 break; 105 next_port: 106 ; 107 } 108 if (i >= (1L << 16)) 109 goto fail; 110 } else { 111 hlist = ping_hashslot(&ping_table, sock_net(sk), ident); 112 sk_for_each(sk2, hlist) { 113 isk2 = inet_sk(sk2); 114 115 /* BUG? Why is this reuse and not reuseaddr? ping.c 116 * doesn't turn off SO_REUSEADDR, and it doesn't expect 117 * that other ping processes can steal its packets. 118 */ 119 if ((isk2->inet_num == ident) && 120 (sk2 != sk) && 121 (!sk2->sk_reuse || !sk->sk_reuse)) 122 goto fail; 123 } 124 } 125 126 pr_debug("found port/ident = %d\n", ident); 127 isk->inet_num = ident; 128 if (sk_unhashed(sk)) { 129 pr_debug("was not hashed\n"); 130 sk_add_node_rcu(sk, hlist); 131 sock_set_flag(sk, SOCK_RCU_FREE); 132 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 133 } 134 spin_unlock(&ping_table.lock); 135 return 0; 136 137 fail: 138 spin_unlock(&ping_table.lock); 139 return -EADDRINUSE; 140 } 141 EXPORT_SYMBOL_GPL(ping_get_port); 142 143 int ping_hash(struct sock *sk) 144 { 145 pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num); 146 BUG(); /* "Please do not press this button again." */ 147 148 return 0; 149 } 150 151 void ping_unhash(struct sock *sk) 152 { 153 struct inet_sock *isk = inet_sk(sk); 154 155 pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); 156 spin_lock(&ping_table.lock); 157 if (sk_del_node_init_rcu(sk)) { 158 isk->inet_num = 0; 159 isk->inet_sport = 0; 160 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 161 } 162 spin_unlock(&ping_table.lock); 163 } 164 EXPORT_SYMBOL_GPL(ping_unhash); 165 166 /* Called under rcu_read_lock() */ 167 static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) 168 { 169 struct hlist_head *hslot = ping_hashslot(&ping_table, net, ident); 170 struct sock *sk = NULL; 171 struct inet_sock *isk; 172 int dif, sdif; 173 174 if (skb->protocol == htons(ETH_P_IP)) { 175 dif = inet_iif(skb); 176 sdif = inet_sdif(skb); 177 pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n", 178 (int)ident, &ip_hdr(skb)->daddr, dif); 179 #if IS_ENABLED(CONFIG_IPV6) 180 } else if (skb->protocol == htons(ETH_P_IPV6)) { 181 dif = inet6_iif(skb); 182 sdif = inet6_sdif(skb); 183 pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n", 184 (int)ident, &ipv6_hdr(skb)->daddr, dif); 185 #endif 186 } else { 187 return NULL; 188 } 189 190 sk_for_each_rcu(sk, hslot) { 191 isk = inet_sk(sk); 192 193 pr_debug("iterate\n"); 194 if (isk->inet_num != ident) 195 continue; 196 197 if (skb->protocol == htons(ETH_P_IP) && 198 sk->sk_family == AF_INET) { 199 pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk, 200 (int) isk->inet_num, &isk->inet_rcv_saddr, 201 sk->sk_bound_dev_if); 202 203 if (isk->inet_rcv_saddr && 204 isk->inet_rcv_saddr != ip_hdr(skb)->daddr) 205 continue; 206 #if IS_ENABLED(CONFIG_IPV6) 207 } else if (skb->protocol == htons(ETH_P_IPV6) && 208 sk->sk_family == AF_INET6) { 209 210 pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk, 211 (int) isk->inet_num, 212 &sk->sk_v6_rcv_saddr, 213 sk->sk_bound_dev_if); 214 215 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && 216 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, 217 &ipv6_hdr(skb)->daddr)) 218 continue; 219 #endif 220 } else { 221 continue; 222 } 223 224 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif && 225 sk->sk_bound_dev_if != sdif) 226 continue; 227 228 goto exit; 229 } 230 231 sk = NULL; 232 exit: 233 234 return sk; 235 } 236 237 static void inet_get_ping_group_range_net(struct net *net, kgid_t *low, 238 kgid_t *high) 239 { 240 kgid_t *data = net->ipv4.ping_group_range.range; 241 unsigned int seq; 242 243 do { 244 seq = read_seqbegin(&net->ipv4.ping_group_range.lock); 245 246 *low = data[0]; 247 *high = data[1]; 248 } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq)); 249 } 250 251 252 int ping_init_sock(struct sock *sk) 253 { 254 struct net *net = sock_net(sk); 255 kgid_t group = current_egid(); 256 struct group_info *group_info; 257 int i; 258 kgid_t low, high; 259 int ret = 0; 260 261 if (sk->sk_family == AF_INET6) 262 sk->sk_ipv6only = 1; 263 264 inet_get_ping_group_range_net(net, &low, &high); 265 if (gid_lte(low, group) && gid_lte(group, high)) 266 return 0; 267 268 group_info = get_current_groups(); 269 for (i = 0; i < group_info->ngroups; i++) { 270 kgid_t gid = group_info->gid[i]; 271 272 if (gid_lte(low, gid) && gid_lte(gid, high)) 273 goto out_release_group; 274 } 275 276 ret = -EACCES; 277 278 out_release_group: 279 put_group_info(group_info); 280 return ret; 281 } 282 EXPORT_SYMBOL_GPL(ping_init_sock); 283 284 void ping_close(struct sock *sk, long timeout) 285 { 286 pr_debug("ping_close(sk=%p,sk->num=%u)\n", 287 inet_sk(sk), inet_sk(sk)->inet_num); 288 pr_debug("isk->refcnt = %d\n", refcount_read(&sk->sk_refcnt)); 289 290 sk_common_release(sk); 291 } 292 EXPORT_SYMBOL_GPL(ping_close); 293 294 static int ping_pre_connect(struct sock *sk, struct sockaddr *uaddr, 295 int addr_len) 296 { 297 /* This check is replicated from __ip4_datagram_connect() and 298 * intended to prevent BPF program called below from accessing bytes 299 * that are out of the bound specified by user in addr_len. 300 */ 301 if (addr_len < sizeof(struct sockaddr_in)) 302 return -EINVAL; 303 304 return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, &addr_len); 305 } 306 307 /* Checks the bind address and possibly modifies sk->sk_bound_dev_if. */ 308 static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, 309 struct sockaddr *uaddr, int addr_len) 310 { 311 struct net *net = sock_net(sk); 312 if (sk->sk_family == AF_INET) { 313 struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; 314 u32 tb_id = RT_TABLE_LOCAL; 315 int chk_addr_ret; 316 317 if (addr_len < sizeof(*addr)) 318 return -EINVAL; 319 320 if (addr->sin_family != AF_INET && 321 !(addr->sin_family == AF_UNSPEC && 322 addr->sin_addr.s_addr == htonl(INADDR_ANY))) 323 return -EAFNOSUPPORT; 324 325 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", 326 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); 327 328 if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) 329 return 0; 330 331 tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id; 332 chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id); 333 334 if (chk_addr_ret == RTN_MULTICAST || 335 chk_addr_ret == RTN_BROADCAST || 336 (chk_addr_ret != RTN_LOCAL && 337 !inet_can_nonlocal_bind(net, isk))) 338 return -EADDRNOTAVAIL; 339 340 #if IS_ENABLED(CONFIG_IPV6) 341 } else if (sk->sk_family == AF_INET6) { 342 struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr; 343 int addr_type, scoped, has_addr; 344 struct net_device *dev = NULL; 345 346 if (addr_len < sizeof(*addr)) 347 return -EINVAL; 348 349 if (addr->sin6_family != AF_INET6) 350 return -EAFNOSUPPORT; 351 352 pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", 353 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); 354 355 addr_type = ipv6_addr_type(&addr->sin6_addr); 356 scoped = __ipv6_addr_needs_scope_id(addr_type); 357 if ((addr_type != IPV6_ADDR_ANY && 358 !(addr_type & IPV6_ADDR_UNICAST)) || 359 (scoped && !addr->sin6_scope_id)) 360 return -EINVAL; 361 362 rcu_read_lock(); 363 if (addr->sin6_scope_id) { 364 dev = dev_get_by_index_rcu(net, addr->sin6_scope_id); 365 if (!dev) { 366 rcu_read_unlock(); 367 return -ENODEV; 368 } 369 } 370 371 if (!dev && sk->sk_bound_dev_if) { 372 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); 373 if (!dev) { 374 rcu_read_unlock(); 375 return -ENODEV; 376 } 377 } 378 has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev, 379 scoped); 380 rcu_read_unlock(); 381 382 if (!(ipv6_can_nonlocal_bind(net, isk) || has_addr || 383 addr_type == IPV6_ADDR_ANY)) 384 return -EADDRNOTAVAIL; 385 386 if (scoped) 387 sk->sk_bound_dev_if = addr->sin6_scope_id; 388 #endif 389 } else { 390 return -EAFNOSUPPORT; 391 } 392 return 0; 393 } 394 395 static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr) 396 { 397 if (saddr->sa_family == AF_INET) { 398 struct inet_sock *isk = inet_sk(sk); 399 struct sockaddr_in *addr = (struct sockaddr_in *) saddr; 400 isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr; 401 #if IS_ENABLED(CONFIG_IPV6) 402 } else if (saddr->sa_family == AF_INET6) { 403 struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr; 404 struct ipv6_pinfo *np = inet6_sk(sk); 405 sk->sk_v6_rcv_saddr = np->saddr = addr->sin6_addr; 406 #endif 407 } 408 } 409 410 /* 411 * We need our own bind because there are no privileged id's == local ports. 412 * Moreover, we don't allow binding to multi- and broadcast addresses. 413 */ 414 415 int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) 416 { 417 struct inet_sock *isk = inet_sk(sk); 418 unsigned short snum; 419 int err; 420 int dif = sk->sk_bound_dev_if; 421 422 err = ping_check_bind_addr(sk, isk, uaddr, addr_len); 423 if (err) 424 return err; 425 426 lock_sock(sk); 427 428 err = -EINVAL; 429 if (isk->inet_num != 0) 430 goto out; 431 432 err = -EADDRINUSE; 433 snum = ntohs(((struct sockaddr_in *)uaddr)->sin_port); 434 if (ping_get_port(sk, snum) != 0) { 435 /* Restore possibly modified sk->sk_bound_dev_if by ping_check_bind_addr(). */ 436 sk->sk_bound_dev_if = dif; 437 goto out; 438 } 439 ping_set_saddr(sk, uaddr); 440 441 pr_debug("after bind(): num = %hu, dif = %d\n", 442 isk->inet_num, 443 sk->sk_bound_dev_if); 444 445 err = 0; 446 if (sk->sk_family == AF_INET && isk->inet_rcv_saddr) 447 sk->sk_userlocks |= SOCK_BINDADDR_LOCK; 448 #if IS_ENABLED(CONFIG_IPV6) 449 if (sk->sk_family == AF_INET6 && !ipv6_addr_any(&sk->sk_v6_rcv_saddr)) 450 sk->sk_userlocks |= SOCK_BINDADDR_LOCK; 451 #endif 452 453 if (snum) 454 sk->sk_userlocks |= SOCK_BINDPORT_LOCK; 455 isk->inet_sport = htons(isk->inet_num); 456 isk->inet_daddr = 0; 457 isk->inet_dport = 0; 458 459 #if IS_ENABLED(CONFIG_IPV6) 460 if (sk->sk_family == AF_INET6) 461 memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); 462 #endif 463 464 sk_dst_reset(sk); 465 out: 466 release_sock(sk); 467 pr_debug("ping_v4_bind -> %d\n", err); 468 return err; 469 } 470 EXPORT_SYMBOL_GPL(ping_bind); 471 472 /* 473 * Is this a supported type of ICMP message? 474 */ 475 476 static inline int ping_supported(int family, int type, int code) 477 { 478 return (family == AF_INET && type == ICMP_ECHO && code == 0) || 479 (family == AF_INET && type == ICMP_EXT_ECHO && code == 0) || 480 (family == AF_INET6 && type == ICMPV6_ECHO_REQUEST && code == 0) || 481 (family == AF_INET6 && type == ICMPV6_EXT_ECHO_REQUEST && code == 0); 482 } 483 484 /* 485 * This routine is called by the ICMP module when it gets some 486 * sort of error condition. 487 */ 488 489 void ping_err(struct sk_buff *skb, int offset, u32 info) 490 { 491 int family; 492 struct icmphdr *icmph; 493 struct inet_sock *inet_sock; 494 int type; 495 int code; 496 struct net *net = dev_net(skb->dev); 497 struct sock *sk; 498 int harderr; 499 int err; 500 501 if (skb->protocol == htons(ETH_P_IP)) { 502 family = AF_INET; 503 type = icmp_hdr(skb)->type; 504 code = icmp_hdr(skb)->code; 505 icmph = (struct icmphdr *)(skb->data + offset); 506 } else if (skb->protocol == htons(ETH_P_IPV6)) { 507 family = AF_INET6; 508 type = icmp6_hdr(skb)->icmp6_type; 509 code = icmp6_hdr(skb)->icmp6_code; 510 icmph = (struct icmphdr *) (skb->data + offset); 511 } else { 512 BUG(); 513 } 514 515 /* We assume the packet has already been checked by icmp_unreach */ 516 517 if (!ping_supported(family, icmph->type, icmph->code)) 518 return; 519 520 pr_debug("ping_err(proto=0x%x,type=%d,code=%d,id=%04x,seq=%04x)\n", 521 skb->protocol, type, code, ntohs(icmph->un.echo.id), 522 ntohs(icmph->un.echo.sequence)); 523 524 sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); 525 if (!sk) { 526 pr_debug("no socket, dropping\n"); 527 return; /* No socket for error */ 528 } 529 pr_debug("err on socket %p\n", sk); 530 531 err = 0; 532 harderr = 0; 533 inet_sock = inet_sk(sk); 534 535 if (skb->protocol == htons(ETH_P_IP)) { 536 switch (type) { 537 default: 538 case ICMP_TIME_EXCEEDED: 539 err = EHOSTUNREACH; 540 break; 541 case ICMP_SOURCE_QUENCH: 542 /* This is not a real error but ping wants to see it. 543 * Report it with some fake errno. 544 */ 545 err = EREMOTEIO; 546 break; 547 case ICMP_PARAMETERPROB: 548 err = EPROTO; 549 harderr = 1; 550 break; 551 case ICMP_DEST_UNREACH: 552 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ 553 ipv4_sk_update_pmtu(skb, sk, info); 554 if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) { 555 err = EMSGSIZE; 556 harderr = 1; 557 break; 558 } 559 goto out; 560 } 561 err = EHOSTUNREACH; 562 if (code <= NR_ICMP_UNREACH) { 563 harderr = icmp_err_convert[code].fatal; 564 err = icmp_err_convert[code].errno; 565 } 566 break; 567 case ICMP_REDIRECT: 568 /* See ICMP_SOURCE_QUENCH */ 569 ipv4_sk_redirect(skb, sk); 570 err = EREMOTEIO; 571 break; 572 } 573 #if IS_ENABLED(CONFIG_IPV6) 574 } else if (skb->protocol == htons(ETH_P_IPV6)) { 575 harderr = pingv6_ops.icmpv6_err_convert(type, code, &err); 576 #endif 577 } 578 579 /* 580 * RFC1122: OK. Passes ICMP errors back to application, as per 581 * 4.1.3.3. 582 */ 583 if ((family == AF_INET && !inet_test_bit(RECVERR, sk)) || 584 (family == AF_INET6 && !inet6_sk(sk)->recverr)) { 585 if (!harderr || sk->sk_state != TCP_ESTABLISHED) 586 goto out; 587 } else { 588 if (family == AF_INET) { 589 ip_icmp_error(sk, skb, err, 0 /* no remote port */, 590 info, (u8 *)icmph); 591 #if IS_ENABLED(CONFIG_IPV6) 592 } else if (family == AF_INET6) { 593 pingv6_ops.ipv6_icmp_error(sk, skb, err, 0, 594 info, (u8 *)icmph); 595 #endif 596 } 597 } 598 sk->sk_err = err; 599 sk_error_report(sk); 600 out: 601 return; 602 } 603 EXPORT_SYMBOL_GPL(ping_err); 604 605 /* 606 * Copy and checksum an ICMP Echo packet from user space into a buffer 607 * starting from the payload. 608 */ 609 610 int ping_getfrag(void *from, char *to, 611 int offset, int fraglen, int odd, struct sk_buff *skb) 612 { 613 struct pingfakehdr *pfh = from; 614 615 if (!csum_and_copy_from_iter_full(to, fraglen, &pfh->wcheck, 616 &pfh->msg->msg_iter)) 617 return -EFAULT; 618 619 #if IS_ENABLED(CONFIG_IPV6) 620 /* For IPv6, checksum each skb as we go along, as expected by 621 * icmpv6_push_pending_frames. For IPv4, accumulate the checksum in 622 * wcheck, it will be finalized in ping_v4_push_pending_frames. 623 */ 624 if (pfh->family == AF_INET6) { 625 skb->csum = csum_block_add(skb->csum, pfh->wcheck, odd); 626 skb->ip_summed = CHECKSUM_NONE; 627 pfh->wcheck = 0; 628 } 629 #endif 630 631 return 0; 632 } 633 EXPORT_SYMBOL_GPL(ping_getfrag); 634 635 static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, 636 struct flowi4 *fl4) 637 { 638 struct sk_buff *skb = skb_peek(&sk->sk_write_queue); 639 640 if (!skb) 641 return 0; 642 pfh->wcheck = csum_partial((char *)&pfh->icmph, 643 sizeof(struct icmphdr), pfh->wcheck); 644 pfh->icmph.checksum = csum_fold(pfh->wcheck); 645 memcpy(icmp_hdr(skb), &pfh->icmph, sizeof(struct icmphdr)); 646 skb->ip_summed = CHECKSUM_NONE; 647 return ip_push_pending_frames(sk, fl4); 648 } 649 650 int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, 651 void *user_icmph, size_t icmph_len) 652 { 653 u8 type, code; 654 655 if (len > 0xFFFF) 656 return -EMSGSIZE; 657 658 /* Must have at least a full ICMP header. */ 659 if (len < icmph_len) 660 return -EINVAL; 661 662 /* 663 * Check the flags. 664 */ 665 666 /* Mirror BSD error message compatibility */ 667 if (msg->msg_flags & MSG_OOB) 668 return -EOPNOTSUPP; 669 670 /* 671 * Fetch the ICMP header provided by the userland. 672 * iovec is modified! The ICMP header is consumed. 673 */ 674 if (memcpy_from_msg(user_icmph, msg, icmph_len)) 675 return -EFAULT; 676 677 if (family == AF_INET) { 678 type = ((struct icmphdr *) user_icmph)->type; 679 code = ((struct icmphdr *) user_icmph)->code; 680 #if IS_ENABLED(CONFIG_IPV6) 681 } else if (family == AF_INET6) { 682 type = ((struct icmp6hdr *) user_icmph)->icmp6_type; 683 code = ((struct icmp6hdr *) user_icmph)->icmp6_code; 684 #endif 685 } else { 686 BUG(); 687 } 688 689 if (!ping_supported(family, type, code)) 690 return -EINVAL; 691 692 return 0; 693 } 694 EXPORT_SYMBOL_GPL(ping_common_sendmsg); 695 696 static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 697 { 698 struct net *net = sock_net(sk); 699 struct flowi4 fl4; 700 struct inet_sock *inet = inet_sk(sk); 701 struct ipcm_cookie ipc; 702 struct icmphdr user_icmph; 703 struct pingfakehdr pfh; 704 struct rtable *rt = NULL; 705 struct ip_options_data opt_copy; 706 int free = 0; 707 __be32 saddr, daddr, faddr; 708 u8 tos, scope; 709 int err; 710 711 pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); 712 713 err = ping_common_sendmsg(AF_INET, msg, len, &user_icmph, 714 sizeof(user_icmph)); 715 if (err) 716 return err; 717 718 /* 719 * Get and verify the address. 720 */ 721 722 if (msg->msg_name) { 723 DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); 724 if (msg->msg_namelen < sizeof(*usin)) 725 return -EINVAL; 726 if (usin->sin_family != AF_INET) 727 return -EAFNOSUPPORT; 728 daddr = usin->sin_addr.s_addr; 729 /* no remote port */ 730 } else { 731 if (sk->sk_state != TCP_ESTABLISHED) 732 return -EDESTADDRREQ; 733 daddr = inet->inet_daddr; 734 /* no remote port */ 735 } 736 737 ipcm_init_sk(&ipc, inet); 738 739 if (msg->msg_controllen) { 740 err = ip_cmsg_send(sk, msg, &ipc, false); 741 if (unlikely(err)) { 742 kfree(ipc.opt); 743 return err; 744 } 745 if (ipc.opt) 746 free = 1; 747 } 748 if (!ipc.opt) { 749 struct ip_options_rcu *inet_opt; 750 751 rcu_read_lock(); 752 inet_opt = rcu_dereference(inet->inet_opt); 753 if (inet_opt) { 754 memcpy(&opt_copy, inet_opt, 755 sizeof(*inet_opt) + inet_opt->opt.optlen); 756 ipc.opt = &opt_copy.opt; 757 } 758 rcu_read_unlock(); 759 } 760 761 saddr = ipc.addr; 762 ipc.addr = faddr = daddr; 763 764 if (ipc.opt && ipc.opt->opt.srr) { 765 if (!daddr) { 766 err = -EINVAL; 767 goto out_free; 768 } 769 faddr = ipc.opt->opt.faddr; 770 } 771 tos = get_rttos(&ipc, inet); 772 scope = ip_sendmsg_scope(inet, &ipc, msg); 773 774 if (ipv4_is_multicast(daddr)) { 775 if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif)) 776 ipc.oif = inet->mc_index; 777 if (!saddr) 778 saddr = inet->mc_addr; 779 } else if (!ipc.oif) 780 ipc.oif = inet->uc_index; 781 782 flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos, scope, 783 sk->sk_protocol, inet_sk_flowi_flags(sk), faddr, 784 saddr, 0, 0, sk->sk_uid); 785 786 fl4.fl4_icmp_type = user_icmph.type; 787 fl4.fl4_icmp_code = user_icmph.code; 788 789 security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4)); 790 rt = ip_route_output_flow(net, &fl4, sk); 791 if (IS_ERR(rt)) { 792 err = PTR_ERR(rt); 793 rt = NULL; 794 if (err == -ENETUNREACH) 795 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 796 goto out; 797 } 798 799 err = -EACCES; 800 if ((rt->rt_flags & RTCF_BROADCAST) && 801 !sock_flag(sk, SOCK_BROADCAST)) 802 goto out; 803 804 if (msg->msg_flags & MSG_CONFIRM) 805 goto do_confirm; 806 back_from_confirm: 807 808 if (!ipc.addr) 809 ipc.addr = fl4.daddr; 810 811 lock_sock(sk); 812 813 pfh.icmph.type = user_icmph.type; /* already checked */ 814 pfh.icmph.code = user_icmph.code; /* ditto */ 815 pfh.icmph.checksum = 0; 816 pfh.icmph.un.echo.id = inet->inet_sport; 817 pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence; 818 pfh.msg = msg; 819 pfh.wcheck = 0; 820 pfh.family = AF_INET; 821 822 err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len, 823 sizeof(struct icmphdr), &ipc, &rt, 824 msg->msg_flags); 825 if (err) 826 ip_flush_pending_frames(sk); 827 else 828 err = ping_v4_push_pending_frames(sk, &pfh, &fl4); 829 release_sock(sk); 830 831 out: 832 ip_rt_put(rt); 833 out_free: 834 if (free) 835 kfree(ipc.opt); 836 if (!err) { 837 icmp_out_count(sock_net(sk), user_icmph.type); 838 return len; 839 } 840 return err; 841 842 do_confirm: 843 if (msg->msg_flags & MSG_PROBE) 844 dst_confirm_neigh(&rt->dst, &fl4.daddr); 845 if (!(msg->msg_flags & MSG_PROBE) || len) 846 goto back_from_confirm; 847 err = 0; 848 goto out; 849 } 850 851 int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, 852 int *addr_len) 853 { 854 struct inet_sock *isk = inet_sk(sk); 855 int family = sk->sk_family; 856 struct sk_buff *skb; 857 int copied, err; 858 859 pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num); 860 861 err = -EOPNOTSUPP; 862 if (flags & MSG_OOB) 863 goto out; 864 865 if (flags & MSG_ERRQUEUE) 866 return inet_recv_error(sk, msg, len, addr_len); 867 868 skb = skb_recv_datagram(sk, flags, &err); 869 if (!skb) 870 goto out; 871 872 copied = skb->len; 873 if (copied > len) { 874 msg->msg_flags |= MSG_TRUNC; 875 copied = len; 876 } 877 878 /* Don't bother checking the checksum */ 879 err = skb_copy_datagram_msg(skb, 0, msg, copied); 880 if (err) 881 goto done; 882 883 sock_recv_timestamp(msg, sk, skb); 884 885 /* Copy the address and add cmsg data. */ 886 if (family == AF_INET) { 887 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); 888 889 if (sin) { 890 sin->sin_family = AF_INET; 891 sin->sin_port = 0 /* skb->h.uh->source */; 892 sin->sin_addr.s_addr = ip_hdr(skb)->saddr; 893 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 894 *addr_len = sizeof(*sin); 895 } 896 897 if (inet_cmsg_flags(isk)) 898 ip_cmsg_recv(msg, skb); 899 900 #if IS_ENABLED(CONFIG_IPV6) 901 } else if (family == AF_INET6) { 902 struct ipv6_pinfo *np = inet6_sk(sk); 903 struct ipv6hdr *ip6 = ipv6_hdr(skb); 904 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); 905 906 if (sin6) { 907 sin6->sin6_family = AF_INET6; 908 sin6->sin6_port = 0; 909 sin6->sin6_addr = ip6->saddr; 910 sin6->sin6_flowinfo = 0; 911 if (np->sndflow) 912 sin6->sin6_flowinfo = ip6_flowinfo(ip6); 913 sin6->sin6_scope_id = 914 ipv6_iface_scope_id(&sin6->sin6_addr, 915 inet6_iif(skb)); 916 *addr_len = sizeof(*sin6); 917 } 918 919 if (inet6_sk(sk)->rxopt.all) 920 pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb); 921 if (skb->protocol == htons(ETH_P_IPV6) && 922 inet6_sk(sk)->rxopt.all) 923 pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb); 924 else if (skb->protocol == htons(ETH_P_IP) && 925 inet_cmsg_flags(isk)) 926 ip_cmsg_recv(msg, skb); 927 #endif 928 } else { 929 BUG(); 930 } 931 932 err = copied; 933 934 done: 935 skb_free_datagram(sk, skb); 936 out: 937 pr_debug("ping_recvmsg -> %d\n", err); 938 return err; 939 } 940 EXPORT_SYMBOL_GPL(ping_recvmsg); 941 942 static enum skb_drop_reason __ping_queue_rcv_skb(struct sock *sk, 943 struct sk_buff *skb) 944 { 945 enum skb_drop_reason reason; 946 947 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", 948 inet_sk(sk), inet_sk(sk)->inet_num, skb); 949 if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) { 950 kfree_skb_reason(skb, reason); 951 pr_debug("ping_queue_rcv_skb -> failed\n"); 952 return reason; 953 } 954 return SKB_NOT_DROPPED_YET; 955 } 956 957 int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 958 { 959 return __ping_queue_rcv_skb(sk, skb) ? -1 : 0; 960 } 961 EXPORT_SYMBOL_GPL(ping_queue_rcv_skb); 962 963 964 /* 965 * All we need to do is get the socket. 966 */ 967 968 enum skb_drop_reason ping_rcv(struct sk_buff *skb) 969 { 970 enum skb_drop_reason reason = SKB_DROP_REASON_NO_SOCKET; 971 struct sock *sk; 972 struct net *net = dev_net(skb->dev); 973 struct icmphdr *icmph = icmp_hdr(skb); 974 975 /* We assume the packet has already been checked by icmp_rcv */ 976 977 pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n", 978 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); 979 980 /* Push ICMP header back */ 981 skb_push(skb, skb->data - (u8 *)icmph); 982 983 sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); 984 if (sk) { 985 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 986 987 pr_debug("rcv on socket %p\n", sk); 988 if (skb2) 989 reason = __ping_queue_rcv_skb(sk, skb2); 990 else 991 reason = SKB_DROP_REASON_NOMEM; 992 } 993 994 if (reason) 995 pr_debug("no socket, dropping\n"); 996 997 return reason; 998 } 999 EXPORT_SYMBOL_GPL(ping_rcv); 1000 1001 struct proto ping_prot = { 1002 .name = "PING", 1003 .owner = THIS_MODULE, 1004 .init = ping_init_sock, 1005 .close = ping_close, 1006 .pre_connect = ping_pre_connect, 1007 .connect = ip4_datagram_connect, 1008 .disconnect = __udp_disconnect, 1009 .setsockopt = ip_setsockopt, 1010 .getsockopt = ip_getsockopt, 1011 .sendmsg = ping_v4_sendmsg, 1012 .recvmsg = ping_recvmsg, 1013 .bind = ping_bind, 1014 .backlog_rcv = ping_queue_rcv_skb, 1015 .release_cb = ip4_datagram_release_cb, 1016 .hash = ping_hash, 1017 .unhash = ping_unhash, 1018 .get_port = ping_get_port, 1019 .put_port = ping_unhash, 1020 .obj_size = sizeof(struct inet_sock), 1021 }; 1022 EXPORT_SYMBOL(ping_prot); 1023 1024 #ifdef CONFIG_PROC_FS 1025 1026 static struct sock *ping_get_first(struct seq_file *seq, int start) 1027 { 1028 struct sock *sk; 1029 struct ping_iter_state *state = seq->private; 1030 struct net *net = seq_file_net(seq); 1031 1032 for (state->bucket = start; state->bucket < PING_HTABLE_SIZE; 1033 ++state->bucket) { 1034 struct hlist_head *hslot; 1035 1036 hslot = &ping_table.hash[state->bucket]; 1037 1038 if (hlist_empty(hslot)) 1039 continue; 1040 1041 sk_for_each(sk, hslot) { 1042 if (net_eq(sock_net(sk), net) && 1043 sk->sk_family == state->family) 1044 goto found; 1045 } 1046 } 1047 sk = NULL; 1048 found: 1049 return sk; 1050 } 1051 1052 static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk) 1053 { 1054 struct ping_iter_state *state = seq->private; 1055 struct net *net = seq_file_net(seq); 1056 1057 do { 1058 sk = sk_next(sk); 1059 } while (sk && (!net_eq(sock_net(sk), net))); 1060 1061 if (!sk) 1062 return ping_get_first(seq, state->bucket + 1); 1063 return sk; 1064 } 1065 1066 static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos) 1067 { 1068 struct sock *sk = ping_get_first(seq, 0); 1069 1070 if (sk) 1071 while (pos && (sk = ping_get_next(seq, sk)) != NULL) 1072 --pos; 1073 return pos ? NULL : sk; 1074 } 1075 1076 void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family) 1077 __acquires(ping_table.lock) 1078 { 1079 struct ping_iter_state *state = seq->private; 1080 state->bucket = 0; 1081 state->family = family; 1082 1083 spin_lock(&ping_table.lock); 1084 1085 return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN; 1086 } 1087 EXPORT_SYMBOL_GPL(ping_seq_start); 1088 1089 static void *ping_v4_seq_start(struct seq_file *seq, loff_t *pos) 1090 { 1091 return ping_seq_start(seq, pos, AF_INET); 1092 } 1093 1094 void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1095 { 1096 struct sock *sk; 1097 1098 if (v == SEQ_START_TOKEN) 1099 sk = ping_get_idx(seq, 0); 1100 else 1101 sk = ping_get_next(seq, v); 1102 1103 ++*pos; 1104 return sk; 1105 } 1106 EXPORT_SYMBOL_GPL(ping_seq_next); 1107 1108 void ping_seq_stop(struct seq_file *seq, void *v) 1109 __releases(ping_table.lock) 1110 { 1111 spin_unlock(&ping_table.lock); 1112 } 1113 EXPORT_SYMBOL_GPL(ping_seq_stop); 1114 1115 static void ping_v4_format_sock(struct sock *sp, struct seq_file *f, 1116 int bucket) 1117 { 1118 struct inet_sock *inet = inet_sk(sp); 1119 __be32 dest = inet->inet_daddr; 1120 __be32 src = inet->inet_rcv_saddr; 1121 __u16 destp = ntohs(inet->inet_dport); 1122 __u16 srcp = ntohs(inet->inet_sport); 1123 1124 seq_printf(f, "%5d: %08X:%04X %08X:%04X" 1125 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u", 1126 bucket, src, srcp, dest, destp, sp->sk_state, 1127 sk_wmem_alloc_get(sp), 1128 sk_rmem_alloc_get(sp), 1129 0, 0L, 0, 1130 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 1131 0, sock_i_ino(sp), 1132 refcount_read(&sp->sk_refcnt), sp, 1133 atomic_read(&sp->sk_drops)); 1134 } 1135 1136 static int ping_v4_seq_show(struct seq_file *seq, void *v) 1137 { 1138 seq_setwidth(seq, 127); 1139 if (v == SEQ_START_TOKEN) 1140 seq_puts(seq, " sl local_address rem_address st tx_queue " 1141 "rx_queue tr tm->when retrnsmt uid timeout " 1142 "inode ref pointer drops"); 1143 else { 1144 struct ping_iter_state *state = seq->private; 1145 1146 ping_v4_format_sock(v, seq, state->bucket); 1147 } 1148 seq_pad(seq, '\n'); 1149 return 0; 1150 } 1151 1152 static const struct seq_operations ping_v4_seq_ops = { 1153 .start = ping_v4_seq_start, 1154 .show = ping_v4_seq_show, 1155 .next = ping_seq_next, 1156 .stop = ping_seq_stop, 1157 }; 1158 1159 static int __net_init ping_v4_proc_init_net(struct net *net) 1160 { 1161 if (!proc_create_net("icmp", 0444, net->proc_net, &ping_v4_seq_ops, 1162 sizeof(struct ping_iter_state))) 1163 return -ENOMEM; 1164 return 0; 1165 } 1166 1167 static void __net_exit ping_v4_proc_exit_net(struct net *net) 1168 { 1169 remove_proc_entry("icmp", net->proc_net); 1170 } 1171 1172 static struct pernet_operations ping_v4_net_ops = { 1173 .init = ping_v4_proc_init_net, 1174 .exit = ping_v4_proc_exit_net, 1175 }; 1176 1177 int __init ping_proc_init(void) 1178 { 1179 return register_pernet_subsys(&ping_v4_net_ops); 1180 } 1181 1182 void ping_proc_exit(void) 1183 { 1184 unregister_pernet_subsys(&ping_v4_net_ops); 1185 } 1186 1187 #endif 1188 1189 void __init ping_init(void) 1190 { 1191 int i; 1192 1193 for (i = 0; i < PING_HTABLE_SIZE; i++) 1194 INIT_HLIST_HEAD(&ping_table.hash[i]); 1195 spin_lock_init(&ping_table.lock); 1196 } 1197