/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct inet_diag_handler **inet_diag_table;

struct inet_diag_entry {
	__be32 *saddr;
	__be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr saddr_storage;	/* for IPv4-mapped-IPv6 addresses */
	struct in6_addr daddr_storage;	/* for IPv4-mapped-IPv6 addresses */
#endif
};

static DEFINE_MUTEX(inet_diag_table_mutex);

static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	if (!inet_diag_table[proto])
		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			       NETLINK_SOCK_DIAG, AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}

static inline void inet_diag_unlock_handler(
	const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}

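/*
 * Fill one inet_diag_msg reply for a full socket.  The timer encoding is
 * what userspace tools such as ss(8) expect:
 *	idiag_timer == 1	retransmit timer pending
 *	idiag_timer == 2	keepalive timer (sk_timer) pending
 *	idiag_timer == 3	TIME_WAIT timer (see inet_twsk_diag_fill())
 *	idiag_timer == 4	zero window probe timer pending
 * idiag_expires is reported in milliseconds.
 */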
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, struct inet_diag_req_v2 *req,
		      struct user_namespace *user_ns,
		      u32 portid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	void *info = NULL;
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(handler == NULL);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = inet->inet_dport;

	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

	r->id.idiag_src[0] = inet->inet_rcv_saddr;
	r->id.idiag_dst[0] = inet->inet_daddr;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {

		*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;

		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS,
				       inet6_sk(sk)->tclass) < 0)
				goto errout;
	}
#endif

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	if (icsk == NULL) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   sizeof(struct tcp_info));
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
		if (nla_put_string(skb, INET_DIAG_CONG,
				   icsk->icsk_ca_ops->name) < 0)
			goto errout;

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk),
			skb, req, user_ns, portid, seq, nlmsg_flags, unlh);
}

static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb, struct inet_diag_req_v2 *req,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	s32 tmo;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_ttd - inet_tw_time_stamp();
	if (tmo < 0)
		tmo = 0;

	r->idiag_family = tw->tw_family;
	r->idiag_retrans = 0;

	r->id.idiag_if = tw->tw_bound_dev_if;
	sock_diag_save_cookie(tw, r->id.idiag_cookie);

	r->id.idiag_sport = tw->tw_sport;
	r->id.idiag_dport = tw->tw_dport;

	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

	r->id.idiag_src[0] = tw->tw_rcv_saddr;
	r->id.idiag_dst[0] = tw->tw_daddr;

	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (tw->tw_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = tw->tw_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = tw->tw_v6_daddr;
	}
#endif

	nlmsg_end(skb, nlh);
	return 0;
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct inet_diag_req_v2 *r,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill(inet_twsk(sk), skb, r, portid, seq,
					   nlmsg_flags, unlh);

	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
				  nlmsg_flags, unlh);
}

int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
			    const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
{
	int err;
	struct sock *sk;
	struct sk_buff *rep;
	struct net *net = sock_net(in_skb->sk);

	err = -EINVAL;
	if (req->sdiag_family == AF_INET) {
		sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		sk = inet6_lookup(net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
	}
#endif
	else {
		goto out_nosk;
	}

	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	rep = nlmsg_new(sizeof(struct inet_diag_msg) +
			sizeof(struct inet_diag_meminfo) +
			sizeof(struct tcp_info) + 64, GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk)
		sock_gen_put(sk);

out_nosk:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler))
		err = PTR_ERR(handler);
	else
		err = handler->dump_one(in_skb, nlh, req);
	inet_diag_unlock_handler(handler);

	return err;
}

static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}

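/*
 * The INET_DIAG_REQ_BYTECODE attribute carries a tiny filter program: a
 * sequence of inet_diag_bc_op structs, each 4 bytes (code, yes, no).  Every
 * op is evaluated against the candidate socket; on a match the interpreter
 * advances by op->yes bytes, otherwise by op->no bytes.  The program accepts
 * the socket when the walk lands exactly on the end of the attribute
 * (len == 0) and rejects it when a jump steps past the end.
 *
 * For illustration only (the array below is a userspace-side sketch, not
 * part of this file): a filter matching "source port >= 1024" could be
 * encoded as two 4-byte ops, with the port carried in the second op's 'no'
 * field, which is where the S_GE/S_LE/D_GE/D_LE cases read it (op[1].no):
 *
 *	struct inet_diag_bc_op filter[] = {
 *		{ INET_DIAG_BC_S_GE, 8, 12 },	accept: +8 (end), reject: +12 (past end)
 *		{ 0, 0, 1024 },			operand op: 'no' carries the port
 *	};
 */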
static int inet_diag_bc_run(const struct nlattr *_bc,
			    const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			struct inet_diag_hostcond *cond;
			__be32 *addr;

			cond = (struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					   entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (cond->family != AF_UNSPEC &&
			    cond->family != entry->family) {
				if (entry->family == AF_INET6 &&
				    cond->family == AF_INET) {
					if (addr[0] == 0 && addr[1] == 0 &&
					    addr[2] == htonl(0xffff) &&
					    bitstring_match(addr + 3,
							    cond->addr,
							    cond->prefix_len))
						break;
				}
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;
			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}

int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_diag_entry entry;
	struct inet_sock *inet = inet_sk(sk);

	if (bc == NULL)
		return 1;

	entry.family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
	if (entry.family == AF_INET6) {

		entry.saddr = sk->sk_v6_rcv_saddr.s6_addr32;
		entry.daddr = sk->sk_v6_daddr.s6_addr32;
	} else
#endif
	{
		entry.saddr = &inet->inet_rcv_saddr;
		entry.daddr = &inet->inet_daddr;
	}
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	entry.userlocks = sk->sk_userlocks;

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);

static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc += op->yes;
	}
	return 0;
}

/* Validate an inet_diag_hostcond. */
static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
			   int *min_len)
{
	int addr_len;
	struct inet_diag_hostcond *cond;

	/* Check hostcond space. */
	*min_len += sizeof(struct inet_diag_hostcond);
	if (len < *min_len)
		return false;
	cond = (struct inet_diag_hostcond *)(op + 1);

	/* Check address family and address length. */
	switch (cond->family) {
	case AF_UNSPEC:
		addr_len = 0;
		break;
	case AF_INET:
		addr_len = sizeof(struct in_addr);
		break;
	case AF_INET6:
		addr_len = sizeof(struct in6_addr);
		break;
	default:
		return false;
	}
	*min_len += addr_len;
	if (len < *min_len)
		return false;

	/* Check prefix length (in bits) vs address length (in bytes). */
	if (cond->prefix_len > 8 * addr_len)
		return false;

	return true;
}

/* Validate a port comparison operator. */
static inline bool valid_port_comparison(const struct inet_diag_bc_op *op,
					 int len, int *min_len)
{
	/* Port comparisons put the port in a follow-on inet_diag_bc_op. */
	*min_len += sizeof(struct inet_diag_bc_op);
	if (len < *min_len)
		return false;
	return true;
}

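/*
 * Audit a bytecode program once, when the dump request is parsed (see
 * inet_diag_handler_dump() and inet_diag_rcv_msg_compat()), so that
 * inet_diag_bc_run() can follow the yes/no offsets without bounds checking
 * every step.  Each op's offsets must be at least the size of the op
 * including its operand, at most 4 bytes past the end of the remaining
 * program, and a multiple of 4; in addition, any 'no' jump that stays
 * inside the program must land on an op boundary reachable by following
 * 'yes' offsets from the start (valid_cc()).
 */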
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const void *bc = bytecode;
	int len = bytecode_len;

	while (len > 0) {
		const struct inet_diag_bc_op *op = bc;
		int min_len = sizeof(struct inet_diag_bc_op);

//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
		switch (op->code) {
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
			if (!valid_hostcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (!valid_port_comparison(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_JMP:
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}

		if (op->code != INET_DIAG_BC_NOP) {
			if (op->no < min_len || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
		}

		if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}

static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct inet_diag_req_v2 *r,
			      const struct nlattr *bc)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_csk_diag_fill(sk, skb, r,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

static int inet_twsk_diag_dump(struct sock *sk,
			       struct sk_buff *skb,
			       struct netlink_callback *cb,
			       struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);

	if (bc != NULL) {
		struct inet_diag_entry entry;

		entry.family = tw->tw_family;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			entry.saddr = tw->tw_v6_rcv_saddr.s6_addr32;
			entry.daddr = tw->tw_v6_daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &tw->tw_rcv_saddr;
			entry.daddr = &tw->tw_daddr;
		}
		entry.sport = tw->tw_num;
		entry.dport = ntohs(tw->tw_dport);
		entry.userlocks = 0;

		if (!inet_diag_bc_run(bc, &entry))
			return 0;
	}

	return inet_twsk_diag_fill(tw, skb, r,
				   NETLINK_CB(cb->skb).portid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

/* Get the IPv4, IPv6, or IPv4-mapped-IPv6 local and remote addresses
 * from a request_sock. For IPv4-mapped-IPv6 we must map IPv4 to IPv6.
 */
static inline void inet_diag_req_addrs(const struct sock *sk,
				       const struct request_sock *req,
				       struct inet_diag_entry *entry)
{
	struct inet_request_sock *ireq = inet_rsk(req);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		if (req->rsk_ops->family == AF_INET6) {
			entry->saddr = ireq->ir_v6_loc_addr.s6_addr32;
			entry->daddr = ireq->ir_v6_rmt_addr.s6_addr32;
		} else if (req->rsk_ops->family == AF_INET) {
			ipv6_addr_set_v4mapped(ireq->ir_loc_addr,
					       &entry->saddr_storage);
			ipv6_addr_set_v4mapped(ireq->ir_rmt_addr,
					       &entry->daddr_storage);
			entry->saddr = entry->saddr_storage.s6_addr32;
			entry->daddr = entry->daddr_storage.s6_addr32;
		}
	} else
#endif
	{
		entry->saddr = &ireq->ir_loc_addr;
		entry->daddr = &ireq->ir_rmt_addr;
	}
}

static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
			      struct request_sock *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq,
			      const struct nlmsghdr *unlh)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->idiag_family = sk->sk_family;
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = req->num_retrans;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(req, r->id.idiag_cookie);

	tmo = req->expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = ireq->ir_rmt_port;

	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

	r->id.idiag_src[0] = ireq->ir_loc_addr;
	r->id.idiag_dst[0] = ireq->ir_rmt_addr;

	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		struct inet_diag_entry entry;
		inet_diag_req_addrs(sk, req, &entry);
		memcpy(r->id.idiag_src, entry.saddr, sizeof(struct in6_addr));
		memcpy(r->id.idiag_dst, entry.daddr, sizeof(struct in6_addr));
	}
#endif

	nlmsg_end(skb, nlh);
	return 0;
}

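/*
 * Walk the SYN_RECV request socks hanging off a listening socket and emit
 * one record per embryonic connection.  The dump position is saved in the
 * netlink callback so an interrupted dump can resume: cb->args[3] holds the
 * syn_table bucket (offset by one) and cb->args[4] the request index within
 * that bucket.
 */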
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb,
			       struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	struct inet_diag_entry entry;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt;
	struct inet_sock *inet = inet_sk(sk);
	int j, s_j;
	int reqnum, s_reqnum;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	if (s_j > 0)
		s_j--;

	entry.family = sk->sk_family;

	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !lopt->qlen)
		goto out;

	if (bc != NULL) {
		entry.sport = inet->inet_num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->ir_rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				inet_diag_req_addrs(sk, req, &entry);
				entry.dport = ntohs(ireq->ir_rmt_port);

				if (!inet_diag_bc_run(bc, &entry))
					continue;
			}

			err = inet_diag_fill_req(skb, sk, req,
					sk_user_ns(NETLINK_CB(cb->skb).sk),
					NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, cb->nlh);
			if (err < 0) {
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}

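/*
 * Dump all sockets of one transport protocol in two passes: the listening
 * hash table first (LISTEN and SYN_RECV states), then the established hash
 * table (everything else, including TIME_WAIT).  cb->args[0] records which
 * pass we are in, cb->args[1] the current hash bucket and cb->args[2] the
 * position within that bucket, so a partially filled skb can be continued
 * on the next invocation of the dump callback.
 */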
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
		struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	int i, num;
	int s_i, s_num;
	struct net *net = sock_net(skb->sk);

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct sock *sk;
			struct hlist_nulls_node *node;
			struct inet_listen_hashbucket *ilb;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock_bh(&ilb->lock);
			sk_nulls_for_each(sk, node, &ilb->head) {
				struct inet_sock *inet = inet_sk(sk);

				if (!net_eq(sock_net(sk), net))
					continue;

				if (num < s_num) {
					num++;
					continue;
				}

				if (r->sdiag_family != AF_UNSPEC &&
				    sk->sk_family != r->sdiag_family)
					goto next_listen;

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}
			spin_unlock_bh(&ilb->lock);

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		goto out;

	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct sock *sk;
		struct hlist_nulls_node *node;

		num = 0;

		if (hlist_nulls_empty(&head->chain))
			continue;

		if (i > s_i)
			s_num = 0;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			int res;
			int state;

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next_normal;
			state = (sk->sk_state == TCP_TIME_WAIT) ?
				inet_twsk(sk)->tw_substate : sk->sk_state;
			if (!(r->idiag_states & (1 << state)))
				goto next_normal;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next_normal;
			if (r->id.idiag_sport != htons(sk->sk_num) &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != sk->sk_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			if (sk->sk_state == TCP_TIME_WAIT)
				res = inet_twsk_diag_dump(sk, skb, cb, r, bc);
			else
				res = inet_csk_diag_dump(sk, skb, cb, r, bc);
			if (res < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_normal:
			++num;
		}

		spin_unlock_bh(lock);
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
out:
	;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);

static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
		struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	const struct inet_diag_handler *handler;
	int err = 0;

	handler = inet_diag_lock_handler(r->sdiag_protocol);
	if (!IS_ERR(handler))
		handler->dump(skb, cb, r, bc);
	else
		err = PTR_ERR(handler);
	inet_diag_unlock_handler(handler);

	return err ? : skb->len;
}

static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *bc = NULL;
	int hdrlen = sizeof(struct inet_diag_req_v2);

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}

static inline int inet_diag_type2proto(int type)
{
	switch (type) {
	case TCPDIAG_GETSOCK:
		return IPPROTO_TCP;
	case DCCPDIAG_GETSOCK:
		return IPPROTO_DCCP;
	default:
		return 0;
	}
}

static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct inet_diag_req *rc = nlmsg_data(cb->nlh);
	struct inet_diag_req_v2 req;
	struct nlattr *bc = NULL;
	int hdrlen = sizeof(struct inet_diag_req);

	req.sdiag_family = AF_UNSPEC; /* compatibility */
	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, &req, bc);
}

static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
				      const struct nlmsghdr *nlh)
{
	struct inet_diag_req *rc = nlmsg_data(nlh);
	struct inet_diag_req_v2 req;

	req.sdiag_family = rc->idiag_family;
	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	return inet_diag_get_exact(in_skb, nlh, &req);
}

static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump_compat,
			};
			return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
		}
	}

	return inet_diag_get_exact_compat(skb, nlh);
}

static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(h, hdrlen)) {
			struct nlattr *attr;
			attr = nlmsg_find_attr(h, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump,
			};
			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
		}
	}

	return inet_diag_get_exact(skb, h, nlmsg_data(h));
}

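/*
 * Both AF_INET and AF_INET6 sock_diag requests are funnelled through
 * inet_diag_handler_dump(); the transport protocol is selected by
 * req->sdiag_protocol via inet_diag_table, which protocol modules such as
 * tcp_diag or udp_diag populate through inet_diag_register().
 *
 * For illustration only (userspace-side sketch, not part of this file):
 * a dump request is a SOCK_DIAG_BY_FAMILY message with
 * NLM_F_REQUEST | NLM_F_DUMP sent over a NETLINK_SOCK_DIAG socket,
 * carrying a struct inet_diag_req_v2 such as:
 *
 *	struct inet_diag_req_v2 req = {
 *		.sdiag_family	= AF_INET,
 *		.sdiag_protocol	= IPPROTO_TCP,
 *		.idiag_states	= ~0U,		all TCP states
 *	};
 */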
static const struct sock_diag_handler inet_diag_handler = {
	.family = AF_INET,
	.dump = inet_diag_handler_dump,
};

static const struct sock_diag_handler inet6_diag_handler = {
	.family = AF_INET6,
	.dump = inet_diag_handler_dump,
};

int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= IPPROTO_MAX)
		goto out;

	mutex_lock(&inet_diag_table_mutex);
	err = -EEXIST;
	if (inet_diag_table[type] == NULL) {
		inet_diag_table[type] = h;
		err = 0;
	}
	mutex_unlock(&inet_diag_table_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= IPPROTO_MAX)
		return;

	mutex_lock(&inet_diag_table_mutex);
	inet_diag_table[type] = NULL;
	mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (IPPROTO_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	err = sock_diag_register(&inet_diag_handler);
	if (err)
		goto out_free_nl;

	err = sock_diag_register(&inet6_diag_handler);
	if (err)
		goto out_free_inet;

	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
	return err;

out_free_inet:
	sock_diag_unregister(&inet_diag_handler);
out_free_nl:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);