/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct inet_diag_handler **inet_diag_table;

struct inet_diag_entry {
	__be32 *saddr;
	__be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
};

#define INET_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))

static DEFINE_MUTEX(inet_diag_table_mutex);

static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	if (!inet_diag_table[proto])
		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			       NETLINK_SOCK_DIAG, AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}

static inline void inet_diag_unlock_handler(
		const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}
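/*
 * Fill one inet_diag_msg netlink reply for a full (non-TIME_WAIT) socket.
 * Optional attributes (meminfo, skmeminfo, tcp_info, congestion control
 * name, TOS/TCLASS) are appended according to the extension bits the
 * requester set in req->idiag_ext.
 */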
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, struct inet_diag_req_v2 *req,
		      u32 pid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	void *info = NULL;
	struct inet_diag_meminfo *minfo = NULL;
	unsigned char *b = skb_tail_pointer(skb);
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(handler == NULL);

	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
	nlh->nlmsg_flags = nlmsg_flags;

	r = NLMSG_DATA(nlh);
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
		minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = inet->inet_dport;
	r->id.idiag_src[0] = inet->inet_rcv_saddr;
	r->id.idiag_dst[0] = inet->inet_daddr;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		const struct ipv6_pinfo *np = inet6_sk(sk);

		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = np->daddr;
		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
	}
#endif

	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = sock_i_ino(sk);

	if (minfo) {
		minfo->idiag_rmem = sk_rmem_alloc_get(sk);
		minfo->idiag_wmem = sk->sk_wmem_queued;
		minfo->idiag_fmem = sk->sk_forward_alloc;
		minfo->idiag_tmem = sk_wmem_alloc_get(sk);
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto rtattr_failure;

	if (icsk == NULL) {
		r->idiag_rqueue = r->idiag_wqueue = 0;
		goto out;
	}

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1)))
		info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info));

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
		const size_t len = strlen(icsk->icsk_ca_ops->name);

		strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
		       icsk->icsk_ca_ops->name);
	}

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

rtattr_failure:
nlmsg_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb, struct inet_diag_req_v2 *req,
			      u32 pid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk),
			skb, req, pid, seq, nlmsg_flags, unlh);
}
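/*
 * TIME_WAIT sockets carry no inet_connection_sock state, so they get a
 * reduced reply: addresses and ports taken from the timewait sock plus the
 * remaining timewait lifetime reported in idiag_expires.
 */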
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb, struct inet_diag_req_v2 *req,
			       u32 pid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	long tmo;
	struct inet_diag_msg *r;
	const unsigned char *previous_tail = skb_tail_pointer(skb);
	struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
					 unlh->nlmsg_type, sizeof(*r));

	r = NLMSG_DATA(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	nlh->nlmsg_flags = nlmsg_flags;

	tmo = tw->tw_ttd - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->idiag_family = tw->tw_family;
	r->idiag_retrans = 0;
	r->id.idiag_if = tw->tw_bound_dev_if;
	sock_diag_save_cookie(tw, r->id.idiag_cookie);
	r->id.idiag_sport = tw->tw_sport;
	r->id.idiag_dport = tw->tw_dport;
	r->id.idiag_src[0] = tw->tw_rcv_saddr;
	r->id.idiag_dst[0] = tw->tw_daddr;
	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (tw->tw_family == AF_INET6) {
		const struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);

		*(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
	}
#endif
	nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
	return skb->len;
nlmsg_failure:
	nlmsg_trim(skb, previous_tail);
	return -EMSGSIZE;
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct inet_diag_req_v2 *r, u32 pid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
					   skb, r, pid, seq, nlmsg_flags,
					   unlh);
	return inet_csk_diag_fill(sk, skb, r, pid, seq, nlmsg_flags, unlh);
}

int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
		const struct nlmsghdr *nlh, struct inet_diag_req_v2 *req)
{
	int err;
	struct sock *sk;
	struct sk_buff *rep;

	err = -EINVAL;
	if (req->sdiag_family == AF_INET) {
		sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		sk = inet6_lookup(&init_net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
	}
#endif
	else {
		goto out_nosk;
	}

	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	err = -ENOMEM;
	rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
				     sizeof(struct inet_diag_meminfo) +
				     sizeof(struct tcp_info) + 64)),
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req,
			   NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}
	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT)
			inet_twsk_put((struct inet_timewait_sock *)sk);
		else
			sock_put(sk);
	}
out_nosk:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler))
		err = PTR_ERR(handler);
	else
		err = handler->dump_one(in_skb, nlh, req);
	inet_diag_unlock_handler(handler);

	return err;
}

static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}

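/*
 * Interpret the user-supplied INET_DIAG_REQ_BYTECODE filter against one
 * socket.  Each op either matches (advance by op->yes) or fails (advance
 * by op->no); the socket is accepted iff the walk runs off the end of the
 * program with len == 0.
 */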
static int inet_diag_bc_run(const struct nlattr *_bc,
			    const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			struct inet_diag_hostcond *cond;
			__be32 *addr;

			cond = (struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					     entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			if (entry->family == AF_INET6 &&
			    cond->family == AF_INET) {
				if (addr[0] == 0 && addr[1] == 0 &&
				    addr[2] == htonl(0xffff) &&
				    bitstring_match(addr + 3, cond->addr,
						    cond->prefix_len))
					break;
			}
			yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}

int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_diag_entry entry;
	struct inet_sock *inet = inet_sk(sk);

	if (bc == NULL)
		return 1;

	entry.family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
	if (entry.family == AF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		entry.saddr = np->rcv_saddr.s6_addr32;
		entry.daddr = np->daddr.s6_addr32;
	} else
#endif
	{
		entry.saddr = &inet->inet_rcv_saddr;
		entry.daddr = &inet->inet_daddr;
	}
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	entry.userlocks = sk->sk_userlocks;

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);

static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc += op->yes;
	}
	return 0;
}

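/*
 * Audit the bytecode before it is ever run: every op must be at least four
 * bytes and 4-byte aligned, and all jump targets (checked via valid_cc())
 * must land on an op boundary inside the buffer, so a malicious filter
 * cannot walk out of bounds.
 */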
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const void *bc = bytecode;
	int len = bytecode_len;

	while (len > 0) {
		const struct inet_diag_bc_op *op = bc;

//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
		switch (op->code) {
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
		case INET_DIAG_BC_JMP:
			if (op->no < 4 || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
			break;
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}
		if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}

static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct inet_diag_req_v2 *r,
			      const struct nlattr *bc)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_csk_diag_fill(sk, skb, r,
				  NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
			       struct sk_buff *skb,
			       struct netlink_callback *cb,
			       struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	if (bc != NULL) {
		struct inet_diag_entry entry;

		entry.family = tw->tw_family;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);
			entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
			entry.daddr = tw6->tw_v6_daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &tw->tw_rcv_saddr;
			entry.daddr = &tw->tw_daddr;
		}
		entry.sport = tw->tw_num;
		entry.dport = ntohs(tw->tw_dport);
		entry.userlocks = 0;

		if (!inet_diag_bc_run(bc, &entry))
			return 0;
	}

	return inet_twsk_diag_fill(tw, skb, r,
				   NETLINK_CB(cb->skb).pid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
			      struct request_sock *req, u32 pid, u32 seq,
			      const struct nlmsghdr *unlh)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *inet = inet_sk(sk);
	unsigned char *b = skb_tail_pointer(skb);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
	nlh->nlmsg_flags = NLM_F_MULTI;
	r = NLMSG_DATA(nlh);

	r->idiag_family = sk->sk_family;
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = req->retrans;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(req, r->id.idiag_cookie);

	tmo = req->expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = ireq->rmt_port;
	r->id.idiag_src[0] = ireq->loc_addr;
	r->id.idiag_dst[0] = ireq->rmt_addr;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr;
		*(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
	}
#endif
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

nlmsg_failure:
	nlmsg_trim(skb, b);
	return -1;
}

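/*
 * Walk a listening socket's SYN queue and emit one record per pending
 * request_sock.  cb->args[3] (hash bucket) and cb->args[4] (request index)
 * record where to resume if the dump skb fills up.
 */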
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb,
			       struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	struct inet_diag_entry entry;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt;
	struct inet_sock *inet = inet_sk(sk);
	int j, s_j;
	int reqnum, s_reqnum;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	if (s_j > 0)
		s_j--;

	entry.family = sk->sk_family;

	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !lopt->qlen)
		goto out;

	if (bc != NULL) {
		entry.sport = inet->inet_num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				entry.saddr =
#if IS_ENABLED(CONFIG_IPV6)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->loc_addr.s6_addr32 :
#endif
					&ireq->loc_addr;
				entry.daddr =
#if IS_ENABLED(CONFIG_IPV6)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->rmt_addr.s6_addr32 :
#endif
					&ireq->rmt_addr;
				entry.dport = ntohs(ireq->rmt_port);

				if (!inet_diag_bc_run(bc, &entry))
					continue;
			}

			err = inet_diag_fill_req(skb, sk, req,
					       NETLINK_CB(cb->skb).pid,
					       cb->nlh->nlmsg_seq, cb->nlh);
			if (err < 0) {
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}

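/*
 * Main dump loop: first the listening hash (LISTEN and SYN_RECV states),
 * then the established hash including the timewait chain.  cb->args[0..4]
 * carry the resume state across successive netlink dump callbacks.
 */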
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
		struct netlink_callback *cb, struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	int i, num;
	int s_i, s_num;

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct sock *sk;
			struct hlist_nulls_node *node;
			struct inet_listen_hashbucket *ilb;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock_bh(&ilb->lock);
			sk_nulls_for_each(sk, node, &ilb->head) {
				struct inet_sock *inet = inet_sk(sk);

				if (num < s_num) {
					num++;
					continue;
				}

				if (r->sdiag_family != AF_UNSPEC &&
				    sk->sk_family != r->sdiag_family)
					goto next_listen;

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}
			spin_unlock_bh(&ilb->lock);

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		goto out;

	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct sock *sk;
		struct hlist_nulls_node *node;

		num = 0;

		if (hlist_nulls_empty(&head->chain) &&
		    hlist_nulls_empty(&head->twchain))
			continue;

		if (i > s_i)
			s_num = 0;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			struct inet_sock *inet = inet_sk(sk);

			if (num < s_num)
				goto next_normal;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next_normal;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next_normal;
			if (r->id.idiag_sport != inet->inet_sport &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != inet->inet_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_normal:
			++num;
		}

		if (r->idiag_states & TCPF_TIME_WAIT) {
			struct inet_timewait_sock *tw;

			inet_twsk_for_each(tw, node,
				    &head->twchain) {

				if (num < s_num)
					goto next_dying;
				if (r->sdiag_family != AF_UNSPEC &&
				    tw->tw_family != r->sdiag_family)
					goto next_dying;
				if (r->id.idiag_sport != tw->tw_sport &&
				    r->id.idiag_sport)
					goto next_dying;
				if (r->id.idiag_dport != tw->tw_dport &&
				    r->id.idiag_dport)
					goto next_dying;
				if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
					spin_unlock_bh(lock);
					goto done;
				}
next_dying:
				++num;
			}
		}
		spin_unlock_bh(lock);
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
out:
	;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);

static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
		struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	const struct inet_diag_handler *handler;

	handler = inet_diag_lock_handler(r->sdiag_protocol);
	if (!IS_ERR(handler))
		handler->dump(skb, cb, r, bc);
	inet_diag_unlock_handler(handler);

	return skb->len;
}

static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *bc = NULL;
	int hdrlen = sizeof(struct inet_diag_req_v2);

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, (struct inet_diag_req_v2 *)NLMSG_DATA(cb->nlh), bc);
}

static inline int inet_diag_type2proto(int type)
{
	switch (type) {
	case TCPDIAG_GETSOCK:
		return IPPROTO_TCP;
	case DCCPDIAG_GETSOCK:
		return IPPROTO_DCCP;
	default:
		return 0;
	}
}

static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct inet_diag_req *rc = NLMSG_DATA(cb->nlh);
	struct inet_diag_req_v2 req;
	struct nlattr *bc = NULL;
	int hdrlen = sizeof(struct inet_diag_req);

	req.sdiag_family = AF_UNSPEC; /* compatibility */
	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, &req, bc);
}

static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
				      const struct nlmsghdr *nlh)
{
	struct inet_diag_req *rc = NLMSG_DATA(nlh);
	struct inet_diag_req_v2 req;

	req.sdiag_family = rc->idiag_family;
	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	return inet_diag_get_exact(in_skb, nlh, &req);
}

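/*
 * Entry point for legacy TCPDIAG_GETSOCK/DCCPDIAG_GETSOCK requests: the old
 * inet_diag_req header is translated into inet_diag_req_v2 before being
 * dispatched to the common dump or get-exact paths.
 */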
static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump_compat,
			};
			return netlink_dump_start(sock_diag_nlsk, skb, nlh, &c);
		}
	}

	return inet_diag_get_exact_compat(skb, nlh);
}

static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(h, hdrlen)) {
			struct nlattr *attr;
			attr = nlmsg_find_attr(h, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump,
			};
			return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
		}
	}

	return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
}

static struct sock_diag_handler inet_diag_handler = {
	.family = AF_INET,
	.dump = inet_diag_handler_dump,
};

static struct sock_diag_handler inet6_diag_handler = {
	.family = AF_INET6,
	.dump = inet_diag_handler_dump,
};

int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= IPPROTO_MAX)
		goto out;

	mutex_lock(&inet_diag_table_mutex);
	err = -EEXIST;
	if (inet_diag_table[type] == NULL) {
		inet_diag_table[type] = h;
		err = 0;
	}
	mutex_unlock(&inet_diag_table_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= IPPROTO_MAX)
		return;

	mutex_lock(&inet_diag_table_mutex);
	inet_diag_table[type] = NULL;
	mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (IPPROTO_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	err = sock_diag_register(&inet_diag_handler);
	if (err)
		goto out_free_nl;

	err = sock_diag_register(&inet6_diag_handler);
	if (err)
		goto out_free_inet;

	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
	return err;

out_free_inet:
	sock_diag_unregister(&inet_diag_handler);
out_free_nl:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);