/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>

/* Per-nlmsg_type handler table.  Slots are filled by protocol modules
 * (e.g. tcp_diag) through inet_diag_register() and are protected by
 * inet_diag_table_mutex. */
static const struct inet_diag_handler **inet_diag_table;

/* Flattened identity of one socket / timewait / request entry, used when
 * running user-supplied filter bytecode against it.  saddr/daddr point at
 * either a single IPv4 address or the four __be32 words of an IPv6
 * address; sport/dport are in host byte order. */
struct inet_diag_entry {
	__be32 *saddr;
	__be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
};

/* Kernel-side netlink socket for the NETLINK_INET_DIAG family. */
static struct sock *idiagnl;

/* Reserve an attribute in @skb and return a pointer to its payload.
 * __RTA_PUT jumps to the local rtattr_failure label on overflow. */
#define INET_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))

static DEFINE_MUTEX(inet_diag_table_mutex);

/*
 * Look up the handler for a request type, loading the protocol module on
 * demand.  On BOTH the success and the ERR_PTR(-ENOENT) return paths the
 * table mutex is left held: the caller must always pair this with
 * inet_diag_unlock_handler().
 */
static const struct inet_diag_handler *inet_diag_lock_handler(int type)
{
#ifdef CONFIG_KMOD
	/* Unlocked peek is a benign race: worst case we request a module
	 * that is already registered or being registered. */
	if (!inet_diag_table[type])
		request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
			       NETLINK_INET_DIAG, type);
#endif

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[type])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[type];
}

/* Counterpart of inet_diag_lock_handler(); @handler itself is unused. */
static inline void inet_diag_unlock_handler(
		const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}

/*
 * Fill one inet_diag_msg for a full (non-timewait) socket, plus the
 * optional MEMINFO/INFO/CONG attributes requested through the @ext
 * bitmask.  Returns skb->len on success or -EMSGSIZE if the message did
 * not fit, in which case the skb is trimmed back to its prior tail.
 */
static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb,
			      int ext, u32 pid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	void *info = NULL;
	struct inet_diag_meminfo *minfo = NULL;
	unsigned char *b = skb_tail_pointer(skb);
	const struct inet_diag_handler *handler;

	handler = inet_diag_table[unlh->nlmsg_type];
	BUG_ON(handler == NULL);

	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
	nlh->nlmsg_flags = nlmsg_flags;

	r = NLMSG_DATA(nlh);
	/* Timewait sockets go through inet_twsk_diag_fill() instead. */
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
		minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));

	if (ext & (1 << (INET_DIAG_INFO - 1)))
		info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
				     handler->idiag_info_size);

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
		const size_t len = strlen(icsk->icsk_ca_ops->name);

		strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
		       icsk->icsk_ca_ops->name);
	}

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	/* Kernel pointer split into two 32-bit cookie halves; the double
	 * shift avoids undefined ">> 32" on 32-bit architectures. */
	r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
	r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);

	r->id.idiag_sport = inet->sport;
	r->id.idiag_dport = inet->dport;
	r->id.idiag_src[0] = inet->rcv_saddr;
	r->id.idiag_dst[0] = inet->daddr;

#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	if (r->idiag_family == AF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
			       &np->rcv_saddr);
		ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
			       &np->daddr);
	}
#endif

/* Remaining timer life in milliseconds, rounded up. */
#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS)
	{
		/* Retransmission timer is pending. */
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		/* Zero-window probe timer. */
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		/* Keepalive timer. */
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = sock_i_ino(sk);

	if (minfo) {
		minfo->idiag_rmem = atomic_read(&sk->sk_rmem_alloc);
		minfo->idiag_wmem = sk->sk_wmem_queued;
		minfo->idiag_fmem = sk->sk_forward_alloc;
		minfo->idiag_tmem = atomic_read(&sk->sk_wmem_alloc);
	}

	handler->idiag_get_info(sk, r, info);

	/* Congestion-control specific attributes for live sockets. */
	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

rtattr_failure:
nlmsg_failure:
	/* Message did not fit: undo everything appended above. */
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

/*
 * Fill one inet_diag_msg for a TIME_WAIT mini-socket.  Mirrors
 * inet_csk_diag_fill() but reports only the small timewait state and
 * never emits extension attributes.
 */
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb, int ext, u32 pid,
			       u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	long tmo;
	struct inet_diag_msg *r;
	const unsigned char *previous_tail = skb_tail_pointer(skb);
	struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
					 unlh->nlmsg_type, sizeof(*r));

	r = NLMSG_DATA(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	nlh->nlmsg_flags = nlmsg_flags;

	/* Jiffies until this timewait entry dies, clamped at zero. */
	tmo = tw->tw_ttd - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->idiag_family = tw->tw_family;
	r->idiag_state = tw->tw_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;
	r->id.idiag_if = tw->tw_bound_dev_if;
	/* Same two-halves cookie scheme as for full sockets. */
	r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
	r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
	r->id.idiag_sport = tw->tw_sport;
	r->id.idiag_dport = tw->tw_dport;
	r->id.idiag_src[0] = tw->tw_rcv_saddr;
	r->id.idiag_dst[0] = tw->tw_daddr;
	/* Report the substate (e.g. FIN_WAIT2) and the timewait death
	 * timer as timer 3. */
	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	if (tw->tw_family == AF_INET6) {
		const struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);

		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
			       &tw6->tw_v6_rcv_saddr);
		ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
			       &tw6->tw_v6_daddr);
	}
#endif
	nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
	return skb->len;
nlmsg_failure:
	nlmsg_trim(skb, previous_tail);
	return -EMSGSIZE;
}

/* Dispatch to the timewait or full-socket fill routine. */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			int ext, u32 pid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
					   skb, ext, pid, seq, nlmsg_flags,
					   unlh);
	return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
}

/*
 * Handle a non-dump request: look up the single socket identified by the
 * request, verify the caller's cookie, and unicast one reply.  Returns 0
 * on success or a negative errno.
 */
static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh)
{
	int err;
	struct sock *sk;
	struct inet_diag_req *req = NLMSG_DATA(nlh);
	struct sk_buff *rep;
	struct inet_hashinfo *hashinfo;
	const struct inet_diag_handler *handler;

	handler = inet_diag_lock_handler(nlh->nlmsg_type);
	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		goto unlock;
	}

	hashinfo = handler->idiag_hashinfo;
	err = -EINVAL;

	if (req->idiag_family == AF_INET) {
		/* The lookup takes a reference on the returned sock;
		 * dropped at "out" below. */
		sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
	}
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	else if (req->idiag_family == AF_INET6) {
		sk = inet6_lookup(&init_net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
	}
#endif
	else {
		goto unlock;
	}

	err = -ENOENT;
	if (sk == NULL)
		goto unlock;

	/* Unless the caller passed the wildcard cookie, require an exact
	 * match against the kernel pointer to guard against sock reuse. */
	err = -ESTALE;
	if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
	     req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
	    ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
	     (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
		goto out;

	err = -ENOMEM;
	rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
				     sizeof(struct inet_diag_meminfo) +
				     handler->idiag_info_size + 64)),
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req->idiag_ext,
			   NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		/* The skb was sized for one reply; overflow is a bug. */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(rep);
		goto out;
	}
	err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk) {
		/* Drop the reference taken by the lookup above. */
		if (sk->sk_state == TCP_TIME_WAIT)
			inet_twsk_put((struct inet_timewait_sock *)sk);
		else
			sock_put(sk);
	}
unlock:
	inet_diag_unlock_handler(handler);
	return err;
}

/* Prefix comparison of the first @bits bits of two big-endian strings. */
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}


static int
inet_diag_bc_run(const void *bc, int len, 359 const struct inet_diag_entry *entry) 360 { 361 while (len > 0) { 362 int yes = 1; 363 const struct inet_diag_bc_op *op = bc; 364 365 switch (op->code) { 366 case INET_DIAG_BC_NOP: 367 break; 368 case INET_DIAG_BC_JMP: 369 yes = 0; 370 break; 371 case INET_DIAG_BC_S_GE: 372 yes = entry->sport >= op[1].no; 373 break; 374 case INET_DIAG_BC_S_LE: 375 yes = entry->dport <= op[1].no; 376 break; 377 case INET_DIAG_BC_D_GE: 378 yes = entry->dport >= op[1].no; 379 break; 380 case INET_DIAG_BC_D_LE: 381 yes = entry->dport <= op[1].no; 382 break; 383 case INET_DIAG_BC_AUTO: 384 yes = !(entry->userlocks & SOCK_BINDPORT_LOCK); 385 break; 386 case INET_DIAG_BC_S_COND: 387 case INET_DIAG_BC_D_COND: { 388 struct inet_diag_hostcond *cond; 389 __be32 *addr; 390 391 cond = (struct inet_diag_hostcond *)(op + 1); 392 if (cond->port != -1 && 393 cond->port != (op->code == INET_DIAG_BC_S_COND ? 394 entry->sport : entry->dport)) { 395 yes = 0; 396 break; 397 } 398 399 if (cond->prefix_len == 0) 400 break; 401 402 if (op->code == INET_DIAG_BC_S_COND) 403 addr = entry->saddr; 404 else 405 addr = entry->daddr; 406 407 if (bitstring_match(addr, cond->addr, 408 cond->prefix_len)) 409 break; 410 if (entry->family == AF_INET6 && 411 cond->family == AF_INET) { 412 if (addr[0] == 0 && addr[1] == 0 && 413 addr[2] == htonl(0xffff) && 414 bitstring_match(addr + 3, cond->addr, 415 cond->prefix_len)) 416 break; 417 } 418 yes = 0; 419 break; 420 } 421 } 422 423 if (yes) { 424 len -= op->yes; 425 bc += op->yes; 426 } else { 427 len -= op->no; 428 bc += op->no; 429 } 430 } 431 return (len == 0); 432 } 433 434 static int valid_cc(const void *bc, int len, int cc) 435 { 436 while (len >= 0) { 437 const struct inet_diag_bc_op *op = bc; 438 439 if (cc > len) 440 return 0; 441 if (cc == len) 442 return 1; 443 if (op->yes < 4) 444 return 0; 445 len -= op->yes; 446 bc += op->yes; 447 } 448 return 0; 449 } 450 451 static int inet_diag_bc_audit(const void *bytecode, int 
			      bytecode_len)
{
	const unsigned char *bc = bytecode;
	int len = bytecode_len;

	while (len > 0) {
		struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;

		switch (op->code) {
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (op->yes < 4 || op->yes > len + 4)
				return -EINVAL;
			/* fall through: the "no" branch of a condition is
			 * validated exactly like a jump target. */
		case INET_DIAG_BC_JMP:
			if (op->no < 4 || op->no > len + 4)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
			break;
		case INET_DIAG_BC_NOP:
			if (op->yes < 4 || op->yes > len + 4)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		bc += op->yes;
		len -= op->yes;
	}
	/* Valid programs consume the buffer exactly. */
	return len == 0 ? 0 : -EINVAL;
}

/*
 * Dump callback for one full socket: run the optional bytecode filter
 * and, when it matches, append one record to the multipart dump.
 * Returns <0 when the skb is full so the dump can be resumed later.
 */
static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);

	/* Anything beyond the fixed request header is filter bytecode. */
	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
		struct inet_diag_entry entry;
		struct rtattr *bc = (struct rtattr *)(r + 1);
		struct inet_sock *inet = inet_sk(sk);

		entry.family = sk->sk_family;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		if (entry.family == AF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			entry.saddr = np->rcv_saddr.s6_addr32;
			entry.daddr = np->daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &inet->rcv_saddr;
			entry.daddr = &inet->daddr;
		}
		/* inet->num is already host order; dport needs ntohs. */
		entry.sport = inet->num;
		entry.dport = ntohs(inet->dport);
		entry.userlocks = sk->sk_userlocks;

		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
			return 0;
	}

	return inet_csk_diag_fill(sk, skb, r->idiag_ext,
				  NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

/*
 * Dump callback for one TIME_WAIT entry; same filtering contract as
 * inet_csk_diag_dump().
 */
static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
			       struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);

	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
		struct inet_diag_entry entry;
		struct rtattr *bc = (struct rtattr *)(r + 1);

		entry.family = tw->tw_family;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		if (tw->tw_family == AF_INET6) {
			struct inet6_timewait_sock *tw6 =
						inet6_twsk((struct sock *)tw);
			entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
			entry.daddr = tw6->tw_v6_daddr.s6_addr32;
		} else
#endif
		{
			entry.saddr = &tw->tw_rcv_saddr;
			entry.daddr = &tw->tw_daddr;
		}
		entry.sport = tw->tw_num;
		entry.dport = ntohs(tw->tw_dport);
		/* Timewait entries carry no user lock state. */
		entry.userlocks = 0;

		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
			return 0;
	}

	return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
				   NETLINK_CB(cb->skb).pid,
				   cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

/*
 * Fill one record for a SYN_RECV request socket hanging off listener @sk.
 * Returns skb->len on success, -1 if the message did not fit.
 */
static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
			      struct request_sock *req, u32 pid, u32 seq,
			      const struct nlmsghdr *unlh)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *inet = inet_sk(sk);
	unsigned char *b = skb_tail_pointer(skb);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
	nlh->nlmsg_flags = NLM_F_MULTI;
	r = NLMSG_DATA(nlh);

	r->idiag_family = sk->sk_family;
	r->idiag_state = TCP_SYN_RECV;
	/* Report the SYN-ACK retransmit timer as timer 1. */
	r->idiag_timer = 1;
	r->idiag_retrans = req->retrans;

	r->id.idiag_if = sk->sk_bound_dev_if;
	r->id.idiag_cookie[0] = (u32)(unsigned long)req;
	r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);

	/* Jiffies until the request expires, clamped at zero. */
	tmo = req->expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->id.idiag_sport = inet->sport;
	r->id.idiag_dport = ireq->rmt_port;
	r->id.idiag_src[0] = ireq->loc_addr;
	r->id.idiag_dst[0] = ireq->rmt_addr;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = sock_i_uid(sk);
	r->idiag_inode = 0;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	if (r->idiag_family == AF_INET6) {
		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
			       &inet6_rsk(req)->loc_addr);
		ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
			       &inet6_rsk(req)->rmt_addr);
	}
#endif
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

nlmsg_failure:
	nlmsg_trim(skb, b);
	return -1;
}

/*
 * Dump all SYN_RECV request sockets of listener @sk.  Resume position is
 * kept in cb->args[3] (hash slot + 1) and cb->args[4] (index in chain).
 */
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb)
{
	struct inet_diag_entry entry;
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt;
	struct rtattr *bc = NULL;
	struct inet_sock *inet = inet_sk(sk);
	int j, s_j;
	int reqnum, s_reqnum;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	/* args[3] stores slot + 1 so 0 can mean "from the beginning". */
	if (s_j > 0)
		s_j--;

	entry.family = sk->sk_family;

	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !lopt->qlen)
		goto out;

	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
		bc = (struct rtattr *)(r + 1);
		entry.sport = inet->num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				/* Pick the v4 or v6 address fields of the
				 * request for the bytecode filter. */
				entry.saddr =
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->loc_addr.s6_addr32 :
#endif
					&ireq->loc_addr;
				entry.daddr =
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
					(entry.family == AF_INET6) ?
					inet6_rsk(req)->rmt_addr.s6_addr32 :
#endif
					&ireq->rmt_addr;
				entry.dport = ntohs(ireq->rmt_port);

				if (!inet_diag_bc_run(RTA_DATA(bc),
						      RTA_PAYLOAD(bc), &entry))
					continue;
			}

			err = inet_diag_fill_req(skb, sk, req,
						 NETLINK_CB(cb->skb).pid,
						 cb->nlh->nlmsg_seq, cb->nlh);
			if (err < 0) {
				/* skb full: record where to resume. */
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}

/*
 * Netlink dump callback: walk the listening hash (including each
 * listener's SYN queue), then the established/timewait hash, emitting one
 * record per matching socket.  Resume state lives in cb->args[]:
 * [0] phase (0 = listeners, 1 = established), [1] hash bucket,
 * [2] entry within the bucket, [3]/[4] SYN-queue position.
 */
static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int i, num;
	int s_i, s_num;
	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
	const struct inet_diag_handler *handler;
	struct inet_hashinfo *hashinfo;

	handler = inet_diag_lock_handler(cb->nlh->nlmsg_type);
	if (IS_ERR(handler))
		/* No handler: report an empty dump rather than an error. */
		goto unlock;

	hashinfo = handler->idiag_hashinfo;

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		inet_listen_lock(hashinfo);
		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct sock *sk;
			struct hlist_node *node;

			num = 0;
			sk_for_each(sk, node, &hashinfo->listening_hash[i]) {
				struct inet_sock *inet = inet_sk(sk);

				if (num < s_num) {
					num++;
					continue;
				}

				if (r->id.idiag_sport != inet->sport &&
				    r->id.idiag_sport)
					goto next_listen;

				/* Listener records have no dport; resume
				 * inside a SYN queue skips straight to it. */
				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb) < 0) {
					inet_listen_unlock(hashinfo);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb) < 0) {
					inet_listen_unlock(hashinfo);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
		inet_listen_unlock(hashinfo);
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	/* Done if only listening/SYN_RECV states were requested. */
	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		goto unlock;

	for (i = s_i; i < hashinfo->ehash_size; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		rwlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct sock *sk;
		struct hlist_node *node;

		if (i > s_i)
			s_num = 0;

		read_lock_bh(lock);
		num = 0;
		sk_for_each(sk, node, &head->chain) {
			struct inet_sock *inet = inet_sk(sk);

			if (num < s_num)
				goto next_normal;
			if (!(r->idiag_states & (1 << sk->sk_state)))
				goto next_normal;
			if (r->id.idiag_sport != inet->sport &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != inet->dport &&
			    r->id.idiag_dport)
				goto next_normal;
			if (inet_csk_diag_dump(sk, skb, cb) < 0) {
				read_unlock_bh(lock);
				goto done;
			}
next_normal:
			++num;
		}

		if (r->idiag_states & TCPF_TIME_WAIT) {
			struct inet_timewait_sock *tw;

			inet_twsk_for_each(tw, node,
				    &head->twchain) {

				if (num < s_num)
					goto next_dying;
				if (r->id.idiag_sport != tw->tw_sport &&
				    r->id.idiag_sport)
					goto next_dying;
				if (r->id.idiag_dport != tw->tw_dport &&
				    r->id.idiag_dport)
					goto next_dying;
				if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
					read_unlock_bh(lock);
					goto done;
				}
next_dying:
				++num;
			}
		}
		read_unlock_bh(lock);
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
unlock:
	inet_diag_unlock_handler(handler);
	return skb->len;
}

/*
 * Validate an incoming request and route it either to the dump machinery
 * (NLM_F_DUMP, with optional audited filter bytecode) or to the
 * single-socket lookup path.
 */
static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;

			/* Filter bytecode must be well-formed before the
			 * dump path is allowed to run it. */
			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (attr == NULL ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}

		return netlink_dump_start(idiagnl, skb, nlh,
					  inet_diag_dump, NULL);
	}

	return inet_diag_get_exact(skb, nlh);
}

static DEFINE_MUTEX(inet_diag_mutex);

/* Netlink input: serialize all request processing behind one mutex. */
static void inet_diag_rcv(struct sk_buff *skb)
{
	mutex_lock(&inet_diag_mutex);
	netlink_rcv_skb(skb, &inet_diag_rcv_msg);
	mutex_unlock(&inet_diag_mutex);
}

/*
 * Register a protocol handler for its nlmsg type.  Returns 0, -EINVAL
 * for an out-of-range type, or -EEXIST if the slot is taken.
 */
int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= INET_DIAG_GETSOCK_MAX)
		goto out;

	mutex_lock(&inet_diag_table_mutex);
	err = -EEXIST;
	if (inet_diag_table[type] == NULL) {
		inet_diag_table[type] = h;
		err = 0;
	}
	mutex_unlock(&inet_diag_table_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

/* Remove a previously registered handler; safe against in-flight
 * requests because lookups hold inet_diag_table_mutex. */
void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= INET_DIAG_GETSOCK_MAX)
		return;

	mutex_lock(&inet_diag_table_mutex);
	inet_diag_table[type] = NULL;
	mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

/* Allocate the handler table and create the NETLINK_INET_DIAG socket. */
static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (INET_DIAG_GETSOCK_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	idiagnl = netlink_kernel_create(&init_net, NETLINK_INET_DIAG, 0,
					inet_diag_rcv, NULL, THIS_MODULE);
	if (idiagnl == NULL)
		goto out_free_table;
	err = 0;
out:
	return err;
out_free_table:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	netlink_kernel_release(idiagnl);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_INET_DIAG);