/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@redhat.com>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))

struct netlink_sock {
	/* struct sock has to be the first member of netlink_sock */
	struct sock		sk;
	u32			pid;
	u32			dst_pid;
	u32			dst_group;
	u32			flags;
	u32			subscriptions;
	u32			ngroups;
	unsigned long		*groups;
	unsigned long		state;
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	struct mutex		*cb_mutex;
	struct mutex		cb_def_mutex;
	void			(*netlink_rcv)(struct sk_buff *skb);
	struct module		*module;
};

#define NETLINK_KERNEL_SOCKET	0x1
#define NETLINK_RECV_PKTINFO	0x2

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
	return container_of(sk, struct netlink_sock, sk);
}

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct nl_pid_hash {
	struct hlist_head *table;
	unsigned long rehash_time;

	unsigned int mask;
	unsigned int shift;

	unsigned int entries;
	unsigned int max_shift;

	u32 rnd;
};

struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
	unsigned long *listeners;
	unsigned int nl_nonroot;
	unsigned int groups;
	struct mutex *cb_mutex;
	struct module *module;
	int registered;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);
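
/*
 * Editorial sketch (not part of the original file): how the helpers above
 * fit together.  Because struct sock is the first member of struct
 * netlink_sock, nlk_sk() is effectively a pointer cast via container_of(),
 * and NLGRPSZ() rounds a group count up to whole unsigned longs so the
 * per-socket groups bitmap can be tested with the standard bitops.  The
 * function name below is hypothetical and the block is not compiled.
 */
#if 0
static int example_is_member(struct sock *sk, unsigned int group)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	/* nlk->groups holds NLGRPSZ(nlk->ngroups) bytes: one bit per group */
	if (!nlk->groups || group == 0 || group > nlk->ngroups)
		return 0;
	return test_bit(group - 1, nlk->groups);
}
#endif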

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb) {
		if (nlk->cb->done)
			nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and a reader wakes them up, all but
 * one immediately hit the write lock and grab all the CPUs. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

static void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

static inline struct sock *netlink_lookup(struct net *net, int protocol,
					  u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
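
/*
 * Editorial sketch of the locking contract above, under the stated
 * assumptions of this file: writers serialize through netlink_table_grab()/
 * netlink_table_ungrab() and wait until every reader has drained, while
 * readers use netlink_lock_table()/netlink_unlock_table(), which hold the
 * rwlock only long enough to bump nl_table_users and may then sleep.
 * Hypothetical, non-compiled illustration:
 */
#if 0
static void example_table_reader(void)
{
	netlink_lock_table();		/* excludes table writers */
	/* ... walk nl_table[]; sleeping is allowed here ... */
	netlink_unlock_table();		/* wakes a waiting writer if last */
}
#endif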

static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					 get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_zalloc(size);
	if (!table)
		return 0;

	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct hlist_node *node;
	unsigned long mask;
	unsigned int i;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, node, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		tbl->listeners[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return.
	 */
}

static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}

static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex)
		nlk->cb_mutex = cb_mutex;
	else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_KMOD
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	cb_mutex = nl_table[protocol].cb_mutex;
	netlink_unlock_table();

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}
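
/*
 * Editorial note on the CONFIG_KMOD branch in netlink_create() above: an
 * unregistered protocol triggers request_module("net-pf-16-proto-<n>"), so
 * a module implementing a netlink family can opt into autoloading by
 * declaring the matching alias.  A minimal sketch, assuming a hypothetical
 * NETLINK_FOO protocol number:
 */
#if 0
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FOO);
#endif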

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->subscriptions) {
		struct netlink_notify n = {
			.net = sock_net(sk),
			.protocol = sk->sk_protocol,
			.pid = nlk->pid,
		};
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	netlink_table_grab();
	if (netlink_is_kernel(sk)) {
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			kfree(nl_table[sk->sk_protocol].listeners);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].registered = 0;
		}
	} else if (nlk->subscriptions)
		netlink_update_listeners(sk);
	netlink_table_ungrab();

	kfree(nlk->groups);
	nlk->groups = NULL;

	sock_put(sk);
	return 0;
}
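
/*
 * Editorial sketch: the NETLINK_URELEASE notification raised in
 * netlink_release() above is how kernel code learns that a bound user
 * socket went away (the audit subsystem uses it to forget its daemon's
 * pid).  A minimal consumer, with hypothetical example_* names and
 * NETLINK_FOO; registration goes through netlink_register_notifier():
 */
#if 0
static int example_netlink_event(struct notifier_block *nb,
				 unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_FOO)
		printk(KERN_INFO "netlink pid %u released\n", n->pid);
	return NOTIFY_DONE;
}

static struct notifier_block example_netlink_nb = {
	.notifier_call = example_netlink_event,
};
#endif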

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->tgid;
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (!net_eq(sock_net(osk), net))
			continue;
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, net, pid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine.  */
	if (err == -EBUSY)
		err = 0;

	return err;
}

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups) {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, net, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(nladdr->nl_groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid	= nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_pid != nlk_sk(ssk)->pid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
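
/*
 * Editorial sketch of netlink_bind() from the userspace side: nl_pid 0
 * asks for autobinding, and nl_groups carries the first 32 multicast
 * subscriptions that end up in nlk->groups[0].  Hedged example, not
 * kernel code:
 */
#if 0
static void example_userspace_bind(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	struct sockaddr_nl snl = {
		.nl_family = AF_NETLINK,
		.nl_pid    = 0,		/* 0: kernel autobinds a pid */
		.nl_groups = 1,		/* group 1 == bit 0 of groups[0] */
	};

	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
}
#endif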

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
					   gfp_t allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		skb_set_owner_r(skb, sk);
		nlk->netlink_rcv(skb);
	}
	kfree_skb(skb);
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb);

	if (sk_filter(sk, skb)) {
		int err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
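
/*
 * Editorial sketch of the usual netlink_unicast() caller pattern: build a
 * reply with the nlmsg_*() helpers from <net/netlink.h> and address it to
 * the pid recorded in the request's control block.  example_reply() is a
 * hypothetical name; the block is not compiled.
 */
#if 0
static int example_reply(struct sock *nlsk, struct sk_buff *req_skb,
			 struct nlmsghdr *req)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	nlh = nlmsg_put(skb, NETLINK_CB(req_skb).pid, req->nlmsg_seq,
			NLMSG_DONE, 0, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	nlmsg_end(skb, nlh);
	return netlink_unicast(nlsk, skb, NETLINK_CB(req_skb).pid,
			       MSG_DONTWAIT);
}
#endif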

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	unsigned long *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

static inline int netlink_broadcast_deliver(struct sock *sk,
					    struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (!net_eq(sock_net(sk), p->net))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	kfree_skb(skb);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast);
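
/*
 * Editorial sketch pairing netlink_has_listeners() with
 * netlink_broadcast(): callers commonly skip building/sending the event
 * when nobody subscribed.  EXAMPLE_GRP and example_notify() are
 * hypothetical; pid 0 means "exclude nobody".
 */
#if 0
static void example_notify(struct sock *nlsk, struct sk_buff *skb)
{
	if (!netlink_has_listeners(nlsk, EXAMPLE_GRP)) {
		kfree_skb(skb);
		return;
	}
	/* delivery failures surface as ENOBUFS on each listener socket */
	netlink_broadcast(nlsk, skb, 0, EXAMPLE_GRP, GFP_KERNEL);
}
#endif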

struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (sock_net(sk) != sock_net(p->exclude_sk))
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}

/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		err = 0;
		break;
	}
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
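
/*
 * Editorial sketch of NETLINK_ADD_MEMBERSHIP from userspace: unlike the
 * 32-bit nl_groups mask used at bind() time, the socket option takes a
 * group *number*, which is how groups above 32 are reached (that is what
 * netlink_realloc_groups() above provides for).  Hedged, not kernel code:
 */
#if 0
static void example_join_group(int fd)
{
	unsigned int grp = 33;	/* groups > 32 must use this interface */

	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		   &grp, sizeof(grp));
}
#endif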

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}
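
/*
 * Editorial sketch of consuming the control message emitted by
 * netlink_cmsg_recv_pktinfo() above: after enabling NETLINK_PKTINFO via
 * setsockopt(), userspace reads the destination group of a multicast from
 * struct nl_pktinfo.  Hedged, not kernel code:
 */
#if 0
static unsigned int example_rx_group(struct msghdr *msg)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg))
		if (cmsg->cmsg_level == SOL_NETLINK &&
		    cmsg->cmsg_type == NETLINK_PKTINFO)
			return ((struct nl_pktinfo *)CMSG_DATA(cmsg))->group;
	return 0;	/* unicast, or pktinfo not enabled */
}
#endif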

static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid	= nlk->pid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).loginuid = audit_get_loginuid(current);
	security_task_getsecid(current, &(NETLINK_CB(skb).sid));
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so that
	   we will have to save the current capabilities to
	   check them when this message is delivered to the
	   corresponding kernel module.	 --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);

out:
	return err;
}

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).pid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

static void netlink_data_ready(struct sock *sk, int len)
{
	BUG();
}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
netlink_kernel_create(struct net *net, int unit, unsigned int groups,
		      void (*input)(struct sk_buff *skb),
		      struct mutex *cb_mutex, struct module *module)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	unsigned long *listeners = NULL;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and then move it to net.
	 */

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;
	sk_change_net(sk, net);

	if (groups < 32)
		groups = 32;

	listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->netlink_rcv = input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		nl_table[unit].listeners = listeners;
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(netlink_kernel_create);


void
netlink_kernel_release(struct sock *sk)
{
	sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);


/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	unsigned long *listeners, *old = NULL;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	int err = 0;

	if (groups < 32)
		groups = 32;

	netlink_table_grab();
	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC);
		if (!listeners) {
			err = -ENOMEM;
			goto out_ungrab;
		}
		old = tbl->listeners;
		memcpy(listeners, old, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, listeners);
	}
	tbl->groups = groups;

out_ungrab:
	netlink_table_ungrab();
	synchronize_rcu();
	kfree(old);
	return err;
}
EXPORT_SYMBOL(netlink_change_ngroups);
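
/*
 * Editorial sketch of the canonical netlink_kernel_create() call: a
 * subsystem creates its kernel socket at init time and points input at a
 * handler that typically runs netlink_rcv_skb() on each datagram.  The
 * example_* names and NETLINK_FOO are hypothetical.
 */
#if 0
static struct sock *example_sk;

static int example_process_msg(struct sk_buff *skb, struct nlmsghdr *nlh);

static void example_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &example_process_msg);
}

static int __init example_init(void)
{
	example_sk = netlink_kernel_create(&init_net, NETLINK_FOO, 0,
					   example_input, NULL, THIS_MODULE);
	return example_sk ? 0 : -ENOMEM;
}
#endif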

/**
 * netlink_clear_multicast_users - kick off multicast listeners
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *	netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct hlist_node *node;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	netlink_table_grab();

	sk_for_each_bound(sk, node, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);

	netlink_table_ungrab();
}
EXPORT_SYMBOL(netlink_clear_multicast_users);

void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}
EXPORT_SYMBOL(netlink_set_nonroot);

static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		goto errout;

	mutex_lock(nlk->cb_mutex);

	cb = nlk->cb;
	if (cb == NULL) {
		err = -EINVAL;
		goto errout_skb;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else {
			skb_queue_tail(&sk->sk_receive_queue, skb);
			sk->sk_data_ready(sk, skb->len);
		}
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else {
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
	}

	if (cb->done)
		cb->done(cb);
	nlk->cb = NULL;
	mutex_unlock(nlk->cb_mutex);

	netlink_destroy_callback(cb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
errout:
	return err;
}

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb,
				   struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	mutex_lock(nlk->cb_mutex);
	if (nlk->cb) {
		mutex_unlock(nlk->cb_mutex);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	mutex_unlock(nlk->cb_mutex);

	netlink_dump(sk);
	sock_put(sk);

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;
}
EXPORT_SYMBOL(netlink_dump_start);
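
/*
 * Editorial sketch of the netlink_dump_start() convention above: a request
 * handler checks NLM_F_DUMP and hands off to a dump callback, propagating
 * -EINTR so that netlink_rcv_skb() below does not send a duplicate ACK.
 * The example_* names are hypothetical and continue the sketch that
 * follows netlink_kernel_create() earlier in the file.
 */
#if 0
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb);

static int example_process_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP)
		return netlink_dump_start(example_sk, skb, nlh,
					  example_dump, NULL);
	/* ... handle non-dump requests here ... */
	return 0;
}
#endif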

void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);

int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);

/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_pid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_pid = pid;
		}

		/* errors reported via destination sk->sk_err */
		nlmsg_multicast(sk, skb, exclude_pid, group, flags);
	}

	if (report)
		err = nlmsg_unicast(sk, skb, pid);

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);

#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (sock_net(s) != seq_file_net(seq))
					continue;
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(nl_table_lock)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	iter = seq->private;
	s = v;
	do {
		s = sk_next(s);
	} while (s && sock_net(s) != seq_file_net(seq));
	if (s)
		return s;

	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			while (s && sock_net(s) != seq_file_net(seq))
				s = sk_next(s);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
	__releases(nl_table_lock)
{
	read_unlock(&nl_table_lock);
}


static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);

	}
	return 0;
}

static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};


static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
			    sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);

static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};

static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "netlink");
#endif
}

static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};

static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long limit;
	unsigned int order;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	/* Size the pid hash from available memory: budget roughly one
	 * 2MB'th of RAM for buckets (one 8MB'th below 128K pages) and
	 * turn that byte budget into a maximum bucket-count shift.
	 */
	if (num_physpages >= (128 * 1024))
		limit = num_physpages >> (21 - PAGE_SHIFT);
	else
		limit = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
	limit = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto panic;
		}
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);