1 /* 2 * NETLINK Kernel-user communication protocol. 3 * 4 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> 5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 6 * Patrick McHardy <kaber@trash.net> 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License 10 * as published by the Free Software Foundation; either version 11 * 2 of the License, or (at your option) any later version. 12 * 13 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith 14 * added netlink_proto_exit 15 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br> 16 * use nlk_sk, as sk->protinfo is on a diet 8) 17 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org> 18 * - inc module use count of module that owns 19 * the kernel socket in case userspace opens 20 * socket of same protocol 21 * - remove all module support, since netlink is 22 * mandatory if CONFIG_NET=y these days 23 */ 24 25 #include <linux/module.h> 26 27 #include <linux/capability.h> 28 #include <linux/kernel.h> 29 #include <linux/init.h> 30 #include <linux/signal.h> 31 #include <linux/sched.h> 32 #include <linux/errno.h> 33 #include <linux/string.h> 34 #include <linux/stat.h> 35 #include <linux/socket.h> 36 #include <linux/un.h> 37 #include <linux/fcntl.h> 38 #include <linux/termios.h> 39 #include <linux/sockios.h> 40 #include <linux/net.h> 41 #include <linux/fs.h> 42 #include <linux/slab.h> 43 #include <asm/uaccess.h> 44 #include <linux/skbuff.h> 45 #include <linux/netdevice.h> 46 #include <linux/rtnetlink.h> 47 #include <linux/proc_fs.h> 48 #include <linux/seq_file.h> 49 #include <linux/notifier.h> 50 #include <linux/security.h> 51 #include <linux/jhash.h> 52 #include <linux/jiffies.h> 53 #include <linux/random.h> 54 #include <linux/bitops.h> 55 #include <linux/mm.h> 56 #include <linux/types.h> 57 #include <linux/audit.h> 58 #include <linux/mutex.h> 59 #include <linux/vmalloc.h> 60 #include <linux/if_arp.h> 61 #include <linux/rhashtable.h> 62 #include <asm/cacheflush.h> 63 #include <linux/hash.h> 64 #include <linux/genetlink.h> 65 66 #include <net/net_namespace.h> 67 #include <net/sock.h> 68 #include <net/scm.h> 69 #include <net/netlink.h> 70 71 #include "af_netlink.h" 72 73 struct listeners { 74 struct rcu_head rcu; 75 unsigned long masks[0]; 76 }; 77 78 /* state bits */ 79 #define NETLINK_S_CONGESTED 0x0 80 81 /* flags */ 82 #define NETLINK_F_KERNEL_SOCKET 0x1 83 #define NETLINK_F_RECV_PKTINFO 0x2 84 #define NETLINK_F_BROADCAST_SEND_ERROR 0x4 85 #define NETLINK_F_RECV_NO_ENOBUFS 0x8 86 #define NETLINK_F_LISTEN_ALL_NSID 0x10 87 #define NETLINK_F_CAP_ACK 0x20 88 89 static inline int netlink_is_kernel(struct sock *sk) 90 { 91 return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET; 92 } 93 94 struct netlink_table *nl_table __read_mostly; 95 EXPORT_SYMBOL_GPL(nl_table); 96 97 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); 98 99 static int netlink_dump(struct sock *sk); 100 static void netlink_skb_destructor(struct sk_buff *skb); 101 102 /* nl_table locking explained: 103 * Lookup and traversal are protected with an RCU read-side lock. Insertion 104 * and removal are protected with per bucket lock while using RCU list 105 * modification primitives and may run in parallel to RCU protected lookups. 106 * Destruction of the Netlink socket may only occur *after* nl_table_lock has 107 * been acquired * either during or after the socket has been removed from 108 * the list and after an RCU grace period. 
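 *
 * A lookup therefore follows the pattern used by netlink_lookup() further
 * below (sketch only):
 *
 *	rcu_read_lock();
 *	sk = __netlink_lookup(table, portid, net);
 *	if (sk)
 *		sock_hold(sk);
 *	rcu_read_unlock();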
109 */ 110 DEFINE_RWLOCK(nl_table_lock); 111 EXPORT_SYMBOL_GPL(nl_table_lock); 112 static atomic_t nl_table_users = ATOMIC_INIT(0); 113 114 #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock)); 115 116 static ATOMIC_NOTIFIER_HEAD(netlink_chain); 117 118 static DEFINE_SPINLOCK(netlink_tap_lock); 119 static struct list_head netlink_tap_all __read_mostly; 120 121 static const struct rhashtable_params netlink_rhashtable_params; 122 123 static inline u32 netlink_group_mask(u32 group) 124 { 125 return group ? 1 << (group - 1) : 0; 126 } 127 128 static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb, 129 gfp_t gfp_mask) 130 { 131 unsigned int len = skb_end_offset(skb); 132 struct sk_buff *new; 133 134 new = alloc_skb(len, gfp_mask); 135 if (new == NULL) 136 return NULL; 137 138 NETLINK_CB(new).portid = NETLINK_CB(skb).portid; 139 NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group; 140 NETLINK_CB(new).creds = NETLINK_CB(skb).creds; 141 142 memcpy(skb_put(new, len), skb->data, len); 143 return new; 144 } 145 146 int netlink_add_tap(struct netlink_tap *nt) 147 { 148 if (unlikely(nt->dev->type != ARPHRD_NETLINK)) 149 return -EINVAL; 150 151 spin_lock(&netlink_tap_lock); 152 list_add_rcu(&nt->list, &netlink_tap_all); 153 spin_unlock(&netlink_tap_lock); 154 155 __module_get(nt->module); 156 157 return 0; 158 } 159 EXPORT_SYMBOL_GPL(netlink_add_tap); 160 161 static int __netlink_remove_tap(struct netlink_tap *nt) 162 { 163 bool found = false; 164 struct netlink_tap *tmp; 165 166 spin_lock(&netlink_tap_lock); 167 168 list_for_each_entry(tmp, &netlink_tap_all, list) { 169 if (nt == tmp) { 170 list_del_rcu(&nt->list); 171 found = true; 172 goto out; 173 } 174 } 175 176 pr_warn("__netlink_remove_tap: %p not found\n", nt); 177 out: 178 spin_unlock(&netlink_tap_lock); 179 180 if (found) 181 module_put(nt->module); 182 183 return found ? 0 : -ENODEV; 184 } 185 186 int netlink_remove_tap(struct netlink_tap *nt) 187 { 188 int ret; 189 190 ret = __netlink_remove_tap(nt); 191 synchronize_net(); 192 193 return ret; 194 } 195 EXPORT_SYMBOL_GPL(netlink_remove_tap); 196 197 static bool netlink_filter_tap(const struct sk_buff *skb) 198 { 199 struct sock *sk = skb->sk; 200 201 /* We take the more conservative approach and 202 * whitelist socket protocols that may pass. 203 */ 204 switch (sk->sk_protocol) { 205 case NETLINK_ROUTE: 206 case NETLINK_USERSOCK: 207 case NETLINK_SOCK_DIAG: 208 case NETLINK_NFLOG: 209 case NETLINK_XFRM: 210 case NETLINK_FIB_LOOKUP: 211 case NETLINK_NETFILTER: 212 case NETLINK_GENERIC: 213 return true; 214 } 215 216 return false; 217 } 218 219 static int __netlink_deliver_tap_skb(struct sk_buff *skb, 220 struct net_device *dev) 221 { 222 struct sk_buff *nskb; 223 struct sock *sk = skb->sk; 224 int ret = -ENOMEM; 225 226 dev_hold(dev); 227 228 if (is_vmalloc_addr(skb->head)) 229 nskb = netlink_to_full_skb(skb, GFP_ATOMIC); 230 else 231 nskb = skb_clone(skb, GFP_ATOMIC); 232 if (nskb) { 233 nskb->dev = dev; 234 nskb->protocol = htons((u16) sk->sk_protocol); 235 nskb->pkt_type = netlink_is_kernel(sk) ? 
236 PACKET_KERNEL : PACKET_USER; 237 skb_reset_network_header(nskb); 238 ret = dev_queue_xmit(nskb); 239 if (unlikely(ret > 0)) 240 ret = net_xmit_errno(ret); 241 } 242 243 dev_put(dev); 244 return ret; 245 } 246 247 static void __netlink_deliver_tap(struct sk_buff *skb) 248 { 249 int ret; 250 struct netlink_tap *tmp; 251 252 if (!netlink_filter_tap(skb)) 253 return; 254 255 list_for_each_entry_rcu(tmp, &netlink_tap_all, list) { 256 ret = __netlink_deliver_tap_skb(skb, tmp->dev); 257 if (unlikely(ret)) 258 break; 259 } 260 } 261 262 static void netlink_deliver_tap(struct sk_buff *skb) 263 { 264 rcu_read_lock(); 265 266 if (unlikely(!list_empty(&netlink_tap_all))) 267 __netlink_deliver_tap(skb); 268 269 rcu_read_unlock(); 270 } 271 272 static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src, 273 struct sk_buff *skb) 274 { 275 if (!(netlink_is_kernel(dst) && netlink_is_kernel(src))) 276 netlink_deliver_tap(skb); 277 } 278 279 static void netlink_overrun(struct sock *sk) 280 { 281 struct netlink_sock *nlk = nlk_sk(sk); 282 283 if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) { 284 if (!test_and_set_bit(NETLINK_S_CONGESTED, 285 &nlk_sk(sk)->state)) { 286 sk->sk_err = ENOBUFS; 287 sk->sk_error_report(sk); 288 } 289 } 290 atomic_inc(&sk->sk_drops); 291 } 292 293 static void netlink_rcv_wake(struct sock *sk) 294 { 295 struct netlink_sock *nlk = nlk_sk(sk); 296 297 if (skb_queue_empty(&sk->sk_receive_queue)) 298 clear_bit(NETLINK_S_CONGESTED, &nlk->state); 299 if (!test_bit(NETLINK_S_CONGESTED, &nlk->state)) 300 wake_up_interruptible(&nlk->wait); 301 } 302 303 static void netlink_skb_destructor(struct sk_buff *skb) 304 { 305 if (is_vmalloc_addr(skb->head)) { 306 if (!skb->cloned || 307 !atomic_dec_return(&(skb_shinfo(skb)->dataref))) 308 vfree(skb->head); 309 310 skb->head = NULL; 311 } 312 if (skb->sk != NULL) 313 sock_rfree(skb); 314 } 315 316 static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) 317 { 318 WARN_ON(skb->sk != NULL); 319 skb->sk = sk; 320 skb->destructor = netlink_skb_destructor; 321 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 322 sk_mem_charge(sk, skb->truesize); 323 } 324 325 static void __netlink_sock_destruct(struct sock *sk) 326 { 327 struct netlink_sock *nlk = nlk_sk(sk); 328 329 if (nlk->cb_running) { 330 module_put(nlk->cb.module); 331 kfree_skb(nlk->cb.skb); 332 } 333 334 skb_queue_purge(&sk->sk_receive_queue); 335 336 if (!sock_flag(sk, SOCK_DEAD)) { 337 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk); 338 return; 339 } 340 341 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 342 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); 343 WARN_ON(nlk_sk(sk)->groups); 344 } 345 346 static void netlink_sock_destruct_work(struct work_struct *work) 347 { 348 struct netlink_sock *nlk = container_of(work, struct netlink_sock, 349 work); 350 351 nlk->cb.done(&nlk->cb); 352 __netlink_sock_destruct(&nlk->sk); 353 } 354 355 static void netlink_sock_destruct(struct sock *sk) 356 { 357 struct netlink_sock *nlk = nlk_sk(sk); 358 359 if (nlk->cb_running && nlk->cb.done) { 360 INIT_WORK(&nlk->work, netlink_sock_destruct_work); 361 schedule_work(&nlk->work); 362 return; 363 } 364 365 __netlink_sock_destruct(sk); 366 } 367 368 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on 369 * SMP. Look, when several writers sleep and reader wakes them up, all but one 370 * immediately hit write lock and grab all the cpus. Exclusive sleep solves 371 * this, _but_ remember, it adds useless work on UP machines. 
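 *
 * In practice writers bracket table updates with netlink_table_grab() and
 * netlink_table_ungrab(), while sleepable readers use netlink_lock_table()
 * and netlink_unlock_table() defined below, e.g. (sketch only):
 *
 *	netlink_table_grab();
 *	... update nl_table[] ...
 *	netlink_table_ungrab();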
372 */ 373 374 void netlink_table_grab(void) 375 __acquires(nl_table_lock) 376 { 377 might_sleep(); 378 379 write_lock_irq(&nl_table_lock); 380 381 if (atomic_read(&nl_table_users)) { 382 DECLARE_WAITQUEUE(wait, current); 383 384 add_wait_queue_exclusive(&nl_table_wait, &wait); 385 for (;;) { 386 set_current_state(TASK_UNINTERRUPTIBLE); 387 if (atomic_read(&nl_table_users) == 0) 388 break; 389 write_unlock_irq(&nl_table_lock); 390 schedule(); 391 write_lock_irq(&nl_table_lock); 392 } 393 394 __set_current_state(TASK_RUNNING); 395 remove_wait_queue(&nl_table_wait, &wait); 396 } 397 } 398 399 void netlink_table_ungrab(void) 400 __releases(nl_table_lock) 401 { 402 write_unlock_irq(&nl_table_lock); 403 wake_up(&nl_table_wait); 404 } 405 406 static inline void 407 netlink_lock_table(void) 408 { 409 /* read_lock() synchronizes us to netlink_table_grab */ 410 411 read_lock(&nl_table_lock); 412 atomic_inc(&nl_table_users); 413 read_unlock(&nl_table_lock); 414 } 415 416 static inline void 417 netlink_unlock_table(void) 418 { 419 if (atomic_dec_and_test(&nl_table_users)) 420 wake_up(&nl_table_wait); 421 } 422 423 struct netlink_compare_arg 424 { 425 possible_net_t pnet; 426 u32 portid; 427 }; 428 429 /* Doing sizeof directly may yield 4 extra bytes on 64-bit. */ 430 #define netlink_compare_arg_len \ 431 (offsetof(struct netlink_compare_arg, portid) + sizeof(u32)) 432 433 static inline int netlink_compare(struct rhashtable_compare_arg *arg, 434 const void *ptr) 435 { 436 const struct netlink_compare_arg *x = arg->key; 437 const struct netlink_sock *nlk = ptr; 438 439 return nlk->portid != x->portid || 440 !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet)); 441 } 442 443 static void netlink_compare_arg_init(struct netlink_compare_arg *arg, 444 struct net *net, u32 portid) 445 { 446 memset(arg, 0, sizeof(*arg)); 447 write_pnet(&arg->pnet, net); 448 arg->portid = portid; 449 } 450 451 static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid, 452 struct net *net) 453 { 454 struct netlink_compare_arg arg; 455 456 netlink_compare_arg_init(&arg, net, portid); 457 return rhashtable_lookup_fast(&table->hash, &arg, 458 netlink_rhashtable_params); 459 } 460 461 static int __netlink_insert(struct netlink_table *table, struct sock *sk) 462 { 463 struct netlink_compare_arg arg; 464 465 netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid); 466 return rhashtable_lookup_insert_key(&table->hash, &arg, 467 &nlk_sk(sk)->node, 468 netlink_rhashtable_params); 469 } 470 471 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid) 472 { 473 struct netlink_table *table = &nl_table[protocol]; 474 struct sock *sk; 475 476 rcu_read_lock(); 477 sk = __netlink_lookup(table, portid, net); 478 if (sk) 479 sock_hold(sk); 480 rcu_read_unlock(); 481 482 return sk; 483 } 484 485 static const struct proto_ops netlink_ops; 486 487 static void 488 netlink_update_listeners(struct sock *sk) 489 { 490 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; 491 unsigned long mask; 492 unsigned int i; 493 struct listeners *listeners; 494 495 listeners = nl_deref_protected(tbl->listeners); 496 if (!listeners) 497 return; 498 499 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { 500 mask = 0; 501 sk_for_each_bound(sk, &tbl->mc_list) { 502 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) 503 mask |= nlk_sk(sk)->groups[i]; 504 } 505 listeners->masks[i] = mask; 506 } 507 /* this function is only called with the netlink table "grabbed", which 508 * makes sure updates are visible before bind or setsockopt 
return. */ 509 } 510 511 static int netlink_insert(struct sock *sk, u32 portid) 512 { 513 struct netlink_table *table = &nl_table[sk->sk_protocol]; 514 int err; 515 516 lock_sock(sk); 517 518 err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY; 519 if (nlk_sk(sk)->bound) 520 goto err; 521 522 err = -ENOMEM; 523 if (BITS_PER_LONG > 32 && 524 unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX)) 525 goto err; 526 527 nlk_sk(sk)->portid = portid; 528 sock_hold(sk); 529 530 err = __netlink_insert(table, sk); 531 if (err) { 532 /* In case the hashtable backend returns with -EBUSY 533 * from here, it must not escape to the caller. 534 */ 535 if (unlikely(err == -EBUSY)) 536 err = -EOVERFLOW; 537 if (err == -EEXIST) 538 err = -EADDRINUSE; 539 sock_put(sk); 540 goto err; 541 } 542 543 /* We need to ensure that the socket is hashed and visible. */ 544 smp_wmb(); 545 nlk_sk(sk)->bound = portid; 546 547 err: 548 release_sock(sk); 549 return err; 550 } 551 552 static void netlink_remove(struct sock *sk) 553 { 554 struct netlink_table *table; 555 556 table = &nl_table[sk->sk_protocol]; 557 if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node, 558 netlink_rhashtable_params)) { 559 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); 560 __sock_put(sk); 561 } 562 563 netlink_table_grab(); 564 if (nlk_sk(sk)->subscriptions) { 565 __sk_del_bind_node(sk); 566 netlink_update_listeners(sk); 567 } 568 if (sk->sk_protocol == NETLINK_GENERIC) 569 atomic_inc(&genl_sk_destructing_cnt); 570 netlink_table_ungrab(); 571 } 572 573 static struct proto netlink_proto = { 574 .name = "NETLINK", 575 .owner = THIS_MODULE, 576 .obj_size = sizeof(struct netlink_sock), 577 }; 578 579 static int __netlink_create(struct net *net, struct socket *sock, 580 struct mutex *cb_mutex, int protocol, 581 int kern) 582 { 583 struct sock *sk; 584 struct netlink_sock *nlk; 585 586 sock->ops = &netlink_ops; 587 588 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern); 589 if (!sk) 590 return -ENOMEM; 591 592 sock_init_data(sock, sk); 593 594 nlk = nlk_sk(sk); 595 if (cb_mutex) { 596 nlk->cb_mutex = cb_mutex; 597 } else { 598 nlk->cb_mutex = &nlk->cb_def_mutex; 599 mutex_init(nlk->cb_mutex); 600 } 601 init_waitqueue_head(&nlk->wait); 602 603 sk->sk_destruct = netlink_sock_destruct; 604 sk->sk_protocol = protocol; 605 return 0; 606 } 607 608 static int netlink_create(struct net *net, struct socket *sock, int protocol, 609 int kern) 610 { 611 struct module *module = NULL; 612 struct mutex *cb_mutex; 613 struct netlink_sock *nlk; 614 int (*bind)(struct net *net, int group); 615 void (*unbind)(struct net *net, int group); 616 int err = 0; 617 618 sock->state = SS_UNCONNECTED; 619 620 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) 621 return -ESOCKTNOSUPPORT; 622 623 if (protocol < 0 || protocol >= MAX_LINKS) 624 return -EPROTONOSUPPORT; 625 626 netlink_lock_table(); 627 #ifdef CONFIG_MODULES 628 if (!nl_table[protocol].registered) { 629 netlink_unlock_table(); 630 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol); 631 netlink_lock_table(); 632 } 633 #endif 634 if (nl_table[protocol].registered && 635 try_module_get(nl_table[protocol].module)) 636 module = nl_table[protocol].module; 637 else 638 err = -EPROTONOSUPPORT; 639 cb_mutex = nl_table[protocol].cb_mutex; 640 bind = nl_table[protocol].bind; 641 unbind = nl_table[protocol].unbind; 642 netlink_unlock_table(); 643 644 if (err < 0) 645 goto out; 646 647 err = __netlink_create(net, sock, cb_mutex, protocol, kern); 648 if (err < 0) 649 goto out_module; 650 651 
local_bh_disable(); 652 sock_prot_inuse_add(net, &netlink_proto, 1); 653 local_bh_enable(); 654 655 nlk = nlk_sk(sock->sk); 656 nlk->module = module; 657 nlk->netlink_bind = bind; 658 nlk->netlink_unbind = unbind; 659 out: 660 return err; 661 662 out_module: 663 module_put(module); 664 goto out; 665 } 666 667 static void deferred_put_nlk_sk(struct rcu_head *head) 668 { 669 struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu); 670 671 sock_put(&nlk->sk); 672 } 673 674 static int netlink_release(struct socket *sock) 675 { 676 struct sock *sk = sock->sk; 677 struct netlink_sock *nlk; 678 679 if (!sk) 680 return 0; 681 682 netlink_remove(sk); 683 sock_orphan(sk); 684 nlk = nlk_sk(sk); 685 686 /* 687 * OK. Socket is unlinked, any packets that arrive now 688 * will be purged. 689 */ 690 691 /* must not acquire netlink_table_lock in any way again before unbind 692 * and notifying genetlink is done as otherwise it might deadlock 693 */ 694 if (nlk->netlink_unbind) { 695 int i; 696 697 for (i = 0; i < nlk->ngroups; i++) 698 if (test_bit(i, nlk->groups)) 699 nlk->netlink_unbind(sock_net(sk), i + 1); 700 } 701 if (sk->sk_protocol == NETLINK_GENERIC && 702 atomic_dec_return(&genl_sk_destructing_cnt) == 0) 703 wake_up(&genl_sk_destructing_waitq); 704 705 sock->sk = NULL; 706 wake_up_interruptible_all(&nlk->wait); 707 708 skb_queue_purge(&sk->sk_write_queue); 709 710 if (nlk->portid && nlk->bound) { 711 struct netlink_notify n = { 712 .net = sock_net(sk), 713 .protocol = sk->sk_protocol, 714 .portid = nlk->portid, 715 }; 716 atomic_notifier_call_chain(&netlink_chain, 717 NETLINK_URELEASE, &n); 718 } 719 720 module_put(nlk->module); 721 722 if (netlink_is_kernel(sk)) { 723 netlink_table_grab(); 724 BUG_ON(nl_table[sk->sk_protocol].registered == 0); 725 if (--nl_table[sk->sk_protocol].registered == 0) { 726 struct listeners *old; 727 728 old = nl_deref_protected(nl_table[sk->sk_protocol].listeners); 729 RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL); 730 kfree_rcu(old, rcu); 731 nl_table[sk->sk_protocol].module = NULL; 732 nl_table[sk->sk_protocol].bind = NULL; 733 nl_table[sk->sk_protocol].unbind = NULL; 734 nl_table[sk->sk_protocol].flags = 0; 735 nl_table[sk->sk_protocol].registered = 0; 736 } 737 netlink_table_ungrab(); 738 } 739 740 kfree(nlk->groups); 741 nlk->groups = NULL; 742 743 local_bh_disable(); 744 sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); 745 local_bh_enable(); 746 call_rcu(&nlk->rcu, deferred_put_nlk_sk); 747 return 0; 748 } 749 750 static int netlink_autobind(struct socket *sock) 751 { 752 struct sock *sk = sock->sk; 753 struct net *net = sock_net(sk); 754 struct netlink_table *table = &nl_table[sk->sk_protocol]; 755 s32 portid = task_tgid_vnr(current); 756 int err; 757 s32 rover = -4096; 758 bool ok; 759 760 retry: 761 cond_resched(); 762 rcu_read_lock(); 763 ok = !__netlink_lookup(table, portid, net); 764 rcu_read_unlock(); 765 if (!ok) { 766 /* Bind collision, search negative portid values. */ 767 if (rover == -4096) 768 /* rover will be in range [S32_MIN, -4097] */ 769 rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN); 770 else if (rover >= -4096) 771 rover = -4097; 772 portid = rover--; 773 goto retry; 774 } 775 776 err = netlink_insert(sk, portid); 777 if (err == -EADDRINUSE) 778 goto retry; 779 780 /* If 2 threads race to autobind, that is fine. 
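 * The loser simply finds nlk->bound already set inside netlink_insert()
 * and gets -EBUSY back; the socket ended up bound either way, so treat
 * it as success.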
 */
	if (err == -EBUSY)
		err = 0;

	return err;
}

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when
 * the netlink socket was created and that the sender of the message
 * has it too.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when
 * the netlink socket was created and that the sender of the message
 * has it too.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in all user namespaces when the
 * netlink socket was created and that the sender of the message
 * has it too.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap over the network namespace of the
 * socket we received the message from when the netlink socket was
 * created and that the sender of the message has it too.
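 *
 * A typical permission check in a message handler looks roughly like the
 * following (illustrative sketch only; which capability to demand depends
 * on the family):
 *
 *	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *		return -EPERM;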
847 */ 848 bool netlink_net_capable(const struct sk_buff *skb, int cap) 849 { 850 return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap); 851 } 852 EXPORT_SYMBOL(netlink_net_capable); 853 854 static inline int netlink_allowed(const struct socket *sock, unsigned int flag) 855 { 856 return (nl_table[sock->sk->sk_protocol].flags & flag) || 857 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN); 858 } 859 860 static void 861 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions) 862 { 863 struct netlink_sock *nlk = nlk_sk(sk); 864 865 if (nlk->subscriptions && !subscriptions) 866 __sk_del_bind_node(sk); 867 else if (!nlk->subscriptions && subscriptions) 868 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list); 869 nlk->subscriptions = subscriptions; 870 } 871 872 static int netlink_realloc_groups(struct sock *sk) 873 { 874 struct netlink_sock *nlk = nlk_sk(sk); 875 unsigned int groups; 876 unsigned long *new_groups; 877 int err = 0; 878 879 netlink_table_grab(); 880 881 groups = nl_table[sk->sk_protocol].groups; 882 if (!nl_table[sk->sk_protocol].registered) { 883 err = -ENOENT; 884 goto out_unlock; 885 } 886 887 if (nlk->ngroups >= groups) 888 goto out_unlock; 889 890 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC); 891 if (new_groups == NULL) { 892 err = -ENOMEM; 893 goto out_unlock; 894 } 895 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0, 896 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups)); 897 898 nlk->groups = new_groups; 899 nlk->ngroups = groups; 900 out_unlock: 901 netlink_table_ungrab(); 902 return err; 903 } 904 905 static void netlink_undo_bind(int group, long unsigned int groups, 906 struct sock *sk) 907 { 908 struct netlink_sock *nlk = nlk_sk(sk); 909 int undo; 910 911 if (!nlk->netlink_unbind) 912 return; 913 914 for (undo = 0; undo < group; undo++) 915 if (test_bit(undo, &groups)) 916 nlk->netlink_unbind(sock_net(sk), undo + 1); 917 } 918 919 static int netlink_bind(struct socket *sock, struct sockaddr *addr, 920 int addr_len) 921 { 922 struct sock *sk = sock->sk; 923 struct net *net = sock_net(sk); 924 struct netlink_sock *nlk = nlk_sk(sk); 925 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 926 int err; 927 long unsigned int groups = nladdr->nl_groups; 928 bool bound; 929 930 if (addr_len < sizeof(struct sockaddr_nl)) 931 return -EINVAL; 932 933 if (nladdr->nl_family != AF_NETLINK) 934 return -EINVAL; 935 936 /* Only superuser is allowed to listen multicasts */ 937 if (groups) { 938 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV)) 939 return -EPERM; 940 err = netlink_realloc_groups(sk); 941 if (err) 942 return err; 943 } 944 945 bound = nlk->bound; 946 if (bound) { 947 /* Ensure nlk->portid is up-to-date. */ 948 smp_rmb(); 949 950 if (nladdr->nl_pid != nlk->portid) 951 return -EINVAL; 952 } 953 954 if (nlk->netlink_bind && groups) { 955 int group; 956 957 for (group = 0; group < nlk->ngroups; group++) { 958 if (!test_bit(group, &groups)) 959 continue; 960 err = nlk->netlink_bind(net, group + 1); 961 if (!err) 962 continue; 963 netlink_undo_bind(group, groups, sk); 964 return err; 965 } 966 } 967 968 /* No need for barriers here as we return to user-space without 969 * using any of the bound attributes. 970 */ 971 if (!bound) { 972 err = nladdr->nl_pid ? 
973 netlink_insert(sk, nladdr->nl_pid) : 974 netlink_autobind(sock); 975 if (err) { 976 netlink_undo_bind(nlk->ngroups, groups, sk); 977 return err; 978 } 979 } 980 981 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0])) 982 return 0; 983 984 netlink_table_grab(); 985 netlink_update_subscriptions(sk, nlk->subscriptions + 986 hweight32(groups) - 987 hweight32(nlk->groups[0])); 988 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups; 989 netlink_update_listeners(sk); 990 netlink_table_ungrab(); 991 992 return 0; 993 } 994 995 static int netlink_connect(struct socket *sock, struct sockaddr *addr, 996 int alen, int flags) 997 { 998 int err = 0; 999 struct sock *sk = sock->sk; 1000 struct netlink_sock *nlk = nlk_sk(sk); 1001 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 1002 1003 if (alen < sizeof(addr->sa_family)) 1004 return -EINVAL; 1005 1006 if (addr->sa_family == AF_UNSPEC) { 1007 sk->sk_state = NETLINK_UNCONNECTED; 1008 nlk->dst_portid = 0; 1009 nlk->dst_group = 0; 1010 return 0; 1011 } 1012 if (addr->sa_family != AF_NETLINK) 1013 return -EINVAL; 1014 1015 if ((nladdr->nl_groups || nladdr->nl_pid) && 1016 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) 1017 return -EPERM; 1018 1019 /* No need for barriers here as we return to user-space without 1020 * using any of the bound attributes. 1021 */ 1022 if (!nlk->bound) 1023 err = netlink_autobind(sock); 1024 1025 if (err == 0) { 1026 sk->sk_state = NETLINK_CONNECTED; 1027 nlk->dst_portid = nladdr->nl_pid; 1028 nlk->dst_group = ffs(nladdr->nl_groups); 1029 } 1030 1031 return err; 1032 } 1033 1034 static int netlink_getname(struct socket *sock, struct sockaddr *addr, 1035 int *addr_len, int peer) 1036 { 1037 struct sock *sk = sock->sk; 1038 struct netlink_sock *nlk = nlk_sk(sk); 1039 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr); 1040 1041 nladdr->nl_family = AF_NETLINK; 1042 nladdr->nl_pad = 0; 1043 *addr_len = sizeof(*nladdr); 1044 1045 if (peer) { 1046 nladdr->nl_pid = nlk->dst_portid; 1047 nladdr->nl_groups = netlink_group_mask(nlk->dst_group); 1048 } else { 1049 nladdr->nl_pid = nlk->portid; 1050 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0; 1051 } 1052 return 0; 1053 } 1054 1055 static int netlink_ioctl(struct socket *sock, unsigned int cmd, 1056 unsigned long arg) 1057 { 1058 /* try to hand this ioctl down to the NIC drivers. 
 */
	return -ENOIOCTLCMD;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
{
	struct sk_buff *skb;
	void *data;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);
	if (data == NULL)
		return NULL;

	skb = __build_skb(data, size);
	if (skb == NULL)
		vfree(data);
	else
		skb->destructor = netlink_skb_destructor;

	return skb;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
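 *
 * Callers are expected to handle all three outcomes; netlink_unicast()
 * below does roughly the following (sketch only):
 *
 *	err = netlink_attachskb(sk, skb, &timeo, ssk);
 *	if (err == 1)
 *		goto retry;	- look the destination socket up again
 *	if (err)
 *		return err;	- skb already freed, reference dropped
 *	return netlink_sendskb(sk, skb);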
1132 */ 1133 int netlink_attachskb(struct sock *sk, struct sk_buff *skb, 1134 long *timeo, struct sock *ssk) 1135 { 1136 struct netlink_sock *nlk; 1137 1138 nlk = nlk_sk(sk); 1139 1140 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 1141 test_bit(NETLINK_S_CONGESTED, &nlk->state))) { 1142 DECLARE_WAITQUEUE(wait, current); 1143 if (!*timeo) { 1144 if (!ssk || netlink_is_kernel(ssk)) 1145 netlink_overrun(sk); 1146 sock_put(sk); 1147 kfree_skb(skb); 1148 return -EAGAIN; 1149 } 1150 1151 __set_current_state(TASK_INTERRUPTIBLE); 1152 add_wait_queue(&nlk->wait, &wait); 1153 1154 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 1155 test_bit(NETLINK_S_CONGESTED, &nlk->state)) && 1156 !sock_flag(sk, SOCK_DEAD)) 1157 *timeo = schedule_timeout(*timeo); 1158 1159 __set_current_state(TASK_RUNNING); 1160 remove_wait_queue(&nlk->wait, &wait); 1161 sock_put(sk); 1162 1163 if (signal_pending(current)) { 1164 kfree_skb(skb); 1165 return sock_intr_errno(*timeo); 1166 } 1167 return 1; 1168 } 1169 netlink_skb_set_owner_r(skb, sk); 1170 return 0; 1171 } 1172 1173 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) 1174 { 1175 int len = skb->len; 1176 1177 netlink_deliver_tap(skb); 1178 1179 skb_queue_tail(&sk->sk_receive_queue, skb); 1180 sk->sk_data_ready(sk); 1181 return len; 1182 } 1183 1184 int netlink_sendskb(struct sock *sk, struct sk_buff *skb) 1185 { 1186 int len = __netlink_sendskb(sk, skb); 1187 1188 sock_put(sk); 1189 return len; 1190 } 1191 1192 void netlink_detachskb(struct sock *sk, struct sk_buff *skb) 1193 { 1194 kfree_skb(skb); 1195 sock_put(sk); 1196 } 1197 1198 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) 1199 { 1200 int delta; 1201 1202 WARN_ON(skb->sk != NULL); 1203 delta = skb->end - skb->tail; 1204 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize) 1205 return skb; 1206 1207 if (skb_shared(skb)) { 1208 struct sk_buff *nskb = skb_clone(skb, allocation); 1209 if (!nskb) 1210 return skb; 1211 consume_skb(skb); 1212 skb = nskb; 1213 } 1214 1215 if (!pskb_expand_head(skb, 0, -delta, allocation)) 1216 skb->truesize -= delta; 1217 1218 return skb; 1219 } 1220 1221 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, 1222 struct sock *ssk) 1223 { 1224 int ret; 1225 struct netlink_sock *nlk = nlk_sk(sk); 1226 1227 ret = -ECONNREFUSED; 1228 if (nlk->netlink_rcv != NULL) { 1229 ret = skb->len; 1230 netlink_skb_set_owner_r(skb, sk); 1231 NETLINK_CB(skb).sk = ssk; 1232 netlink_deliver_tap_kernel(sk, ssk, skb); 1233 nlk->netlink_rcv(skb); 1234 consume_skb(skb); 1235 } else { 1236 kfree_skb(skb); 1237 } 1238 sock_put(sk); 1239 return ret; 1240 } 1241 1242 int netlink_unicast(struct sock *ssk, struct sk_buff *skb, 1243 u32 portid, int nonblock) 1244 { 1245 struct sock *sk; 1246 int err; 1247 long timeo; 1248 1249 skb = netlink_trim(skb, gfp_any()); 1250 1251 timeo = sock_sndtimeo(ssk, nonblock); 1252 retry: 1253 sk = netlink_getsockbyportid(ssk, portid); 1254 if (IS_ERR(sk)) { 1255 kfree_skb(skb); 1256 return PTR_ERR(sk); 1257 } 1258 if (netlink_is_kernel(sk)) 1259 return netlink_unicast_kernel(sk, skb, ssk); 1260 1261 if (sk_filter(sk, skb)) { 1262 err = skb->len; 1263 kfree_skb(skb); 1264 sock_put(sk); 1265 return err; 1266 } 1267 1268 err = netlink_attachskb(sk, skb, &timeo, ssk); 1269 if (err == 1) 1270 goto retry; 1271 if (err) 1272 return err; 1273 1274 return netlink_sendskb(sk, skb); 1275 } 1276 EXPORT_SYMBOL(netlink_unicast); 1277 1278 int netlink_has_listeners(struct sock *sk, unsigned int group) 1279 { 
1280 int res = 0; 1281 struct listeners *listeners; 1282 1283 BUG_ON(!netlink_is_kernel(sk)); 1284 1285 rcu_read_lock(); 1286 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); 1287 1288 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups) 1289 res = test_bit(group - 1, listeners->masks); 1290 1291 rcu_read_unlock(); 1292 1293 return res; 1294 } 1295 EXPORT_SYMBOL_GPL(netlink_has_listeners); 1296 1297 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) 1298 { 1299 struct netlink_sock *nlk = nlk_sk(sk); 1300 1301 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 1302 !test_bit(NETLINK_S_CONGESTED, &nlk->state)) { 1303 netlink_skb_set_owner_r(skb, sk); 1304 __netlink_sendskb(sk, skb); 1305 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); 1306 } 1307 return -1; 1308 } 1309 1310 struct netlink_broadcast_data { 1311 struct sock *exclude_sk; 1312 struct net *net; 1313 u32 portid; 1314 u32 group; 1315 int failure; 1316 int delivery_failure; 1317 int congested; 1318 int delivered; 1319 gfp_t allocation; 1320 struct sk_buff *skb, *skb2; 1321 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data); 1322 void *tx_data; 1323 }; 1324 1325 static void do_one_broadcast(struct sock *sk, 1326 struct netlink_broadcast_data *p) 1327 { 1328 struct netlink_sock *nlk = nlk_sk(sk); 1329 int val; 1330 1331 if (p->exclude_sk == sk) 1332 return; 1333 1334 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups || 1335 !test_bit(p->group - 1, nlk->groups)) 1336 return; 1337 1338 if (!net_eq(sock_net(sk), p->net)) { 1339 if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID)) 1340 return; 1341 1342 if (!peernet_has_id(sock_net(sk), p->net)) 1343 return; 1344 1345 if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns, 1346 CAP_NET_BROADCAST)) 1347 return; 1348 } 1349 1350 if (p->failure) { 1351 netlink_overrun(sk); 1352 return; 1353 } 1354 1355 sock_hold(sk); 1356 if (p->skb2 == NULL) { 1357 if (skb_shared(p->skb)) { 1358 p->skb2 = skb_clone(p->skb, p->allocation); 1359 } else { 1360 p->skb2 = skb_get(p->skb); 1361 /* 1362 * skb ownership may have been set when 1363 * delivered to a previous socket. 1364 */ 1365 skb_orphan(p->skb2); 1366 } 1367 } 1368 if (p->skb2 == NULL) { 1369 netlink_overrun(sk); 1370 /* Clone failed. Notify ALL listeners. 
*/ 1371 p->failure = 1; 1372 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR) 1373 p->delivery_failure = 1; 1374 goto out; 1375 } 1376 if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) { 1377 kfree_skb(p->skb2); 1378 p->skb2 = NULL; 1379 goto out; 1380 } 1381 if (sk_filter(sk, p->skb2)) { 1382 kfree_skb(p->skb2); 1383 p->skb2 = NULL; 1384 goto out; 1385 } 1386 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net); 1387 NETLINK_CB(p->skb2).nsid_is_set = true; 1388 val = netlink_broadcast_deliver(sk, p->skb2); 1389 if (val < 0) { 1390 netlink_overrun(sk); 1391 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR) 1392 p->delivery_failure = 1; 1393 } else { 1394 p->congested |= val; 1395 p->delivered = 1; 1396 p->skb2 = NULL; 1397 } 1398 out: 1399 sock_put(sk); 1400 } 1401 1402 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid, 1403 u32 group, gfp_t allocation, 1404 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), 1405 void *filter_data) 1406 { 1407 struct net *net = sock_net(ssk); 1408 struct netlink_broadcast_data info; 1409 struct sock *sk; 1410 1411 skb = netlink_trim(skb, allocation); 1412 1413 info.exclude_sk = ssk; 1414 info.net = net; 1415 info.portid = portid; 1416 info.group = group; 1417 info.failure = 0; 1418 info.delivery_failure = 0; 1419 info.congested = 0; 1420 info.delivered = 0; 1421 info.allocation = allocation; 1422 info.skb = skb; 1423 info.skb2 = NULL; 1424 info.tx_filter = filter; 1425 info.tx_data = filter_data; 1426 1427 /* While we sleep in clone, do not allow to change socket list */ 1428 1429 netlink_lock_table(); 1430 1431 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) 1432 do_one_broadcast(sk, &info); 1433 1434 consume_skb(skb); 1435 1436 netlink_unlock_table(); 1437 1438 if (info.delivery_failure) { 1439 kfree_skb(info.skb2); 1440 return -ENOBUFS; 1441 } 1442 consume_skb(info.skb2); 1443 1444 if (info.delivered) { 1445 if (info.congested && gfpflags_allow_blocking(allocation)) 1446 yield(); 1447 return 0; 1448 } 1449 return -ESRCH; 1450 } 1451 EXPORT_SYMBOL(netlink_broadcast_filtered); 1452 1453 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid, 1454 u32 group, gfp_t allocation) 1455 { 1456 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation, 1457 NULL, NULL); 1458 } 1459 EXPORT_SYMBOL(netlink_broadcast); 1460 1461 struct netlink_set_err_data { 1462 struct sock *exclude_sk; 1463 u32 portid; 1464 u32 group; 1465 int code; 1466 }; 1467 1468 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p) 1469 { 1470 struct netlink_sock *nlk = nlk_sk(sk); 1471 int ret = 0; 1472 1473 if (sk == p->exclude_sk) 1474 goto out; 1475 1476 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk))) 1477 goto out; 1478 1479 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups || 1480 !test_bit(p->group - 1, nlk->groups)) 1481 goto out; 1482 1483 if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) { 1484 ret = 1; 1485 goto out; 1486 } 1487 1488 sk->sk_err = p->code; 1489 sk->sk_error_report(sk); 1490 out: 1491 return ret; 1492 } 1493 1494 /** 1495 * netlink_set_err - report error to broadcast listeners 1496 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create() 1497 * @portid: the PORTID of a process that we want to skip (if any) 1498 * @group: the broadcast group that will notice the error 1499 * @code: error code, must be negative (as usual in kernelspace) 1500 * 1501 * This function returns the number of broadcast 
listeners that have set the 1502 * NETLINK_NO_ENOBUFS socket option. 1503 */ 1504 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code) 1505 { 1506 struct netlink_set_err_data info; 1507 struct sock *sk; 1508 int ret = 0; 1509 1510 info.exclude_sk = ssk; 1511 info.portid = portid; 1512 info.group = group; 1513 /* sk->sk_err wants a positive error value */ 1514 info.code = -code; 1515 1516 read_lock(&nl_table_lock); 1517 1518 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) 1519 ret += do_one_set_err(sk, &info); 1520 1521 read_unlock(&nl_table_lock); 1522 return ret; 1523 } 1524 EXPORT_SYMBOL(netlink_set_err); 1525 1526 /* must be called with netlink table grabbed */ 1527 static void netlink_update_socket_mc(struct netlink_sock *nlk, 1528 unsigned int group, 1529 int is_new) 1530 { 1531 int old, new = !!is_new, subscriptions; 1532 1533 old = test_bit(group - 1, nlk->groups); 1534 subscriptions = nlk->subscriptions - old + new; 1535 if (new) 1536 __set_bit(group - 1, nlk->groups); 1537 else 1538 __clear_bit(group - 1, nlk->groups); 1539 netlink_update_subscriptions(&nlk->sk, subscriptions); 1540 netlink_update_listeners(&nlk->sk); 1541 } 1542 1543 static int netlink_setsockopt(struct socket *sock, int level, int optname, 1544 char __user *optval, unsigned int optlen) 1545 { 1546 struct sock *sk = sock->sk; 1547 struct netlink_sock *nlk = nlk_sk(sk); 1548 unsigned int val = 0; 1549 int err; 1550 1551 if (level != SOL_NETLINK) 1552 return -ENOPROTOOPT; 1553 1554 if (optlen >= sizeof(int) && 1555 get_user(val, (unsigned int __user *)optval)) 1556 return -EFAULT; 1557 1558 switch (optname) { 1559 case NETLINK_PKTINFO: 1560 if (val) 1561 nlk->flags |= NETLINK_F_RECV_PKTINFO; 1562 else 1563 nlk->flags &= ~NETLINK_F_RECV_PKTINFO; 1564 err = 0; 1565 break; 1566 case NETLINK_ADD_MEMBERSHIP: 1567 case NETLINK_DROP_MEMBERSHIP: { 1568 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV)) 1569 return -EPERM; 1570 err = netlink_realloc_groups(sk); 1571 if (err) 1572 return err; 1573 if (!val || val - 1 >= nlk->ngroups) 1574 return -EINVAL; 1575 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) { 1576 err = nlk->netlink_bind(sock_net(sk), val); 1577 if (err) 1578 return err; 1579 } 1580 netlink_table_grab(); 1581 netlink_update_socket_mc(nlk, val, 1582 optname == NETLINK_ADD_MEMBERSHIP); 1583 netlink_table_ungrab(); 1584 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind) 1585 nlk->netlink_unbind(sock_net(sk), val); 1586 1587 err = 0; 1588 break; 1589 } 1590 case NETLINK_BROADCAST_ERROR: 1591 if (val) 1592 nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR; 1593 else 1594 nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR; 1595 err = 0; 1596 break; 1597 case NETLINK_NO_ENOBUFS: 1598 if (val) { 1599 nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS; 1600 clear_bit(NETLINK_S_CONGESTED, &nlk->state); 1601 wake_up_interruptible(&nlk->wait); 1602 } else { 1603 nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS; 1604 } 1605 err = 0; 1606 break; 1607 case NETLINK_LISTEN_ALL_NSID: 1608 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST)) 1609 return -EPERM; 1610 1611 if (val) 1612 nlk->flags |= NETLINK_F_LISTEN_ALL_NSID; 1613 else 1614 nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID; 1615 err = 0; 1616 break; 1617 case NETLINK_CAP_ACK: 1618 if (val) 1619 nlk->flags |= NETLINK_F_CAP_ACK; 1620 else 1621 nlk->flags &= ~NETLINK_F_CAP_ACK; 1622 err = 0; 1623 break; 1624 default: 1625 err = -ENOPROTOOPT; 1626 } 1627 return err; 1628 } 1629 1630 static int netlink_getsockopt(struct socket *sock, int level, 
int optname, 1631 char __user *optval, int __user *optlen) 1632 { 1633 struct sock *sk = sock->sk; 1634 struct netlink_sock *nlk = nlk_sk(sk); 1635 int len, val, err; 1636 1637 if (level != SOL_NETLINK) 1638 return -ENOPROTOOPT; 1639 1640 if (get_user(len, optlen)) 1641 return -EFAULT; 1642 if (len < 0) 1643 return -EINVAL; 1644 1645 switch (optname) { 1646 case NETLINK_PKTINFO: 1647 if (len < sizeof(int)) 1648 return -EINVAL; 1649 len = sizeof(int); 1650 val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0; 1651 if (put_user(len, optlen) || 1652 put_user(val, optval)) 1653 return -EFAULT; 1654 err = 0; 1655 break; 1656 case NETLINK_BROADCAST_ERROR: 1657 if (len < sizeof(int)) 1658 return -EINVAL; 1659 len = sizeof(int); 1660 val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0; 1661 if (put_user(len, optlen) || 1662 put_user(val, optval)) 1663 return -EFAULT; 1664 err = 0; 1665 break; 1666 case NETLINK_NO_ENOBUFS: 1667 if (len < sizeof(int)) 1668 return -EINVAL; 1669 len = sizeof(int); 1670 val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0; 1671 if (put_user(len, optlen) || 1672 put_user(val, optval)) 1673 return -EFAULT; 1674 err = 0; 1675 break; 1676 case NETLINK_LIST_MEMBERSHIPS: { 1677 int pos, idx, shift; 1678 1679 err = 0; 1680 netlink_lock_table(); 1681 for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) { 1682 if (len - pos < sizeof(u32)) 1683 break; 1684 1685 idx = pos / sizeof(unsigned long); 1686 shift = (pos % sizeof(unsigned long)) * 8; 1687 if (put_user((u32)(nlk->groups[idx] >> shift), 1688 (u32 __user *)(optval + pos))) { 1689 err = -EFAULT; 1690 break; 1691 } 1692 } 1693 if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen)) 1694 err = -EFAULT; 1695 netlink_unlock_table(); 1696 break; 1697 } 1698 case NETLINK_CAP_ACK: 1699 if (len < sizeof(int)) 1700 return -EINVAL; 1701 len = sizeof(int); 1702 val = nlk->flags & NETLINK_F_CAP_ACK ? 
1 : 0; 1703 if (put_user(len, optlen) || 1704 put_user(val, optval)) 1705 return -EFAULT; 1706 err = 0; 1707 break; 1708 default: 1709 err = -ENOPROTOOPT; 1710 } 1711 return err; 1712 } 1713 1714 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) 1715 { 1716 struct nl_pktinfo info; 1717 1718 info.group = NETLINK_CB(skb).dst_group; 1719 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info); 1720 } 1721 1722 static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg, 1723 struct sk_buff *skb) 1724 { 1725 if (!NETLINK_CB(skb).nsid_is_set) 1726 return; 1727 1728 put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int), 1729 &NETLINK_CB(skb).nsid); 1730 } 1731 1732 static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 1733 { 1734 struct sock *sk = sock->sk; 1735 struct netlink_sock *nlk = nlk_sk(sk); 1736 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name); 1737 u32 dst_portid; 1738 u32 dst_group; 1739 struct sk_buff *skb; 1740 int err; 1741 struct scm_cookie scm; 1742 u32 netlink_skb_flags = 0; 1743 1744 if (msg->msg_flags&MSG_OOB) 1745 return -EOPNOTSUPP; 1746 1747 err = scm_send(sock, msg, &scm, true); 1748 if (err < 0) 1749 return err; 1750 1751 if (msg->msg_namelen) { 1752 err = -EINVAL; 1753 if (addr->nl_family != AF_NETLINK) 1754 goto out; 1755 dst_portid = addr->nl_pid; 1756 dst_group = ffs(addr->nl_groups); 1757 err = -EPERM; 1758 if ((dst_group || dst_portid) && 1759 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) 1760 goto out; 1761 netlink_skb_flags |= NETLINK_SKB_DST; 1762 } else { 1763 dst_portid = nlk->dst_portid; 1764 dst_group = nlk->dst_group; 1765 } 1766 1767 if (!nlk->bound) { 1768 err = netlink_autobind(sock); 1769 if (err) 1770 goto out; 1771 } else { 1772 /* Ensure nlk is hashed and visible. 
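 * This read barrier pairs with the smp_wmb() in netlink_insert(), which
 * publishes the hash insertion before setting nlk->bound, so a socket
 * seen as bound here also has a valid portid.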
*/ 1773 smp_rmb(); 1774 } 1775 1776 err = -EMSGSIZE; 1777 if (len > sk->sk_sndbuf - 32) 1778 goto out; 1779 err = -ENOBUFS; 1780 skb = netlink_alloc_large_skb(len, dst_group); 1781 if (skb == NULL) 1782 goto out; 1783 1784 NETLINK_CB(skb).portid = nlk->portid; 1785 NETLINK_CB(skb).dst_group = dst_group; 1786 NETLINK_CB(skb).creds = scm.creds; 1787 NETLINK_CB(skb).flags = netlink_skb_flags; 1788 1789 err = -EFAULT; 1790 if (memcpy_from_msg(skb_put(skb, len), msg, len)) { 1791 kfree_skb(skb); 1792 goto out; 1793 } 1794 1795 err = security_netlink_send(sk, skb); 1796 if (err) { 1797 kfree_skb(skb); 1798 goto out; 1799 } 1800 1801 if (dst_group) { 1802 atomic_inc(&skb->users); 1803 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL); 1804 } 1805 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT); 1806 1807 out: 1808 scm_destroy(&scm); 1809 return err; 1810 } 1811 1812 static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 1813 int flags) 1814 { 1815 struct scm_cookie scm; 1816 struct sock *sk = sock->sk; 1817 struct netlink_sock *nlk = nlk_sk(sk); 1818 int noblock = flags&MSG_DONTWAIT; 1819 size_t copied; 1820 struct sk_buff *skb, *data_skb; 1821 int err, ret; 1822 1823 if (flags&MSG_OOB) 1824 return -EOPNOTSUPP; 1825 1826 copied = 0; 1827 1828 skb = skb_recv_datagram(sk, flags, noblock, &err); 1829 if (skb == NULL) 1830 goto out; 1831 1832 data_skb = skb; 1833 1834 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES 1835 if (unlikely(skb_shinfo(skb)->frag_list)) { 1836 /* 1837 * If this skb has a frag_list, then here that means that we 1838 * will have to use the frag_list skb's data for compat tasks 1839 * and the regular skb's data for normal (non-compat) tasks. 1840 * 1841 * If we need to send the compat skb, assign it to the 1842 * 'data_skb' variable so that it will be used below for data 1843 * copying. We keep 'skb' for everything else, including 1844 * freeing both later. 1845 */ 1846 if (flags & MSG_CMSG_COMPAT) 1847 data_skb = skb_shinfo(skb)->frag_list; 1848 } 1849 #endif 1850 1851 /* Record the max length of recvmsg() calls for future allocations */ 1852 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len); 1853 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len, 1854 SKB_WITH_OVERHEAD(32768)); 1855 1856 copied = data_skb->len; 1857 if (len < copied) { 1858 msg->msg_flags |= MSG_TRUNC; 1859 copied = len; 1860 } 1861 1862 skb_reset_transport_header(data_skb); 1863 err = skb_copy_datagram_msg(data_skb, 0, msg, copied); 1864 1865 if (msg->msg_name) { 1866 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name); 1867 addr->nl_family = AF_NETLINK; 1868 addr->nl_pad = 0; 1869 addr->nl_pid = NETLINK_CB(skb).portid; 1870 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group); 1871 msg->msg_namelen = sizeof(*addr); 1872 } 1873 1874 if (nlk->flags & NETLINK_F_RECV_PKTINFO) 1875 netlink_cmsg_recv_pktinfo(msg, skb); 1876 if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID) 1877 netlink_cmsg_listen_all_nsid(sk, msg, skb); 1878 1879 memset(&scm, 0, sizeof(scm)); 1880 scm.creds = *NETLINK_CREDS(skb); 1881 if (flags & MSG_TRUNC) 1882 copied = data_skb->len; 1883 1884 skb_free_datagram(sk, skb); 1885 1886 if (nlk->cb_running && 1887 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { 1888 ret = netlink_dump(sk); 1889 if (ret) { 1890 sk->sk_err = -ret; 1891 sk->sk_error_report(sk); 1892 } 1893 } 1894 1895 scm_recv(sock, msg, &scm, flags); 1896 out: 1897 netlink_rcv_wake(sk); 1898 return err ? 
: copied; 1899 } 1900 1901 static void netlink_data_ready(struct sock *sk) 1902 { 1903 BUG(); 1904 } 1905 1906 /* 1907 * We export these functions to other modules. They provide a 1908 * complete set of kernel non-blocking support for message 1909 * queueing. 1910 */ 1911 1912 struct sock * 1913 __netlink_kernel_create(struct net *net, int unit, struct module *module, 1914 struct netlink_kernel_cfg *cfg) 1915 { 1916 struct socket *sock; 1917 struct sock *sk; 1918 struct netlink_sock *nlk; 1919 struct listeners *listeners = NULL; 1920 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL; 1921 unsigned int groups; 1922 1923 BUG_ON(!nl_table); 1924 1925 if (unit < 0 || unit >= MAX_LINKS) 1926 return NULL; 1927 1928 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock)) 1929 return NULL; 1930 1931 if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0) 1932 goto out_sock_release_nosk; 1933 1934 sk = sock->sk; 1935 1936 if (!cfg || cfg->groups < 32) 1937 groups = 32; 1938 else 1939 groups = cfg->groups; 1940 1941 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL); 1942 if (!listeners) 1943 goto out_sock_release; 1944 1945 sk->sk_data_ready = netlink_data_ready; 1946 if (cfg && cfg->input) 1947 nlk_sk(sk)->netlink_rcv = cfg->input; 1948 1949 if (netlink_insert(sk, 0)) 1950 goto out_sock_release; 1951 1952 nlk = nlk_sk(sk); 1953 nlk->flags |= NETLINK_F_KERNEL_SOCKET; 1954 1955 netlink_table_grab(); 1956 if (!nl_table[unit].registered) { 1957 nl_table[unit].groups = groups; 1958 rcu_assign_pointer(nl_table[unit].listeners, listeners); 1959 nl_table[unit].cb_mutex = cb_mutex; 1960 nl_table[unit].module = module; 1961 if (cfg) { 1962 nl_table[unit].bind = cfg->bind; 1963 nl_table[unit].unbind = cfg->unbind; 1964 nl_table[unit].flags = cfg->flags; 1965 if (cfg->compare) 1966 nl_table[unit].compare = cfg->compare; 1967 } 1968 nl_table[unit].registered = 1; 1969 } else { 1970 kfree(listeners); 1971 nl_table[unit].registered++; 1972 } 1973 netlink_table_ungrab(); 1974 return sk; 1975 1976 out_sock_release: 1977 kfree(listeners); 1978 netlink_kernel_release(sk); 1979 return NULL; 1980 1981 out_sock_release_nosk: 1982 sock_release(sock); 1983 return NULL; 1984 } 1985 EXPORT_SYMBOL(__netlink_kernel_create); 1986 1987 void 1988 netlink_kernel_release(struct sock *sk) 1989 { 1990 if (sk == NULL || sk->sk_socket == NULL) 1991 return; 1992 1993 sock_release(sk->sk_socket); 1994 } 1995 EXPORT_SYMBOL(netlink_kernel_release); 1996 1997 int __netlink_change_ngroups(struct sock *sk, unsigned int groups) 1998 { 1999 struct listeners *new, *old; 2000 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; 2001 2002 if (groups < 32) 2003 groups = 32; 2004 2005 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { 2006 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); 2007 if (!new) 2008 return -ENOMEM; 2009 old = nl_deref_protected(tbl->listeners); 2010 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); 2011 rcu_assign_pointer(tbl->listeners, new); 2012 2013 kfree_rcu(old, rcu); 2014 } 2015 tbl->groups = groups; 2016 2017 return 0; 2018 } 2019 2020 /** 2021 * netlink_change_ngroups - change number of multicast groups 2022 * 2023 * This changes the number of multicast groups that are available 2024 * on a certain netlink family. Note that it is not possible to 2025 * change the number of groups to below 32. Also note that it does 2026 * not implicitly call netlink_clear_multicast_users() when the 2027 * number of groups is reduced. 
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}

void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}

struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	struct module *module;
	int len, err = -ENOBUFS;
	int alloc_min_size;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);
	if (!nlk->cb_running) {
		err = -EINVAL;
		goto errout_skb;
	}

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto errout_skb;

	/* NLMSG_GOODSIZE is small to avoid high order allocations being
	 * required, but it makes sense to _attempt_ a 16K bytes allocation
	 * to reduce number of system calls on dump operations, if user
	 * ever provided a big enough buffer.
	 */
	cb = &nlk->cb;
	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	if (alloc_min_size < nlk->max_recvmsg_len) {
		alloc_size = nlk->max_recvmsg_len;
		skb = alloc_skb(alloc_size,
				(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
				__GFP_NOWARN | __GFP_NORETRY);
	}
	if (!skb) {
		alloc_size = alloc_min_size;
		skb = alloc_skb(alloc_size, GFP_KERNEL);
	}
	if (!skb)
		goto errout_skb;

	/* Trim skb to allocated size. User is expected to provide buffer as
	 * large as max(min_dump_alloc, 16KiB (max_recvmsg_len capped at
	 * netlink_recvmsg())). dump will pack as many smaller messages as
	 * could fit within the allocated skb. skb is typically allocated
	 * with larger space than required (could be as much as near 2x the
	 * requested size with align to next power of 2 approach). Allowing
	 * dump to use the excess space makes it difficult for a user to have a
	 * reasonable static buffer based on the expected largest dump of a
	 * single netdev. The outcome is MSG_TRUNC error.
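 *
 * For example (sketch of the arithmetic, sizes are illustrative): if a
 * 16KiB data area gets rounded up to 32KiB by the allocator, the
 * skb_reserve() below hides the surplus tailroom so cb->dump() can fill
 * at most the 16KiB that was actually requested.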
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	struct module *module;
	int len, err = -ENOBUFS;
	int alloc_min_size;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);
	if (!nlk->cb_running) {
		err = -EINVAL;
		goto errout_skb;
	}

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto errout_skb;

	/* NLMSG_GOODSIZE is small to avoid high order allocations being
	 * required, but it makes sense to _attempt_ a 16KB allocation
	 * to reduce the number of system calls on dump operations, if the
	 * user ever provided a big enough buffer.
	 */
	cb = &nlk->cb;
	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	if (alloc_min_size < nlk->max_recvmsg_len) {
		alloc_size = nlk->max_recvmsg_len;
		skb = alloc_skb(alloc_size,
				(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
				__GFP_NOWARN | __GFP_NORETRY);
	}
	if (!skb) {
		alloc_size = alloc_min_size;
		skb = alloc_skb(alloc_size, GFP_KERNEL);
	}
	if (!skb)
		goto errout_skb;

	/* Trim skb to the allocated size. The user is expected to provide a
	 * buffer as large as max(min_dump_alloc, 16KiB (max_recvmsg_len capped
	 * at netlink_recvmsg())). The dump will pack as many smaller messages
	 * as fit within the allocated skb. The skb is typically allocated with
	 * more space than required (it can be nearly 2x the requested size
	 * with the align-to-next-power-of-2 approach). Allowing the dump to
	 * use the excess space would make it difficult for a user to size a
	 * reasonable static buffer based on the expected largest dump of a
	 * single netdev. The outcome would be a MSG_TRUNC error.
	 */
	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
	netlink_skb_set_owner_r(skb, sk);

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	if (cb->done)
		cb->done(cb);

	nlk->cb_running = false;
	module = cb->module;
	skb = cb->skb;
	mutex_unlock(nlk->cb_mutex);
	module_put(module);
	consume_skb(skb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}

int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	atomic_inc(&skb->users);

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		ret = -ECONNREFUSED;
		goto error_free;
	}

	nlk = nlk_sk(sk);
	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb_running) {
		ret = -EBUSY;
		goto error_unlock;
	}
	/* add reference of module which cb->dump belongs to */
	if (!try_module_get(control->module)) {
		ret = -EPROTONOSUPPORT;
		goto error_unlock;
	}

	cb = &nlk->cb;
	memset(cb, 0, sizeof(*cb));
	cb->start = control->start;
	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	cb->skb = skb;

	nlk->cb_running = true;

	mutex_unlock(nlk->cb_mutex);

	if (cb->start)
		cb->start(cb);

	ret = netlink_dump(sk);
	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;

error_unlock:
	sock_put(sk);
	mutex_unlock(nlk->cb_mutex);
error_free:
	kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
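
/*
 * Illustrative sketch (not part of this file): starting a dump from a
 * request handler via the netlink_dump_start() wrapper around
 * __netlink_dump_start().  test_nl_sk is the kernel socket from the earlier
 * sketch; TEST_MSG_TYPE and the test_* names are hypothetical.
 *
 *	// Called repeatedly by netlink_dump(); return a positive byte count
 *	// while there is more data and 0 once the dump is complete.
 *	static int test_dump(struct sk_buff *skb, struct netlink_callback *cb)
 *	{
 *		if (cb->args[0])		// already emitted on a prior pass
 *			return 0;
 *		if (!nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
 *			       cb->nlh->nlmsg_seq, TEST_MSG_TYPE, 0,
 *			       NLM_F_MULTI))
 *			return -EMSGSIZE;	// cannot happen for a fresh skb
 *		cb->args[0] = 1;		// dump state lives in cb->args[]
 *		return skb->len;
 *	}
 *
 *	static int test_handle_req(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *			struct netlink_dump_control c = {
 *				.dump = test_dump,
 *			};
 *
 *			// Returns -EINTR on success so no ACK is sent here.
 *			return netlink_dump_start(test_nl_sk, skb, nlh, &c);
 *		}
 *		return -EOPNOTSUPP;
 *	}
 */
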
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);
	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);

	/* Error messages get the original request appended, unless the user
	 * requests to cap the error message.
	 */
	if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);

int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
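
/*
 * Illustrative sketch (not part of this file): the usual pattern of wiring a
 * kernel socket's ->input callback to netlink_rcv_skb(), which walks the
 * messages in the skb and acks them as needed.  TEST_MSG_TYPE and the
 * test_* names are hypothetical.
 *
 *	static int test_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		switch (nlh->nlmsg_type) {
 *		case TEST_MSG_TYPE:
 *			// parse attributes with nlmsg_parse(), do the work,
 *			// return 0 on success or a negative errno
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 *	// Used as .input in struct netlink_kernel_cfg; called for each skb
 *	// unicast to the kernel socket.
 *	static void test_nl_rcv(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &test_rcv_msg);
 *	}
 */
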
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}

		/* errors reported via destination sk->sk_err, but propagate
		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
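
/*
 * Illustrative sketch (not part of this file): sending an event notification
 * with nlmsg_notify().  TEST_GRP_EVENTS is a hypothetical multicast group,
 * and test_build_msg() is assumed to return a fully built message (e.g. as
 * in the sketch after __nlmsg_put() above).
 *
 *	static void test_notify(struct sock *nl_sk, const struct nlmsghdr *req,
 *				u32 portid, u32 value)
 *	{
 *		struct sk_buff *skb;
 *		int report = req && (req->nlmsg_flags & NLM_F_ECHO);
 *
 *		skb = test_build_msg(portid, req ? req->nlmsg_seq : 0, value);
 *		if (!skb)
 *			return;
 *
 *		// Multicasts to TEST_GRP_EVENTS and, if the requester asked
 *		// for an echo, also unicasts a copy back to @portid.
 *		nlmsg_notify(nl_sk, skb, portid, TEST_GRP_EVENTS, report,
 *			     GFP_KERNEL);
 *	}
 */
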
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	struct rhashtable_iter hti;
	int link;
};

static int netlink_walk_start(struct nl_seq_iter *iter)
{
	int err;

	err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti,
				   GFP_KERNEL);
	if (err) {
		iter->link = MAX_LINKS;
		return err;
	}

	err = rhashtable_walk_start(&iter->hti);
	return err == -EAGAIN ? 0 : err;
}

static void netlink_walk_stop(struct nl_seq_iter *iter)
{
	rhashtable_walk_stop(&iter->hti);
	rhashtable_walk_exit(&iter->hti);
}

static void *__netlink_seq_next(struct seq_file *seq)
{
	struct nl_seq_iter *iter = seq->private;
	struct netlink_sock *nlk;

	do {
		for (;;) {
			int err;

			nlk = rhashtable_walk_next(&iter->hti);

			if (IS_ERR(nlk)) {
				if (PTR_ERR(nlk) == -EAGAIN)
					continue;

				return nlk;
			}

			if (nlk)
				break;

			netlink_walk_stop(iter);
			if (++iter->link >= MAX_LINKS)
				return NULL;

			err = netlink_walk_start(iter);
			if (err)
				return ERR_PTR(err);
		}
	} while (sock_net(&nlk->sk) != seq_file_net(seq));

	return nlk;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
{
	struct nl_seq_iter *iter = seq->private;
	void *obj = SEQ_START_TOKEN;
	loff_t pos;
	int err;

	iter->link = 0;

	err = netlink_walk_start(iter);
	if (err)
		return ERR_PTR(err);

	for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
		obj = __netlink_seq_next(seq);

	return obj;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return __netlink_seq_next(seq);
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	struct nl_seq_iter *iter = seq->private;

	if (iter->link >= MAX_LINKS)
		return;

	netlink_walk_stop(iter);
}

static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk Eth Pid Groups "
			 "Rmem Wmem Dump Locks Drops Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb_running,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);

	}
	return 0;
}

static const struct seq_operations netlink_seq_ops = {
	.start = netlink_seq_start,
	.next = netlink_seq_next,
	.stop = netlink_seq_stop,
	.show = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
			    sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
	.owner = THIS_MODULE,
	.open = netlink_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
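
/*
 * Illustrative sketch (not part of this file): using the notifier chain
 * above to learn when a userspace netlink socket is released
 * (NETLINK_URELEASE, delivered with a struct netlink_notify).  NETLINK_TEST
 * is the hypothetical protocol from the earlier sketch; the test_* names are
 * hypothetical as well.
 *
 *	static int test_netlink_event(struct notifier_block *nb,
 *				      unsigned long event, void *ptr)
 *	{
 *		struct netlink_notify *n = ptr;
 *
 *		if (event == NETLINK_URELEASE && n->protocol == NETLINK_TEST)
 *			pr_debug("portid %u released\n", n->portid);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block test_netlink_notifier = {
 *		.notifier_call = test_netlink_event,
 *	};
 *
 *	// typically from module init / exit:
 *	//	netlink_register_notifier(&test_netlink_notifier);
 *	//	netlink_unregister_notifier(&test_netlink_notifier);
 */
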
static const struct proto_ops netlink_ops = {
	.family = PF_NETLINK,
	.owner = THIS_MODULE,
	.release = netlink_release,
	.bind = netlink_bind,
	.connect = netlink_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = netlink_getname,
	.poll = datagram_poll,
	.ioctl = netlink_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = netlink_setsockopt,
	.getsockopt = netlink_getsockopt,
	.sendmsg = netlink_sendmsg,
	.recvmsg = netlink_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner = THIS_MODULE,	/* for consistency 8) */
};

static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}

static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}

static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};

static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
{
	const struct netlink_sock *nlk = data;
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
}

static const struct rhashtable_params netlink_rhashtable_params = {
	.head_offset = offsetof(struct netlink_sock, node),
	.key_len = netlink_compare_arg_len,
	.obj_hashfn = netlink_hash,
	.obj_cmpfn = netlink_compare,
	.automatic_shrinking = true,
};

static int __init netlink_proto_init(void)
{
	int i;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	for (i = 0; i < MAX_LINKS; i++) {
		if (rhashtable_init(&nl_table[i].hash,
				    &netlink_rhashtable_params) < 0) {
			while (--i > 0)
				rhashtable_destroy(&nl_table[i].hash);
			kfree(nl_table);
			goto panic;
		}
	}

	INIT_LIST_HEAD(&netlink_tap_all);

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);
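
/*
 * Illustrative sketch (not part of this file): NETLINK_USERSOCK, registered
 * in netlink_add_usersock_entry() with NL_CFG_F_NONROOT_SEND, lets
 * unprivileged userspace processes exchange raw netlink messages with each
 * other.  A minimal userspace sender might look like this (test_* is
 * hypothetical; portid 0 in the bind address lets the kernel pick one):
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *
 *	int test_usersock_send(uint32_t dst_portid)
 *	{
 *		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_USERSOCK);
 *		struct sockaddr_nl self = { .nl_family = AF_NETLINK };
 *		struct sockaddr_nl dst = { .nl_family = AF_NETLINK,
 *					   .nl_pid = dst_portid };
 *		struct { struct nlmsghdr nlh; char data[16]; } req;
 *
 *		if (fd < 0 || bind(fd, (struct sockaddr *)&self, sizeof(self)))
 *			return -1;
 *
 *		memset(&req, 0, sizeof(req));
 *		req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.data));
 *		req.nlh.nlmsg_type = NLMSG_MIN_TYPE;	// first non-control type
 *		strcpy(req.data, "hello");
 *		return sendto(fd, &req, req.nlh.nlmsg_len, 0,
 *			      (struct sockaddr *)&dst, sizeof(dst));
 *	}
 */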