1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * NETLINK Kernel-user communication protocol. 4 * 5 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> 6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> 7 * Patrick McHardy <kaber@trash.net> 8 * 9 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith 10 * added netlink_proto_exit 11 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br> 12 * use nlk_sk, as sk->protinfo is on a diet 8) 13 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org> 14 * - inc module use count of module that owns 15 * the kernel socket in case userspace opens 16 * socket of same protocol 17 * - remove all module support, since netlink is 18 * mandatory if CONFIG_NET=y these days 19 */ 20 21 #include <linux/module.h> 22 23 #include <linux/bpf.h> 24 #include <linux/capability.h> 25 #include <linux/kernel.h> 26 #include <linux/filter.h> 27 #include <linux/init.h> 28 #include <linux/signal.h> 29 #include <linux/sched.h> 30 #include <linux/errno.h> 31 #include <linux/string.h> 32 #include <linux/stat.h> 33 #include <linux/socket.h> 34 #include <linux/un.h> 35 #include <linux/fcntl.h> 36 #include <linux/termios.h> 37 #include <linux/sockios.h> 38 #include <linux/net.h> 39 #include <linux/fs.h> 40 #include <linux/slab.h> 41 #include <linux/uaccess.h> 42 #include <linux/skbuff.h> 43 #include <linux/netdevice.h> 44 #include <linux/rtnetlink.h> 45 #include <linux/proc_fs.h> 46 #include <linux/seq_file.h> 47 #include <linux/notifier.h> 48 #include <linux/security.h> 49 #include <linux/jhash.h> 50 #include <linux/jiffies.h> 51 #include <linux/random.h> 52 #include <linux/bitops.h> 53 #include <linux/mm.h> 54 #include <linux/types.h> 55 #include <linux/audit.h> 56 #include <linux/mutex.h> 57 #include <linux/vmalloc.h> 58 #include <linux/if_arp.h> 59 #include <linux/rhashtable.h> 60 #include <asm/cacheflush.h> 61 #include <linux/hash.h> 62 #include <linux/genetlink.h> 63 #include <linux/net_namespace.h> 64 #include <linux/nospec.h> 65 #include <linux/btf_ids.h> 66 67 #include <net/net_namespace.h> 68 #include <net/netns/generic.h> 69 #include <net/sock.h> 70 #include <net/scm.h> 71 #include <net/netlink.h> 72 #define CREATE_TRACE_POINTS 73 #include <trace/events/netlink.h> 74 75 #include "af_netlink.h" 76 77 struct listeners { 78 struct rcu_head rcu; 79 unsigned long masks[]; 80 }; 81 82 /* state bits */ 83 #define NETLINK_S_CONGESTED 0x0 84 85 static inline int netlink_is_kernel(struct sock *sk) 86 { 87 return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET; 88 } 89 90 struct netlink_table *nl_table __read_mostly; 91 EXPORT_SYMBOL_GPL(nl_table); 92 93 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); 94 95 static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS]; 96 97 static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = { 98 "nlk_cb_mutex-ROUTE", 99 "nlk_cb_mutex-1", 100 "nlk_cb_mutex-USERSOCK", 101 "nlk_cb_mutex-FIREWALL", 102 "nlk_cb_mutex-SOCK_DIAG", 103 "nlk_cb_mutex-NFLOG", 104 "nlk_cb_mutex-XFRM", 105 "nlk_cb_mutex-SELINUX", 106 "nlk_cb_mutex-ISCSI", 107 "nlk_cb_mutex-AUDIT", 108 "nlk_cb_mutex-FIB_LOOKUP", 109 "nlk_cb_mutex-CONNECTOR", 110 "nlk_cb_mutex-NETFILTER", 111 "nlk_cb_mutex-IP6_FW", 112 "nlk_cb_mutex-DNRTMSG", 113 "nlk_cb_mutex-KOBJECT_UEVENT", 114 "nlk_cb_mutex-GENERIC", 115 "nlk_cb_mutex-17", 116 "nlk_cb_mutex-SCSITRANSPORT", 117 "nlk_cb_mutex-ECRYPTFS", 118 "nlk_cb_mutex-RDMA", 119 "nlk_cb_mutex-CRYPTO", 120 "nlk_cb_mutex-SMC", 121 "nlk_cb_mutex-23", 122 "nlk_cb_mutex-24", 123 "nlk_cb_mutex-25", 124 "nlk_cb_mutex-26", 125 
"nlk_cb_mutex-27", 126 "nlk_cb_mutex-28", 127 "nlk_cb_mutex-29", 128 "nlk_cb_mutex-30", 129 "nlk_cb_mutex-31", 130 "nlk_cb_mutex-MAX_LINKS" 131 }; 132 133 static int netlink_dump(struct sock *sk); 134 135 /* nl_table locking explained: 136 * Lookup and traversal are protected with an RCU read-side lock. Insertion 137 * and removal are protected with per bucket lock while using RCU list 138 * modification primitives and may run in parallel to RCU protected lookups. 139 * Destruction of the Netlink socket may only occur *after* nl_table_lock has 140 * been acquired * either during or after the socket has been removed from 141 * the list and after an RCU grace period. 142 */ 143 DEFINE_RWLOCK(nl_table_lock); 144 EXPORT_SYMBOL_GPL(nl_table_lock); 145 static atomic_t nl_table_users = ATOMIC_INIT(0); 146 147 #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock)); 148 149 static BLOCKING_NOTIFIER_HEAD(netlink_chain); 150 151 152 static const struct rhashtable_params netlink_rhashtable_params; 153 154 void do_trace_netlink_extack(const char *msg) 155 { 156 trace_netlink_extack(msg); 157 } 158 EXPORT_SYMBOL(do_trace_netlink_extack); 159 160 static inline u32 netlink_group_mask(u32 group) 161 { 162 if (group > 32) 163 return 0; 164 return group ? 1 << (group - 1) : 0; 165 } 166 167 static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb, 168 gfp_t gfp_mask) 169 { 170 unsigned int len = skb_end_offset(skb); 171 struct sk_buff *new; 172 173 new = alloc_skb(len, gfp_mask); 174 if (new == NULL) 175 return NULL; 176 177 NETLINK_CB(new).portid = NETLINK_CB(skb).portid; 178 NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group; 179 NETLINK_CB(new).creds = NETLINK_CB(skb).creds; 180 181 skb_put_data(new, skb->data, len); 182 return new; 183 } 184 185 static unsigned int netlink_tap_net_id; 186 187 struct netlink_tap_net { 188 struct list_head netlink_tap_all; 189 struct mutex netlink_tap_lock; 190 }; 191 192 int netlink_add_tap(struct netlink_tap *nt) 193 { 194 struct net *net = dev_net(nt->dev); 195 struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id); 196 197 if (unlikely(nt->dev->type != ARPHRD_NETLINK)) 198 return -EINVAL; 199 200 mutex_lock(&nn->netlink_tap_lock); 201 list_add_rcu(&nt->list, &nn->netlink_tap_all); 202 mutex_unlock(&nn->netlink_tap_lock); 203 204 __module_get(nt->module); 205 206 return 0; 207 } 208 EXPORT_SYMBOL_GPL(netlink_add_tap); 209 210 static int __netlink_remove_tap(struct netlink_tap *nt) 211 { 212 struct net *net = dev_net(nt->dev); 213 struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id); 214 bool found = false; 215 struct netlink_tap *tmp; 216 217 mutex_lock(&nn->netlink_tap_lock); 218 219 list_for_each_entry(tmp, &nn->netlink_tap_all, list) { 220 if (nt == tmp) { 221 list_del_rcu(&nt->list); 222 found = true; 223 goto out; 224 } 225 } 226 227 pr_warn("__netlink_remove_tap: %p not found\n", nt); 228 out: 229 mutex_unlock(&nn->netlink_tap_lock); 230 231 if (found) 232 module_put(nt->module); 233 234 return found ? 
0 : -ENODEV; 235 } 236 237 int netlink_remove_tap(struct netlink_tap *nt) 238 { 239 int ret; 240 241 ret = __netlink_remove_tap(nt); 242 synchronize_net(); 243 244 return ret; 245 } 246 EXPORT_SYMBOL_GPL(netlink_remove_tap); 247 248 static __net_init int netlink_tap_init_net(struct net *net) 249 { 250 struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id); 251 252 INIT_LIST_HEAD(&nn->netlink_tap_all); 253 mutex_init(&nn->netlink_tap_lock); 254 return 0; 255 } 256 257 static struct pernet_operations netlink_tap_net_ops = { 258 .init = netlink_tap_init_net, 259 .id = &netlink_tap_net_id, 260 .size = sizeof(struct netlink_tap_net), 261 }; 262 263 static bool netlink_filter_tap(const struct sk_buff *skb) 264 { 265 struct sock *sk = skb->sk; 266 267 /* We take the more conservative approach and 268 * whitelist socket protocols that may pass. 269 */ 270 switch (sk->sk_protocol) { 271 case NETLINK_ROUTE: 272 case NETLINK_USERSOCK: 273 case NETLINK_SOCK_DIAG: 274 case NETLINK_NFLOG: 275 case NETLINK_XFRM: 276 case NETLINK_FIB_LOOKUP: 277 case NETLINK_NETFILTER: 278 case NETLINK_GENERIC: 279 return true; 280 } 281 282 return false; 283 } 284 285 static int __netlink_deliver_tap_skb(struct sk_buff *skb, 286 struct net_device *dev) 287 { 288 struct sk_buff *nskb; 289 struct sock *sk = skb->sk; 290 int ret = -ENOMEM; 291 292 if (!net_eq(dev_net(dev), sock_net(sk))) 293 return 0; 294 295 dev_hold(dev); 296 297 if (is_vmalloc_addr(skb->head)) 298 nskb = netlink_to_full_skb(skb, GFP_ATOMIC); 299 else 300 nskb = skb_clone(skb, GFP_ATOMIC); 301 if (nskb) { 302 nskb->dev = dev; 303 nskb->protocol = htons((u16) sk->sk_protocol); 304 nskb->pkt_type = netlink_is_kernel(sk) ? 305 PACKET_KERNEL : PACKET_USER; 306 skb_reset_network_header(nskb); 307 ret = dev_queue_xmit(nskb); 308 if (unlikely(ret > 0)) 309 ret = net_xmit_errno(ret); 310 } 311 312 dev_put(dev); 313 return ret; 314 } 315 316 static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn) 317 { 318 int ret; 319 struct netlink_tap *tmp; 320 321 if (!netlink_filter_tap(skb)) 322 return; 323 324 list_for_each_entry_rcu(tmp, &nn->netlink_tap_all, list) { 325 ret = __netlink_deliver_tap_skb(skb, tmp->dev); 326 if (unlikely(ret)) 327 break; 328 } 329 } 330 331 static void netlink_deliver_tap(struct net *net, struct sk_buff *skb) 332 { 333 struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id); 334 335 rcu_read_lock(); 336 337 if (unlikely(!list_empty(&nn->netlink_tap_all))) 338 __netlink_deliver_tap(skb, nn); 339 340 rcu_read_unlock(); 341 } 342 343 static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src, 344 struct sk_buff *skb) 345 { 346 if (!(netlink_is_kernel(dst) && netlink_is_kernel(src))) 347 netlink_deliver_tap(sock_net(dst), skb); 348 } 349 350 static void netlink_overrun(struct sock *sk) 351 { 352 struct netlink_sock *nlk = nlk_sk(sk); 353 354 if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) { 355 if (!test_and_set_bit(NETLINK_S_CONGESTED, 356 &nlk_sk(sk)->state)) { 357 sk->sk_err = ENOBUFS; 358 sk_error_report(sk); 359 } 360 } 361 atomic_inc(&sk->sk_drops); 362 } 363 364 static void netlink_rcv_wake(struct sock *sk) 365 { 366 struct netlink_sock *nlk = nlk_sk(sk); 367 368 if (skb_queue_empty_lockless(&sk->sk_receive_queue)) 369 clear_bit(NETLINK_S_CONGESTED, &nlk->state); 370 if (!test_bit(NETLINK_S_CONGESTED, &nlk->state)) 371 wake_up_interruptible(&nlk->wait); 372 } 373 374 static void netlink_skb_destructor(struct sk_buff *skb) 375 { 376 if (is_vmalloc_addr(skb->head)) { 377 if 
(!skb->cloned || 378 !atomic_dec_return(&(skb_shinfo(skb)->dataref))) 379 vfree(skb->head); 380 381 skb->head = NULL; 382 } 383 if (skb->sk != NULL) 384 sock_rfree(skb); 385 } 386 387 static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) 388 { 389 WARN_ON(skb->sk != NULL); 390 skb->sk = sk; 391 skb->destructor = netlink_skb_destructor; 392 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 393 sk_mem_charge(sk, skb->truesize); 394 } 395 396 static void netlink_sock_destruct(struct sock *sk) 397 { 398 struct netlink_sock *nlk = nlk_sk(sk); 399 400 if (nlk->cb_running) { 401 if (nlk->cb.done) 402 nlk->cb.done(&nlk->cb); 403 module_put(nlk->cb.module); 404 kfree_skb(nlk->cb.skb); 405 } 406 407 skb_queue_purge(&sk->sk_receive_queue); 408 409 if (!sock_flag(sk, SOCK_DEAD)) { 410 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk); 411 return; 412 } 413 414 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); 415 WARN_ON(refcount_read(&sk->sk_wmem_alloc)); 416 WARN_ON(nlk_sk(sk)->groups); 417 } 418 419 static void netlink_sock_destruct_work(struct work_struct *work) 420 { 421 struct netlink_sock *nlk = container_of(work, struct netlink_sock, 422 work); 423 424 sk_free(&nlk->sk); 425 } 426 427 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on 428 * SMP. Look, when several writers sleep and reader wakes them up, all but one 429 * immediately hit write lock and grab all the cpus. Exclusive sleep solves 430 * this, _but_ remember, it adds useless work on UP machines. 431 */ 432 433 void netlink_table_grab(void) 434 __acquires(nl_table_lock) 435 { 436 might_sleep(); 437 438 write_lock_irq(&nl_table_lock); 439 440 if (atomic_read(&nl_table_users)) { 441 DECLARE_WAITQUEUE(wait, current); 442 443 add_wait_queue_exclusive(&nl_table_wait, &wait); 444 for (;;) { 445 set_current_state(TASK_UNINTERRUPTIBLE); 446 if (atomic_read(&nl_table_users) == 0) 447 break; 448 write_unlock_irq(&nl_table_lock); 449 schedule(); 450 write_lock_irq(&nl_table_lock); 451 } 452 453 __set_current_state(TASK_RUNNING); 454 remove_wait_queue(&nl_table_wait, &wait); 455 } 456 } 457 458 void netlink_table_ungrab(void) 459 __releases(nl_table_lock) 460 { 461 write_unlock_irq(&nl_table_lock); 462 wake_up(&nl_table_wait); 463 } 464 465 static inline void 466 netlink_lock_table(void) 467 { 468 unsigned long flags; 469 470 /* read_lock() synchronizes us to netlink_table_grab */ 471 472 read_lock_irqsave(&nl_table_lock, flags); 473 atomic_inc(&nl_table_users); 474 read_unlock_irqrestore(&nl_table_lock, flags); 475 } 476 477 static inline void 478 netlink_unlock_table(void) 479 { 480 if (atomic_dec_and_test(&nl_table_users)) 481 wake_up(&nl_table_wait); 482 } 483 484 struct netlink_compare_arg 485 { 486 possible_net_t pnet; 487 u32 portid; 488 }; 489 490 /* Doing sizeof directly may yield 4 extra bytes on 64-bit. 
*/ 491 #define netlink_compare_arg_len \ 492 (offsetof(struct netlink_compare_arg, portid) + sizeof(u32)) 493 494 static inline int netlink_compare(struct rhashtable_compare_arg *arg, 495 const void *ptr) 496 { 497 const struct netlink_compare_arg *x = arg->key; 498 const struct netlink_sock *nlk = ptr; 499 500 return nlk->portid != x->portid || 501 !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet)); 502 } 503 504 static void netlink_compare_arg_init(struct netlink_compare_arg *arg, 505 struct net *net, u32 portid) 506 { 507 memset(arg, 0, sizeof(*arg)); 508 write_pnet(&arg->pnet, net); 509 arg->portid = portid; 510 } 511 512 static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid, 513 struct net *net) 514 { 515 struct netlink_compare_arg arg; 516 517 netlink_compare_arg_init(&arg, net, portid); 518 return rhashtable_lookup_fast(&table->hash, &arg, 519 netlink_rhashtable_params); 520 } 521 522 static int __netlink_insert(struct netlink_table *table, struct sock *sk) 523 { 524 struct netlink_compare_arg arg; 525 526 netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid); 527 return rhashtable_lookup_insert_key(&table->hash, &arg, 528 &nlk_sk(sk)->node, 529 netlink_rhashtable_params); 530 } 531 532 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid) 533 { 534 struct netlink_table *table = &nl_table[protocol]; 535 struct sock *sk; 536 537 rcu_read_lock(); 538 sk = __netlink_lookup(table, portid, net); 539 if (sk) 540 sock_hold(sk); 541 rcu_read_unlock(); 542 543 return sk; 544 } 545 546 static const struct proto_ops netlink_ops; 547 548 static void 549 netlink_update_listeners(struct sock *sk) 550 { 551 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; 552 unsigned long mask; 553 unsigned int i; 554 struct listeners *listeners; 555 556 listeners = nl_deref_protected(tbl->listeners); 557 if (!listeners) 558 return; 559 560 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { 561 mask = 0; 562 sk_for_each_bound(sk, &tbl->mc_list) { 563 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) 564 mask |= nlk_sk(sk)->groups[i]; 565 } 566 listeners->masks[i] = mask; 567 } 568 /* this function is only called with the netlink table "grabbed", which 569 * makes sure updates are visible before bind or setsockopt return. */ 570 } 571 572 static int netlink_insert(struct sock *sk, u32 portid) 573 { 574 struct netlink_table *table = &nl_table[sk->sk_protocol]; 575 int err; 576 577 lock_sock(sk); 578 579 err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY; 580 if (nlk_sk(sk)->bound) 581 goto err; 582 583 /* portid can be read locklessly from netlink_getname(). */ 584 WRITE_ONCE(nlk_sk(sk)->portid, portid); 585 586 sock_hold(sk); 587 588 err = __netlink_insert(table, sk); 589 if (err) { 590 /* In case the hashtable backend returns with -EBUSY 591 * from here, it must not escape to the caller. 592 */ 593 if (unlikely(err == -EBUSY)) 594 err = -EOVERFLOW; 595 if (err == -EEXIST) 596 err = -EADDRINUSE; 597 sock_put(sk); 598 goto err; 599 } 600 601 /* We need to ensure that the socket is hashed and visible. */ 602 smp_wmb(); 603 /* Paired with lockless reads from netlink_bind(), 604 * netlink_connect() and netlink_sendmsg(). 
605 */ 606 WRITE_ONCE(nlk_sk(sk)->bound, portid); 607 608 err: 609 release_sock(sk); 610 return err; 611 } 612 613 static void netlink_remove(struct sock *sk) 614 { 615 struct netlink_table *table; 616 617 table = &nl_table[sk->sk_protocol]; 618 if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node, 619 netlink_rhashtable_params)) { 620 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); 621 __sock_put(sk); 622 } 623 624 netlink_table_grab(); 625 if (nlk_sk(sk)->subscriptions) { 626 __sk_del_bind_node(sk); 627 netlink_update_listeners(sk); 628 } 629 if (sk->sk_protocol == NETLINK_GENERIC) 630 atomic_inc(&genl_sk_destructing_cnt); 631 netlink_table_ungrab(); 632 } 633 634 static struct proto netlink_proto = { 635 .name = "NETLINK", 636 .owner = THIS_MODULE, 637 .obj_size = sizeof(struct netlink_sock), 638 }; 639 640 static int __netlink_create(struct net *net, struct socket *sock, 641 struct mutex *cb_mutex, int protocol, 642 int kern) 643 { 644 struct sock *sk; 645 struct netlink_sock *nlk; 646 647 sock->ops = &netlink_ops; 648 649 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern); 650 if (!sk) 651 return -ENOMEM; 652 653 sock_init_data(sock, sk); 654 655 nlk = nlk_sk(sk); 656 if (cb_mutex) { 657 nlk->cb_mutex = cb_mutex; 658 } else { 659 nlk->cb_mutex = &nlk->cb_def_mutex; 660 mutex_init(nlk->cb_mutex); 661 lockdep_set_class_and_name(nlk->cb_mutex, 662 nlk_cb_mutex_keys + protocol, 663 nlk_cb_mutex_key_strings[protocol]); 664 } 665 init_waitqueue_head(&nlk->wait); 666 667 sk->sk_destruct = netlink_sock_destruct; 668 sk->sk_protocol = protocol; 669 return 0; 670 } 671 672 static int netlink_create(struct net *net, struct socket *sock, int protocol, 673 int kern) 674 { 675 struct module *module = NULL; 676 struct mutex *cb_mutex; 677 struct netlink_sock *nlk; 678 int (*bind)(struct net *net, int group); 679 void (*unbind)(struct net *net, int group); 680 int err = 0; 681 682 sock->state = SS_UNCONNECTED; 683 684 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) 685 return -ESOCKTNOSUPPORT; 686 687 if (protocol < 0 || protocol >= MAX_LINKS) 688 return -EPROTONOSUPPORT; 689 protocol = array_index_nospec(protocol, MAX_LINKS); 690 691 netlink_lock_table(); 692 #ifdef CONFIG_MODULES 693 if (!nl_table[protocol].registered) { 694 netlink_unlock_table(); 695 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol); 696 netlink_lock_table(); 697 } 698 #endif 699 if (nl_table[protocol].registered && 700 try_module_get(nl_table[protocol].module)) 701 module = nl_table[protocol].module; 702 else 703 err = -EPROTONOSUPPORT; 704 cb_mutex = nl_table[protocol].cb_mutex; 705 bind = nl_table[protocol].bind; 706 unbind = nl_table[protocol].unbind; 707 netlink_unlock_table(); 708 709 if (err < 0) 710 goto out; 711 712 err = __netlink_create(net, sock, cb_mutex, protocol, kern); 713 if (err < 0) 714 goto out_module; 715 716 sock_prot_inuse_add(net, &netlink_proto, 1); 717 718 nlk = nlk_sk(sock->sk); 719 nlk->module = module; 720 nlk->netlink_bind = bind; 721 nlk->netlink_unbind = unbind; 722 out: 723 return err; 724 725 out_module: 726 module_put(module); 727 goto out; 728 } 729 730 static void deferred_put_nlk_sk(struct rcu_head *head) 731 { 732 struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu); 733 struct sock *sk = &nlk->sk; 734 735 kfree(nlk->groups); 736 nlk->groups = NULL; 737 738 if (!refcount_dec_and_test(&sk->sk_refcnt)) 739 return; 740 741 if (nlk->cb_running && nlk->cb.done) { 742 INIT_WORK(&nlk->work, netlink_sock_destruct_work); 743 
schedule_work(&nlk->work); 744 return; 745 } 746 747 sk_free(sk); 748 } 749 750 static int netlink_release(struct socket *sock) 751 { 752 struct sock *sk = sock->sk; 753 struct netlink_sock *nlk; 754 755 if (!sk) 756 return 0; 757 758 netlink_remove(sk); 759 sock_orphan(sk); 760 nlk = nlk_sk(sk); 761 762 /* 763 * OK. Socket is unlinked, any packets that arrive now 764 * will be purged. 765 */ 766 767 /* must not acquire netlink_table_lock in any way again before unbind 768 * and notifying genetlink is done as otherwise it might deadlock 769 */ 770 if (nlk->netlink_unbind) { 771 int i; 772 773 for (i = 0; i < nlk->ngroups; i++) 774 if (test_bit(i, nlk->groups)) 775 nlk->netlink_unbind(sock_net(sk), i + 1); 776 } 777 if (sk->sk_protocol == NETLINK_GENERIC && 778 atomic_dec_return(&genl_sk_destructing_cnt) == 0) 779 wake_up(&genl_sk_destructing_waitq); 780 781 sock->sk = NULL; 782 wake_up_interruptible_all(&nlk->wait); 783 784 skb_queue_purge(&sk->sk_write_queue); 785 786 if (nlk->portid && nlk->bound) { 787 struct netlink_notify n = { 788 .net = sock_net(sk), 789 .protocol = sk->sk_protocol, 790 .portid = nlk->portid, 791 }; 792 blocking_notifier_call_chain(&netlink_chain, 793 NETLINK_URELEASE, &n); 794 } 795 796 module_put(nlk->module); 797 798 if (netlink_is_kernel(sk)) { 799 netlink_table_grab(); 800 BUG_ON(nl_table[sk->sk_protocol].registered == 0); 801 if (--nl_table[sk->sk_protocol].registered == 0) { 802 struct listeners *old; 803 804 old = nl_deref_protected(nl_table[sk->sk_protocol].listeners); 805 RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL); 806 kfree_rcu(old, rcu); 807 nl_table[sk->sk_protocol].module = NULL; 808 nl_table[sk->sk_protocol].bind = NULL; 809 nl_table[sk->sk_protocol].unbind = NULL; 810 nl_table[sk->sk_protocol].flags = 0; 811 nl_table[sk->sk_protocol].registered = 0; 812 } 813 netlink_table_ungrab(); 814 } 815 816 sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); 817 818 /* Because struct net might disappear soon, do not keep a pointer. */ 819 if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) { 820 __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); 821 /* Because of deferred_put_nlk_sk and use of work queue, 822 * it is possible netns will be freed before this socket. 823 */ 824 sock_net_set(sk, &init_net); 825 __netns_tracker_alloc(&init_net, &sk->ns_tracker, 826 false, GFP_KERNEL); 827 } 828 call_rcu(&nlk->rcu, deferred_put_nlk_sk); 829 return 0; 830 } 831 832 static int netlink_autobind(struct socket *sock) 833 { 834 struct sock *sk = sock->sk; 835 struct net *net = sock_net(sk); 836 struct netlink_table *table = &nl_table[sk->sk_protocol]; 837 s32 portid = task_tgid_vnr(current); 838 int err; 839 s32 rover = -4096; 840 bool ok; 841 842 retry: 843 cond_resched(); 844 rcu_read_lock(); 845 ok = !__netlink_lookup(table, portid, net); 846 rcu_read_unlock(); 847 if (!ok) { 848 /* Bind collision, search negative portid values. */ 849 if (rover == -4096) 850 /* rover will be in range [S32_MIN, -4097] */ 851 rover = S32_MIN + get_random_u32_below(-4096 - S32_MIN); 852 else if (rover >= -4096) 853 rover = -4097; 854 portid = rover--; 855 goto retry; 856 } 857 858 err = netlink_insert(sk, portid); 859 if (err == -EADDRINUSE) 860 goto retry; 861 862 /* If 2 threads race to autobind, that is fine. */ 863 if (err == -EBUSY) 864 err = 0; 865 866 return err; 867 } 868 869 /** 870 * __netlink_ns_capable - General netlink message capability test 871 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace. 
872 * @user_ns: The user namespace of the capability to use 873 * @cap: The capability to use 874 * 875 * Test to see if the opener of the socket we received the message 876 * from had when the netlink socket was created and the sender of the 877 * message has the capability @cap in the user namespace @user_ns. 878 */ 879 bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, 880 struct user_namespace *user_ns, int cap) 881 { 882 return ((nsp->flags & NETLINK_SKB_DST) || 883 file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) && 884 ns_capable(user_ns, cap); 885 } 886 EXPORT_SYMBOL(__netlink_ns_capable); 887 888 /** 889 * netlink_ns_capable - General netlink message capability test 890 * @skb: socket buffer holding a netlink command from userspace 891 * @user_ns: The user namespace of the capability to use 892 * @cap: The capability to use 893 * 894 * Test to see if the opener of the socket we received the message 895 * from had when the netlink socket was created and the sender of the 896 * message has the capability @cap in the user namespace @user_ns. 897 */ 898 bool netlink_ns_capable(const struct sk_buff *skb, 899 struct user_namespace *user_ns, int cap) 900 { 901 return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap); 902 } 903 EXPORT_SYMBOL(netlink_ns_capable); 904 905 /** 906 * netlink_capable - Netlink global message capability test 907 * @skb: socket buffer holding a netlink command from userspace 908 * @cap: The capability to use 909 * 910 * Test to see if the opener of the socket we received the message 911 * from had when the netlink socket was created and the sender of the 912 * message has the capability @cap in all user namespaces. 913 */ 914 bool netlink_capable(const struct sk_buff *skb, int cap) 915 { 916 return netlink_ns_capable(skb, &init_user_ns, cap); 917 } 918 EXPORT_SYMBOL(netlink_capable); 919 920 /** 921 * netlink_net_capable - Netlink network namespace message capability test 922 * @skb: socket buffer holding a netlink command from userspace 923 * @cap: The capability to use 924 * 925 * Test to see if the opener of the socket we received the message 926 * from had when the netlink socket was created and the sender of the 927 * message has the capability @cap over the network namespace of 928 * the socket we received the message from. 
929 */ 930 bool netlink_net_capable(const struct sk_buff *skb, int cap) 931 { 932 return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap); 933 } 934 EXPORT_SYMBOL(netlink_net_capable); 935 936 static inline int netlink_allowed(const struct socket *sock, unsigned int flag) 937 { 938 return (nl_table[sock->sk->sk_protocol].flags & flag) || 939 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN); 940 } 941 942 static void 943 netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions) 944 { 945 struct netlink_sock *nlk = nlk_sk(sk); 946 947 if (nlk->subscriptions && !subscriptions) 948 __sk_del_bind_node(sk); 949 else if (!nlk->subscriptions && subscriptions) 950 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list); 951 nlk->subscriptions = subscriptions; 952 } 953 954 static int netlink_realloc_groups(struct sock *sk) 955 { 956 struct netlink_sock *nlk = nlk_sk(sk); 957 unsigned int groups; 958 unsigned long *new_groups; 959 int err = 0; 960 961 netlink_table_grab(); 962 963 groups = nl_table[sk->sk_protocol].groups; 964 if (!nl_table[sk->sk_protocol].registered) { 965 err = -ENOENT; 966 goto out_unlock; 967 } 968 969 if (nlk->ngroups >= groups) 970 goto out_unlock; 971 972 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC); 973 if (new_groups == NULL) { 974 err = -ENOMEM; 975 goto out_unlock; 976 } 977 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0, 978 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups)); 979 980 nlk->groups = new_groups; 981 nlk->ngroups = groups; 982 out_unlock: 983 netlink_table_ungrab(); 984 return err; 985 } 986 987 static void netlink_undo_bind(int group, long unsigned int groups, 988 struct sock *sk) 989 { 990 struct netlink_sock *nlk = nlk_sk(sk); 991 int undo; 992 993 if (!nlk->netlink_unbind) 994 return; 995 996 for (undo = 0; undo < group; undo++) 997 if (test_bit(undo, &groups)) 998 nlk->netlink_unbind(sock_net(sk), undo + 1); 999 } 1000 1001 static int netlink_bind(struct socket *sock, struct sockaddr *addr, 1002 int addr_len) 1003 { 1004 struct sock *sk = sock->sk; 1005 struct net *net = sock_net(sk); 1006 struct netlink_sock *nlk = nlk_sk(sk); 1007 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 1008 int err = 0; 1009 unsigned long groups; 1010 bool bound; 1011 1012 if (addr_len < sizeof(struct sockaddr_nl)) 1013 return -EINVAL; 1014 1015 if (nladdr->nl_family != AF_NETLINK) 1016 return -EINVAL; 1017 groups = nladdr->nl_groups; 1018 1019 /* Only superuser is allowed to listen multicasts */ 1020 if (groups) { 1021 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV)) 1022 return -EPERM; 1023 err = netlink_realloc_groups(sk); 1024 if (err) 1025 return err; 1026 } 1027 1028 if (nlk->ngroups < BITS_PER_LONG) 1029 groups &= (1UL << nlk->ngroups) - 1; 1030 1031 /* Paired with WRITE_ONCE() in netlink_insert() */ 1032 bound = READ_ONCE(nlk->bound); 1033 if (bound) { 1034 /* Ensure nlk->portid is up-to-date. */ 1035 smp_rmb(); 1036 1037 if (nladdr->nl_pid != nlk->portid) 1038 return -EINVAL; 1039 } 1040 1041 if (nlk->netlink_bind && groups) { 1042 int group; 1043 1044 /* nl_groups is a u32, so cap the maximum groups we can bind */ 1045 for (group = 0; group < BITS_PER_TYPE(u32); group++) { 1046 if (!test_bit(group, &groups)) 1047 continue; 1048 err = nlk->netlink_bind(net, group + 1); 1049 if (!err) 1050 continue; 1051 netlink_undo_bind(group, groups, sk); 1052 return err; 1053 } 1054 } 1055 1056 /* No need for barriers here as we return to user-space without 1057 * using any of the bound attributes. 
1058 */ 1059 netlink_lock_table(); 1060 if (!bound) { 1061 err = nladdr->nl_pid ? 1062 netlink_insert(sk, nladdr->nl_pid) : 1063 netlink_autobind(sock); 1064 if (err) { 1065 netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk); 1066 goto unlock; 1067 } 1068 } 1069 1070 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0])) 1071 goto unlock; 1072 netlink_unlock_table(); 1073 1074 netlink_table_grab(); 1075 netlink_update_subscriptions(sk, nlk->subscriptions + 1076 hweight32(groups) - 1077 hweight32(nlk->groups[0])); 1078 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups; 1079 netlink_update_listeners(sk); 1080 netlink_table_ungrab(); 1081 1082 return 0; 1083 1084 unlock: 1085 netlink_unlock_table(); 1086 return err; 1087 } 1088 1089 static int netlink_connect(struct socket *sock, struct sockaddr *addr, 1090 int alen, int flags) 1091 { 1092 int err = 0; 1093 struct sock *sk = sock->sk; 1094 struct netlink_sock *nlk = nlk_sk(sk); 1095 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 1096 1097 if (alen < sizeof(addr->sa_family)) 1098 return -EINVAL; 1099 1100 if (addr->sa_family == AF_UNSPEC) { 1101 /* paired with READ_ONCE() in netlink_getsockbyportid() */ 1102 WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED); 1103 /* dst_portid and dst_group can be read locklessly */ 1104 WRITE_ONCE(nlk->dst_portid, 0); 1105 WRITE_ONCE(nlk->dst_group, 0); 1106 return 0; 1107 } 1108 if (addr->sa_family != AF_NETLINK) 1109 return -EINVAL; 1110 1111 if (alen < sizeof(struct sockaddr_nl)) 1112 return -EINVAL; 1113 1114 if ((nladdr->nl_groups || nladdr->nl_pid) && 1115 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) 1116 return -EPERM; 1117 1118 /* No need for barriers here as we return to user-space without 1119 * using any of the bound attributes. 1120 * Paired with WRITE_ONCE() in netlink_insert(). 1121 */ 1122 if (!READ_ONCE(nlk->bound)) 1123 err = netlink_autobind(sock); 1124 1125 if (err == 0) { 1126 /* paired with READ_ONCE() in netlink_getsockbyportid() */ 1127 WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED); 1128 /* dst_portid and dst_group can be read locklessly */ 1129 WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid); 1130 WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups)); 1131 } 1132 1133 return err; 1134 } 1135 1136 static int netlink_getname(struct socket *sock, struct sockaddr *addr, 1137 int peer) 1138 { 1139 struct sock *sk = sock->sk; 1140 struct netlink_sock *nlk = nlk_sk(sk); 1141 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr); 1142 1143 nladdr->nl_family = AF_NETLINK; 1144 nladdr->nl_pad = 0; 1145 1146 if (peer) { 1147 /* Paired with WRITE_ONCE() in netlink_connect() */ 1148 nladdr->nl_pid = READ_ONCE(nlk->dst_portid); 1149 nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group)); 1150 } else { 1151 /* Paired with WRITE_ONCE() in netlink_insert() */ 1152 nladdr->nl_pid = READ_ONCE(nlk->portid); 1153 netlink_lock_table(); 1154 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0; 1155 netlink_unlock_table(); 1156 } 1157 return sizeof(*nladdr); 1158 } 1159 1160 static int netlink_ioctl(struct socket *sock, unsigned int cmd, 1161 unsigned long arg) 1162 { 1163 /* try to hand this ioctl down to the NIC drivers. 
1164 */ 1165 return -ENOIOCTLCMD; 1166 } 1167 1168 static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid) 1169 { 1170 struct sock *sock; 1171 struct netlink_sock *nlk; 1172 1173 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid); 1174 if (!sock) 1175 return ERR_PTR(-ECONNREFUSED); 1176 1177 /* Don't bother queuing skb if kernel socket has no input function */ 1178 nlk = nlk_sk(sock); 1179 /* dst_portid and sk_state can be changed in netlink_connect() */ 1180 if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED && 1181 READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) { 1182 sock_put(sock); 1183 return ERR_PTR(-ECONNREFUSED); 1184 } 1185 return sock; 1186 } 1187 1188 struct sock *netlink_getsockbyfilp(struct file *filp) 1189 { 1190 struct inode *inode = file_inode(filp); 1191 struct sock *sock; 1192 1193 if (!S_ISSOCK(inode->i_mode)) 1194 return ERR_PTR(-ENOTSOCK); 1195 1196 sock = SOCKET_I(inode)->sk; 1197 if (sock->sk_family != AF_NETLINK) 1198 return ERR_PTR(-EINVAL); 1199 1200 sock_hold(sock); 1201 return sock; 1202 } 1203 1204 static struct sk_buff *netlink_alloc_large_skb(unsigned int size, 1205 int broadcast) 1206 { 1207 struct sk_buff *skb; 1208 void *data; 1209 1210 if (size <= NLMSG_GOODSIZE || broadcast) 1211 return alloc_skb(size, GFP_KERNEL); 1212 1213 size = SKB_DATA_ALIGN(size) + 1214 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1215 1216 data = vmalloc(size); 1217 if (data == NULL) 1218 return NULL; 1219 1220 skb = __build_skb(data, size); 1221 if (skb == NULL) 1222 vfree(data); 1223 else 1224 skb->destructor = netlink_skb_destructor; 1225 1226 return skb; 1227 } 1228 1229 /* 1230 * Attach a skb to a netlink socket. 1231 * The caller must hold a reference to the destination socket. On error, the 1232 * reference is dropped. The skb is not send to the destination, just all 1233 * all error checks are performed and memory in the queue is reserved. 1234 * Return values: 1235 * < 0: error. skb freed, reference to sock dropped. 1236 * 0: continue 1237 * 1: repeat lookup - reference dropped while waiting for socket memory. 
1238 */ 1239 int netlink_attachskb(struct sock *sk, struct sk_buff *skb, 1240 long *timeo, struct sock *ssk) 1241 { 1242 struct netlink_sock *nlk; 1243 1244 nlk = nlk_sk(sk); 1245 1246 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 1247 test_bit(NETLINK_S_CONGESTED, &nlk->state))) { 1248 DECLARE_WAITQUEUE(wait, current); 1249 if (!*timeo) { 1250 if (!ssk || netlink_is_kernel(ssk)) 1251 netlink_overrun(sk); 1252 sock_put(sk); 1253 kfree_skb(skb); 1254 return -EAGAIN; 1255 } 1256 1257 __set_current_state(TASK_INTERRUPTIBLE); 1258 add_wait_queue(&nlk->wait, &wait); 1259 1260 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 1261 test_bit(NETLINK_S_CONGESTED, &nlk->state)) && 1262 !sock_flag(sk, SOCK_DEAD)) 1263 *timeo = schedule_timeout(*timeo); 1264 1265 __set_current_state(TASK_RUNNING); 1266 remove_wait_queue(&nlk->wait, &wait); 1267 sock_put(sk); 1268 1269 if (signal_pending(current)) { 1270 kfree_skb(skb); 1271 return sock_intr_errno(*timeo); 1272 } 1273 return 1; 1274 } 1275 netlink_skb_set_owner_r(skb, sk); 1276 return 0; 1277 } 1278 1279 static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) 1280 { 1281 int len = skb->len; 1282 1283 netlink_deliver_tap(sock_net(sk), skb); 1284 1285 skb_queue_tail(&sk->sk_receive_queue, skb); 1286 sk->sk_data_ready(sk); 1287 return len; 1288 } 1289 1290 int netlink_sendskb(struct sock *sk, struct sk_buff *skb) 1291 { 1292 int len = __netlink_sendskb(sk, skb); 1293 1294 sock_put(sk); 1295 return len; 1296 } 1297 1298 void netlink_detachskb(struct sock *sk, struct sk_buff *skb) 1299 { 1300 kfree_skb(skb); 1301 sock_put(sk); 1302 } 1303 1304 static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) 1305 { 1306 int delta; 1307 1308 WARN_ON(skb->sk != NULL); 1309 delta = skb->end - skb->tail; 1310 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize) 1311 return skb; 1312 1313 if (skb_shared(skb)) { 1314 struct sk_buff *nskb = skb_clone(skb, allocation); 1315 if (!nskb) 1316 return skb; 1317 consume_skb(skb); 1318 skb = nskb; 1319 } 1320 1321 pskb_expand_head(skb, 0, -delta, 1322 (allocation & ~__GFP_DIRECT_RECLAIM) | 1323 __GFP_NOWARN | __GFP_NORETRY); 1324 return skb; 1325 } 1326 1327 static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb, 1328 struct sock *ssk) 1329 { 1330 int ret; 1331 struct netlink_sock *nlk = nlk_sk(sk); 1332 1333 ret = -ECONNREFUSED; 1334 if (nlk->netlink_rcv != NULL) { 1335 ret = skb->len; 1336 netlink_skb_set_owner_r(skb, sk); 1337 NETLINK_CB(skb).sk = ssk; 1338 netlink_deliver_tap_kernel(sk, ssk, skb); 1339 nlk->netlink_rcv(skb); 1340 consume_skb(skb); 1341 } else { 1342 kfree_skb(skb); 1343 } 1344 sock_put(sk); 1345 return ret; 1346 } 1347 1348 int netlink_unicast(struct sock *ssk, struct sk_buff *skb, 1349 u32 portid, int nonblock) 1350 { 1351 struct sock *sk; 1352 int err; 1353 long timeo; 1354 1355 skb = netlink_trim(skb, gfp_any()); 1356 1357 timeo = sock_sndtimeo(ssk, nonblock); 1358 retry: 1359 sk = netlink_getsockbyportid(ssk, portid); 1360 if (IS_ERR(sk)) { 1361 kfree_skb(skb); 1362 return PTR_ERR(sk); 1363 } 1364 if (netlink_is_kernel(sk)) 1365 return netlink_unicast_kernel(sk, skb, ssk); 1366 1367 if (sk_filter(sk, skb)) { 1368 err = skb->len; 1369 kfree_skb(skb); 1370 sock_put(sk); 1371 return err; 1372 } 1373 1374 err = netlink_attachskb(sk, skb, &timeo, ssk); 1375 if (err == 1) 1376 goto retry; 1377 if (err) 1378 return err; 1379 1380 return netlink_sendskb(sk, skb); 1381 } 1382 EXPORT_SYMBOL(netlink_unicast); 1383 1384 int 
netlink_has_listeners(struct sock *sk, unsigned int group) 1385 { 1386 int res = 0; 1387 struct listeners *listeners; 1388 1389 BUG_ON(!netlink_is_kernel(sk)); 1390 1391 rcu_read_lock(); 1392 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); 1393 1394 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups) 1395 res = test_bit(group - 1, listeners->masks); 1396 1397 rcu_read_unlock(); 1398 1399 return res; 1400 } 1401 EXPORT_SYMBOL_GPL(netlink_has_listeners); 1402 1403 bool netlink_strict_get_check(struct sk_buff *skb) 1404 { 1405 const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk); 1406 1407 return nlk->flags & NETLINK_F_STRICT_CHK; 1408 } 1409 EXPORT_SYMBOL_GPL(netlink_strict_get_check); 1410 1411 static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) 1412 { 1413 struct netlink_sock *nlk = nlk_sk(sk); 1414 1415 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 1416 !test_bit(NETLINK_S_CONGESTED, &nlk->state)) { 1417 netlink_skb_set_owner_r(skb, sk); 1418 __netlink_sendskb(sk, skb); 1419 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); 1420 } 1421 return -1; 1422 } 1423 1424 struct netlink_broadcast_data { 1425 struct sock *exclude_sk; 1426 struct net *net; 1427 u32 portid; 1428 u32 group; 1429 int failure; 1430 int delivery_failure; 1431 int congested; 1432 int delivered; 1433 gfp_t allocation; 1434 struct sk_buff *skb, *skb2; 1435 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data); 1436 void *tx_data; 1437 }; 1438 1439 static void do_one_broadcast(struct sock *sk, 1440 struct netlink_broadcast_data *p) 1441 { 1442 struct netlink_sock *nlk = nlk_sk(sk); 1443 int val; 1444 1445 if (p->exclude_sk == sk) 1446 return; 1447 1448 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups || 1449 !test_bit(p->group - 1, nlk->groups)) 1450 return; 1451 1452 if (!net_eq(sock_net(sk), p->net)) { 1453 if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID)) 1454 return; 1455 1456 if (!peernet_has_id(sock_net(sk), p->net)) 1457 return; 1458 1459 if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns, 1460 CAP_NET_BROADCAST)) 1461 return; 1462 } 1463 1464 if (p->failure) { 1465 netlink_overrun(sk); 1466 return; 1467 } 1468 1469 sock_hold(sk); 1470 if (p->skb2 == NULL) { 1471 if (skb_shared(p->skb)) { 1472 p->skb2 = skb_clone(p->skb, p->allocation); 1473 } else { 1474 p->skb2 = skb_get(p->skb); 1475 /* 1476 * skb ownership may have been set when 1477 * delivered to a previous socket. 1478 */ 1479 skb_orphan(p->skb2); 1480 } 1481 } 1482 if (p->skb2 == NULL) { 1483 netlink_overrun(sk); 1484 /* Clone failed. Notify ALL listeners. 
*/ 1485 p->failure = 1; 1486 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR) 1487 p->delivery_failure = 1; 1488 goto out; 1489 } 1490 1491 if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) { 1492 kfree_skb(p->skb2); 1493 p->skb2 = NULL; 1494 goto out; 1495 } 1496 1497 if (sk_filter(sk, p->skb2)) { 1498 kfree_skb(p->skb2); 1499 p->skb2 = NULL; 1500 goto out; 1501 } 1502 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net); 1503 if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED) 1504 NETLINK_CB(p->skb2).nsid_is_set = true; 1505 val = netlink_broadcast_deliver(sk, p->skb2); 1506 if (val < 0) { 1507 netlink_overrun(sk); 1508 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR) 1509 p->delivery_failure = 1; 1510 } else { 1511 p->congested |= val; 1512 p->delivered = 1; 1513 p->skb2 = NULL; 1514 } 1515 out: 1516 sock_put(sk); 1517 } 1518 1519 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, 1520 u32 portid, 1521 u32 group, gfp_t allocation, 1522 int (*filter)(struct sock *dsk, 1523 struct sk_buff *skb, void *data), 1524 void *filter_data) 1525 { 1526 struct net *net = sock_net(ssk); 1527 struct netlink_broadcast_data info; 1528 struct sock *sk; 1529 1530 skb = netlink_trim(skb, allocation); 1531 1532 info.exclude_sk = ssk; 1533 info.net = net; 1534 info.portid = portid; 1535 info.group = group; 1536 info.failure = 0; 1537 info.delivery_failure = 0; 1538 info.congested = 0; 1539 info.delivered = 0; 1540 info.allocation = allocation; 1541 info.skb = skb; 1542 info.skb2 = NULL; 1543 info.tx_filter = filter; 1544 info.tx_data = filter_data; 1545 1546 /* While we sleep in clone, do not allow to change socket list */ 1547 1548 netlink_lock_table(); 1549 1550 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) 1551 do_one_broadcast(sk, &info); 1552 1553 consume_skb(skb); 1554 1555 netlink_unlock_table(); 1556 1557 if (info.delivery_failure) { 1558 kfree_skb(info.skb2); 1559 return -ENOBUFS; 1560 } 1561 consume_skb(info.skb2); 1562 1563 if (info.delivered) { 1564 if (info.congested && gfpflags_allow_blocking(allocation)) 1565 yield(); 1566 return 0; 1567 } 1568 return -ESRCH; 1569 } 1570 EXPORT_SYMBOL(netlink_broadcast_filtered); 1571 1572 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid, 1573 u32 group, gfp_t allocation) 1574 { 1575 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation, 1576 NULL, NULL); 1577 } 1578 EXPORT_SYMBOL(netlink_broadcast); 1579 1580 struct netlink_set_err_data { 1581 struct sock *exclude_sk; 1582 u32 portid; 1583 u32 group; 1584 int code; 1585 }; 1586 1587 static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p) 1588 { 1589 struct netlink_sock *nlk = nlk_sk(sk); 1590 int ret = 0; 1591 1592 if (sk == p->exclude_sk) 1593 goto out; 1594 1595 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk))) 1596 goto out; 1597 1598 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups || 1599 !test_bit(p->group - 1, nlk->groups)) 1600 goto out; 1601 1602 if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) { 1603 ret = 1; 1604 goto out; 1605 } 1606 1607 sk->sk_err = p->code; 1608 sk_error_report(sk); 1609 out: 1610 return ret; 1611 } 1612 1613 /** 1614 * netlink_set_err - report error to broadcast listeners 1615 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create() 1616 * @portid: the PORTID of a process that we want to skip (if any) 1617 * @group: the broadcast group that will notice the error 1618 * @code: error code, must be negative (as usual in 
kernelspace) 1619 * 1620 * This function returns the number of broadcast listeners that have set the 1621 * NETLINK_NO_ENOBUFS socket option. 1622 */ 1623 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code) 1624 { 1625 struct netlink_set_err_data info; 1626 unsigned long flags; 1627 struct sock *sk; 1628 int ret = 0; 1629 1630 info.exclude_sk = ssk; 1631 info.portid = portid; 1632 info.group = group; 1633 /* sk->sk_err wants a positive error value */ 1634 info.code = -code; 1635 1636 read_lock_irqsave(&nl_table_lock, flags); 1637 1638 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) 1639 ret += do_one_set_err(sk, &info); 1640 1641 read_unlock_irqrestore(&nl_table_lock, flags); 1642 return ret; 1643 } 1644 EXPORT_SYMBOL(netlink_set_err); 1645 1646 /* must be called with netlink table grabbed */ 1647 static void netlink_update_socket_mc(struct netlink_sock *nlk, 1648 unsigned int group, 1649 int is_new) 1650 { 1651 int old, new = !!is_new, subscriptions; 1652 1653 old = test_bit(group - 1, nlk->groups); 1654 subscriptions = nlk->subscriptions - old + new; 1655 __assign_bit(group - 1, nlk->groups, new); 1656 netlink_update_subscriptions(&nlk->sk, subscriptions); 1657 netlink_update_listeners(&nlk->sk); 1658 } 1659 1660 static int netlink_setsockopt(struct socket *sock, int level, int optname, 1661 sockptr_t optval, unsigned int optlen) 1662 { 1663 struct sock *sk = sock->sk; 1664 struct netlink_sock *nlk = nlk_sk(sk); 1665 unsigned int val = 0; 1666 int err; 1667 1668 if (level != SOL_NETLINK) 1669 return -ENOPROTOOPT; 1670 1671 if (optlen >= sizeof(int) && 1672 copy_from_sockptr(&val, optval, sizeof(val))) 1673 return -EFAULT; 1674 1675 switch (optname) { 1676 case NETLINK_PKTINFO: 1677 if (val) 1678 nlk->flags |= NETLINK_F_RECV_PKTINFO; 1679 else 1680 nlk->flags &= ~NETLINK_F_RECV_PKTINFO; 1681 err = 0; 1682 break; 1683 case NETLINK_ADD_MEMBERSHIP: 1684 case NETLINK_DROP_MEMBERSHIP: { 1685 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV)) 1686 return -EPERM; 1687 err = netlink_realloc_groups(sk); 1688 if (err) 1689 return err; 1690 if (!val || val - 1 >= nlk->ngroups) 1691 return -EINVAL; 1692 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) { 1693 err = nlk->netlink_bind(sock_net(sk), val); 1694 if (err) 1695 return err; 1696 } 1697 netlink_table_grab(); 1698 netlink_update_socket_mc(nlk, val, 1699 optname == NETLINK_ADD_MEMBERSHIP); 1700 netlink_table_ungrab(); 1701 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind) 1702 nlk->netlink_unbind(sock_net(sk), val); 1703 1704 err = 0; 1705 break; 1706 } 1707 case NETLINK_BROADCAST_ERROR: 1708 if (val) 1709 nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR; 1710 else 1711 nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR; 1712 err = 0; 1713 break; 1714 case NETLINK_NO_ENOBUFS: 1715 if (val) { 1716 nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS; 1717 clear_bit(NETLINK_S_CONGESTED, &nlk->state); 1718 wake_up_interruptible(&nlk->wait); 1719 } else { 1720 nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS; 1721 } 1722 err = 0; 1723 break; 1724 case NETLINK_LISTEN_ALL_NSID: 1725 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST)) 1726 return -EPERM; 1727 1728 if (val) 1729 nlk->flags |= NETLINK_F_LISTEN_ALL_NSID; 1730 else 1731 nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID; 1732 err = 0; 1733 break; 1734 case NETLINK_CAP_ACK: 1735 if (val) 1736 nlk->flags |= NETLINK_F_CAP_ACK; 1737 else 1738 nlk->flags &= ~NETLINK_F_CAP_ACK; 1739 err = 0; 1740 break; 1741 case NETLINK_EXT_ACK: 1742 if (val) 1743 nlk->flags |= 
NETLINK_F_EXT_ACK; 1744 else 1745 nlk->flags &= ~NETLINK_F_EXT_ACK; 1746 err = 0; 1747 break; 1748 case NETLINK_GET_STRICT_CHK: 1749 if (val) 1750 nlk->flags |= NETLINK_F_STRICT_CHK; 1751 else 1752 nlk->flags &= ~NETLINK_F_STRICT_CHK; 1753 err = 0; 1754 break; 1755 default: 1756 err = -ENOPROTOOPT; 1757 } 1758 return err; 1759 } 1760 1761 static int netlink_getsockopt(struct socket *sock, int level, int optname, 1762 char __user *optval, int __user *optlen) 1763 { 1764 struct sock *sk = sock->sk; 1765 struct netlink_sock *nlk = nlk_sk(sk); 1766 unsigned int flag; 1767 int len, val; 1768 1769 if (level != SOL_NETLINK) 1770 return -ENOPROTOOPT; 1771 1772 if (get_user(len, optlen)) 1773 return -EFAULT; 1774 if (len < 0) 1775 return -EINVAL; 1776 1777 switch (optname) { 1778 case NETLINK_PKTINFO: 1779 flag = NETLINK_F_RECV_PKTINFO; 1780 break; 1781 case NETLINK_BROADCAST_ERROR: 1782 flag = NETLINK_F_BROADCAST_SEND_ERROR; 1783 break; 1784 case NETLINK_NO_ENOBUFS: 1785 flag = NETLINK_F_RECV_NO_ENOBUFS; 1786 break; 1787 case NETLINK_LIST_MEMBERSHIPS: { 1788 int pos, idx, shift, err = 0; 1789 1790 netlink_lock_table(); 1791 for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) { 1792 if (len - pos < sizeof(u32)) 1793 break; 1794 1795 idx = pos / sizeof(unsigned long); 1796 shift = (pos % sizeof(unsigned long)) * 8; 1797 if (put_user((u32)(nlk->groups[idx] >> shift), 1798 (u32 __user *)(optval + pos))) { 1799 err = -EFAULT; 1800 break; 1801 } 1802 } 1803 if (put_user(ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)), optlen)) 1804 err = -EFAULT; 1805 netlink_unlock_table(); 1806 return err; 1807 } 1808 case NETLINK_CAP_ACK: 1809 flag = NETLINK_F_CAP_ACK; 1810 break; 1811 case NETLINK_EXT_ACK: 1812 flag = NETLINK_F_EXT_ACK; 1813 break; 1814 case NETLINK_GET_STRICT_CHK: 1815 flag = NETLINK_F_STRICT_CHK; 1816 break; 1817 default: 1818 return -ENOPROTOOPT; 1819 } 1820 1821 if (len < sizeof(int)) 1822 return -EINVAL; 1823 1824 len = sizeof(int); 1825 val = nlk->flags & flag ? 
1 : 0; 1826 1827 if (put_user(len, optlen) || 1828 copy_to_user(optval, &val, len)) 1829 return -EFAULT; 1830 1831 return 0; 1832 } 1833 1834 static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) 1835 { 1836 struct nl_pktinfo info; 1837 1838 info.group = NETLINK_CB(skb).dst_group; 1839 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info); 1840 } 1841 1842 static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg, 1843 struct sk_buff *skb) 1844 { 1845 if (!NETLINK_CB(skb).nsid_is_set) 1846 return; 1847 1848 put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int), 1849 &NETLINK_CB(skb).nsid); 1850 } 1851 1852 static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 1853 { 1854 struct sock *sk = sock->sk; 1855 struct netlink_sock *nlk = nlk_sk(sk); 1856 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name); 1857 u32 dst_portid; 1858 u32 dst_group; 1859 struct sk_buff *skb; 1860 int err; 1861 struct scm_cookie scm; 1862 u32 netlink_skb_flags = 0; 1863 1864 if (msg->msg_flags & MSG_OOB) 1865 return -EOPNOTSUPP; 1866 1867 if (len == 0) { 1868 pr_warn_once("Zero length message leads to an empty skb\n"); 1869 return -ENODATA; 1870 } 1871 1872 err = scm_send(sock, msg, &scm, true); 1873 if (err < 0) 1874 return err; 1875 1876 if (msg->msg_namelen) { 1877 err = -EINVAL; 1878 if (msg->msg_namelen < sizeof(struct sockaddr_nl)) 1879 goto out; 1880 if (addr->nl_family != AF_NETLINK) 1881 goto out; 1882 dst_portid = addr->nl_pid; 1883 dst_group = ffs(addr->nl_groups); 1884 err = -EPERM; 1885 if ((dst_group || dst_portid) && 1886 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) 1887 goto out; 1888 netlink_skb_flags |= NETLINK_SKB_DST; 1889 } else { 1890 /* Paired with WRITE_ONCE() in netlink_connect() */ 1891 dst_portid = READ_ONCE(nlk->dst_portid); 1892 dst_group = READ_ONCE(nlk->dst_group); 1893 } 1894 1895 /* Paired with WRITE_ONCE() in netlink_insert() */ 1896 if (!READ_ONCE(nlk->bound)) { 1897 err = netlink_autobind(sock); 1898 if (err) 1899 goto out; 1900 } else { 1901 /* Ensure nlk is hashed and visible. 
*/ 1902 smp_rmb(); 1903 } 1904 1905 err = -EMSGSIZE; 1906 if (len > sk->sk_sndbuf - 32) 1907 goto out; 1908 err = -ENOBUFS; 1909 skb = netlink_alloc_large_skb(len, dst_group); 1910 if (skb == NULL) 1911 goto out; 1912 1913 NETLINK_CB(skb).portid = nlk->portid; 1914 NETLINK_CB(skb).dst_group = dst_group; 1915 NETLINK_CB(skb).creds = scm.creds; 1916 NETLINK_CB(skb).flags = netlink_skb_flags; 1917 1918 err = -EFAULT; 1919 if (memcpy_from_msg(skb_put(skb, len), msg, len)) { 1920 kfree_skb(skb); 1921 goto out; 1922 } 1923 1924 err = security_netlink_send(sk, skb); 1925 if (err) { 1926 kfree_skb(skb); 1927 goto out; 1928 } 1929 1930 if (dst_group) { 1931 refcount_inc(&skb->users); 1932 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL); 1933 } 1934 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags & MSG_DONTWAIT); 1935 1936 out: 1937 scm_destroy(&scm); 1938 return err; 1939 } 1940 1941 static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, 1942 int flags) 1943 { 1944 struct scm_cookie scm; 1945 struct sock *sk = sock->sk; 1946 struct netlink_sock *nlk = nlk_sk(sk); 1947 size_t copied, max_recvmsg_len; 1948 struct sk_buff *skb, *data_skb; 1949 int err, ret; 1950 1951 if (flags & MSG_OOB) 1952 return -EOPNOTSUPP; 1953 1954 copied = 0; 1955 1956 skb = skb_recv_datagram(sk, flags, &err); 1957 if (skb == NULL) 1958 goto out; 1959 1960 data_skb = skb; 1961 1962 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES 1963 if (unlikely(skb_shinfo(skb)->frag_list)) { 1964 /* 1965 * If this skb has a frag_list, then here that means that we 1966 * will have to use the frag_list skb's data for compat tasks 1967 * and the regular skb's data for normal (non-compat) tasks. 1968 * 1969 * If we need to send the compat skb, assign it to the 1970 * 'data_skb' variable so that it will be used below for data 1971 * copying. We keep 'skb' for everything else, including 1972 * freeing both later. 1973 */ 1974 if (flags & MSG_CMSG_COMPAT) 1975 data_skb = skb_shinfo(skb)->frag_list; 1976 } 1977 #endif 1978 1979 /* Record the max length of recvmsg() calls for future allocations */ 1980 max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len); 1981 max_recvmsg_len = min_t(size_t, max_recvmsg_len, 1982 SKB_WITH_OVERHEAD(32768)); 1983 WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len); 1984 1985 copied = data_skb->len; 1986 if (len < copied) { 1987 msg->msg_flags |= MSG_TRUNC; 1988 copied = len; 1989 } 1990 1991 err = skb_copy_datagram_msg(data_skb, 0, msg, copied); 1992 1993 if (msg->msg_name) { 1994 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name); 1995 addr->nl_family = AF_NETLINK; 1996 addr->nl_pad = 0; 1997 addr->nl_pid = NETLINK_CB(skb).portid; 1998 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group); 1999 msg->msg_namelen = sizeof(*addr); 2000 } 2001 2002 if (nlk->flags & NETLINK_F_RECV_PKTINFO) 2003 netlink_cmsg_recv_pktinfo(msg, skb); 2004 if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID) 2005 netlink_cmsg_listen_all_nsid(sk, msg, skb); 2006 2007 memset(&scm, 0, sizeof(scm)); 2008 scm.creds = *NETLINK_CREDS(skb); 2009 if (flags & MSG_TRUNC) 2010 copied = data_skb->len; 2011 2012 skb_free_datagram(sk, skb); 2013 2014 if (READ_ONCE(nlk->cb_running) && 2015 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { 2016 ret = netlink_dump(sk); 2017 if (ret) { 2018 sk->sk_err = -ret; 2019 sk_error_report(sk); 2020 } 2021 } 2022 2023 scm_recv(sock, msg, &scm, flags); 2024 out: 2025 netlink_rcv_wake(sk); 2026 return err ? 
: copied; 2027 } 2028 2029 static void netlink_data_ready(struct sock *sk) 2030 { 2031 BUG(); 2032 } 2033 2034 /* 2035 * We export these functions to other modules. They provide a 2036 * complete set of kernel non-blocking support for message 2037 * queueing. 2038 */ 2039 2040 struct sock * 2041 __netlink_kernel_create(struct net *net, int unit, struct module *module, 2042 struct netlink_kernel_cfg *cfg) 2043 { 2044 struct socket *sock; 2045 struct sock *sk; 2046 struct netlink_sock *nlk; 2047 struct listeners *listeners = NULL; 2048 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL; 2049 unsigned int groups; 2050 2051 BUG_ON(!nl_table); 2052 2053 if (unit < 0 || unit >= MAX_LINKS) 2054 return NULL; 2055 2056 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock)) 2057 return NULL; 2058 2059 if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0) 2060 goto out_sock_release_nosk; 2061 2062 sk = sock->sk; 2063 2064 if (!cfg || cfg->groups < 32) 2065 groups = 32; 2066 else 2067 groups = cfg->groups; 2068 2069 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL); 2070 if (!listeners) 2071 goto out_sock_release; 2072 2073 sk->sk_data_ready = netlink_data_ready; 2074 if (cfg && cfg->input) 2075 nlk_sk(sk)->netlink_rcv = cfg->input; 2076 2077 if (netlink_insert(sk, 0)) 2078 goto out_sock_release; 2079 2080 nlk = nlk_sk(sk); 2081 nlk->flags |= NETLINK_F_KERNEL_SOCKET; 2082 2083 netlink_table_grab(); 2084 if (!nl_table[unit].registered) { 2085 nl_table[unit].groups = groups; 2086 rcu_assign_pointer(nl_table[unit].listeners, listeners); 2087 nl_table[unit].cb_mutex = cb_mutex; 2088 nl_table[unit].module = module; 2089 if (cfg) { 2090 nl_table[unit].bind = cfg->bind; 2091 nl_table[unit].unbind = cfg->unbind; 2092 nl_table[unit].flags = cfg->flags; 2093 } 2094 nl_table[unit].registered = 1; 2095 } else { 2096 kfree(listeners); 2097 nl_table[unit].registered++; 2098 } 2099 netlink_table_ungrab(); 2100 return sk; 2101 2102 out_sock_release: 2103 kfree(listeners); 2104 netlink_kernel_release(sk); 2105 return NULL; 2106 2107 out_sock_release_nosk: 2108 sock_release(sock); 2109 return NULL; 2110 } 2111 EXPORT_SYMBOL(__netlink_kernel_create); 2112 2113 void 2114 netlink_kernel_release(struct sock *sk) 2115 { 2116 if (sk == NULL || sk->sk_socket == NULL) 2117 return; 2118 2119 sock_release(sk->sk_socket); 2120 } 2121 EXPORT_SYMBOL(netlink_kernel_release); 2122 2123 int __netlink_change_ngroups(struct sock *sk, unsigned int groups) 2124 { 2125 struct listeners *new, *old; 2126 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; 2127 2128 if (groups < 32) 2129 groups = 32; 2130 2131 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { 2132 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); 2133 if (!new) 2134 return -ENOMEM; 2135 old = nl_deref_protected(tbl->listeners); 2136 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); 2137 rcu_assign_pointer(tbl->listeners, new); 2138 2139 kfree_rcu(old, rcu); 2140 } 2141 tbl->groups = groups; 2142 2143 return 0; 2144 } 2145 2146 /** 2147 * netlink_change_ngroups - change number of multicast groups 2148 * 2149 * This changes the number of multicast groups that are available 2150 * on a certain netlink family. Note that it is not possible to 2151 * change the number of groups to below 32. Also note that it does 2152 * not implicitly call netlink_clear_multicast_users() when the 2153 * number of groups is reduced. 2154 * 2155 * @sk: The kernel netlink socket, as returned by netlink_kernel_create(). 2156 * @groups: The new number of groups. 
2157 */ 2158 int netlink_change_ngroups(struct sock *sk, unsigned int groups) 2159 { 2160 int err; 2161 2162 netlink_table_grab(); 2163 err = __netlink_change_ngroups(sk, groups); 2164 netlink_table_ungrab(); 2165 2166 return err; 2167 } 2168 2169 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group) 2170 { 2171 struct sock *sk; 2172 struct netlink_table *tbl = &nl_table[ksk->sk_protocol]; 2173 2174 sk_for_each_bound(sk, &tbl->mc_list) 2175 netlink_update_socket_mc(nlk_sk(sk), group, 0); 2176 } 2177 2178 struct nlmsghdr * 2179 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags) 2180 { 2181 struct nlmsghdr *nlh; 2182 int size = nlmsg_msg_size(len); 2183 2184 nlh = skb_put(skb, NLMSG_ALIGN(size)); 2185 nlh->nlmsg_type = type; 2186 nlh->nlmsg_len = size; 2187 nlh->nlmsg_flags = flags; 2188 nlh->nlmsg_pid = portid; 2189 nlh->nlmsg_seq = seq; 2190 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0) 2191 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size); 2192 return nlh; 2193 } 2194 EXPORT_SYMBOL(__nlmsg_put); 2195 2196 /* 2197 * It looks a bit ugly. 2198 * It would be better to create a kernel thread. 2199 */ 2200 2201 static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb, 2202 struct netlink_callback *cb, 2203 struct netlink_ext_ack *extack) 2204 { 2205 struct nlmsghdr *nlh; 2206 2207 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(nlk->dump_done_errno), 2208 NLM_F_MULTI | cb->answer_flags); 2209 if (WARN_ON(!nlh)) 2210 return -ENOBUFS; 2211 2212 nl_dump_check_consistent(cb, nlh); 2213 memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno)); 2214 2215 if (extack->_msg && nlk->flags & NETLINK_F_EXT_ACK) { 2216 nlh->nlmsg_flags |= NLM_F_ACK_TLVS; 2217 if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg)) 2218 nlmsg_end(skb, nlh); 2219 } 2220 2221 return 0; 2222 } 2223 2224 static int netlink_dump(struct sock *sk) 2225 { 2226 struct netlink_sock *nlk = nlk_sk(sk); 2227 struct netlink_ext_ack extack = {}; 2228 struct netlink_callback *cb; 2229 struct sk_buff *skb = NULL; 2230 size_t max_recvmsg_len; 2231 struct module *module; 2232 int err = -ENOBUFS; 2233 int alloc_min_size; 2234 int alloc_size; 2235 2236 mutex_lock(nlk->cb_mutex); 2237 if (!nlk->cb_running) { 2238 err = -EINVAL; 2239 goto errout_skb; 2240 } 2241 2242 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 2243 goto errout_skb; 2244 2245 /* NLMSG_GOODSIZE is small to avoid high order allocations being 2246 * required, but it makes sense to _attempt_ a 16 KiB allocation 2247 * to reduce the number of system calls on dump operations, if the user 2248 * ever provided a big enough buffer. 2249 */ 2250 cb = &nlk->cb; 2251 alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE); 2252 2253 max_recvmsg_len = READ_ONCE(nlk->max_recvmsg_len); 2254 if (alloc_min_size < max_recvmsg_len) { 2255 alloc_size = max_recvmsg_len; 2256 skb = alloc_skb(alloc_size, 2257 (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) | 2258 __GFP_NOWARN | __GFP_NORETRY); 2259 } 2260 if (!skb) { 2261 alloc_size = alloc_min_size; 2262 skb = alloc_skb(alloc_size, GFP_KERNEL); 2263 } 2264 if (!skb) 2265 goto errout_skb; 2266 2267 /* Trim skb to the allocated size. The user is expected to provide a 2268 * buffer as large as max(min_dump_alloc, 16KiB (max_recvmsg_len as capped in 2269 * netlink_recvmsg())). The dump will pack as many smaller messages as 2270 * could fit within the allocated skb.
skb is typically allocated 2271 * with more space than required (it can be close to 2x the requested 2272 * size, since the allocation is rounded up to the next power of two). 2273 * Allowing the dump to use that excess space would make it difficult for a 2274 * user to size a reasonable static buffer based on the expected largest 2275 * dump of a single netdev; the outcome would be a MSG_TRUNC error. 2276 */ 2277 skb_reserve(skb, skb_tailroom(skb) - alloc_size); 2278 2279 /* Make sure malicious BPF programs cannot read uninitialized memory 2280 * from skb->head to skb->data. 2281 */ 2282 skb_reset_network_header(skb); 2283 skb_reset_mac_header(skb); 2284 2285 netlink_skb_set_owner_r(skb, sk); 2286 2287 if (nlk->dump_done_errno > 0) { 2288 cb->extack = &extack; 2289 nlk->dump_done_errno = cb->dump(skb, cb); 2290 cb->extack = NULL; 2291 } 2292 2293 if (nlk->dump_done_errno > 0 || 2294 skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) { 2295 mutex_unlock(nlk->cb_mutex); 2296 2297 if (sk_filter(sk, skb)) 2298 kfree_skb(skb); 2299 else 2300 __netlink_sendskb(sk, skb); 2301 return 0; 2302 } 2303 2304 if (netlink_dump_done(nlk, skb, cb, &extack)) 2305 goto errout_skb; 2306 2307 #ifdef CONFIG_COMPAT_NETLINK_MESSAGES 2308 /* frag_list skb's data is used for compat tasks 2309 * and the regular skb's data for normal (non-compat) tasks. 2310 * See netlink_recvmsg(). 2311 */ 2312 if (unlikely(skb_shinfo(skb)->frag_list)) { 2313 if (netlink_dump_done(nlk, skb_shinfo(skb)->frag_list, cb, &extack)) 2314 goto errout_skb; 2315 } 2316 #endif 2317 2318 if (sk_filter(sk, skb)) 2319 kfree_skb(skb); 2320 else 2321 __netlink_sendskb(sk, skb); 2322 2323 if (cb->done) 2324 cb->done(cb); 2325 2326 WRITE_ONCE(nlk->cb_running, false); 2327 module = cb->module; 2328 skb = cb->skb; 2329 mutex_unlock(nlk->cb_mutex); 2330 module_put(module); 2331 consume_skb(skb); 2332 return 0; 2333 2334 errout_skb: 2335 mutex_unlock(nlk->cb_mutex); 2336 kfree_skb(skb); 2337 return err; 2338 } 2339 2340 int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, 2341 const struct nlmsghdr *nlh, 2342 struct netlink_dump_control *control) 2343 { 2344 struct netlink_sock *nlk, *nlk2; 2345 struct netlink_callback *cb; 2346 struct sock *sk; 2347 int ret; 2348 2349 refcount_inc(&skb->users); 2350 2351 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid); 2352 if (sk == NULL) { 2353 ret = -ECONNREFUSED; 2354 goto error_free; 2355 } 2356 2357 nlk = nlk_sk(sk); 2358 mutex_lock(nlk->cb_mutex); 2359 /* A dump is in progress...
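 * Only one dump may be outstanding per socket: cb_running stays set from
 * here until netlink_dump() has queued the final NLMSG_DONE, so a second
 * __netlink_dump_start() on the same socket fails with -EBUSY.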
*/ 2360 if (nlk->cb_running) { 2361 ret = -EBUSY; 2362 goto error_unlock; 2363 } 2364 /* add reference of module which cb->dump belongs to */ 2365 if (!try_module_get(control->module)) { 2366 ret = -EPROTONOSUPPORT; 2367 goto error_unlock; 2368 } 2369 2370 cb = &nlk->cb; 2371 memset(cb, 0, sizeof(*cb)); 2372 cb->dump = control->dump; 2373 cb->done = control->done; 2374 cb->nlh = nlh; 2375 cb->data = control->data; 2376 cb->module = control->module; 2377 cb->min_dump_alloc = control->min_dump_alloc; 2378 cb->skb = skb; 2379 2380 nlk2 = nlk_sk(NETLINK_CB(skb).sk); 2381 cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK); 2382 2383 if (control->start) { 2384 cb->extack = control->extack; 2385 ret = control->start(cb); 2386 cb->extack = NULL; 2387 if (ret) 2388 goto error_put; 2389 } 2390 2391 WRITE_ONCE(nlk->cb_running, true); 2392 nlk->dump_done_errno = INT_MAX; 2393 2394 mutex_unlock(nlk->cb_mutex); 2395 2396 ret = netlink_dump(sk); 2397 2398 sock_put(sk); 2399 2400 if (ret) 2401 return ret; 2402 2403 /* We successfully started a dump, by returning -EINTR we 2404 * signal not to send ACK even if it was requested. 2405 */ 2406 return -EINTR; 2407 2408 error_put: 2409 module_put(control->module); 2410 error_unlock: 2411 sock_put(sk); 2412 mutex_unlock(nlk->cb_mutex); 2413 error_free: 2414 kfree_skb(skb); 2415 return ret; 2416 } 2417 EXPORT_SYMBOL(__netlink_dump_start); 2418 2419 static size_t 2420 netlink_ack_tlv_len(struct netlink_sock *nlk, int err, 2421 const struct netlink_ext_ack *extack) 2422 { 2423 size_t tlvlen; 2424 2425 if (!extack || !(nlk->flags & NETLINK_F_EXT_ACK)) 2426 return 0; 2427 2428 tlvlen = 0; 2429 if (extack->_msg) 2430 tlvlen += nla_total_size(strlen(extack->_msg) + 1); 2431 if (extack->cookie_len) 2432 tlvlen += nla_total_size(extack->cookie_len); 2433 2434 /* Following attributes are only reported as error (not warning) */ 2435 if (!err) 2436 return tlvlen; 2437 2438 if (extack->bad_attr) 2439 tlvlen += nla_total_size(sizeof(u32)); 2440 if (extack->policy) 2441 tlvlen += netlink_policy_dump_attr_size_estimate(extack->policy); 2442 if (extack->miss_type) 2443 tlvlen += nla_total_size(sizeof(u32)); 2444 if (extack->miss_nest) 2445 tlvlen += nla_total_size(sizeof(u32)); 2446 2447 return tlvlen; 2448 } 2449 2450 static void 2451 netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb, 2452 struct nlmsghdr *nlh, int err, 2453 const struct netlink_ext_ack *extack) 2454 { 2455 if (extack->_msg) 2456 WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg)); 2457 if (extack->cookie_len) 2458 WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE, 2459 extack->cookie_len, extack->cookie)); 2460 2461 if (!err) 2462 return; 2463 2464 if (extack->bad_attr && 2465 !WARN_ON((u8 *)extack->bad_attr < in_skb->data || 2466 (u8 *)extack->bad_attr >= in_skb->data + in_skb->len)) 2467 WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS, 2468 (u8 *)extack->bad_attr - (u8 *)nlh)); 2469 if (extack->policy) 2470 netlink_policy_dump_write_attr(skb, extack->policy, 2471 NLMSGERR_ATTR_POLICY); 2472 if (extack->miss_type) 2473 WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_TYPE, 2474 extack->miss_type)); 2475 if (extack->miss_nest && 2476 !WARN_ON((u8 *)extack->miss_nest < in_skb->data || 2477 (u8 *)extack->miss_nest > in_skb->data + in_skb->len)) 2478 WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_MISS_NEST, 2479 (u8 *)extack->miss_nest - (u8 *)nlh)); 2480 } 2481 2482 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, 2483 const struct netlink_ext_ack *extack) 2484 { 2485 struct sk_buff 
*skb; 2486 struct nlmsghdr *rep; 2487 struct nlmsgerr *errmsg; 2488 size_t payload = sizeof(*errmsg); 2489 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk); 2490 unsigned int flags = 0; 2491 size_t tlvlen; 2492 2493 /* Error messages get the original request appended, unless the user 2494 * requests to cap the error message, and get extra error data if 2495 * requested. 2496 */ 2497 if (err && !(nlk->flags & NETLINK_F_CAP_ACK)) 2498 payload += nlmsg_len(nlh); 2499 else 2500 flags |= NLM_F_CAPPED; 2501 2502 tlvlen = netlink_ack_tlv_len(nlk, err, extack); 2503 if (tlvlen) 2504 flags |= NLM_F_ACK_TLVS; 2505 2506 skb = nlmsg_new(payload + tlvlen, GFP_KERNEL); 2507 if (!skb) 2508 goto err_skb; 2509 2510 rep = nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 2511 NLMSG_ERROR, sizeof(*errmsg), flags); 2512 if (!rep) 2513 goto err_bad_put; 2514 errmsg = nlmsg_data(rep); 2515 errmsg->error = err; 2516 errmsg->msg = *nlh; 2517 2518 if (!(flags & NLM_F_CAPPED)) { 2519 if (!nlmsg_append(skb, nlmsg_len(nlh))) 2520 goto err_bad_put; 2521 2522 memcpy(nlmsg_data(&errmsg->msg), nlmsg_data(nlh), 2523 nlmsg_len(nlh)); 2524 } 2525 2526 if (tlvlen) 2527 netlink_ack_tlv_fill(in_skb, skb, nlh, err, extack); 2528 2529 nlmsg_end(skb, rep); 2530 2531 nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid); 2532 2533 return; 2534 2535 err_bad_put: 2536 nlmsg_free(skb); 2537 err_skb: 2538 NETLINK_CB(in_skb).sk->sk_err = ENOBUFS; 2539 sk_error_report(NETLINK_CB(in_skb).sk); 2540 } 2541 EXPORT_SYMBOL(netlink_ack); 2542 2543 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, 2544 struct nlmsghdr *, 2545 struct netlink_ext_ack *)) 2546 { 2547 struct netlink_ext_ack extack; 2548 struct nlmsghdr *nlh; 2549 int err; 2550 2551 while (skb->len >= nlmsg_total_size(0)) { 2552 int msglen; 2553 2554 memset(&extack, 0, sizeof(extack)); 2555 nlh = nlmsg_hdr(skb); 2556 err = 0; 2557 2558 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len) 2559 return 0; 2560 2561 /* Only requests are handled by the kernel */ 2562 if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) 2563 goto ack; 2564 2565 /* Skip control messages */ 2566 if (nlh->nlmsg_type < NLMSG_MIN_TYPE) 2567 goto ack; 2568 2569 err = cb(skb, nlh, &extack); 2570 if (err == -EINTR) 2571 goto skip; 2572 2573 ack: 2574 if (nlh->nlmsg_flags & NLM_F_ACK || err) 2575 netlink_ack(skb, nlh, err, &extack); 2576 2577 skip: 2578 msglen = NLMSG_ALIGN(nlh->nlmsg_len); 2579 if (msglen > skb->len) 2580 msglen = skb->len; 2581 skb_pull(skb, msglen); 2582 } 2583 2584 return 0; 2585 } 2586 EXPORT_SYMBOL(netlink_rcv_skb); 2587 2588 /** 2589 * nlmsg_notify - send a notification netlink message 2590 * @sk: netlink socket to use 2591 * @skb: notification message 2592 * @portid: destination netlink portid for reports or 0 2593 * @group: destination multicast group or 0 2594 * @report: 1 to report back, 0 to disable 2595 * @flags: allocation flags 2596 */ 2597 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid, 2598 unsigned int group, int report, gfp_t flags) 2599 { 2600 int err = 0; 2601 2602 if (group) { 2603 int exclude_portid = 0; 2604 2605 if (report) { 2606 refcount_inc(&skb->users); 2607 exclude_portid = portid; 2608 } 2609 2610 /* errors are reported via the destination sk->sk_err, but delivery 2611 * errors are propagated if the NETLINK_BROADCAST_ERROR flag is set */ 2612 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags); 2613 if (err == -ESRCH) 2614 err = 0; 2615 } 2616 2617 if (report) { 2618 int err2; 2619 2620 err2 = nlmsg_unicast(sk, skb,
portid); 2621 if (!err) 2622 err = err2; 2623 } 2624 2625 return err; 2626 } 2627 EXPORT_SYMBOL(nlmsg_notify); 2628 2629 #ifdef CONFIG_PROC_FS 2630 struct nl_seq_iter { 2631 struct seq_net_private p; 2632 struct rhashtable_iter hti; 2633 int link; 2634 }; 2635 2636 static void netlink_walk_start(struct nl_seq_iter *iter) 2637 { 2638 rhashtable_walk_enter(&nl_table[iter->link].hash, &iter->hti); 2639 rhashtable_walk_start(&iter->hti); 2640 } 2641 2642 static void netlink_walk_stop(struct nl_seq_iter *iter) 2643 { 2644 rhashtable_walk_stop(&iter->hti); 2645 rhashtable_walk_exit(&iter->hti); 2646 } 2647 2648 static void *__netlink_seq_next(struct seq_file *seq) 2649 { 2650 struct nl_seq_iter *iter = seq->private; 2651 struct netlink_sock *nlk; 2652 2653 do { 2654 for (;;) { 2655 nlk = rhashtable_walk_next(&iter->hti); 2656 2657 if (IS_ERR(nlk)) { 2658 if (PTR_ERR(nlk) == -EAGAIN) 2659 continue; 2660 2661 return nlk; 2662 } 2663 2664 if (nlk) 2665 break; 2666 2667 netlink_walk_stop(iter); 2668 if (++iter->link >= MAX_LINKS) 2669 return NULL; 2670 2671 netlink_walk_start(iter); 2672 } 2673 } while (sock_net(&nlk->sk) != seq_file_net(seq)); 2674 2675 return nlk; 2676 } 2677 2678 static void *netlink_seq_start(struct seq_file *seq, loff_t *posp) 2679 __acquires(RCU) 2680 { 2681 struct nl_seq_iter *iter = seq->private; 2682 void *obj = SEQ_START_TOKEN; 2683 loff_t pos; 2684 2685 iter->link = 0; 2686 2687 netlink_walk_start(iter); 2688 2689 for (pos = *posp; pos && obj && !IS_ERR(obj); pos--) 2690 obj = __netlink_seq_next(seq); 2691 2692 return obj; 2693 } 2694 2695 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2696 { 2697 ++*pos; 2698 return __netlink_seq_next(seq); 2699 } 2700 2701 static void netlink_native_seq_stop(struct seq_file *seq, void *v) 2702 { 2703 struct nl_seq_iter *iter = seq->private; 2704 2705 if (iter->link >= MAX_LINKS) 2706 return; 2707 2708 netlink_walk_stop(iter); 2709 } 2710 2711 2712 static int netlink_native_seq_show(struct seq_file *seq, void *v) 2713 { 2714 if (v == SEQ_START_TOKEN) { 2715 seq_puts(seq, 2716 "sk Eth Pid Groups " 2717 "Rmem Wmem Dump Locks Drops Inode\n"); 2718 } else { 2719 struct sock *s = v; 2720 struct netlink_sock *nlk = nlk_sk(s); 2721 2722 seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8u %-8lu\n", 2723 s, 2724 s->sk_protocol, 2725 nlk->portid, 2726 nlk->groups ? 
(u32)nlk->groups[0] : 0, 2727 sk_rmem_alloc_get(s), 2728 sk_wmem_alloc_get(s), 2729 READ_ONCE(nlk->cb_running), 2730 refcount_read(&s->sk_refcnt), 2731 atomic_read(&s->sk_drops), 2732 sock_i_ino(s) 2733 ); 2734 2735 } 2736 return 0; 2737 } 2738 2739 #ifdef CONFIG_BPF_SYSCALL 2740 struct bpf_iter__netlink { 2741 __bpf_md_ptr(struct bpf_iter_meta *, meta); 2742 __bpf_md_ptr(struct netlink_sock *, sk); 2743 }; 2744 2745 DEFINE_BPF_ITER_FUNC(netlink, struct bpf_iter_meta *meta, struct netlink_sock *sk) 2746 2747 static int netlink_prog_seq_show(struct bpf_prog *prog, 2748 struct bpf_iter_meta *meta, 2749 void *v) 2750 { 2751 struct bpf_iter__netlink ctx; 2752 2753 meta->seq_num--; /* skip SEQ_START_TOKEN */ 2754 ctx.meta = meta; 2755 ctx.sk = nlk_sk((struct sock *)v); 2756 return bpf_iter_run_prog(prog, &ctx); 2757 } 2758 2759 static int netlink_seq_show(struct seq_file *seq, void *v) 2760 { 2761 struct bpf_iter_meta meta; 2762 struct bpf_prog *prog; 2763 2764 meta.seq = seq; 2765 prog = bpf_iter_get_info(&meta, false); 2766 if (!prog) 2767 return netlink_native_seq_show(seq, v); 2768 2769 if (v != SEQ_START_TOKEN) 2770 return netlink_prog_seq_show(prog, &meta, v); 2771 2772 return 0; 2773 } 2774 2775 static void netlink_seq_stop(struct seq_file *seq, void *v) 2776 { 2777 struct bpf_iter_meta meta; 2778 struct bpf_prog *prog; 2779 2780 if (!v) { 2781 meta.seq = seq; 2782 prog = bpf_iter_get_info(&meta, true); 2783 if (prog) 2784 (void)netlink_prog_seq_show(prog, &meta, v); 2785 } 2786 2787 netlink_native_seq_stop(seq, v); 2788 } 2789 #else 2790 static int netlink_seq_show(struct seq_file *seq, void *v) 2791 { 2792 return netlink_native_seq_show(seq, v); 2793 } 2794 2795 static void netlink_seq_stop(struct seq_file *seq, void *v) 2796 { 2797 netlink_native_seq_stop(seq, v); 2798 } 2799 #endif 2800 2801 static const struct seq_operations netlink_seq_ops = { 2802 .start = netlink_seq_start, 2803 .next = netlink_seq_next, 2804 .stop = netlink_seq_stop, 2805 .show = netlink_seq_show, 2806 }; 2807 #endif 2808 2809 int netlink_register_notifier(struct notifier_block *nb) 2810 { 2811 return blocking_notifier_chain_register(&netlink_chain, nb); 2812 } 2813 EXPORT_SYMBOL(netlink_register_notifier); 2814 2815 int netlink_unregister_notifier(struct notifier_block *nb) 2816 { 2817 return blocking_notifier_chain_unregister(&netlink_chain, nb); 2818 } 2819 EXPORT_SYMBOL(netlink_unregister_notifier); 2820 2821 static const struct proto_ops netlink_ops = { 2822 .family = PF_NETLINK, 2823 .owner = THIS_MODULE, 2824 .release = netlink_release, 2825 .bind = netlink_bind, 2826 .connect = netlink_connect, 2827 .socketpair = sock_no_socketpair, 2828 .accept = sock_no_accept, 2829 .getname = netlink_getname, 2830 .poll = datagram_poll, 2831 .ioctl = netlink_ioctl, 2832 .listen = sock_no_listen, 2833 .shutdown = sock_no_shutdown, 2834 .setsockopt = netlink_setsockopt, 2835 .getsockopt = netlink_getsockopt, 2836 .sendmsg = netlink_sendmsg, 2837 .recvmsg = netlink_recvmsg, 2838 .mmap = sock_no_mmap, 2839 }; 2840 2841 static const struct net_proto_family netlink_family_ops = { 2842 .family = PF_NETLINK, 2843 .create = netlink_create, 2844 .owner = THIS_MODULE, /* for consistency 8) */ 2845 }; 2846 2847 static int __net_init netlink_net_init(struct net *net) 2848 { 2849 #ifdef CONFIG_PROC_FS 2850 if (!proc_create_net("netlink", 0, net->proc_net, &netlink_seq_ops, 2851 sizeof(struct nl_seq_iter))) 2852 return -ENOMEM; 2853 #endif 2854 return 0; 2855 } 2856 2857 static void __net_exit netlink_net_exit(struct net *net) 2858 
{ 2859 #ifdef CONFIG_PROC_FS 2860 remove_proc_entry("netlink", net->proc_net); 2861 #endif 2862 } 2863 2864 static void __init netlink_add_usersock_entry(void) 2865 { 2866 struct listeners *listeners; 2867 int groups = 32; 2868 2869 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL); 2870 if (!listeners) 2871 panic("netlink_add_usersock_entry: Cannot allocate listeners\n"); 2872 2873 netlink_table_grab(); 2874 2875 nl_table[NETLINK_USERSOCK].groups = groups; 2876 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); 2877 nl_table[NETLINK_USERSOCK].module = THIS_MODULE; 2878 nl_table[NETLINK_USERSOCK].registered = 1; 2879 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND; 2880 2881 netlink_table_ungrab(); 2882 } 2883 2884 static struct pernet_operations __net_initdata netlink_net_ops = { 2885 .init = netlink_net_init, 2886 .exit = netlink_net_exit, 2887 }; 2888 2889 static inline u32 netlink_hash(const void *data, u32 len, u32 seed) 2890 { 2891 const struct netlink_sock *nlk = data; 2892 struct netlink_compare_arg arg; 2893 2894 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid); 2895 return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed); 2896 } 2897 2898 static const struct rhashtable_params netlink_rhashtable_params = { 2899 .head_offset = offsetof(struct netlink_sock, node), 2900 .key_len = netlink_compare_arg_len, 2901 .obj_hashfn = netlink_hash, 2902 .obj_cmpfn = netlink_compare, 2903 .automatic_shrinking = true, 2904 }; 2905 2906 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) 2907 BTF_ID_LIST(btf_netlink_sock_id) 2908 BTF_ID(struct, netlink_sock) 2909 2910 static const struct bpf_iter_seq_info netlink_seq_info = { 2911 .seq_ops = &netlink_seq_ops, 2912 .init_seq_private = bpf_iter_init_seq_net, 2913 .fini_seq_private = bpf_iter_fini_seq_net, 2914 .seq_priv_size = sizeof(struct nl_seq_iter), 2915 }; 2916 2917 static struct bpf_iter_reg netlink_reg_info = { 2918 .target = "netlink", 2919 .ctx_arg_info_size = 1, 2920 .ctx_arg_info = { 2921 { offsetof(struct bpf_iter__netlink, sk), 2922 PTR_TO_BTF_ID_OR_NULL }, 2923 }, 2924 .seq_info = &netlink_seq_info, 2925 }; 2926 2927 static int __init bpf_iter_register(void) 2928 { 2929 netlink_reg_info.ctx_arg_info[0].btf_id = *btf_netlink_sock_id; 2930 return bpf_iter_reg_target(&netlink_reg_info); 2931 } 2932 #endif 2933 2934 static int __init netlink_proto_init(void) 2935 { 2936 int i; 2937 int err = proto_register(&netlink_proto, 0); 2938 2939 if (err != 0) 2940 goto out; 2941 2942 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) 2943 err = bpf_iter_register(); 2944 if (err) 2945 goto out; 2946 #endif 2947 2948 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof_field(struct sk_buff, cb)); 2949 2950 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); 2951 if (!nl_table) 2952 goto panic; 2953 2954 for (i = 0; i < MAX_LINKS; i++) { 2955 if (rhashtable_init(&nl_table[i].hash, 2956 &netlink_rhashtable_params) < 0) { 2957 while (--i > 0) 2958 rhashtable_destroy(&nl_table[i].hash); 2959 kfree(nl_table); 2960 goto panic; 2961 } 2962 } 2963 2964 netlink_add_usersock_entry(); 2965 2966 sock_register(&netlink_family_ops); 2967 register_pernet_subsys(&netlink_net_ops); 2968 register_pernet_subsys(&netlink_tap_net_ops); 2969 /* The netlink device handler may be needed early. */ 2970 rtnetlink_init(); 2971 out: 2972 return err; 2973 panic: 2974 panic("netlink_init: Cannot allocate nl_table\n"); 2975 } 2976 2977 core_initcall(netlink_proto_init); 2978
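
/*
 * Usage sketch (illustrative only, not part of this file): a minimal,
 * hypothetical consumer of the kernel-side API exported above, modelled on
 * how subsystems create a kernel netlink socket and feed requests through
 * netlink_rcv_skb().  NETLINK_EXAMPLE and all example_* identifiers are
 * placeholders, not real kernel names; NETLINK_EXAMPLE stands for one of
 * the protocol units below MAX_LINKS.
 *
 *	static struct sock *example_sk;
 *
 *	static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 *				struct netlink_ext_ack *extack)
 *	{
 *		// Handle one NLM_F_REQUEST message.  Returning 0 lets
 *		// netlink_rcv_skb() send an ACK when NLM_F_ACK was set;
 *		// returning a negative errno produces an error ACK via
 *		// netlink_ack(), carrying the extack message when the
 *		// sender enabled extended ACK reporting.
 *		return 0;
 *	}
 *
 *	static void example_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, example_doit);
 *	}
 *
 *	static int __init example_init(void)
 *	{
 *		struct netlink_kernel_cfg cfg = {
 *			.input	= example_input,
 *		};
 *
 *		example_sk = netlink_kernel_create(&init_net, NETLINK_EXAMPLE, &cfg);
 *		return example_sk ? 0 : -ENOMEM;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		netlink_kernel_release(example_sk);
 *	}
 */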